id: int64 (11 to 59.9k)
original: string (lengths 33 to 150k)
modified: string (lengths 37 to 150k)
42,355
def validate_tag_embed_fields(fields: dict) -> None:
    """Raises a ValidationError if any of the given embed fields is invalid."""
    field_validators = {
        'name': (MaxLengthValidator(limit_value=256),),
        'value': (MaxLengthValidator(limit_value=1024),),
        'inline': (is_bool_validator,),
    }

    required_fields = ('name', 'value')

    for field in fields:
        if not isinstance(field, Mapping):
            raise ValidationError("Embed fields must be a mapping.")

        if not all(required_field in field for required_field in required_fields):
            raise ValidationError(
                f"Embed fields must contain the following fields: {', '.join(required_fields)}."
            )

        for field_name, value in field.items():
            if field_name not in field_validators:
                raise ValidationError(f"Unknown embed field field: {field_name!r}.")

            for validator in field_validators[field_name]:
                validator(value)
def validate_tag_embed_fields(fields: Dict[str, Union[str, bool]]) -> None:
    """Raises a ValidationError if any of the given embed fields is invalid."""
    field_validators = {
        'name': (MaxLengthValidator(limit_value=256),),
        'value': (MaxLengthValidator(limit_value=1024),),
        'inline': (is_bool_validator,),
    }

    required_fields = ('name', 'value')

    for field in fields:
        if not isinstance(field, Mapping):
            raise ValidationError("Embed fields must be a mapping.")

        if not all(required_field in field for required_field in required_fields):
            raise ValidationError(
                f"Embed fields must contain the following fields: {', '.join(required_fields)}."
            )

        for field_name, value in field.items():
            if field_name not in field_validators:
                raise ValidationError(f"Unknown embed field field: {field_name!r}.")

            for validator in field_validators[field_name]:
                validator(value)
38,620
def create_deps_count(graph):  # TODO: create unit test
    deps_count = {}
    for test, deps in graph.items():
        try:
            deps_count[test] += 1
        except KeyError:
            deps_count[test] = 0
        for dep in deps:
            try:
                deps_count[dep] += 1
            except KeyError:
                deps_count[dep] = 0
    return deps_count
def create_deps_count(graph):  # TODO: create unit test
    deps_count = {}
    for node, deps in graph.items():
        try:
            deps_count[test] += 1
        except KeyError:
            deps_count[test] = 0
        for dep in deps:
            try:
                deps_count[dep] += 1
            except KeyError:
                deps_count[dep] = 0
    return deps_count
2,207
def top_k_accuracy_score(y_true, y_score, *, k=2, normalize=True, sample_weight=None, labels=None): """Top-k Accuracy classification score. This metric computes the number of times where the correct label is among the top `k` labels predicted (ranked by predicted scores). Note that the multilabel case isn't covered here. Read more in the :ref:`User Guide <top_k_accuracy_score>` Parameters ---------- y_true : array-like of shape (n_samples,) True labels. y_score : array-like of shape (n_samples,) or (n_samples, n_classes) Target scores. These can be either probability estimates or non-thresholded decision values (as returned by :term:`decision_function` on some classifiers). The binary case expects scores with shape (n_samples,) while the multiclass case expects scores with shape (n_samples, n_classes). In the multiclass case, the order of the class scores must correspond to the order of ``labels``, if provided, or else to the numerical or lexicographical order of the labels in ``y_true``. k : int, default=2 Number of most likely outcomes considered to find the correct label. normalize : bool, default=True If `True`, return the fraction of correctly classified samples. Otherwise, return the number of correctly classified samples. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If `None`, all samples are given the same weight. labels : array-like of shape (n_classes,), default=None Multiclass only. List of labels that index the classes in ``y_score``. If ``None``, the numerical or lexicographical order of the labels in ``y_true`` is used. Returns ------- score : float The top-k accuracy score. The best performance is 1 with `normalize == True` and the number of samples with `normalize == False`. See also -------- accuracy_score Notes ----- In cases where two or more labels are assigned equal predicted scores, the labels with the highest indices will be chosen first. This might impact the result if the correct label falls after the threshold because of that. Examples -------- >>> import numpy as np >>> from sklearn.metrics import top_k_accuracy_score >>> y_true = np.array([0, 1, 2, 2]) >>> y_score = np.array([[0.5, 0.2, 0.2], # 0 is in top 2 ... [0.3, 0.4, 0.2], # 1 is in top 2 ... [0.2, 0.4, 0.3], # 2 is in top 2 ... [0.7, 0.2, 0.1]]) # 2 isn't in top 2 >>> top_k_accuracy_score(y_true, y_score, k=2) 0.75 >>> # Not normalizing gives the number of "correctly" classified samples >>> top_k_accuracy_score(y_true, y_score, k=2, normalize=False) 3 """ y_true = check_array(y_true, ensure_2d=False, dtype=None) y_true = column_or_1d(y_true) y_type = type_of_target(y_true) if y_type == "binary" and labels and len(labels) > 2: y_type = "multiclass" y_score = check_array(y_score, ensure_2d=False) y_score = column_or_1d(y_score) if y_type == 'binary' else y_score check_consistent_length(y_true, y_score, sample_weight) if y_type not in {'binary', 'multiclass'}: raise ValueError( f"y type must be 'binary' or 'multiclass', got '{y_type}' instead." ) y_score_n_classes = y_score.shape[1] if y_score.ndim == 2 else 2 if labels is None: classes = _unique(y_true) n_classes = len(classes) if n_classes != y_score_n_classes: raise ValueError( f"Number of classes in 'y_true' ({n_classes}) not equal " f"to the number of classes in 'y_score' ({y_score_n_classes})." 
) else: labels = column_or_1d(labels) classes = _unique(labels) n_labels = len(labels) n_classes = len(classes) if n_classes != n_labels: raise ValueError("Parameter 'labels' must be unique.") if not np.array_equal(classes, labels): raise ValueError("Parameter 'labels' must be ordered.") if n_classes != y_score_n_classes: raise ValueError( f"Number of given labels ({n_classes}) not equal to the " f"number of classes in 'y_score' ({y_score_n_classes})." ) if len(np.setdiff1d(y_true, classes)): raise ValueError( "'y_true' contains labels not in parameter 'labels'." ) if k >= n_classes: warnings.warn( f"'k' ({k}) greater than or equal to 'n_classes' ({n_classes}) " "will result in a perfect score and is therefore meaningless.", UndefinedMetricWarning ) y_true_encoded = _encode(y_true, uniques=classes) if y_type == 'binary': if k == 1: threshold = .5 if y_score.min() >= 0 and y_score.max() <= 1 else 0 y_pred = (y_score > threshold).astype(np.int64) hits = y_pred == y_true_encoded else: hits = np.ones_like(y_score, dtype=np.bool_) elif y_type == 'multiclass': sorted_pred = np.argsort(y_score, axis=1, kind='mergesort')[:, ::-1] hits = (y_true_encoded == sorted_pred[:, :k].T).any(axis=0) if normalize: return np.average(hits, weights=sample_weight) elif sample_weight is None: return np.sum(hits) else: return np.dot(hits, sample_weight)
def top_k_accuracy_score(y_true, y_score, *, k=2, normalize=True, sample_weight=None, labels=None): """Top-k Accuracy classification score. This metric computes the number of times where the correct label is among the top `k` labels predicted (ranked by predicted scores). Note that the multilabel case isn't covered here. Read more in the :ref:`User Guide <top_k_accuracy_score>` Parameters ---------- y_true : array-like of shape (n_samples,) True labels. y_score : array-like of shape (n_samples,) or (n_samples, n_classes) Target scores. These can be either probability estimates or non-thresholded decision values (as returned by :term:`decision_function` on some classifiers). The binary case expects scores with shape (n_samples,) while the multiclass case expects scores with shape (n_samples, n_classes). In the multiclass case, the order of the class scores must correspond to the order of ``labels``, if provided, or else to the numerical or lexicographical order of the labels in ``y_true``. k : int, default=2 Number of most likely outcomes considered to find the correct label. normalize : bool, default=True If `True`, return the fraction of correctly classified samples. Otherwise, return the number of correctly classified samples. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If `None`, all samples are given the same weight. labels : array-like of shape (n_classes,), default=None Multiclass only. List of labels that index the classes in ``y_score``. If ``None``, the numerical or lexicographical order of the labels in ``y_true`` is used. Returns ------- score : float The top-k accuracy score. The best performance is 1 with `normalize == True` and the number of samples with `normalize == False`. See also -------- accuracy_score Notes ----- In cases where two or more labels are assigned equal predicted scores, the labels with the highest indices will be chosen first. This might impact the result if the correct label falls after the threshold because of that. Examples -------- >>> import numpy as np >>> from sklearn.metrics import top_k_accuracy_score >>> y_true = np.array([0, 1, 2, 2]) >>> y_score = np.array([[0.5, 0.2, 0.2], # 0 is in top 2 ... [0.3, 0.4, 0.2], # 1 is in top 2 ... [0.2, 0.4, 0.3], # 2 is in top 2 ... [0.7, 0.2, 0.1]]) # 2 isn't in top 2 >>> top_k_accuracy_score(y_true, y_score, k=2) 0.75 >>> # Not normalizing gives the number of "correctly" classified samples >>> top_k_accuracy_score(y_true, y_score, k=2, normalize=False) 3 """ y_true = check_array(y_true, ensure_2d=False, dtype=None) y_true = column_or_1d(y_true) y_type = type_of_target(y_true) if y_type == "binary" and labels is not None and len(labels) > 2: y_type = "multiclass" y_score = check_array(y_score, ensure_2d=False) y_score = column_or_1d(y_score) if y_type == 'binary' else y_score check_consistent_length(y_true, y_score, sample_weight) if y_type not in {'binary', 'multiclass'}: raise ValueError( f"y type must be 'binary' or 'multiclass', got '{y_type}' instead." ) y_score_n_classes = y_score.shape[1] if y_score.ndim == 2 else 2 if labels is None: classes = _unique(y_true) n_classes = len(classes) if n_classes != y_score_n_classes: raise ValueError( f"Number of classes in 'y_true' ({n_classes}) not equal " f"to the number of classes in 'y_score' ({y_score_n_classes})." 
) else: labels = column_or_1d(labels) classes = _unique(labels) n_labels = len(labels) n_classes = len(classes) if n_classes != n_labels: raise ValueError("Parameter 'labels' must be unique.") if not np.array_equal(classes, labels): raise ValueError("Parameter 'labels' must be ordered.") if n_classes != y_score_n_classes: raise ValueError( f"Number of given labels ({n_classes}) not equal to the " f"number of classes in 'y_score' ({y_score_n_classes})." ) if len(np.setdiff1d(y_true, classes)): raise ValueError( "'y_true' contains labels not in parameter 'labels'." ) if k >= n_classes: warnings.warn( f"'k' ({k}) greater than or equal to 'n_classes' ({n_classes}) " "will result in a perfect score and is therefore meaningless.", UndefinedMetricWarning ) y_true_encoded = _encode(y_true, uniques=classes) if y_type == 'binary': if k == 1: threshold = .5 if y_score.min() >= 0 and y_score.max() <= 1 else 0 y_pred = (y_score > threshold).astype(np.int64) hits = y_pred == y_true_encoded else: hits = np.ones_like(y_score, dtype=np.bool_) elif y_type == 'multiclass': sorted_pred = np.argsort(y_score, axis=1, kind='mergesort')[:, ::-1] hits = (y_true_encoded == sorted_pred[:, :k].T).any(axis=0) if normalize: return np.average(hits, weights=sample_weight) elif sample_weight is None: return np.sum(hits) else: return np.dot(hits, sample_weight)
15,828
def get_options_value(config_entry, key, default):
    """Get an options value and fall back to a default."""
    if config_entry.options:
        return config_entry.options.get(key, default)
    return default
def get_options_value(config_entry, key, default):
    """Get an options value or fall back to a default."""
    if config_entry.options:
        return config_entry.options.get(key, default)
    return default
7,905
def _calculate_cexs_nuclide(this, types, temperature=294., sab_name=None, cross_sections=None): """Calculates continuous-energy cross sections of a requested type. Parameters ---------- this : openmc.Nuclide Nuclide object to source data from types : Iterable of str or Integral The type of cross sections to calculate; values can either be those in openmc.PLOT_TYPES or keys from openmc.data.REACTION_NUMBER which correspond to a reaction description e.g '(n,2n)' or integers which correspond to reaction channel (MT) numbers. temperature : float, optional Temperature in Kelvin to plot. If not specified, a default temperature of 294K will be plotted. Note that the nearest temperature in the library for each nuclide will be used as opposed to using any interpolation. sab_name : str, optional Name of S(a,b) library to apply to MT=2 data when applicable. cross_sections : str, optional Location of cross_sections.xml file. Default is None. Returns ------- energy_grid : numpy.ndarray Energies at which cross sections are calculated, in units of eV data : Iterable of Callable Requested cross section functions """ # Load the library library = openmc.data.DataLibrary.from_xml(cross_sections) # Convert temperature to format needed for access in the library strT = "{}K".format(int(round(temperature))) T = temperature # Now we can create the data sets to be plotted energy_grid = [] xs = [] lib = library.get_by_material(this) if lib is not None: nuc = openmc.data.IncidentNeutron.from_hdf5(lib['path']) # Obtain the nearest temperature if strT in nuc.temperatures: nucT = strT else: delta_T = np.array(nuc.kTs) - T * openmc.data.K_BOLTZMANN closest_index = np.argmin(np.abs(delta_T)) nucT = nuc.temperatures[closest_index] # Prep S(a,b) data if needed if sab_name: sab = openmc.data.ThermalScattering.from_hdf5(sab_name) # Obtain the nearest temperature if strT in sab.temperatures: sabT = strT else: delta_T = np.array(sab.kTs) - T * openmc.data.K_BOLTZMANN closest_index = np.argmin(np.abs(delta_T)) sabT = sab.temperatures[closest_index] # Create an energy grid composed the S(a,b) and the nuclide's grid grid = nuc.energy[nucT] sab_Emax = 0. 
sab_funcs = [] if sab.elastic is not None: elastic = sab.elastic.xs[sabT] if isinstance(elastic, openmc.data.CoherentElastic): grid = np.union1d(grid, elastic.bragg_edges) if elastic.bragg_edges[-1] > sab_Emax: sab_Emax = elastic.bragg_edges[-1] elif isinstance(elastic, openmc.data.Tabulated1D): grid = np.union1d(grid, elastic.x) if elastic.x[-1] > sab_Emax: sab_Emax = elastic.x[-1] sab_funcs.append(elastic) if sab.inelastic is not None: inelastic = sab.inelastic.xs[sabT] grid = np.union1d(grid, inelastic.x) if inelastic.x[-1] > sab_Emax: sab_Emax = inelastic.x[-1] sab_funcs.append(inelastic) energy_grid = grid else: energy_grid = nuc.energy[nucT] # Parse the types mts = [] ops = [] yields = [] for line in types: if line in PLOT_TYPES: tmp_mts = [mtj for mti in PLOT_TYPES_MT[line] for mtj in nuc.get_reaction_components(mti)] mts.append(tmp_mts) if line.startswith('nu'): yields.append(True) else: yields.append(False) if XI_MT in tmp_mts: ops.append((np.add,) * (len(tmp_mts) - 2) + (np.multiply,)) else: ops.append((np.add,) * (len(tmp_mts) - 1)) elif line in openmc.data.REACTION_NUMBER.keys(): mt_number = openmc.data.REACTION_NUMBER[line] cv.check_type('MT in types', mt_number, Integral) cv.check_greater_than('MT in types', mt_number, 0) tmp_mts = nuc.get_reaction_components(mt_number) mts.append(tmp_mts) ops.append((np.add,) * (len(tmp_mts) - 1)) yields.append(False) elif isinstance(line, int): # Not a built-in type, we have to parse it ourselves cv.check_type('MT in types', line, Integral) cv.check_greater_than('MT in types', line, 0) tmp_mts = nuc.get_reaction_components(line) mts.append(tmp_mts) ops.append((np.add,) * (len(tmp_mts) - 1)) yields.append(False) else: raise TypeError("Invalid type", line) for i, mt_set in enumerate(mts): # Get the reaction xs data from the nuclide funcs = [] op = ops[i] for mt in mt_set: if mt == 2: if sab_name: # Then we need to do a piece-wise function of # The S(a,b) and non-thermal data sab_sum = openmc.data.Sum(sab_funcs) pw_funcs = openmc.data.Regions1D( [sab_sum, nuc[mt].xs[nucT]], [sab_Emax]) funcs.append(pw_funcs) else: funcs.append(nuc[mt].xs[nucT]) elif mt in nuc: if yields[i]: # Get the total yield first if available. This will be # used primarily for fission. for prod in chain(nuc[mt].products, nuc[mt].derived_products): if prod.particle == 'neutron' and \ prod.emission_mode == 'total': func = openmc.data.Combination( [nuc[mt].xs[nucT], prod.yield_], [np.multiply]) funcs.append(func) break else: # Total doesn't exist so we have to create from # prompt and delayed. This is used for scatter # multiplication. func = None for prod in chain(nuc[mt].products, nuc[mt].derived_products): if prod.particle == 'neutron' and \ prod.emission_mode != 'total': if func: func = openmc.data.Combination( [prod.yield_, func], [np.add]) else: func = prod.yield_ if func: funcs.append(openmc.data.Combination( [func, nuc[mt].xs[nucT]], [np.multiply])) else: # If func is still None, then there were no # products. In that case, assume the yield is # one as its not provided for some summed # reactions like MT=4 funcs.append(nuc[mt].xs[nucT]) else: funcs.append(nuc[mt].xs[nucT]) elif mt == UNITY_MT: funcs.append(lambda x: 1.) elif mt == XI_MT: awr = nuc.atomic_weight_ratio alpha = ((awr - 1.) / (awr + 1.))**2 xi = 1. + alpha * np.log(alpha) / (1. - alpha) funcs.append(lambda x: xi) else: funcs.append(lambda x: 0.) funcs = funcs if funcs else [lambda x: 0.] xs.append(openmc.data.Combination(funcs, op)) else: raise ValueError(this + " not in library") return energy_grid, xs
def _calculate_cexs_nuclide(this, types, temperature=294., sab_name=None, cross_sections=None): """Calculates continuous-energy cross sections of a requested type. Parameters ---------- this : openmc.Nuclide Nuclide object to source data from types : Iterable of str or Integral The type of cross sections to calculate; values can either be those in openmc.PLOT_TYPES or keys from openmc.data.REACTION_MT which correspond to a reaction description e.g '(n,2n)' or integers which correspond to reaction channel (MT) numbers. temperature : float, optional Temperature in Kelvin to plot. If not specified, a default temperature of 294K will be plotted. Note that the nearest temperature in the library for each nuclide will be used as opposed to using any interpolation. sab_name : str, optional Name of S(a,b) library to apply to MT=2 data when applicable. cross_sections : str, optional Location of cross_sections.xml file. Default is None. Returns ------- energy_grid : numpy.ndarray Energies at which cross sections are calculated, in units of eV data : Iterable of Callable Requested cross section functions """ # Load the library library = openmc.data.DataLibrary.from_xml(cross_sections) # Convert temperature to format needed for access in the library strT = "{}K".format(int(round(temperature))) T = temperature # Now we can create the data sets to be plotted energy_grid = [] xs = [] lib = library.get_by_material(this) if lib is not None: nuc = openmc.data.IncidentNeutron.from_hdf5(lib['path']) # Obtain the nearest temperature if strT in nuc.temperatures: nucT = strT else: delta_T = np.array(nuc.kTs) - T * openmc.data.K_BOLTZMANN closest_index = np.argmin(np.abs(delta_T)) nucT = nuc.temperatures[closest_index] # Prep S(a,b) data if needed if sab_name: sab = openmc.data.ThermalScattering.from_hdf5(sab_name) # Obtain the nearest temperature if strT in sab.temperatures: sabT = strT else: delta_T = np.array(sab.kTs) - T * openmc.data.K_BOLTZMANN closest_index = np.argmin(np.abs(delta_T)) sabT = sab.temperatures[closest_index] # Create an energy grid composed the S(a,b) and the nuclide's grid grid = nuc.energy[nucT] sab_Emax = 0. 
sab_funcs = [] if sab.elastic is not None: elastic = sab.elastic.xs[sabT] if isinstance(elastic, openmc.data.CoherentElastic): grid = np.union1d(grid, elastic.bragg_edges) if elastic.bragg_edges[-1] > sab_Emax: sab_Emax = elastic.bragg_edges[-1] elif isinstance(elastic, openmc.data.Tabulated1D): grid = np.union1d(grid, elastic.x) if elastic.x[-1] > sab_Emax: sab_Emax = elastic.x[-1] sab_funcs.append(elastic) if sab.inelastic is not None: inelastic = sab.inelastic.xs[sabT] grid = np.union1d(grid, inelastic.x) if inelastic.x[-1] > sab_Emax: sab_Emax = inelastic.x[-1] sab_funcs.append(inelastic) energy_grid = grid else: energy_grid = nuc.energy[nucT] # Parse the types mts = [] ops = [] yields = [] for line in types: if line in PLOT_TYPES: tmp_mts = [mtj for mti in PLOT_TYPES_MT[line] for mtj in nuc.get_reaction_components(mti)] mts.append(tmp_mts) if line.startswith('nu'): yields.append(True) else: yields.append(False) if XI_MT in tmp_mts: ops.append((np.add,) * (len(tmp_mts) - 2) + (np.multiply,)) else: ops.append((np.add,) * (len(tmp_mts) - 1)) elif line in openmc.data.REACTION_NUMBER.keys(): mt_number = openmc.data.REACTION_NUMBER[line] cv.check_type('MT in types', mt_number, Integral) cv.check_greater_than('MT in types', mt_number, 0) tmp_mts = nuc.get_reaction_components(mt_number) mts.append(tmp_mts) ops.append((np.add,) * (len(tmp_mts) - 1)) yields.append(False) elif isinstance(line, int): # Not a built-in type, we have to parse it ourselves cv.check_type('MT in types', line, Integral) cv.check_greater_than('MT in types', line, 0) tmp_mts = nuc.get_reaction_components(line) mts.append(tmp_mts) ops.append((np.add,) * (len(tmp_mts) - 1)) yields.append(False) else: raise TypeError("Invalid type", line) for i, mt_set in enumerate(mts): # Get the reaction xs data from the nuclide funcs = [] op = ops[i] for mt in mt_set: if mt == 2: if sab_name: # Then we need to do a piece-wise function of # The S(a,b) and non-thermal data sab_sum = openmc.data.Sum(sab_funcs) pw_funcs = openmc.data.Regions1D( [sab_sum, nuc[mt].xs[nucT]], [sab_Emax]) funcs.append(pw_funcs) else: funcs.append(nuc[mt].xs[nucT]) elif mt in nuc: if yields[i]: # Get the total yield first if available. This will be # used primarily for fission. for prod in chain(nuc[mt].products, nuc[mt].derived_products): if prod.particle == 'neutron' and \ prod.emission_mode == 'total': func = openmc.data.Combination( [nuc[mt].xs[nucT], prod.yield_], [np.multiply]) funcs.append(func) break else: # Total doesn't exist so we have to create from # prompt and delayed. This is used for scatter # multiplication. func = None for prod in chain(nuc[mt].products, nuc[mt].derived_products): if prod.particle == 'neutron' and \ prod.emission_mode != 'total': if func: func = openmc.data.Combination( [prod.yield_, func], [np.add]) else: func = prod.yield_ if func: funcs.append(openmc.data.Combination( [func, nuc[mt].xs[nucT]], [np.multiply])) else: # If func is still None, then there were no # products. In that case, assume the yield is # one as its not provided for some summed # reactions like MT=4 funcs.append(nuc[mt].xs[nucT]) else: funcs.append(nuc[mt].xs[nucT]) elif mt == UNITY_MT: funcs.append(lambda x: 1.) elif mt == XI_MT: awr = nuc.atomic_weight_ratio alpha = ((awr - 1.) / (awr + 1.))**2 xi = 1. + alpha * np.log(alpha) / (1. - alpha) funcs.append(lambda x: xi) else: funcs.append(lambda x: 0.) funcs = funcs if funcs else [lambda x: 0.] xs.append(openmc.data.Combination(funcs, op)) else: raise ValueError(this + " not in library") return energy_grid, xs
7,834
def test_plane():
    s = openmc.Plane(a=1, b=2, c=-1, d=3, name='my plane')
    assert s.a == 1
    assert s.b == 2
    assert s.c == -1
    assert s.d == 3
    assert s.boundary_type == 'transmission'
    assert s.name == 'my plane'
    assert s.type == 'plane'

    # Generic planes don't have well-defined bounding boxes
    assert_infinite_bb(s)

    # evaluate method
    x, y, z = (4, 3, 6)
    assert s.evaluate((x, y, z)) == pytest.approx(s.a*x + s.b*y + s.c*z - s.d)

    # translate method
    st = s.translate((1.0, 0.0, 0.0))
    assert (st.a, st.b, st.c, st.d) == (s.a, s.b, s.c, 4)

    # rotate method
    yp = openmc.YPlane(np.abs(s.d)/np.sqrt(s.a**2 + s.b**2 + s.c**2))
    psi = np.rad2deg(np.arctan2(1, 2))
    phi = np.rad2deg(np.arctan2(1, np.sqrt(5)))
    sr = s.rotate((phi, 0., psi), order='zyx')
    assert yp.normalize() == pytest.approx(sr.normalize())

    # test rotation ordering
    phi = np.rad2deg(np.arctan2(1, np.sqrt(2)))
    sr = s.rotate((0., -45., phi), order='xyz')
    assert yp.normalize() == pytest.approx(sr.normalize())

    # Make sure repr works
    repr(s)
def test_plane():
    s = openmc.Plane(a=1, b=2, c=-1, d=3, name='my plane')
    assert s.a == 1
    assert s.b == 2
    assert s.c == -1
    assert s.d == 3
    assert s.boundary_type == 'transmission'
    assert s.name == 'my plane'
    assert s.type == 'plane'

    # Generic planes don't have well-defined bounding boxes
    assert_infinite_bb(s)

    # evaluate method
    x, y, z = (4, 3, 6)
    assert s.evaluate((x, y, z)) == pytest.approx(s.a*x + s.b*y + s.c*z - s.d)

    # translate method
    st = s.translate((1.0, 0.0, 0.0))
    assert (st.a, st.b, st.c, st.d) == (s.a, s.b, s.c, 4)

    # rotate method
    yp = openmc.YPlane(abs(s.d)/math.sqrt(s.a**2 + s.b**2 + s.c**2))
    psi = np.rad2deg(np.arctan2(1, 2))
    phi = np.rad2deg(np.arctan2(1, np.sqrt(5)))
    sr = s.rotate((phi, 0., psi), order='zyx')
    assert yp.normalize() == pytest.approx(sr.normalize())

    # test rotation ordering
    phi = np.rad2deg(np.arctan2(1, np.sqrt(2)))
    sr = s.rotate((0., -45., phi), order='xyz')
    assert yp.normalize() == pytest.approx(sr.normalize())

    # Make sure repr works
    repr(s)
39,695
def main():
    module = ForemanPuppetAutosignModule(
        foreman_spec=dict(
            id=dict(type='str', required=True),
            puppet_proxy=dict(type='entity', flat_name='smart_proxy', resource_type='smart_proxies', required=True),
            organization=dict(type='entity'),
            location=dict(type='entity'),
        ),
        entity_name='autosign',
        entity_scope=['smart_proxy'],
        entity_opts=dict(
            resource_type='autosign',
        ),
    )

    with module.api_connection():
        module.run()
def main():
    module = ForemanPuppetAutosignModule(
        foreman_spec=dict(
            id=dict(type='str', required=True),
            smart_proxy=dict(type='entity', required=True, aliases=['puppet_proxy']),
            organization=dict(type='entity'),
            location=dict(type='entity'),
        ),
        entity_name='autosign',
        entity_scope=['smart_proxy'],
        entity_opts=dict(
            resource_type='autosign',
        ),
    )

    with module.api_connection():
        module.run()
6,355
def get_palette(may_use_fancy_formats: bool, theme: str = "classic") -> list: """ Load the requested theme and return a list containing all palette entries needed to highlight the debugger UI, including syntax highlighting. """ inheritance_overrides = {} if may_use_fancy_formats: def add_setting(color, setting): return f"{color}, {setting}" else: def add_setting(color, setting): return color def link(child: str, parent: str): inheritance_overrides[child] = parent # {{{ themes if theme == "classic": # {{{ classic theme link("current breakpoint", "current frame name") link("focused current breakpoint", "focused current frame name") palette_dict = { # {{{ base styles "background": ("black", "light gray"), "selectable": ("black", "dark cyan"), "focused selectable": ("black", "light cyan"), "highlighted": ("dark blue", "yellow"), "hotkey": (add_setting("black", "underline"), "light gray"), # }}} # {{{ general ui "header": ("dark blue", "light gray"), "dialog title": (add_setting("white", "bold"), "dark blue"), "warning": (add_setting("white", "bold"), "dark red"), # }}} # {{{ source view "source": ("yellow", "dark blue"), "current source": ("dark blue", "dark green"), "breakpoint source": ( add_setting("yellow", "bold"), "dark red"), "line number": ("light gray", "dark blue"), "breakpoint marker": ( add_setting("dark red", "bold"), "dark blue"), # }}} # {{{ sidebar "sidebar two": ("dark blue", "dark cyan"), "sidebar three": ("dark gray", "dark cyan"), "focused sidebar two": ("dark blue", "light cyan"), "focused sidebar three": ("dark gray", "light cyan"), # }}} # {{{ variables view "return label": ("white", "dark blue"), "focused return label": ("light gray", "dark blue"), # }}} # {{{ stack "current frame name": ( add_setting("white", "bold"), "dark cyan"), "focused current frame name": ( add_setting("black", "bold"), "light cyan"), # }}} # {{{ shell "command line output": ("light cyan", "dark blue"), "command line prompt": ( add_setting("white", "bold"), "dark blue"), "command line error": ( add_setting("light green", "bold"), "dark blue"), "command line clear button": ( add_setting("white", "bold"), "dark blue"), "command line focused button": ("dark blue", "dark cyan"), # }}} # {{{ Code syntax "keyword": (add_setting("white", "bold"), "dark blue"), "function": ("light cyan", "dark blue"), "literal": (add_setting("light green", "bold"), "dark blue"), "punctuation": ("light gray", "dark blue"), "comment": ("dark cyan", "dark blue"), # }}} } # }}} elif theme == "vim": # {{{ vim theme link("current breakpoint", "current frame name") link("focused current breakpoint", "focused current frame name") palette_dict = { # {{{ base styles "background": ("black", "light gray"), "selectable": ("black", "dark cyan"), "focused selectable": ("black", "light cyan"), "hotkey": (add_setting("black", "bold, underline"), "light gray"), "highlighted": ("black", "yellow"), # }}} # {{{ general ui "header": (add_setting("black", "bold"), "light gray"), "group head": ("dark blue", "light gray"), "dialog title": (add_setting("white", "bold"), "dark blue"), "input": ("black", "dark cyan"), "focused input": ("black", "light cyan"), "warning": (add_setting("dark red", "bold"), "white"), "header warning": (add_setting("dark red", "bold"), "light gray"), # }}} # {{{ source view "source": ("black", "white"), "current source": ("black", "dark cyan"), "breakpoint source": ("dark red", "light gray"), "line number": ("dark gray", "white"), "current line marker": ("dark red", "white"), "breakpoint marker": ("dark red", "white"), 
# }}} # {{{ sidebar "sidebar one": ("black", "dark cyan"), "sidebar two": ("dark blue", "dark cyan"), "sidebar three": ("dark gray", "dark cyan"), "focused sidebar one": ("black", "light cyan"), "focused sidebar two": ("dark blue", "light cyan"), "focused sidebar three": ("dark gray", "light cyan"), # }}} # {{{ variables view "highlighted var label": ("dark blue", "yellow"), "return label": ("white", "dark blue"), "focused return label": ("light gray", "dark blue"), # }}} # {{{ stack "current frame name": ( add_setting("white", "bold"), "dark cyan"), "focused current frame name": ( add_setting("black", "bold"), "light cyan"), # }}} # {{{ shell "command line output": ( add_setting("dark gray", "bold"), "white"), # }}} # {{{ Code syntax "keyword2": ("dark magenta", "white"), "namespace": ("dark magenta", "white"), "literal": ("dark red", "white"), "exception": ("dark red", "white"), "comment": ("dark gray", "white"), "function": ("dark blue", "white"), "pseudo": ("dark gray", "white"), "builtin": ("light blue", "white"), # }}} } # }}} elif theme == "dark vim": # {{{ dark vim link("current breakpoint", "current frame name") link("focused current breakpoint", "focused current frame name") palette_dict = { # {{{ base styles "background": ("black", "light gray"), "selectable": ("white", "dark gray"), "focused selectable": (add_setting("white", "bold"), "light blue"), "highlighted": ("black", "dark green"), "hotkey": (add_setting("dark blue", "underline"), "light gray"), # }}} # {{{ general ui "header": ("dark blue", "light gray"), "dialog title": (add_setting("white", "bold"), "black"), "warning": (add_setting("light red", "bold"), "black"), "header warning": (add_setting("light red", "bold"), "light gray"), # }}} # {{{ source view "source": ("white", "black"), "current source": (add_setting("white", "bold"), "dark gray"), "line number": (add_setting("dark gray", "bold"), "black"), "breakpoint marker": (add_setting("light red", "bold"), "black"), "breakpoint source": (add_setting("white", "bold"), "dark red"), # }}} # {{{ sidebar "sidebar two": ("yellow", "dark gray"), "focused sidebar two": ("light cyan", "light blue"), "sidebar three": ("light gray", "dark gray"), "focused sidebar three": ("yellow", "light blue"), # }}} # {{{ stack "current frame name": ( add_setting("white", "bold"), "dark gray"), # }}} # {{{ shell "command line output": (add_setting("yellow", "bold"), "black"), # }}} # {{{ Code syntax "keyword": ("yellow", "black"), "literal": ("light magenta", "black"), "function": (add_setting("light cyan", "bold"), "black"), "punctuation": ("yellow", "black"), "comment": ("dark cyan", "black"), "exception": ("light red", "black"), "builtin": ("light green", "black"), "pseudo": ("dark green", "black"), # }}} } # }}} elif theme == "midnight": # {{{ midnight # Based on XCode's midnight theme # Looks best in a console with green text against black background link("current breakpoint", "current frame name") link("focused current breakpoint", "focused current frame name") palette_dict = { # {{{ base styles "background": ("black", "light gray"), "selectable": ("black", "dark cyan"), "focused selectable": ("black", "dark green"), "hotkey": (add_setting("black", "underline, italics"), "light gray"), "highlighted": ("white", "dark cyan"), # }}} # {{{ general ui "input": (add_setting("yellow", "bold"), "dark blue"), "warning": (add_setting("white", "bold"), "dark red"), "search box": ("white", "black"), "dialog title": (add_setting("white", "bold"), "dark cyan"), "group head": (add_setting("dark 
blue", "bold"), "light gray"), "focused sidebar": ("black", "white"), "button": (add_setting("white", "bold"), "dark blue"), "focused button": ("light cyan", "black"), "value": (add_setting("yellow", "bold"), "dark blue"), "fixed value": ("light gray", "dark blue"), # }}} # {{{ source view "source": ("dark green", "black"), "highlighted source": ("black", "dark green"), "current source": ("black", "brown"), "current focused source": ("black", "yellow"), "focused source": ("white", "dark blue"), "breakpoint source": (add_setting("yellow", "bold"), "dark red"), "current breakpoint source": ("black", "dark red"), "line number": ("light gray", "black"), "current line marker": ("dark red", "black"), "breakpoint marker": ("dark red", "black"), # }}} # {{{ sidebar # }}} # {{{ variables view "variables": ("white", "black"), "var label": ("light blue", "black"), "var value": ("white", "black"), "variable separator": ("dark cyan", "light gray"), "focused var label": ("white", "dark blue"), "focused var value": ("white", "dark blue"), "highlighted var label": ("black", "dark green"), "highlighted var value": ("black", "dark green"), "focused highlighted var label": ("black", "light green"), "focused highlighted var value": ("black", "light green"), "return label": ("white", "dark blue"), "return value": ("black", "dark cyan"), "focused return label": ("light gray", "dark blue"), "focused return value": ("black", "dark blue"), # }}} # {{{ stack "stack": ("white", "black"), "frame name": ("white", "black"), "frame class": ("light blue", "black"), "frame location": ("light cyan", "black"), "current frame name": (add_setting("white", "bold"), "black"), "current frame class": (add_setting("light blue", "bold"), "black"), "current frame location": (add_setting("light cyan", "bold"), "black"), "focused frame name": ("white", "dark blue"), "focused frame class": ("white", "dark blue"), "focused frame location": ("white", "dark blue"), "focused current frame name": ( add_setting("white", "bold"), "dark blue"), "focused current frame class": ( add_setting("white", "bold"), "dark blue"), "focused current frame location": ( add_setting("white", "bold"), "dark blue"), # }}} # {{{ breakpoints view "breakpoint": ("white", "black"), "disabled breakpoint": ("dark gray", "black"), "focused breakpoint": ("white", "dark blue"), "focused disabled breakpoint": ("light gray", "dark blue"), "current breakpoint": (add_setting("white", "bold"), "black"), "disabled current breakpoint": ( add_setting("dark gray", "bold"), "black"), "focused current breakpoint": ( add_setting("white", "bold"), "dark blue"), "focused disabled current breakpoint": ( add_setting("light gray", "bold"), "dark blue"), # }}} # {{{ shell "command line edit": ("white", "black"), "command line prompt": (add_setting("white", "bold"), "black"), "command line output": ("white", "black"), "command line input": ("white", "black"), "command line error": (add_setting("light red", "bold"), "black"), "focused command line output": ("white", "dark blue"), "focused command line input": ( "white", "dark blue"), "focused command line error": ("black", "light red"), "command line clear button": (add_setting("white", "bold"), "black"), "command line focused button": ("black", "light gray"), # }}} # {{{ Code syntax "keyword": ("dark magenta", "black"), "pseudo": ("light magenta", "black"), "function": (add_setting("light blue", "bold"), "black"), "builtin": ("dark gray", "black"), "literal": ("dark cyan", "black"), "string": ("dark red", "black"), "doublestring": ("dark 
red", "black"), "docstring": ("yellow", "black"), "backtick": ("light green", "black"), "punctuation": ("white", "black"), "comment": ("white", "black"), "exception": ("light green", "black"), # }}} } # }}} elif theme == "solarized": # {{{ solarized palette_dict = { # {{{ base styles "background": ("light green", "light gray"), "selectable": ("light green", "white"), "focused selectable": ("white", "dark blue"), "highlighted": ("white", "dark cyan"), "hotkey": (add_setting("black", "underline"), "light gray"), # }}} # {{{ general ui "dialog title": (add_setting("white", "bold"), "dark cyan"), "warning": (add_setting("light red", "bold"), "white"), "header warning": (add_setting("light red", "bold"), "light gray"), "focused sidebar": ("dark red", "light gray"), "group head": (add_setting("yellow", "bold"), "light gray"), # }}} # {{{ source view "source": ("yellow", "white"), "breakpoint source": ("light red", "light gray"), "current source": ("light gray", "light blue"), "line number": ("light blue", "white"), "current line marker": ( add_setting("light blue", "bold"), "white"), "breakpoint marker": ( add_setting("light red", "bold"), "white"), # }}} # {{{ sidebar "sidebar two": ("dark blue", "white"), "sidebar three": ("light cyan", "white"), "focused sidebar three": ("light gray", "dark blue"), # }}} # {{{ variables view "return label": ("white", "yellow"), "focused return label": ("white", "yellow"), # }}} # {{{ stack "current frame name": ( add_setting("light green", "bold"), "white"), "focused current frame name": ( add_setting("white", "bold"), "dark blue"), # }}} # {{{ shell "command line output": ("light green", "white"), # }}} # {{{ Code syntax "namespace": ("dark red", "white"), "exception": ("light red", "white"), "keyword": ("brown", "white"), "keyword2": ("dark magenta", "white"), "function": ("dark green", "white"), "literal": ("dark cyan", "white"), "builtin": ("dark blue", "white"), "comment": ("light cyan", "white"), "pseudo": ("light cyan", "white"), # }}} } # }}} elif theme == "agr-256": # {{{ agr-256 # Give the colors some comprehensible names black = "h235" blacker = "h233" dark_cyan = "h24" dark_gray = "h241" dark_green = "h22" dark_red = "h88" dark_teal = "h23" light_blue = "h111" light_cyan = "h80" light_gray = "h252" light_green = "h113" light_red = "h160" medium_gray = "h246" salmon = "h223" orange = "h173" white = "h255" yellow = "h192" link("focused breakpoint", "focused selectable") link("current breakpoint", "current frame name") link("focused current breakpoint", "focused current frame name") palette_dict = { # {{{ base styles "background": (black, light_gray), "selectable": (white, blacker), "focused selectable": (yellow, dark_cyan), "hotkey": (add_setting(black, "underline"), light_gray), "highlighted": (white, dark_green), # }}} # {{{ general ui "focused sidebar": (dark_cyan, light_gray), "group head": (add_setting(dark_cyan, "bold"), light_gray), "dialog title": (add_setting(light_gray, "bold"), black), "warning": (add_setting(white, "bold"), dark_red), "fixed value": (add_setting(white, "bold"), dark_gray), "button": (add_setting(white, "bold"), black), "focused button": (add_setting(yellow, "bold"), dark_cyan), # }}} # {{{ source view "line number": (dark_gray, black), "current line marker": (add_setting(yellow, "bold"), black), "breakpoint marker": (add_setting(light_red, "bold"), black), "source": (white, black), "breakpoint source": (add_setting(white, "bold"), dark_red), "current source": (add_setting(light_gray, "bold"), dark_teal), # }}} # {{{ 
sidebar "sidebar two": (light_blue, blacker), "focused sidebar two": (light_gray, dark_cyan), "sidebar three": (medium_gray, blacker), "focused sidebar three": (salmon, dark_cyan), # }}} # {{{ variables view "highlighted var label": (light_gray, dark_green), "return label": (light_green, blacker), "focused return label": ( add_setting(light_gray, "bold"), dark_cyan), # }}} # {{{ stack "current frame name": (yellow, blacker), "focused current frame name": ( add_setting(yellow, "bold"), dark_cyan), # }}} # {{{ shell "command line prompt": (add_setting(yellow, "bold"), black), "command line output": (light_cyan, black), "command line error": (light_red, black), # }}} # {{{ Code syntax "comment": (medium_gray, black), "exception": (orange, black), "function": (yellow, black), "keyword": (light_blue, black), "literal": (orange, black), "operator": (yellow, black), "pseudo": (medium_gray, black), "punctuation": (salmon, black), "string": (light_green, black), # }}} } # }}} elif theme == "monokai": # {{{ monokai link("current breakpoint", "current frame name") link("focused current breakpoint", "focused current frame name") palette_dict = { # {{{ base styles "background": ("black", "light gray"), "selectable": ("white", "black"), "focused selectable": ("white", "dark gray"), "highlighted": ("black", "dark green"), "hotkey": (add_setting("black", "underline"), "light gray"), # }}} # {{{ general ui "input": ("white", "black"), "button": (add_setting("white", "bold"), "black"), "focused button": (add_setting("white", "bold"), "dark gray"), "focused sidebar": ("dark blue", "light gray"), "warning": (add_setting("white", "bold"), "dark red"), "group head": (add_setting("black", "bold"), "light gray"), "dialog title": (add_setting("white", "bold"), "black"), # }}} # {{{ source view "current source": ("black", "dark cyan"), "breakpoint source": (add_setting("white", "bold"), "dark red"), "line number": ("dark gray", "black"), "current line marker": (add_setting("dark cyan", "bold"), "black"), "breakpoint marker": (add_setting("dark red", "bold"), "black"), # }}} # {{{ sidebar "sidebar two": ("light cyan", "black"), "focused sidebar two": ("light cyan", "dark gray"), "sidebar three": ("light magenta", "black"), "focused sidebar three": ("light magenta", "dark gray"), # }}} # {{{ variables view "return label": ("light green", "black"), "focused return label": ("light green", "dark gray"), # }}} # {{{ stack "current frame name": ("light green", "black"), "focused current frame name": ("light green", "dark gray"), # }}} # {{{ shell "command line prompt": (add_setting("yellow", "bold"), "black"), "command line output": ("light cyan", "black"), "command line error": ("yellow", "black"), "focused command line output": ("light cyan", "dark gray"), "focused command line error": ( add_setting("yellow", "bold"), "dark gray"), # }}} # {{{ Code syntax "literal": ("light magenta", "black"), "builtin": ("light cyan", "black"), "exception": ("light cyan", "black"), "keyword2": ("light cyan", "black"), "function": ("light green", "black"), "class": (add_setting("light green", "underline"), "black"), "keyword": ("light red", "black"), "operator": ("light red", "black"), "comment": ("dark gray", "black"), "docstring": ("dark gray", "black"), "argument": ("brown", "black"), "pseudo": ("brown", "black"), "string": ("yellow", "black"), # }}} } # }}} elif theme == "monokai-256": # {{{ monokai-256 # Give the colors some comprehensible names black = "h236" blacker = "h234" dark_gray = "h240" dark_green = "h28" dark_red = "h124" 
dark_teal = "h30" dark_magenta = "h141" light_blue = "h111" light_cyan = "h51" light_gray = "h252" light_green = "h155" light_red = "h160" light_magenta = "h198" medium_gray = "h243" orange = "h208" white = "h255" yellow = "h228" link("current breakpoint", "current frame name") link("focused current breakpoint", "focused current frame name") palette_dict = { # {{{ base styles "background": (black, light_gray), "selectable": (white, blacker), "focused selectable": (white, dark_gray), "highlighted": (white, dark_green), "hotkey": (add_setting(black, "underline"), light_gray), # }}} # {{{ general ui "input": (white, black), "button": (add_setting(white, "bold"), black), "focused button": (add_setting(white, "bold"), dark_gray), "focused sidebar": (dark_teal, light_gray), "warning": (add_setting(white, "bold"), dark_red), "group head": (add_setting(black, "bold"), light_gray), "dialog title": (add_setting(white, "bold"), blacker), # }}} # {{{ source view "source": (white, black), "current source": (add_setting(light_gray, "bold"), dark_teal), "breakpoint source": (add_setting(white, "bold"), dark_red), "line number": (dark_gray, black), "current line marker": (add_setting(light_cyan, "bold"), black), "breakpoint marker": (add_setting(light_red, "bold"), black), # }}} # {{{ sidebar "sidebar two": (light_cyan, blacker), "focused sidebar two": (light_cyan, dark_gray), "sidebar three": (dark_magenta, blacker), "focused sidebar three": (dark_magenta, dark_gray), # }}} # {{{ variables view "highlighted var label": (light_gray, dark_green), "return label": (light_green, blacker), "focused return label": (light_green, dark_gray), # }}} # {{{ stack "current frame name": (light_green, blacker), "focused current frame name": (light_green, dark_gray), # }}} # {{{ shell "command line prompt": ( add_setting(yellow, "bold"), black), "command line output": (light_cyan, black), "command line error": (orange, black), "focused command line output": (light_cyan, dark_gray), "focused command line error": ( add_setting(orange, "bold"), dark_gray), # }}} # {{{ Code syntax "literal": (dark_magenta, black), "builtin": (light_cyan, black), "exception": (light_cyan, black), "keyword2": (light_cyan, black), "function": (light_green, black), "class": (add_setting(light_green, "underline"), black), "keyword": (light_magenta, black), "operator": (light_magenta, black), "comment": (medium_gray, black), "docstring": (medium_gray, black), "argument": (orange, black), "pseudo": (orange, black), "string": (yellow, black), # }}} } # }}} elif theme == "mono": # {{{ mono palette_dict = { "background": ("standout",), "selectable": (), "focused selectable": ("underline",), "highlighted": ("bold",), "hotkey": ("underline, standout",), } # }}} else: # {{{ custom try: # {{{ base styles palette_dict = { "background": ("black", "light gray"), "hotkey": (add_setting("black", "underline"), "light gray"), "selectable": ("black", "dark cyan"), "focused selectable": ("black", "dark green"), "input": (add_setting("yellow", "bold"), "dark blue"), "warning": (add_setting("white", "bold"), "dark red"), "highlighted": ("white", "dark cyan"), "source": ("white", "dark blue"), } # }}} symbols = { "palette": palette_dict, "add_setting": add_setting, "link": link, } from os.path import expanduser, expandvars fname = expanduser(expandvars(theme)) with open(fname) as inf: exec(compile(inf.read(), fname, "exec"), symbols) except FileNotFoundError: ui_log.error("Unable to locate custom theme file {!r}" .format(theme)) return None except Exception: 
ui_log.exception("Error when importing theme:") return None # }}} # }}} # Apply style inheritance for style_name in set(INHERITANCE_MAP.keys()).union(BASE_STYLES.keys()): get_style(palette_dict, style_name, inheritance_overrides) palette_list = [ astuple(entry) for entry in palette_dict.values() if isinstance(entry, PaletteEntry) ] return palette_list
def get_palette(may_use_fancy_formats: bool, theme: str = "classic") -> list: """ Load the requested theme and return a list containing all palette entries needed to highlight the debugger UI, including syntax highlighting. """ inheritance_overrides = {} if may_use_fancy_formats: def add_setting(color, setting): return f"{color}, {setting}" else: def add_setting(color, setting): return color def link(child: str, parent: str): inheritance_overrides[child] = parent # {{{ themes if theme == "classic": # {{{ classic theme link("current breakpoint", "current frame name") link("focused current breakpoint", "focused current frame name") palette_dict = { # {{{ base styles "background": ("black", "light gray"), "selectable": ("black", "dark cyan"), "focused selectable": ("black", "light cyan"), "highlighted": ("dark blue", "yellow"), "hotkey": (add_setting("black", "underline"), "light gray"), # }}} # {{{ general ui "header": ("dark blue", "light gray"), "dialog title": (add_setting("white", "bold"), "dark blue"), "warning": (add_setting("white", "bold"), "dark red"), # }}} # {{{ source view "source": ("yellow", "dark blue"), "current source": ("dark blue", "dark green"), "breakpoint source": ( add_setting("yellow", "bold"), "dark red"), "line number": ("light gray", "dark blue"), "breakpoint marker": ( add_setting("dark red", "bold"), "dark blue"), # }}} # {{{ sidebar "sidebar two": ("dark blue", "dark cyan"), "sidebar three": ("dark gray", "dark cyan"), "focused sidebar two": ("dark blue", "light cyan"), "focused sidebar three": ("dark gray", "light cyan"), # }}} # {{{ variables view "return label": ("white", "dark blue"), "focused return label": ("light gray", "dark blue"), # }}} # {{{ stack "current frame name": ( add_setting("white", "bold"), "dark cyan"), "focused current frame name": ( add_setting("black", "bold"), "light cyan"), # }}} # {{{ shell "command line output": ("light cyan", "dark blue"), "command line prompt": ( add_setting("white", "bold"), "dark blue"), "command line error": ( add_setting("light green", "bold"), "dark blue"), "command line clear button": ( add_setting("white", "bold"), "dark blue"), "command line focused button": ("dark blue", "dark cyan"), # }}} # {{{ Code syntax "keyword": (add_setting("white", "bold"), "dark blue"), "function": ("light cyan", "dark blue"), "literal": (add_setting("light green", "bold"), "dark blue"), "punctuation": ("light gray", "dark blue"), "comment": ("dark cyan", "dark blue"), # }}} } # }}} elif theme == "vim": # {{{ vim theme link("current breakpoint", "current frame name") link("focused current breakpoint", "focused current frame name") palette_dict = { # {{{ base styles "background": ("black", "light gray"), "selectable": ("black", "dark cyan"), "focused selectable": ("black", "light cyan"), "hotkey": (add_setting("black", "bold, underline"), "light gray"), "highlighted": ("black", "yellow"), # }}} # {{{ general ui "header": (add_setting("black", "bold"), "light gray"), "group head": ("dark blue", "light gray"), "dialog title": (add_setting("white", "bold"), "dark blue"), "input": ("black", "dark cyan"), "focused input": ("black", "light cyan"), "warning": (add_setting("dark red", "bold"), "white"), "header warning": (add_setting("dark red", "bold"), "light gray"), # }}} # {{{ source view "source": ("black", "white"), "current source": ("black", "dark cyan"), "breakpoint source": ("dark red", "light gray"), "line number": ("dark gray", "white"), "current line marker": ("dark red", "white"), "breakpoint marker": ("dark red", "white"), 
# }}} # {{{ sidebar "sidebar one": ("black", "dark cyan"), "sidebar two": ("dark blue", "dark cyan"), "sidebar three": ("dark gray", "dark cyan"), "focused sidebar one": ("black", "light cyan"), "focused sidebar two": ("dark blue", "light cyan"), "focused sidebar three": ("dark gray", "light cyan"), # }}} # {{{ variables view "highlighted var label": ("dark blue", "yellow"), "return label": ("white", "dark blue"), "focused return label": ("light gray", "dark blue"), # }}} # {{{ stack "current frame name": ( add_setting("white", "bold"), "dark cyan"), "focused current frame name": ( add_setting("black", "bold"), "light cyan"), # }}} # {{{ shell "command line output": ( add_setting("dark gray", "bold"), "white"), # }}} # {{{ Code syntax "keyword2": ("dark magenta", "white"), "namespace": ("dark magenta", "white"), "literal": ("dark red", "white"), "exception": ("dark red", "white"), "comment": ("dark gray", "white"), "function": ("dark blue", "white"), "pseudo": ("dark gray", "white"), "builtin": ("light blue", "white"), # }}} } # }}} elif theme == "dark vim": # {{{ dark vim link("current breakpoint", "current frame name") link("focused current breakpoint", "focused current frame name") palette_dict = { # {{{ base styles "background": ("black", "light gray"), "selectable": ("white", "dark gray"), "focused selectable": (add_setting("white", "bold"), "light blue"), "highlighted": ("black", "dark green"), "hotkey": (add_setting("dark blue", "underline"), "light gray"), # }}} # {{{ general ui "header": ("dark blue", "light gray"), "dialog title": (add_setting("white", "bold"), "black"), "warning": (add_setting("light red", "bold"), "black"), "header warning": (add_setting("light red", "bold"), "light gray"), # }}} # {{{ source view "source": ("white", "black"), "current source": (add_setting("white", "bold"), "dark gray"), "line number": (add_setting("dark gray", "bold"), "black"), "breakpoint marker": (add_setting("light red", "bold"), "black"), "breakpoint source": (add_setting("white", "bold"), "dark red"), # }}} # {{{ sidebar "sidebar two": ("yellow", "dark gray"), "focused sidebar two": ("light cyan", "light blue"), "sidebar three": ("light gray", "dark gray"), "focused sidebar three": ("yellow", "light blue"), # }}} # {{{ stack "current frame name": ( add_setting("white", "bold"), "dark gray"), # }}} # {{{ shell "command line output": (add_setting("yellow", "bold"), "black"), # }}} # {{{ Code syntax "keyword": ("yellow", "black"), "literal": ("light magenta", "black"), "function": (add_setting("light cyan", "bold"), "black"), "punctuation": ("yellow", "black"), "comment": ("dark cyan", "black"), "exception": ("light red", "black"), "builtin": ("light green", "black"), "pseudo": ("dark green", "black"), # }}} } # }}} elif theme == "midnight": # {{{ midnight # Based on XCode's midnight theme # Looks best in a console with green text against black background link("current breakpoint", "current frame name") link("focused current breakpoint", "focused current frame name") palette_dict = { # {{{ base styles "background": ("black", "light gray"), "selectable": ("black", "dark cyan"), "focused selectable": ("black", "dark green"), "hotkey": (add_setting("black", "underline, italics"), "light gray"), "highlighted": ("white", "dark cyan"), # }}} # {{{ general ui "input": (add_setting("yellow", "bold"), "dark blue"), "warning": (add_setting("white", "bold"), "dark red"), "search box": ("white", "black"), "dialog title": (add_setting("white", "bold"), "dark cyan"), "group head": (add_setting("dark 
blue", "bold"), "light gray"), "focused sidebar": ("black", "white"), "button": (add_setting("white", "bold"), "dark blue"), "focused button": ("light cyan", "black"), "fixed value": ("dark blue", "light gray"), # }}} # {{{ source view "source": ("dark green", "black"), "highlighted source": ("black", "dark green"), "current source": ("black", "brown"), "current focused source": ("black", "yellow"), "focused source": ("white", "dark blue"), "breakpoint source": (add_setting("yellow", "bold"), "dark red"), "current breakpoint source": ("black", "dark red"), "line number": ("light gray", "black"), "current line marker": ("dark red", "black"), "breakpoint marker": ("dark red", "black"), # }}} # {{{ sidebar # }}} # {{{ variables view "variables": ("white", "black"), "var label": ("light blue", "black"), "var value": ("white", "black"), "variable separator": ("dark cyan", "light gray"), "focused var label": ("white", "dark blue"), "focused var value": ("white", "dark blue"), "highlighted var label": ("black", "dark green"), "highlighted var value": ("black", "dark green"), "focused highlighted var label": ("black", "light green"), "focused highlighted var value": ("black", "light green"), "return label": ("white", "dark blue"), "return value": ("black", "dark cyan"), "focused return label": ("light gray", "dark blue"), "focused return value": ("black", "dark blue"), # }}} # {{{ stack "stack": ("white", "black"), "frame name": ("white", "black"), "frame class": ("light blue", "black"), "frame location": ("light cyan", "black"), "current frame name": (add_setting("white", "bold"), "black"), "current frame class": (add_setting("light blue", "bold"), "black"), "current frame location": (add_setting("light cyan", "bold"), "black"), "focused frame name": ("white", "dark blue"), "focused frame class": ("white", "dark blue"), "focused frame location": ("white", "dark blue"), "focused current frame name": ( add_setting("white", "bold"), "dark blue"), "focused current frame class": ( add_setting("white", "bold"), "dark blue"), "focused current frame location": ( add_setting("white", "bold"), "dark blue"), # }}} # {{{ breakpoints view "breakpoint": ("white", "black"), "disabled breakpoint": ("dark gray", "black"), "focused breakpoint": ("white", "dark blue"), "focused disabled breakpoint": ("light gray", "dark blue"), "current breakpoint": (add_setting("white", "bold"), "black"), "disabled current breakpoint": ( add_setting("dark gray", "bold"), "black"), "focused current breakpoint": ( add_setting("white", "bold"), "dark blue"), "focused disabled current breakpoint": ( add_setting("light gray", "bold"), "dark blue"), # }}} # {{{ shell "command line edit": ("white", "black"), "command line prompt": (add_setting("white", "bold"), "black"), "command line output": ("white", "black"), "command line input": ("white", "black"), "command line error": (add_setting("light red", "bold"), "black"), "focused command line output": ("white", "dark blue"), "focused command line input": ( "white", "dark blue"), "focused command line error": ("black", "light red"), "command line clear button": (add_setting("white", "bold"), "black"), "command line focused button": ("black", "light gray"), # }}} # {{{ Code syntax "keyword": ("dark magenta", "black"), "pseudo": ("light magenta", "black"), "function": (add_setting("light blue", "bold"), "black"), "builtin": ("dark gray", "black"), "literal": ("dark cyan", "black"), "string": ("dark red", "black"), "doublestring": ("dark red", "black"), "docstring": ("yellow", "black"), 
"backtick": ("light green", "black"), "punctuation": ("white", "black"), "comment": ("white", "black"), "exception": ("light green", "black"), # }}} } # }}} elif theme == "solarized": # {{{ solarized palette_dict = { # {{{ base styles "background": ("light green", "light gray"), "selectable": ("light green", "white"), "focused selectable": ("white", "dark blue"), "highlighted": ("white", "dark cyan"), "hotkey": (add_setting("black", "underline"), "light gray"), # }}} # {{{ general ui "dialog title": (add_setting("white", "bold"), "dark cyan"), "warning": (add_setting("light red", "bold"), "white"), "header warning": (add_setting("light red", "bold"), "light gray"), "focused sidebar": ("dark red", "light gray"), "group head": (add_setting("yellow", "bold"), "light gray"), # }}} # {{{ source view "source": ("yellow", "white"), "breakpoint source": ("light red", "light gray"), "current source": ("light gray", "light blue"), "line number": ("light blue", "white"), "current line marker": ( add_setting("light blue", "bold"), "white"), "breakpoint marker": ( add_setting("light red", "bold"), "white"), # }}} # {{{ sidebar "sidebar two": ("dark blue", "white"), "sidebar three": ("light cyan", "white"), "focused sidebar three": ("light gray", "dark blue"), # }}} # {{{ variables view "return label": ("white", "yellow"), "focused return label": ("white", "yellow"), # }}} # {{{ stack "current frame name": ( add_setting("light green", "bold"), "white"), "focused current frame name": ( add_setting("white", "bold"), "dark blue"), # }}} # {{{ shell "command line output": ("light green", "white"), # }}} # {{{ Code syntax "namespace": ("dark red", "white"), "exception": ("light red", "white"), "keyword": ("brown", "white"), "keyword2": ("dark magenta", "white"), "function": ("dark green", "white"), "literal": ("dark cyan", "white"), "builtin": ("dark blue", "white"), "comment": ("light cyan", "white"), "pseudo": ("light cyan", "white"), # }}} } # }}} elif theme == "agr-256": # {{{ agr-256 # Give the colors some comprehensible names black = "h235" blacker = "h233" dark_cyan = "h24" dark_gray = "h241" dark_green = "h22" dark_red = "h88" dark_teal = "h23" light_blue = "h111" light_cyan = "h80" light_gray = "h252" light_green = "h113" light_red = "h160" medium_gray = "h246" salmon = "h223" orange = "h173" white = "h255" yellow = "h192" link("focused breakpoint", "focused selectable") link("current breakpoint", "current frame name") link("focused current breakpoint", "focused current frame name") palette_dict = { # {{{ base styles "background": (black, light_gray), "selectable": (white, blacker), "focused selectable": (yellow, dark_cyan), "hotkey": (add_setting(black, "underline"), light_gray), "highlighted": (white, dark_green), # }}} # {{{ general ui "focused sidebar": (dark_cyan, light_gray), "group head": (add_setting(dark_cyan, "bold"), light_gray), "dialog title": (add_setting(light_gray, "bold"), black), "warning": (add_setting(white, "bold"), dark_red), "fixed value": (add_setting(white, "bold"), dark_gray), "button": (add_setting(white, "bold"), black), "focused button": (add_setting(yellow, "bold"), dark_cyan), # }}} # {{{ source view "line number": (dark_gray, black), "current line marker": (add_setting(yellow, "bold"), black), "breakpoint marker": (add_setting(light_red, "bold"), black), "source": (white, black), "breakpoint source": (add_setting(white, "bold"), dark_red), "current source": (add_setting(light_gray, "bold"), dark_teal), # }}} # {{{ sidebar "sidebar two": (light_blue, blacker), "focused 
sidebar two": (light_gray, dark_cyan), "sidebar three": (medium_gray, blacker), "focused sidebar three": (salmon, dark_cyan), # }}} # {{{ variables view "highlighted var label": (light_gray, dark_green), "return label": (light_green, blacker), "focused return label": ( add_setting(light_gray, "bold"), dark_cyan), # }}} # {{{ stack "current frame name": (yellow, blacker), "focused current frame name": ( add_setting(yellow, "bold"), dark_cyan), # }}} # {{{ shell "command line prompt": (add_setting(yellow, "bold"), black), "command line output": (light_cyan, black), "command line error": (light_red, black), # }}} # {{{ Code syntax "comment": (medium_gray, black), "exception": (orange, black), "function": (yellow, black), "keyword": (light_blue, black), "literal": (orange, black), "operator": (yellow, black), "pseudo": (medium_gray, black), "punctuation": (salmon, black), "string": (light_green, black), # }}} } # }}} elif theme == "monokai": # {{{ monokai link("current breakpoint", "current frame name") link("focused current breakpoint", "focused current frame name") palette_dict = { # {{{ base styles "background": ("black", "light gray"), "selectable": ("white", "black"), "focused selectable": ("white", "dark gray"), "highlighted": ("black", "dark green"), "hotkey": (add_setting("black", "underline"), "light gray"), # }}} # {{{ general ui "input": ("white", "black"), "button": (add_setting("white", "bold"), "black"), "focused button": (add_setting("white", "bold"), "dark gray"), "focused sidebar": ("dark blue", "light gray"), "warning": (add_setting("white", "bold"), "dark red"), "group head": (add_setting("black", "bold"), "light gray"), "dialog title": (add_setting("white", "bold"), "black"), # }}} # {{{ source view "current source": ("black", "dark cyan"), "breakpoint source": (add_setting("white", "bold"), "dark red"), "line number": ("dark gray", "black"), "current line marker": (add_setting("dark cyan", "bold"), "black"), "breakpoint marker": (add_setting("dark red", "bold"), "black"), # }}} # {{{ sidebar "sidebar two": ("light cyan", "black"), "focused sidebar two": ("light cyan", "dark gray"), "sidebar three": ("light magenta", "black"), "focused sidebar three": ("light magenta", "dark gray"), # }}} # {{{ variables view "return label": ("light green", "black"), "focused return label": ("light green", "dark gray"), # }}} # {{{ stack "current frame name": ("light green", "black"), "focused current frame name": ("light green", "dark gray"), # }}} # {{{ shell "command line prompt": (add_setting("yellow", "bold"), "black"), "command line output": ("light cyan", "black"), "command line error": ("yellow", "black"), "focused command line output": ("light cyan", "dark gray"), "focused command line error": ( add_setting("yellow", "bold"), "dark gray"), # }}} # {{{ Code syntax "literal": ("light magenta", "black"), "builtin": ("light cyan", "black"), "exception": ("light cyan", "black"), "keyword2": ("light cyan", "black"), "function": ("light green", "black"), "class": (add_setting("light green", "underline"), "black"), "keyword": ("light red", "black"), "operator": ("light red", "black"), "comment": ("dark gray", "black"), "docstring": ("dark gray", "black"), "argument": ("brown", "black"), "pseudo": ("brown", "black"), "string": ("yellow", "black"), # }}} } # }}} elif theme == "monokai-256": # {{{ monokai-256 # Give the colors some comprehensible names black = "h236" blacker = "h234" dark_gray = "h240" dark_green = "h28" dark_red = "h124" dark_teal = "h30" dark_magenta = "h141" light_blue = 
"h111" light_cyan = "h51" light_gray = "h252" light_green = "h155" light_red = "h160" light_magenta = "h198" medium_gray = "h243" orange = "h208" white = "h255" yellow = "h228" link("current breakpoint", "current frame name") link("focused current breakpoint", "focused current frame name") palette_dict = { # {{{ base styles "background": (black, light_gray), "selectable": (white, blacker), "focused selectable": (white, dark_gray), "highlighted": (white, dark_green), "hotkey": (add_setting(black, "underline"), light_gray), # }}} # {{{ general ui "input": (white, black), "button": (add_setting(white, "bold"), black), "focused button": (add_setting(white, "bold"), dark_gray), "focused sidebar": (dark_teal, light_gray), "warning": (add_setting(white, "bold"), dark_red), "group head": (add_setting(black, "bold"), light_gray), "dialog title": (add_setting(white, "bold"), blacker), # }}} # {{{ source view "source": (white, black), "current source": (add_setting(light_gray, "bold"), dark_teal), "breakpoint source": (add_setting(white, "bold"), dark_red), "line number": (dark_gray, black), "current line marker": (add_setting(light_cyan, "bold"), black), "breakpoint marker": (add_setting(light_red, "bold"), black), # }}} # {{{ sidebar "sidebar two": (light_cyan, blacker), "focused sidebar two": (light_cyan, dark_gray), "sidebar three": (dark_magenta, blacker), "focused sidebar three": (dark_magenta, dark_gray), # }}} # {{{ variables view "highlighted var label": (light_gray, dark_green), "return label": (light_green, blacker), "focused return label": (light_green, dark_gray), # }}} # {{{ stack "current frame name": (light_green, blacker), "focused current frame name": (light_green, dark_gray), # }}} # {{{ shell "command line prompt": ( add_setting(yellow, "bold"), black), "command line output": (light_cyan, black), "command line error": (orange, black), "focused command line output": (light_cyan, dark_gray), "focused command line error": ( add_setting(orange, "bold"), dark_gray), # }}} # {{{ Code syntax "literal": (dark_magenta, black), "builtin": (light_cyan, black), "exception": (light_cyan, black), "keyword2": (light_cyan, black), "function": (light_green, black), "class": (add_setting(light_green, "underline"), black), "keyword": (light_magenta, black), "operator": (light_magenta, black), "comment": (medium_gray, black), "docstring": (medium_gray, black), "argument": (orange, black), "pseudo": (orange, black), "string": (yellow, black), # }}} } # }}} elif theme == "mono": # {{{ mono palette_dict = { "background": ("standout",), "selectable": (), "focused selectable": ("underline",), "highlighted": ("bold",), "hotkey": ("underline, standout",), } # }}} else: # {{{ custom try: # {{{ base styles palette_dict = { "background": ("black", "light gray"), "hotkey": (add_setting("black", "underline"), "light gray"), "selectable": ("black", "dark cyan"), "focused selectable": ("black", "dark green"), "input": (add_setting("yellow", "bold"), "dark blue"), "warning": (add_setting("white", "bold"), "dark red"), "highlighted": ("white", "dark cyan"), "source": ("white", "dark blue"), } # }}} symbols = { "palette": palette_dict, "add_setting": add_setting, "link": link, } from os.path import expanduser, expandvars fname = expanduser(expandvars(theme)) with open(fname) as inf: exec(compile(inf.read(), fname, "exec"), symbols) except FileNotFoundError: ui_log.error("Unable to locate custom theme file {!r}" .format(theme)) return None except Exception: ui_log.exception("Error when importing theme:") return None # 
}}} # }}} # Apply style inheritance for style_name in set(INHERITANCE_MAP.keys()).union(BASE_STYLES.keys()): get_style(palette_dict, style_name, inheritance_overrides) palette_list = [ astuple(entry) for entry in palette_dict.values() if isinstance(entry, PaletteEntry) ] return palette_list
30,861
def main(): """ PARSE AND VALIDATE INTEGRATION PARAMS """ params = demisto.params() verify_certificate = not params.get('insecure', False) proxy = params.get('proxy', False) command = demisto.command() LOG(f'Command being called is {command}') commands = { 'test-module': test_module, 'adp-get-worker': get_worker_command, 'adp-get-all-workers-trigger-async': get_all_workers_trigger_async_command, 'adp-get-all-workers': get_all_workers_command } adp_credentials = params.get('adp_credentials', {}) client_id = adp_credentials.get('identifier') client_secret = adp_credentials.get('password') credentials = adp_credentials.get('credentials') cert = credentials.get('sshkey') if credentials.get('sshkey') else params.get('cert_file') if not cert: raise Exception('ADP Certificate and Key is required to call the APIs') client = Client( base_url=BASE_URL, verify=verify_certificate, headers={ 'Accept': 'application/json' }, proxy=proxy, cert=cert, client_id=client_id, client_secret=client_secret ) try: if command in commands: human_readable, outputs, raw_response = commands[command](client, demisto.args()) return_outputs(readable_output=human_readable, outputs=outputs, raw_response=raw_response) # Log exceptions except Exception: return_error(f'Failed to execute {demisto.command()} command. Traceback: {traceback.format_exc()}')
def main(): """ PARSE AND VALIDATE INTEGRATION PARAMS """ params = demisto.params() verify_certificate = not params.get('insecure', False) proxy = params.get('proxy', False) command = demisto.command() LOG(f'Command being called is {command}') commands = { 'test-module': test_module, 'adp-get-worker': get_worker_command, 'adp-get-all-workers-trigger-async': get_all_workers_trigger_async_command, 'adp-get-all-workers': get_all_workers_command } adp_credentials = params.get('adp_credentials', {}) client_id = adp_credentials.get('identifier') client_secret = adp_credentials.get('password') credentials = adp_credentials.get('credentials', {}) cert = credentials.get('sshkey') if credentials.get('sshkey') else params.get('cert_file') if not cert: raise Exception('ADP Certificate and Key is required to call the APIs') client = Client( base_url=BASE_URL, verify=verify_certificate, headers={ 'Accept': 'application/json' }, proxy=proxy, cert=cert, client_id=client_id, client_secret=client_secret ) try: if command in commands: human_readable, outputs, raw_response = commands[command](client, demisto.args()) return_outputs(readable_output=human_readable, outputs=outputs, raw_response=raw_response) # Log exceptions except Exception: return_error(f'Failed to execute {demisto.command()} command. Traceback: {traceback.format_exc()}')
31,951
def main(): """ main function, parses params and runs command functions """ params = demisto.params() feed_tags = argToList(params.get('feedTags')) tlp_color = params.get('tlp_color') limit = int(params.get('limit', 10)) filter_ = params.get('filter') # If your Client class inherits from BaseClient, SSL verification is # handled out of the box by it, just pass ``verify_certificate`` to # the Client constructor insecure = not params.get('insecure', False) # If your Client class inherits from BaseClient, system proxy is handled # out of the box by it, just pass ``proxy`` to the Client constructor proxy = params.get('proxy', False) command = demisto.command() demisto.debug(f'Command being called is {command}') try: client = Client( base_url='https://www.virustotal.com/api/v3/', verify=insecure, proxy=proxy, headers={ 'x-apikey': params['credentials']['password'], 'x-tool': 'CortexVirusTotalLivehuntFeed', } ) if command == 'test-module': # This is the call made when pressing the integration Test button. return_results(test_module(client, {})) elif command == 'vt-livehunt-get-indicators': # This is the command that fetches a limited number of indicators # from the feed source and displays them in the war room. return_results(get_indicators_command(client, params, demisto.args())) elif command == 'fetch-indicators': # This is the command that initiates a request to the feed endpoint # and create new indicators objects from the data fetched. If the # integration instance is configured to fetch indicators, then this # is the commandthat will be executed at the specified feed fetch # interval. indicators = fetch_indicators_command(client, tlp_color, feed_tags, limit, filter_) for iter_ in batch(indicators, batch_size=2000): demisto.createIndicators(iter_) else: raise NotImplementedError(f'Command {command} is not implemented.') # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # Print the traceback return_error(f'Failed to execute {command} command.\nError:\n{str(e)}')
def main(): """ main function, parses params and runs command functions """ params = demisto.params() feed_tags = argToList(params.get('feedTags')) tlp_color = params.get('tlp_color') limit = int(params.get('limit', 10)) filter_ = params.get('filter') # If your Client class inherits from BaseClient, SSL verification is # handled out of the box by it, just pass ``verify_certificate`` to # the Client constructor insecure = not params.get('insecure', False) # If your Client class inherits from BaseClient, system proxy is handled # out of the box by it, just pass ``proxy`` to the Client constructor proxy = params.get('proxy', False) command = demisto.command() demisto.debug(f'Command being called is {command}') try: client = Client( base_url='https://www.virustotal.com/api/v3/', verify=insecure, proxy=proxy, headers={ 'x-apikey': params['credentials']['password'], 'x-tool': 'CortexVirusTotalLivehuntFeed', } ) if command == 'test-module': # This is the call made when pressing the integration Test button. return_results(test_module(client, {})) elif command == 'vt-livehunt-get-indicators': # This is the command that fetches a limited number of indicators # from the feed source and displays them in the war room. return_results(get_indicators_command(client, params, demisto.args())) elif command == 'fetch-indicators': # This is the command that initiates a request to the feed endpoint # and create new indicators objects from the data fetched. If the # integration instance is configured to fetch indicators, then this # is the command that will be executed at the specified feed fetch # interval. indicators = fetch_indicators_command(client, tlp_color, feed_tags, limit, filter_) for iter_ in batch(indicators, batch_size=2000): demisto.createIndicators(iter_) else: raise NotImplementedError(f'Command {command} is not implemented.') # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # Print the traceback return_error(f'Failed to execute {command} command.\nError:\n{str(e)}')
25,791
def _csv_to_json(): """ Convert all CSV json files to JSON and ensure consistent diffs with ordered keys """ for lang_object in utils.supported_languages(include_in_context=True): locale_path = utils.local_locale_path(lang_object) perseus_path = utils.local_perseus_locale_path(lang_object) csv_locale_dir_path = os.path.join( utils.local_locale_csv_path(), lang_object["crowdin_code"] ) perseus_locale_dir_path = os.path.join( utils.local_perseus_locale_csv_path(), lang_object["crowdin_code"] ) # Make sure that the Perseus directory for CSV_FILES/{lang_code} exists if not os.path.exists(perseus_locale_dir_path): os.makedirs(perseus_locale_dir_path) csv_dirs = os.listdir(csv_locale_dir_path) + os.listdir(perseus_locale_dir_path) for file_name in csv_dirs: if "csv" not in file_name: continue if file_name is PERSEUS_CSV: csv_path = os.path.join(perseus_locale_dir_path, file_name) else: csv_path = os.path.join(csv_locale_dir_path, file_name) # Account for csv reading differences in Pythons 2 and 3 try: if sys.version_info[0] < 3: csv_file = open(csv_path, "rb") else: csv_file = open(csv_path, "r", newline="") except FileNotFoundError as e: logging.info("Failed to find CSV file in: {}".format(csv_path)) continue with csv_file as f: csv_data = list(row for row in csv.DictReader(f)) data = _locale_data_from_csv(csv_data) if file_name in PERSEUS_CSV: utils.json_dump_formatted( data, perseus_path, file_name.replace("csv", "json") ) else: utils.json_dump_formatted( data, locale_path, file_name.replace("csv", "json") )
def _csv_to_json(): """ Convert all CSV json files to JSON and ensure consistent diffs with ordered keys """ for lang_object in utils.supported_languages(include_in_context=True): locale_path = utils.local_locale_path(lang_object) perseus_path = utils.local_perseus_locale_path(lang_object) csv_locale_dir_path = os.path.join( utils.local_locale_csv_path(), lang_object["crowdin_code"] ) perseus_locale_dir_path = os.path.join( utils.local_perseus_locale_csv_path(), lang_object["crowdin_code"] ) # Make sure that the Perseus directory for CSV_FILES/{lang_code} exists if not os.path.exists(perseus_locale_dir_path): os.makedirs(perseus_locale_dir_path) csv_dirs = os.listdir(csv_locale_dir_path) + os.listdir(perseus_locale_dir_path) for file_name in csv_dirs: if "csv" not in file_name: continue if file_name is PERSEUS_CSV: csv_path = os.path.join(perseus_locale_dir_path, file_name) else: csv_path = os.path.join(csv_locale_dir_path, file_name) # Account for csv reading differences in Pythons 2 and 3 try: if sys.version_info[0] < 3: csv_file = open(csv_path, "rb") else: csv_file = open(csv_path, "r", newline="") except FileNotFoundError as e: logging.info("Failed to find CSV file in: {}".format(csv_path)) continue with csv_file as f: csv_data = list(row for row in csv.DictReader(f)) data = _locale_data_from_csv(csv_data) if file_name is PERSEUS_CSV: utils.json_dump_formatted( data, perseus_path, file_name.replace("csv", "json") ) else: utils.json_dump_formatted( data, locale_path, file_name.replace("csv", "json") )
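Both versions of _csv_to_json mix `is` and `in` when comparing file_name against PERSEUS_CSV. A short sketch of how the comparison forms differ for strings; the constant's value below is a placeholder, not the project's real one:

PERSEUS_CSV = "perseus_str.csv"   # placeholder value for illustration only
file_name = "perseus_str.csv"

file_name == PERSEUS_CSV    # True: value equality, the usual intent for file names
file_name in PERSEUS_CSV    # True here, but it is a substring test, not equality
file_name is PERSEUS_CSV    # identity check; can be False even when the strings are equal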
32,229
def panorama_check_logs_status_command(args: dict): """ Check query logs status """ job_id = args.get('job_id') job_ids = argToList(job_id) for job_id in job_ids: result = panorama_get_traffic_logs(job_id, args) if result['response']['@status'] == 'error': if 'msg' in result['response'] and 'line' in result['response']['msg']: message = '. Reason is: ' + result['response']['msg']['line'] raise Exception('Query logs failed' + message) else: raise Exception('Query logs failed.') query_logs_status_output = { 'JobID': job_id, 'Status': 'Pending' } if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result'] \ or 'status' not in result['response']['result']['job']: raise Exception('Missing JobID status in response.') if result['response']['result']['job']['status'] == 'FIN': query_logs_status_output['Status'] = 'Completed' return_results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Query Logs status:', query_logs_status_output, ['JobID', 'Status'], removeNull=True), 'EntryContext': {"Panorama.Monitor(val.JobID == obj.JobID)": query_logs_status_output} })
def panorama_check_logs_status_command(job_id: str, target: Optional[str] = None): """ Check query logs status """ job_id = args.get('job_id') job_ids = argToList(job_id) for job_id in job_ids: result = panorama_get_traffic_logs(job_id, args) if result['response']['@status'] == 'error': if 'msg' in result['response'] and 'line' in result['response']['msg']: message = '. Reason is: ' + result['response']['msg']['line'] raise Exception('Query logs failed' + message) else: raise Exception('Query logs failed.') query_logs_status_output = { 'JobID': job_id, 'Status': 'Pending' } if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result'] \ or 'status' not in result['response']['result']['job']: raise Exception('Missing JobID status in response.') if result['response']['result']['job']['status'] == 'FIN': query_logs_status_output['Status'] = 'Completed' return_results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Query Logs status:', query_logs_status_output, ['JobID', 'Status'], removeNull=True), 'EntryContext': {"Panorama.Monitor(val.JobID == obj.JobID)": query_logs_status_output} })
50,438
def send_account_recovery_email_for_user(user, request, email=None): """ Send out a account recovery email for the given user. Arguments: user (User): Django User object request (HttpRequest): Django request object email (str): Send email to this address. """ site = get_current_site() message_context = get_base_template_context(site) logistration_mfe_enabled = configuration_helpers.get_value( 'ENABLE_LOGISTRATION_MICROFRONTEND', settings.FEATURES.get('ENABLE_LOGISTRATION_MICROFRONTEND') ) site_name = settings.LOGISTRATION_MICROFRONTEND_URL.split('//')[1] if logistration_mfe_enabled \ else configuration_helpers.get_value('SITE_NAME', settings.SITE_NAME) message_context.update({ 'request': request, # Used by google_analytics_tracking_pixel 'email': email, 'platform_name': configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME), 'reset_link': '{protocol}://{site}{link}?is_account_recovery=true'.format( protocol='https' if request.is_secure() else 'http', site=site_name, link=reverse('password_reset_confirm', kwargs={ 'uidb36': int_to_base36(user.id), 'token': default_token_generator.make_token(user), }), ) }) msg = AccountRecoveryMessage().personalize( recipient=Recipient(user.username, email), language=get_user_preference(user, LANGUAGE_KEY), user_context=message_context, ) ace.send(msg)
def send_account_recovery_email_for_user(user, request, email=None): """ Send out a account recovery email for the given user. Arguments: user (User): Django User object request (HttpRequest): Django request object email (str): Send email to this address. """ site = get_current_site() message_context = get_base_template_context(site) site_name = settings.LOGISTRATION_MICROFRONTEND_URL_DOMAIN if should_redirect_to_logistration_mircrofrontend() \ else configuration_helpers.get_value('SITE_NAME', settings.SITE_NAME) site_name = settings.LOGISTRATION_MICROFRONTEND_URL.split('//')[1] if logistration_mfe_enabled \ else configuration_helpers.get_value('SITE_NAME', settings.SITE_NAME) message_context.update({ 'request': request, # Used by google_analytics_tracking_pixel 'email': email, 'platform_name': configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME), 'reset_link': '{protocol}://{site}{link}?is_account_recovery=true'.format( protocol='https' if request.is_secure() else 'http', site=site_name, link=reverse('password_reset_confirm', kwargs={ 'uidb36': int_to_base36(user.id), 'token': default_token_generator.make_token(user), }), ) }) msg = AccountRecoveryMessage().personalize( recipient=Recipient(user.username, email), language=get_user_preference(user, LANGUAGE_KEY), user_context=message_context, ) ace.send(msg)
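Both cells derive a bare host name from a full URL setting with .split('//')[1]. A small comparison with urllib.parse, using a made-up URL in place of the real LOGISTRATION_MICROFRONTEND_URL value:

from urllib.parse import urlparse

url = "https://authn.example.com/login"   # made-up stand-in for the MFE URL setting

url.split('//')[1]     # 'authn.example.com/login'  (keeps any path after the host)
urlparse(url).netloc   # 'authn.example.com'        (host, plus port if present)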
51,536
def _parse_gitmodules(ds): gitmodules = ds.pathobj / '.gitmodules' if not gitmodules.exists(): return {} # pull out file content out, err = ds.repo._git_custom_command( '', ['git', 'config', '-z', '-l', '--file', '.gitmodules']) # abuse our config parser db, _ = _parse_gitconfig_dump(out, {}, None, True) mods = {} for k, v in iteritems(db): if not k.startswith('submodule.'): # we don't know what this is continue k_l = k.split('.') mod_name = k_l[1] mod = mods.get(mod_name, {}) mod['.'.join(k_l[2:])] = v mods[mod_name] = mod out = {} # bring into traditional shape for name, props in iteritems(mods): if 'path' not in props: lgr.debug("Failed to get '%s.path', skipping section", name) continue modprops = {'gitmodule_{}'.format(k): v for k, v in iteritems(props) if not (k.startswith('__') or k == 'path')} modpath = ds.pathobj / PurePosixPath(props['path']) modprops['gitmodule_name'] = name out[modpath] = modprops return out
def _parse_gitmodules(ds): gitmodules = ds.pathobj / '.gitmodules' if not gitmodules.exists(): return {} # pull out file content out, err = ds.repo._git_custom_command( '', ['git', 'config', '-z', '-l', '--file', '.gitmodules']) # abuse our config parser db, _ = _parse_gitconfig_dump(out, {}, None, True) mods = {} for k, v in iteritems(db): if not k.startswith('submodule.'): # we don't know what this is continue # module name could contain '.' - need to split from the left to get "submodule" (not used) # and then from the right to get field mod_name, field = k.split('.', 1)[1].rsplit('.', 1) mod = mods.get(mod_name, {}) mod[mod_name] = field mod_name = k_l[1] mod = mods.get(mod_name, {}) mod['.'.join(k_l[2:])] = v mods[mod_name] = mod out = {} # bring into traditional shape for name, props in iteritems(mods): if 'path' not in props: lgr.debug("Failed to get '%s.path', skipping section", name) continue modprops = {'gitmodule_{}'.format(k): v for k, v in iteritems(props) if not (k.startswith('__') or k == 'path')} modpath = ds.pathobj / PurePosixPath(props['path']) modprops['gitmodule_name'] = name out[modpath] = modprops return out
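The comment added in the modified cell concerns keys of the form submodule.<name>.<field>, where <name> may itself contain dots. A tiny sketch of the left-then-right split it describes, on a hypothetical config key:

key = "submodule.my.sub.module.path"   # hypothetical .gitmodules config key

key.split('.')[1]                                   # 'my'  (truncates a dotted submodule name)
name, field = key.split('.', 1)[1].rsplit('.', 1)
# name == 'my.sub.module', field == 'path'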
7,224
def brain(): """Subset of data from the University of North Carolina Volume Rendering Test Data Set. The full dataset is available at [1]_. Returns ------- image : (10, 256, 256) uint16 ndarray Notes ----- The 3D volume consists of 10 layers from the larger volume. References ---------- .. [1] https://graphics.stanford.edu/data/voldata/ """ return _load("data/brain.tiff")
def brain(): """Subset of data from the University of North Carolina Volume Rendering Test Data Set. The full dataset is available at [1]_. Returns ------- image : (10, 256, 256) uint16 ndarray Notes ----- The 3D volume consists of 10 layers from the larger volume. References ---------- .. [1] https://graphics.stanford.edu/data/voldata/ """ return _load(_fetch("data/brain.tiff"))
1,378
def median_absolute_error(y_true, y_pred, multioutput='uniform_average'): """Median absolute error regression loss Read more in the :ref:`User Guide <median_absolute_error>`. Parameters ---------- y_true : array-like of shape = (n_samples) or (n_samples, n_outputs) Ground truth (correct) target values. y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs) Estimated target values. multioutput : string in ['raw_values', 'uniform_average'] or array-like of shape (n_outputs) Defines aggregating of multiple output values. Array-like value defines weights used to average errors. 'raw_values' : Returns a full set of errors in case of multioutput input. 'uniform_average' : Errors of all outputs are averaged with uniform weight. Returns ------- loss : float or ndarray of floats If multioutput is 'raw_values', then mean absolute error is returned for each output separately. If multioutput is 'uniform_average' or an ndarray of weights, then the weighted average of all output errors is returned. Median absolute error output is non-negative floating point. The best value is 0.0. Examples -------- >>> from sklearn.metrics import median_absolute_error >>> y_true = [3, -0.5, 2, 7] >>> y_pred = [2.5, 0.0, 2, 8] >>> median_absolute_error(y_true, y_pred) 0.5 >>> y_true = [[0.5, 1], [-1, 1], [7, -6]] >>> y_pred = [[0, 2], [-1, 2], [8, -5]] >>> median_absolute_error(y_true, y_pred) 0.75 >>> median_absolute_error(y_true, y_pred, multioutput='raw_values') array([0.5, 1. ]) >>> median_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7]) 0.85 """ y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred, None) output_errors = np.median(np.abs(y_pred - y_true), axis=0) if isinstance(multioutput, str): if multioutput == 'raw_values': return output_errors elif multioutput == 'uniform_average': # pass None as weights to np.average: uniform mean multioutput = None return np.average(output_errors, weights=multioutput)
def median_absolute_error(y_true, y_pred, multioutput='uniform_average'): """Median absolute error regression loss Read more in the :ref:`User Guide <median_absolute_error>`. Parameters ---------- y_true : array-like of shape = (n_samples) or (n_samples, n_outputs) Ground truth (correct) target values. y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs) Estimated target values. multioutput : string in ['raw_values', 'uniform_average'] or array-like of shape (n_outputs,) Defines aggregating of multiple output values. Array-like value defines weights used to average errors. 'raw_values' : Returns a full set of errors in case of multioutput input. 'uniform_average' : Errors of all outputs are averaged with uniform weight. Returns ------- loss : float or ndarray of floats If multioutput is 'raw_values', then mean absolute error is returned for each output separately. If multioutput is 'uniform_average' or an ndarray of weights, then the weighted average of all output errors is returned. Median absolute error output is non-negative floating point. The best value is 0.0. Examples -------- >>> from sklearn.metrics import median_absolute_error >>> y_true = [3, -0.5, 2, 7] >>> y_pred = [2.5, 0.0, 2, 8] >>> median_absolute_error(y_true, y_pred) 0.5 >>> y_true = [[0.5, 1], [-1, 1], [7, -6]] >>> y_pred = [[0, 2], [-1, 2], [8, -5]] >>> median_absolute_error(y_true, y_pred) 0.75 >>> median_absolute_error(y_true, y_pred, multioutput='raw_values') array([0.5, 1. ]) >>> median_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7]) 0.85 """ y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred, None) output_errors = np.median(np.abs(y_pred - y_true), axis=0) if isinstance(multioutput, str): if multioutput == 'raw_values': return output_errors elif multioutput == 'uniform_average': # pass None as weights to np.average: uniform mean multioutput = None return np.average(output_errors, weights=multioutput)
21,992
def update_local_associations( sydent, db: sqlite3.Connection, send_email: bool, dry_run: bool, test=False, ): """Update the DB table local_threepid_associations so that all stored emails are casefolded, and any duplicate mxid's associated with the given email are deleted. :return: None """ cur = db.cursor() res = cur.execute( "SELECT address, mxid FROM local_threepid_associations WHERE medium = 'email'" "ORDER BY ts DESC" ) # a dict that associates an email address with correspoinding mxids and lookup hashes associations: Dict[str, List[Tuple[str, str, str]]] = {} # iterate through selected associations, casefold email, rehash it, and add to # associations dict for address, mxid in res.fetchall(): casefold_address = address.casefold() # rehash email since hashes are case-sensitive lookup_hash = calculate_lookup_hash(sydent, casefold_address) if casefold_address in associations: associations[casefold_address].append((address, mxid, lookup_hash)) else: associations[casefold_address] = [(address, mxid, lookup_hash)] # list of arguments to update db with db_update_args: List[Tuple[str, str, str, str]] = [] # list of mxids to delete to_delete: List[Tuple[str]] = [] # The MXIDs associated with rows we're about to delete, indexed by the casefolded # address they're associated with. to_delete_mxids: Dict[str, Set[str]] = {} # The MXIDs associated with rows we're not going to delete, so we can compare the one # associated with a given casefolded address with the one(s) we want to delete for the # same address and figure out if we want to send them an email. to_keep_mxids: Dict[str, str] = {} for casefold_address, assoc_tuples in associations.items(): db_update_args.append( ( casefold_address, assoc_tuples[0][2], assoc_tuples[0][0], assoc_tuples[0][1], ) ) if len(assoc_tuples) > 1: # Iterate over all associations except for the first one, since we've already # processed it. to_delete_mxids[casefold_address] = set() to_keep_mxids[casefold_address] = assoc_tuples[0][1].lower() for address, mxid, _ in assoc_tuples[1:]: to_delete.append((address,)) to_delete_mxids[casefold_address].add(mxid.lower()) if not test: print( f"{len(to_delete)} rows to delete, {len(db_update_args)} rows to update in local_threepid_associations" ) # Update the database before sending the emails, that way if the update fails the # affected users haven't been notified. if not dry_run: if len(to_delete) > 0: cur.executemany( "DELETE FROM local_threepid_associations WHERE address = ?", to_delete ) if len(db_update_args) > 0: cur.executemany( "UPDATE local_threepid_associations SET address = ?, lookup_hash = ? WHERE address = ? AND mxid = ?", db_update_args, ) # We've finished updating the database, committing the transaction. db.commit() # iterate through the mxids and send emails if send_email and not dry_run: for address, mxids in to_delete_mxids.items(): for mxid in mxids: # If the MXID is one that will still be associated with this email address # after this run, don't send an email for it. if mxid == to_keep_mxids[address]: continue # Send the email with exponential backoff - that way we don't stop # sending halfway through if the SMTP server rejects our email (e.g. # because of rate limiting). The alternative would mean the first # addresses of the list receive duplicate emails. 
def sendWithBackoff(backoff): time.sleep(backoff) try: templateFile = sydent.get_branded_template( None, "migration_template.eml", ("email", "email.template"), ) sendEmail( sydent, templateFile, address, {"mxid": mxid, "subject_header_value": EMAIL_SUBJECT}, log_send_errors=False, ) if not test: print("Sent email to %s" % address) except EmailSendException: if not test: print( "Failed to send email to %s, retrying in %ds" % (address, backoff * 2) ) sendWithBackoff(backoff * 2) sendWithBackoff(1 if not test else 0)
def update_local_associations( sydent, db: sqlite3.Connection, send_email: bool, dry_run: bool, test=False, ) -> None: """Update the DB table local_threepid_associations so that all stored emails are casefolded, and any duplicate mxid's associated with the given email are deleted. :return: None """ cur = db.cursor() res = cur.execute( "SELECT address, mxid FROM local_threepid_associations WHERE medium = 'email'" "ORDER BY ts DESC" ) # a dict that associates an email address with correspoinding mxids and lookup hashes associations: Dict[str, List[Tuple[str, str, str]]] = {} # iterate through selected associations, casefold email, rehash it, and add to # associations dict for address, mxid in res.fetchall(): casefold_address = address.casefold() # rehash email since hashes are case-sensitive lookup_hash = calculate_lookup_hash(sydent, casefold_address) if casefold_address in associations: associations[casefold_address].append((address, mxid, lookup_hash)) else: associations[casefold_address] = [(address, mxid, lookup_hash)] # list of arguments to update db with db_update_args: List[Tuple[str, str, str, str]] = [] # list of mxids to delete to_delete: List[Tuple[str]] = [] # The MXIDs associated with rows we're about to delete, indexed by the casefolded # address they're associated with. to_delete_mxids: Dict[str, Set[str]] = {} # The MXIDs associated with rows we're not going to delete, so we can compare the one # associated with a given casefolded address with the one(s) we want to delete for the # same address and figure out if we want to send them an email. to_keep_mxids: Dict[str, str] = {} for casefold_address, assoc_tuples in associations.items(): db_update_args.append( ( casefold_address, assoc_tuples[0][2], assoc_tuples[0][0], assoc_tuples[0][1], ) ) if len(assoc_tuples) > 1: # Iterate over all associations except for the first one, since we've already # processed it. to_delete_mxids[casefold_address] = set() to_keep_mxids[casefold_address] = assoc_tuples[0][1].lower() for address, mxid, _ in assoc_tuples[1:]: to_delete.append((address,)) to_delete_mxids[casefold_address].add(mxid.lower()) if not test: print( f"{len(to_delete)} rows to delete, {len(db_update_args)} rows to update in local_threepid_associations" ) # Update the database before sending the emails, that way if the update fails the # affected users haven't been notified. if not dry_run: if len(to_delete) > 0: cur.executemany( "DELETE FROM local_threepid_associations WHERE address = ?", to_delete ) if len(db_update_args) > 0: cur.executemany( "UPDATE local_threepid_associations SET address = ?, lookup_hash = ? WHERE address = ? AND mxid = ?", db_update_args, ) # We've finished updating the database, committing the transaction. db.commit() # iterate through the mxids and send emails if send_email and not dry_run: for address, mxids in to_delete_mxids.items(): for mxid in mxids: # If the MXID is one that will still be associated with this email address # after this run, don't send an email for it. if mxid == to_keep_mxids[address]: continue # Send the email with exponential backoff - that way we don't stop # sending halfway through if the SMTP server rejects our email (e.g. # because of rate limiting). The alternative would mean the first # addresses of the list receive duplicate emails. 
def sendWithBackoff(backoff): time.sleep(backoff) try: templateFile = sydent.get_branded_template( None, "migration_template.eml", ("email", "email.template"), ) sendEmail( sydent, templateFile, address, {"mxid": mxid, "subject_header_value": EMAIL_SUBJECT}, log_send_errors=False, ) if not test: print("Sent email to %s" % address) except EmailSendException: if not test: print( "Failed to send email to %s, retrying in %ds" % (address, backoff * 2) ) sendWithBackoff(backoff * 2) sendWithBackoff(1 if not test else 0)
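update_local_associations casefolds addresses but lowercases MXIDs. str.casefold() is a stronger normalisation than str.lower(), which is why two visually different addresses can collapse onto one row; for example:

"Straße@Example.COM".lower()      # 'straße@example.com'  (ß is kept)
"Straße@Example.COM".casefold()   # 'strasse@example.com' (ß folds to 'ss')
"STRASSE@Example.COM".casefold()  # 'strasse@example.com' (same key as the line above)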
4,438
def _constrain_fig_resolution(fig, width): """Limit the resolution (DPI) of a figure Parameters ---------- fig : matplotlib.figure.Figure The figure whose DPI to adjust. width : int The max. allowed width, in pixels. Returns ------- Nothing, alters the figure's properties in-place. """ dpi = min(fig.get_dpi(), MAX_IMG_WIDTH / fig.get_size_inches()[0]) fig.set_dpi(dpi)
def _constrain_fig_resolution(fig, width): """Limit the resolution (DPI) of a figure. Parameters ---------- fig : matplotlib.figure.Figure The figure whose DPI to adjust. width : int The max. allowed width, in pixels. Returns ------- Nothing, alters the figure's properties in-place. """ dpi = min(fig.get_dpi(), MAX_IMG_WIDTH / fig.get_size_inches()[0]) fig.set_dpi(dpi)
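The cap in _constrain_fig_resolution works because rendered width in pixels is dpi times width in inches. With made-up numbers (MAX_IMG_WIDTH here is only an illustrative value):

MAX_IMG_WIDTH = 850          # illustrative pixel budget
width_in, dpi = 8.0, 200     # 8 in * 200 dpi = 1600 px, over budget

capped = min(dpi, MAX_IMG_WIDTH / width_in)
print(capped, capped * width_in)   # 106.25 850.0 -> exactly the allowed width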
54,003
def get_current_grype_db_metadata() -> json: """ Return the json contents of the metadata file for the in-use version of grype db """ global grype_db_dir # Get the path to the latest grype_db metadata file latest_grype_db_metadata_file = os.path.join(grype_db_dir, METADATA_FILE_NAME) # Ensure the file exists if not os.path.exists(latest_grype_db_metadata_file): # If not, return None return None else: # Get the contents of the file with open(latest_grype_db_metadata_file) as read_file: json_output = json.load(read_file) return json_output
def get_current_grype_db_metadata() -> json: """ Return the json contents of the metadata file for the in-use version of grype db """ global grype_db_dir # Get the path to the latest grype_db metadata file latest_grype_db_metadata_file = os.path.join(grype_db_dir, METADATA_FILE_NAME) # Ensure the file exists if not os.path.exists(latest_grype_db_metadata_file): # If not, return None return None else: # Get the contents of the file with open(latest_grype_db_metadata_file) as read_file: try: return json.load(read_file) except JSONDecodeError: logger.error("unable to decode file into json: %s", read_file) raise # not sure if this is useful for this function return json_output
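The modified cell wraps json.load in a try/except for JSONDecodeError. Assuming that name refers to the standard library's json.JSONDecodeError (a ValueError subclass), a minimal reproduction of what it carries:

import json

try:
    json.loads("{ not valid json")
except json.JSONDecodeError as exc:
    print(exc.msg, exc.lineno, exc.colno)
    # Expecting property name enclosed in double quotes 1 3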
34,174
def convert_training_data( data_file: Text, out_file: Text, output_format: Text, language: Text ): if not os.path.exists(data_file): print_error( "Data file '{}' does not exist. Provide a valid data file using the " "'--data-file' argument.".format(data_file) ) return if output_format == "json": td = training_data.load_data(data_file, language) output = td.as_json(indent=2) elif output_format == "md": td = training_data.load_data(data_file, language) output = td.as_markdown() else: print_error( "Did not recognize data format. Supported data formats: 'json' and 'md'. " "Specify the desired data format with '--format'." ) return write_to_file(out_file, output)
def convert_training_data( data_file: Text, out_file: Text, output_format: Text, language: Text ): if not os.path.exists(data_file): print_error( "Data file '{}' does not exist. Provide a valid NLU data file using " "'--data-file' argument.".format(data_file) ) return if output_format == "json": td = training_data.load_data(data_file, language) output = td.as_json(indent=2) elif output_format == "md": td = training_data.load_data(data_file, language) output = td.as_markdown() else: print_error( "Did not recognize data format. Supported data formats: 'json' and 'md'. " "Specify the desired data format with '--format'." ) return write_to_file(out_file, output)
14,305
def merge_charstrings(glyphOrder, num_masters, top_dicts, masterModel): vsindex_dict = {} vsindex_by_key = {} varDataList = [] masterSupports = [] default_charstrings = top_dicts[0].CharStrings for gid, gname in enumerate(glyphOrder): all_cs = [ _get_cs(td.CharStrings, gname) for td in top_dicts] if len([gs for gs in all_cs if gs is not None]) == 1: continue model, model_cs = masterModel.getSubModel(all_cs) # create the first pass CFF2 charstring, from # the default charstring. default_charstring = model_cs[0] var_pen = CFF2CharStringMergePen([], gname, num_masters, 0) # We need to override outlineExtractor because these # charstrings do have widths in the 'program'; we need to drop these # values rather than post assertion error for them. default_charstring.outlineExtractor = MergeOutlineExtractor default_charstring.draw(var_pen) # Add the coordinates from all the other regions to the # blend lists in the CFF2 charstring. region_cs = model_cs[1:] for region_idx, region_charstring in enumerate(region_cs, start=1): var_pen.restart(region_idx) region_charstring.outlineExtractor = MergeOutlineExtractor region_charstring.draw(var_pen) # Collapse each coordinate list to a blend operator and its args. new_cs = var_pen.getCharString( private=default_charstring.private, globalSubrs=default_charstring.globalSubrs, var_model=model, optimize=True) default_charstrings[gname] = new_cs if (not var_pen.seen_moveto) or ('blend' not in new_cs.program): # If this is not a marking glyph, or if there are no blend # arguments, then we can use vsindex 0. No need to # check if we need a new vsindex. continue # If the charstring required a new model, create # a VarData table to go with, and set vsindex. key = tuple(v is not None for v in all_cs) try: vsindex = vsindex_by_key[key] except KeyError: vsindex = _add_new_vsindex(model, key,masterSupports, vsindex_dict, vsindex_by_key, varDataList) # We do not need to check for an existing new_cs.private.vsindex, # as we know it doesn't exist yet. if vsindex != 0: new_cs.program[:0] = [vsindex, 'vsindex'] # If there is no variation in any of the charstrings, then vsindex_dict # never gets built. This is could still be needed if there is variation # in the PrivatDict, so we will build the default data for vsindex = 0. if not vsindex_dict: key = (True)*num_masters _add_new_vsindex(model, key, masterSupports, vsindex_dict, vsindex_by_key, varDataList) cvData = CVarData(varDataList=varDataList, masterSupports=masterSupports, vsindex_dict=vsindex_dict) # XXX To do: optimize use of vsindex between the PrivateDicts and # charstrings return cvData
def merge_charstrings(glyphOrder, num_masters, top_dicts, masterModel): vsindex_dict = {} vsindex_by_key = {} varDataList = [] masterSupports = [] default_charstrings = top_dicts[0].CharStrings for gid, gname in enumerate(glyphOrder): all_cs = [ _get_cs(td.CharStrings, gname) for td in top_dicts] if len([gs for gs in all_cs if gs is not None]) == 1: continue model, model_cs = masterModel.getSubModel(all_cs) # create the first pass CFF2 charstring, from # the default charstring. default_charstring = model_cs[0] var_pen = CFF2CharStringMergePen([], gname, num_masters, 0) # We need to override outlineExtractor because these # charstrings do have widths in the 'program'; we need to drop these # values rather than post assertion error for them. default_charstring.outlineExtractor = MergeOutlineExtractor default_charstring.draw(var_pen) # Add the coordinates from all the other regions to the # blend lists in the CFF2 charstring. region_cs = model_cs[1:] for region_idx, region_charstring in enumerate(region_cs, start=1): var_pen.restart(region_idx) region_charstring.outlineExtractor = MergeOutlineExtractor region_charstring.draw(var_pen) # Collapse each coordinate list to a blend operator and its args. new_cs = var_pen.getCharString( private=default_charstring.private, globalSubrs=default_charstring.globalSubrs, var_model=model, optimize=True) default_charstrings[gname] = new_cs if (not var_pen.seen_moveto) or ('blend' not in new_cs.program): # If this is not a marking glyph, or if there are no blend # arguments, then we can use vsindex 0. No need to # check if we need a new vsindex. continue # If the charstring required a new model, create # a VarData table to go with, and set vsindex. key = tuple(v is not None for v in all_cs) try: vsindex = vsindex_by_key[key] except KeyError: vsindex = _add_new_vsindex(model, key,masterSupports, vsindex_dict, vsindex_by_key, varDataList) # We do not need to check for an existing new_cs.private.vsindex, # as we know it doesn't exist yet. if vsindex != 0: new_cs.program[:0] = [vsindex, 'vsindex'] # If there is no variation in any of the charstrings, then vsindex_dict # never gets built. This could still be needed if there is variation # in the PrivatDict, so we will build the default data for vsindex = 0. if not vsindex_dict: key = (True)*num_masters _add_new_vsindex(model, key, masterSupports, vsindex_dict, vsindex_by_key, varDataList) cvData = CVarData(varDataList=varDataList, masterSupports=masterSupports, vsindex_dict=vsindex_dict) # XXX To do: optimize use of vsindex between the PrivateDicts and # charstrings return cvData
14,137
def _remove_id_from_member_of_ensembles(json_dict): """ Older PROJ versions will not recognize IDs of datum ensemble members that were added in more recent PROJ database versions. Cf https://github.com/opengeospatial/geoparquet/discussions/110 and https://github.com/OSGeo/PROJ/pull/3221 Mimicking the patch to GDAL from https://github.com/OSGeo/gdal/pull/5872 """ for key, value in json_dict.items(): if isinstance(value, dict): _remove_id_from_member_of_ensembles(value) elif key == "members" and isinstance(value, list): for member in value: member.pop("id")
def _remove_id_from_member_of_ensembles(json_dict): """ Older PROJ versions will not recognize IDs of datum ensemble members that were added in more recent PROJ database versions. Cf https://github.com/opengeospatial/geoparquet/discussions/110 and https://github.com/OSGeo/PROJ/pull/3221 Mimicking the patch to GDAL from https://github.com/OSGeo/gdal/pull/5872 """ for key, value in json_dict.items(): if isinstance(value, dict): _remove_id_from_member_of_ensembles(value) elif key == "members" and isinstance(value, list): for member in value: member.pop("id", None)
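The entire diff here is the default argument to dict.pop. On a made-up datum ensemble member without an "id" key:

member = {"name": "World Geodetic System 1984 ensemble member"}   # no "id" key

member.pop("id", None)   # returns None and leaves the dict unchanged
# member.pop("id")       # would raise KeyError: 'id'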
4,542
def _smooth_array(arr, affine, fwhm=None, ensure_finite=True, copy=True): """Smooth images by applying a Gaussian filter. Apply a Gaussian filter along the three first dimensions of arr. Parameters ---------- arr: numpy.ndarray 4D array, with image number as last dimension. 3D arrays are also accepted. affine: numpy.ndarray (4, 4) matrix, giving affine transformation for image. (3, 3) matrices are also accepted (only these coefficients are used). If fwhm='fast', the affine is not used and can be None fwhm: scalar, numpy.ndarray/tuple/list, 'fast' or None Smoothing strength, as a full-width at half maximum, in millimeters. If a nonzero scalar is given, width is identical in all 3 directions. A numpy.ndarray/list/tuple must have 3 elements, giving the FWHM along each axis. If any of the elements is zero or None, smoothing is not performed along that axis. If fwhm == 'fast', a fast smoothing will be performed with a filter [0.2, 1, 0.2] in each direction and a normalisation to preserve the local average value. If fwhm is None, no filtering is performed (useful when just removal of non-finite values is needed). ensure_finite: bool if True, replace every non-finite values (like NaNs) by zero before filtering. copy: bool if True, input array is not modified. True by default: the filtering is not performed in-place. Returns ------- filtered_arr: numpy.ndarray arr, filtered. Notes ----- This function is most efficient with arr in C order. """ # Here, we have to investigate use cases of fwhm. Particularly, if fwhm=0. # See issue #1537 if isinstance(fwhm, (int, float)) and (fwhm == 0.0): warnings.warn("The parameter 'fwhm' for smoothing is specified " "as {0}. Setting it to None " "(no smoothing will be performed)" .format(fwhm)) fwhm = None if arr.dtype.kind == 'i': if arr.dtype == np.int64: arr = arr.astype(np.float64) else: arr = arr.astype(np.float32) # We don't need crazy precision. if copy: arr = arr.copy() if ensure_finite: # SPM tends to put NaNs in the data outside the brain arr[np.logical_not(np.isfinite(arr))] = 0 if isinstance(fwhm, str) and (fwhm == 'fast'): arr = _fast_smooth_array(arr) elif fwhm is not None: fwhm = np.asarray(fwhm) fwhm = np.asarray([0. if elem is None else elem for elem in fwhm]) affine = affine[:3, :3] # Keep only the scale part. fwhm_over_sigma_ratio = np.sqrt(8 * np.log(2)) # FWHM to sigma. vox_size = np.sqrt(np.sum(affine ** 2, axis=0)) sigma = fwhm / (fwhm_over_sigma_ratio * vox_size) for n, s in enumerate(sigma): if s > 0.0: ndimage.gaussian_filter1d(arr, s, output=arr, axis=n) return arr
def _smooth_array(arr, affine, fwhm=None, ensure_finite=True, copy=True): """Smooth images by applying a Gaussian filter. Apply a Gaussian filter along the three first dimensions of arr. Parameters ---------- arr: numpy.ndarray 4D array, with image number as last dimension. 3D arrays are also accepted. affine: numpy.ndarray (4, 4) matrix, giving affine transformation for image. (3, 3) matrices are also accepted (only these coefficients are used). If fwhm='fast', the affine is not used and can be None fwhm: scalar, numpy.ndarray/tuple/list, 'fast' or None Smoothing strength, as a full-width at half maximum, in millimeters. If a nonzero scalar is given, width is identical in all 3 directions. A numpy.ndarray/list/tuple must have 3 elements, giving the FWHM along each axis. If any of the elements is zero or None, smoothing is not performed along that axis. If fwhm == 'fast', a fast smoothing will be performed with a filter [0.2, 1, 0.2] in each direction and a normalisation to preserve the local average value. If fwhm is None, no filtering is performed (useful when just removal of non-finite values is needed). ensure_finite: bool if True, replace every non-finite values (like NaNs) by zero before filtering. copy: bool if True, input array is not modified. True by default: the filtering is not performed in-place. Returns ------- filtered_arr: numpy.ndarray arr, filtered. Notes ----- This function is most efficient with arr in C order. """ # Here, we have to investigate use cases of fwhm. Particularly, if fwhm=0. # See issue #1537 if isinstance(fwhm, (int, float)) and (fwhm == 0.0): warnings.warn("The parameter 'fwhm' for smoothing is specified " "as {0}. Setting it to None " "(no smoothing will be performed)" .format(fwhm)) fwhm = None if arr.dtype.kind == 'i': if arr.dtype == np.int64: arr = arr.astype(np.float64) else: arr = arr.astype(np.float32) # We don't need crazy precision. if copy: arr = arr.copy() if ensure_finite: # SPM tends to put NaNs in the data outside the brain arr[np.logical_not(np.isfinite(arr))] = 0 if isinstance(fwhm, str) and (fwhm == 'fast'): arr = _fast_smooth_array(arr) elif fwhm is not None: fwhm = np.asarray([fwhm]).ravel() fwhm = np.asarray([0. if elem is None else elem for elem in fwhm]) affine = affine[:3, :3] # Keep only the scale part. fwhm_over_sigma_ratio = np.sqrt(8 * np.log(2)) # FWHM to sigma. vox_size = np.sqrt(np.sum(affine ** 2, axis=0)) sigma = fwhm / (fwhm_over_sigma_ratio * vox_size) for n, s in enumerate(sigma): if s > 0.0: ndimage.gaussian_filter1d(arr, s, output=arr, axis=n) return arr
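The changed line in _smooth_array wraps fwhm so that a plain scalar becomes a one-element array before the element-wise None check; the 0-d array produced by np.asarray(scalar) cannot be iterated. A short check, plus the FWHM-to-sigma constant the function uses a few lines later:

import numpy as np

np.asarray(6.0).shape               # ()   0-d: iterating it raises TypeError
np.asarray([6.0]).ravel().shape     # (1,) scalar promoted to a one-element vector
np.asarray([[4., 5., 6.]]).ravel()  # array([4., 5., 6.]) per-axis lists pass through

np.sqrt(8 * np.log(2))              # 2.3548... the FWHM-to-sigma ratio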
54,197
def _entrate_sp(x, sm_window): """ Calculate the entropy rate of a stationary Gaussian random process using spectrum estimation with smoothing window. Parameters ---------- x : sm_window : Returns ------- out : """ n = x.shape # Normalize x_sb to be unit variance x_std = np.std(np.reshape(x, (np.prod(n), 1))) if x_std < 1e-10: x_std = 1e-10 x = x / x_std if (sm_window == 1): M = [int(i) for i in np.ceil(np.array(n) / 10)] if (x.ndim >= 3): parzen_w_3 = np.zeros((2 * n[2] - 1, )) parzen_w_3[(n[2] - M[2] - 1):(n[2] + M[2])] = _parzen_win(2 * M[2] + 1) if (x.ndim >= 2): parzen_w_2 = np.zeros((2 * n[1] - 1, )) parzen_w_2[(n[1] - M[1] - 1):(n[1] + M[1])] = _parzen_win(2 * M[1] + 1) if (x.ndim >= 1): parzen_w_1 = np.zeros((2 * n[0] - 1, )) parzen_w_1[(n[0] - M[0] - 1):(n[0] + M[0])] = _parzen_win(2 * M[0] + 1) if x.ndim == 2 and min(n) == 1: # 1D xc = _autocorr(x) xc = xc * parzen_w_1 xf = fftshift(fft(xc)) elif x.ndim == 2 and min(n) != 1: # 2D xc = _autocorr(x) # default option: computes raw correlations with NO # normalization -- Matlab help on xcorr # Bias correction v1 = np.hstack((np.arange(1, n[0] + 1), np.arange(n[0] - 1, 0, -1)))[np.newaxis, :] v2 = np.hstack((np.arange(1, n[1] + 1), np.arange(n[1] - 1, 0, -1)))[np.newaxis, :] vd = np.dot(v1.T, v2) xc = xc / vd parzen_window_2D = np.dot(parzen_w_1, parzen_w_2.T) xc = xc * parzen_window_2D xf = fftshift(fft2(xc)) elif x.ndim == 3 and min(n) != 1: # 3D xc = np.zeros((2 * n[0] - 1, 2 * n[1] - 1, 2 * n[2] - 1)) for m3 in range(n[2] - 1): temp = np.zeros((2 * n[0] - 1, 2 * n[1] - 1)) for k in range(n[2] - m3): temp = temp + correlate2d(x[:, :, k + m3], x[:, :, k]) # default option: # computes raw correlations with NO normalization # -- Matlab help on xcorr xc[:, :, (n[2] - 1) - m3] = temp xc[:, :, (n[2] - 1) + m3] = temp # Bias correction v1 = np.hstack((np.arange(1, n[0] + 1), np.arange(n[0] - 1, 0, -1)))[np.newaxis, :] v2 = np.hstack((np.arange(1, n[1] + 1), np.arange(n[1] - 1, 0, -1)))[np.newaxis, :] v3 = np.arange(n[2], 0, -1) vd = np.dot(v1.T, v2) vcu = np.zeros((2 * n[0] - 1, 2 * n[1] - 1, 2 * n[2] - 1)) for m3 in range(n[2]): vcu[:, :, (n[2] - 1) - m3] = vd * v3[m3] vcu[:, :, (n[2] - 1) + m3] = vd * v3[m3] # Possible source of NAN values xc = xc / vcu parzen_window_2D = np.dot(parzen_w_1[np.newaxis, :].T, parzen_w_2[np.newaxis, :]) parzen_window_3D = np.zeros((2 * n[0] - 1, 2 * n[1] - 1, 2 * n[2] - 1)) for m3 in range(n[2] - 1): parzen_window_3D[:, :, (n[2] - 1) - m3] = np.dot( parzen_window_2D, parzen_w_3[n[2] - 1 - m3]) parzen_window_3D[:, :, (n[2] - 1) + m3] = np.dot( parzen_window_2D, parzen_w_3[n[2] - 1 + m3]) xc = xc * parzen_window_3D xf = fftshift(fftn(xc)) else: raise ValueError('Unrecognized matrix dimension.') xf = abs(xf) xf[xf < 1e-4] = 1e-4 out = 0.5 * np.log(2 * np.pi * np.exp(1)) + _sumN(np.log(abs( (xf)))) / 2 / _sumN(abs(xf)) return out
def _entrate_sp(x, sm_window): """ Calculate the entropy rate of a stationary Gaussian random process using spectrum estimation with smoothing window. Parameters ---------- x : sm_window : Returns ------- out : """ n = x.shape # Normalize x_sb to be unit variance x_std = np.std(np.reshape(x, (np.prod(n), 1))) if x_std < 1e-10: x_std = 1e-10 x = x / x_std if (sm_window == 1): M = [int(i) for i in np.ceil(np.array(n) / 10)] if (x.ndim >= 3): parzen_w_3 = np.zeros((2 * n[2] - 1, )) parzen_w_3[(n[2] - M[2] - 1):(n[2] + M[2])] = _parzen_win(2 * M[2] + 1) if (x.ndim >= 2): parzen_w_2 = np.zeros((2 * n[1] - 1, )) parzen_w_2[(n[1] - M[1] - 1):(n[1] + M[1])] = _parzen_win(2 * M[1] + 1) if (x.ndim >= 1): parzen_w_1 = np.zeros((2 * n[0] - 1, )) parzen_w_1[(n[0] - M[0] - 1):(n[0] + M[0])] = _parzen_win(2 * M[0] + 1) if x.ndim == 2 and min(n) == 1: # 1D xc = _autocorr(x) xc = xc * parzen_w_1 xf = fftshift(fft(xc)) elif x.ndim == 2 and min(n) != 1: # 2D xc = _autocorr(x) # default option: computes raw correlations with NO # normalization -- Matlab help on xcorr # Bias correction v1 = np.hstack((np.arange(1, n[0] + 1), np.arange(n[0] - 1, 0, -1)))[np.newaxis, :] v2 = np.hstack((np.arange(1, n[1] + 1), np.arange(n[1] - 1, 0, -1)))[np.newaxis, :] vd = np.dot(v1.T, v2) xc = xc / vd parzen_window_2D = np.dot(parzen_w_1, parzen_w_2.T) xc = xc * parzen_window_2D xf = fftshift(fft2(xc)) elif x.ndim == 3 and min(n) != 1: # 3D xc = np.zeros((2 * n[0] - 1, 2 * n[1] - 1, 2 * n[2] - 1)) for m3 in range(n[2] - 1): temp = np.zeros((2 * n[0] - 1, 2 * n[1] - 1)) for k in range(n[2] - m3): temp = temp + correlate2d(x[:, :, k + m3], x[:, :, k]) # default option: # computes raw correlations with NO normalization xc[:, :, (n[2] - 1) - m3] = temp xc[:, :, (n[2] - 1) + m3] = temp # Bias correction v1 = np.hstack((np.arange(1, n[0] + 1), np.arange(n[0] - 1, 0, -1)))[np.newaxis, :] v2 = np.hstack((np.arange(1, n[1] + 1), np.arange(n[1] - 1, 0, -1)))[np.newaxis, :] v3 = np.arange(n[2], 0, -1) vd = np.dot(v1.T, v2) vcu = np.zeros((2 * n[0] - 1, 2 * n[1] - 1, 2 * n[2] - 1)) for m3 in range(n[2]): vcu[:, :, (n[2] - 1) - m3] = vd * v3[m3] vcu[:, :, (n[2] - 1) + m3] = vd * v3[m3] # Possible source of NAN values xc = xc / vcu parzen_window_2D = np.dot(parzen_w_1[np.newaxis, :].T, parzen_w_2[np.newaxis, :]) parzen_window_3D = np.zeros((2 * n[0] - 1, 2 * n[1] - 1, 2 * n[2] - 1)) for m3 in range(n[2] - 1): parzen_window_3D[:, :, (n[2] - 1) - m3] = np.dot( parzen_window_2D, parzen_w_3[n[2] - 1 - m3]) parzen_window_3D[:, :, (n[2] - 1) + m3] = np.dot( parzen_window_2D, parzen_w_3[n[2] - 1 + m3]) xc = xc * parzen_window_3D xf = fftshift(fftn(xc)) else: raise ValueError('Unrecognized matrix dimension.') xf = abs(xf) xf[xf < 1e-4] = 1e-4 out = 0.5 * np.log(2 * np.pi * np.exp(1)) + _sumN(np.log(abs( (xf)))) / 2 / _sumN(abs(xf)) return out
7,513
def test_init_table_with_names_and_structured_dtype(): """Test fix for #10393""" arr = np.ones(2, dtype=np.dtype([('a', 'i4'), ('b', 'f4')])) t1 = Table(arr) for dtype in (arr.dtype, t1.dtype): t2 = Table(names=['x', 'y'], dtype=arr.dtype) assert t2.colnames == ['x', 'y'] assert str(t2['x'].dtype) == 'int32' assert str(t2['y'].dtype) == 'float32' assert len(t2) == 0
def test_init_table_with_names_and_structured_dtype(): """Test fix for #10393""" arr = np.ones(2, dtype=np.dtype([('a', 'i4'), ('b', 'f4')])) t1 = Table(arr) for dtype in (arr.dtype, t1.dtype): t2 = Table(names=['x', 'y'], dtype=dtype) assert t2.colnames == ['x', 'y'] assert str(t2['x'].dtype) == 'int32' assert str(t2['y'].dtype) == 'float32' assert len(t2) == 0
25,948
def update_vnet(cmd, instance, vnet_prefixes=None, dns_servers=None, ddos_protection=None, vm_protection=None, ddos_protection_plan=None, flowtimeout=None): # server side validation reports pretty good error message on invalid CIDR, # so we don't validate at client side AddressSpace, DhcpOptions, SubResource = cmd.get_models('AddressSpace', 'DhcpOptions', 'SubResource') if vnet_prefixes and instance.address_space: instance.address_space.address_prefixes = vnet_prefixes elif vnet_prefixes: instance.address_space = AddressSpace(address_prefixes=vnet_prefixes) if dns_servers == ['']: instance.dhcp_options.dns_servers = None elif dns_servers and instance.dhcp_options: instance.dhcp_options.dns_servers = dns_servers elif dns_servers: instance.dhcp_options = DhcpOptions(dns_servers=dns_servers) if ddos_protection is not None: instance.enable_ddos_protection = ddos_protection if vm_protection is not None: instance.enable_vm_protection = vm_protection if ddos_protection_plan == '': instance.ddos_protection_plan = None elif ddos_protection_plan is not None: instance.ddos_protection_plan = SubResource(id=ddos_protection_plan) if flowtimeout: instance.flow_timeout_in_minutes = flowtimeout return instance
def update_vnet(cmd, instance, vnet_prefixes=None, dns_servers=None, ddos_protection=None, vm_protection=None, ddos_protection_plan=None, flowtimeout=None): # server side validation reports pretty good error message on invalid CIDR, # so we don't validate at client side AddressSpace, DhcpOptions, SubResource = cmd.get_models('AddressSpace', 'DhcpOptions', 'SubResource') if vnet_prefixes and instance.address_space: instance.address_space.address_prefixes = vnet_prefixes elif vnet_prefixes: instance.address_space = AddressSpace(address_prefixes=vnet_prefixes) if dns_servers == ['']: instance.dhcp_options.dns_servers = None elif dns_servers and instance.dhcp_options: instance.dhcp_options.dns_servers = dns_servers elif dns_servers: instance.dhcp_options = DhcpOptions(dns_servers=dns_servers) if ddos_protection is not None: instance.enable_ddos_protection = ddos_protection if vm_protection is not None: instance.enable_vm_protection = vm_protection if ddos_protection_plan == '': instance.ddos_protection_plan = None elif ddos_protection_plan is not None: instance.ddos_protection_plan = SubResource(id=ddos_protection_plan) if flowtimeout is not None: instance.flow_timeout_in_minutes = flowtimeout return instance
32,625
def scan_report(client: Client, args: dict) -> CommandResults: """ Retrieves scan report for previously submitted domain or URL in context or as a File (default) """ domain = args["domain"] if not domain: raise ValueError('domain is missing') report = client._api_request(domain=domain, request_type="GET", operation="statscan") return CommandResults( outputs_prefix="QutteraWebsiteMalwareScanning.report", outputs_key_field="error", outputs=report )
def scan_report(client: Client, args: dict) -> CommandResults: """ Retrieves scan report for previously submitted domain or URL in context or as a File (default) """ domain = args.get('domain') if not domain: raise ValueError('domain is missing') report = client._api_request(domain=domain, request_type="GET", operation="statscan") return CommandResults( outputs_prefix="QutteraWebsiteMalwareScanning.report", outputs_key_field="error", outputs=report )
31,467
def fetch_incidents(client: Client, max_results: int, last_run: Dict[str, int], first_fetch_time: Optional[int], alert_status: Optional[str], min_severity: str, alert_type: Optional[str] ) -> Tuple[Dict[str, int], List[dict]]: last_fetch = last_run.get('last_fetch', None) if last_fetch is None: last_fetch = first_fetch_time else: last_fetch = int(last_fetch) latest_created_time = cast(int, last_fetch) incidents: List[Dict[str, Any]] = [] alerts = client.search_alerts( alert_type=alert_type, alert_status=alert_status, max_results=max_results, severity='' ) demisto.debug("Alerts Fetched") for alert in alerts: incident_created_time = int(alert.get('created_sec', '0')) if last_fetch: if incident_created_time <= last_fetch: continue incident_name = 'SOC Case ' + alert['reference'] demisto.debug("JSON debug alert") demisto.debug(json.dumps(alert)) incident = { 'name': incident_name, 'occurred': alert.get('created'), 'event_id': alert.get('id'), 'rawJSON': json.dumps(alert), 'severity': convert_to_demisto_severity(alert.get('severity', 'Low')), } incidents.append(incident) if incident_created_time > latest_created_time: latest_created_time = incident_created_time next_run = {'last_fetch': latest_created_time} return next_run, incidents
def fetch_incidents(client: Client, max_results: int, last_run: Dict[str, int], first_fetch_time: Optional[int], alert_status: Optional[str], min_severity: str, alert_type: Optional[str] ) -> Tuple[Dict[str, int], List[dict]]: last_fetch = last_run.get('last_fetch', None) if last_fetch is None: last_fetch = first_fetch_time else: last_fetch = int(last_fetch) latest_created_time = cast(int, last_fetch) incidents: List[Dict[str, Any]] = [] alerts = client.search_alerts( alert_type=alert_type, alert_status=alert_status, max_results=max_results, severity='' ) demisto.debug("Alerts Fetched") for alert in alerts: incident_created_time = int(alert.get('created_sec', '0')) if last_fetch: if incident_created_time <= last_fetch: continue incident_name = 'SOC Case ' + alert.get('reference') demisto.debug("JSON debug alert") demisto.debug(json.dumps(alert)) incident = { 'name': incident_name, 'occurred': alert.get('created'), 'event_id': alert.get('id'), 'rawJSON': json.dumps(alert), 'severity': convert_to_demisto_severity(alert.get('severity', 'Low')), } incidents.append(incident) if incident_created_time > latest_created_time: latest_created_time = incident_created_time next_run = {'last_fetch': latest_created_time} return next_run, incidents
34,493
def convert(args: argparse.Namespace): output = Path(args.output[0]) if not os.path.exists(output): print_error_and_exit( f"The output path {output} doesn't exist. Please make sure to specify " f"existing directory and try again." ) return for training_data_path in args.training_data: if not os.path.exists(training_data_path): print_error_and_exit( f"The training data path {training_data_path} doesn't exist " f"and will be skipped." ) loop = asyncio.get_event_loop() num_of_files_converted = 0 for file in os.listdir(training_data_path): source_path = Path(training_data_path) / file output_path = Path(output) / f"{source_path.stem}{CONVERTED_FILE_POSTFIX}" if MarkdownReader.is_markdown_nlu_file(source_path): convert_nlu(source_path, output_path, source_path) num_of_files_converted += 1 elif MarkdownStoryReader.is_markdown_story_file(source_path): loop.run_until_complete( convert_core(source_path, output_path, source_path) ) num_of_files_converted += 1 else: print_warning( f"Skipped file '{source_path}' since it's neither NLU " "nor Core training data file." ) print_info(f"Converted {num_of_files_converted} files, saved in '{output}'")
def convert(args: argparse.Namespace) -> None: output = Path(args.output[0]) if not os.path.exists(output): print_error_and_exit( f"The output path {output} doesn't exist. Please make sure to specify " f"existing directory and try again." ) return for training_data_path in args.training_data: if not os.path.exists(training_data_path): print_error_and_exit( f"The training data path {training_data_path} doesn't exist " f"and will be skipped." ) loop = asyncio.get_event_loop() num_of_files_converted = 0 for file in os.listdir(training_data_path): source_path = Path(training_data_path) / file output_path = Path(output) / f"{source_path.stem}{CONVERTED_FILE_POSTFIX}" if MarkdownReader.is_markdown_nlu_file(source_path): convert_nlu(source_path, output_path, source_path) num_of_files_converted += 1 elif MarkdownStoryReader.is_markdown_story_file(source_path): loop.run_until_complete( convert_core(source_path, output_path, source_path) ) num_of_files_converted += 1 else: print_warning( f"Skipped file '{source_path}' since it's neither NLU " "nor Core training data file." ) print_info(f"Converted {num_of_files_converted} files, saved in '{output}'")
31,703
def get_endpoint_properties(single_endpoint): status = 'Online' if single_endpoint.get('endpoint_status').lower() == 'connected' else 'Offline' is_isolated = 'No' if 'unisolated' in single_endpoint.get('is_isolated', '').lower() else 'Yes' hostname = single_endpoint['host_name'] if single_endpoint.get('host_name', '') else single_endpoint.get( 'endpoint_name') ip = single_endpoint.get('ip') return status, is_isolated, hostname, ip
def get_endpoint_properties(single_endpoint): status = 'Online' if single_endpoint.get('endpoint_status').lower() == 'connected' else 'Offline' is_isolated = 'No' if 'unisolated' in single_endpoint.get('is_isolated', '').lower() else 'Yes' hostname = single_endpoint['host_name'] if single_endpoint.get('host_name') else single_endpoint.get( 'endpoint_name') ip = single_endpoint.get('ip') return status, is_isolated, hostname, ip
58,012
def cyble_fetch_events(client, method, args):
    """
    Fetch alert details from server for creating incidents in XSOAR
    :param client: instance of client to communicate with server
    :param method: Requests method to be used
    :param args: parameters for fetching event details
    :return: events from the server
    """
    params = {
        'token': args['token'],
        'from': int(args['from']),
        'limit': int(args['limit']),
        'start_date': args['start_date'],
        'end_date': args['end_date'],
        'order_by': args['order_by']
    }

    events_url = r'/api/v2/events/all'
    result = client.get_alerts(method, events_url, params)

    incidents: List[Dict[str, Any]] = []

    if result is not None:
        eventTypes = get_event_types(client, "GET", args['token'])
        incidents = format_incidents(result, eventTypes)

    return incidents
def cyble_fetch_events(client, method, args):
    """
    Fetch alert details from server for creating incidents in XSOAR
    :param client: instance of client to communicate with server
    :param method: Requests method to be used
    :param args: parameters for fetching event details
    :return: events from the server
    """
    params = {
        'token': args.get('token'),
        'from': arg_to_number(args.get('from', '0')),
        'limit': arg_to_number(args.get('limit', '50')),
        'start_date': args.get('start_date'),
        'end_date': args.get('end_date'),
        'order_by': args.get('order_by')
    }

    events_url = r'/api/v2/events/all'
    result = client.get_alerts(method, events_url, params)

    incidents: List[Dict[str, Any]] = []

    if result is not None:
        eventTypes = get_event_types(client, "GET", args['token'])
        incidents = format_incidents(result, eventTypes)

    return incidents
3,806
def get_names(adtype): """ Returns the field names of the input datatype as a tuple. Input datatype has to have fields otherwise error is raised. Parameters ---------- adtype : dtype Input datatype Examples -------- >>> from numpy.lib import recfunctions as rfn >>> rfn.get_names(np.empty((1,), dtype=[('A', int)]).dtype) ('A',) >>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]).dtype) ('A', 'B') >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])]) >>> rfn.get_names(adtype) ('a', ('b', ('ba', 'bb'))) """ listnames = [] names = adtype.names for name in names: current = adtype[name] if current.names is not None: listnames.append((name, tuple(get_names(current)))) else: listnames.append(name) return tuple(listnames)
def get_names(adtype): """ Returns the field names of the input datatype as a tuple. Input datatype must have fields otherwise error is raised. Parameters ---------- adtype : dtype Input datatype Examples -------- >>> from numpy.lib import recfunctions as rfn >>> rfn.get_names(np.empty((1,), dtype=[('A', int)]).dtype) ('A',) >>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]).dtype) ('A', 'B') >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])]) >>> rfn.get_names(adtype) ('a', ('b', ('ba', 'bb'))) """ listnames = [] names = adtype.names for name in names: current = adtype[name] if current.names is not None: listnames.append((name, tuple(get_names(current)))) else: listnames.append(name) return tuple(listnames)
58,388
def resolve_invite_link(link): """ Resolves the given invite link. Returns a tuple of ``(link creator user id, global chat id, random int)``. Note that for broadcast channels, the link creator user ID will be zero to protect their identity. Normal chats and megagroup channels will have such ID. Note that the chat ID may not be accurate for chats with a link that were upgraded to megagroup, since the link can remain the same, but the chat ID will be correct once a new link is generated. """ link_hash, is_link = parse_username(link) if not is_link: # Perhaps the user passed the link hash directly link_hash = link # Little known fact, but invite links with a # hex-string of bytes instead of base64 also works. if re.match(r'[a-fA-F\d]{32}', link_hash): payload = bytes.fromhex(link_hash) else: payload = _decode_telegram_base64(link_hash) try: if len(payload) == 12: # New format return 0, *struct.unpack('>LQ', payload) elif len(payload) == 16: # Old Format return struct.unpack('>LLQ', payload) else: raise TypeError except (struct.error, TypeError): return None, None, None
def resolve_invite_link(link): """ Resolves the given invite link. Returns a tuple of ``(link creator user id, global chat id, random int)``. Note that for broadcast channels, the link creator user ID will be zero to protect their identity. Normal chats and megagroup channels will have such ID. Note that the chat ID may not be accurate for chats with a link that were upgraded to megagroup, since the link can remain the same, but the chat ID will be correct once a new link is generated. """ link_hash, is_link = parse_username(link) if not is_link: # Perhaps the user passed the link hash directly link_hash = link # Little known fact, but invite links with a # hex-string of bytes instead of base64 also works. if re.match(r'[a-fA-F\d]{32}', link_hash): payload = bytes.fromhex(link_hash) else: payload = _decode_telegram_base64(link_hash) try: if len(payload) == 12: # New format return 0, *struct.unpack('>LQ', payload) elif len(payload) == 16: # Old Format return struct.unpack('>LLQ', payload) else: pass except (struct.error, TypeError): pass return None, None, None
20,487
def update_mail_alias_for_moves(env): # Done here just in case alias customizations exist journals = env["account.journal"].search([("alias_id", "!=", False)]) for journal in journals: journal.alias_id.unlink() journal._update_mail_alias({"alias_name": journal.alias_name})
def update_mail_alias_for_moves(env): # Done here just in case alias customizations exist journals = env["account.journal"].with_context(active_test=False).search([("alias_id", "!=", False)]) for journal in journals: journal.alias_id.unlink() journal._update_mail_alias({"alias_name": journal.alias_name})
50,664
def check_support(vevent: icalendar.cal.Event, href: str, calendar: str): """test if all icalendar features used in this event are supported, raise `UpdateFailed` otherwise. :param vevent: event to test :param href: href of this event, only used for logging """ rec_id = vevent.get(RECURRENCE_ID) if rec_id is not None and rec_id.params.get('RANGE') == THISANDPRIOR: raise UpdateFailed( 'The parameter `THISANDPRIOR` is not (and will not be) ' 'supported by khal (as applications supporting the latest ' 'standard MUST NOT create those. Therefore event {} from ' 'calendar {} will not be shown in khal' .format(href, calendar) ) rdate = vevent.get('RDATE') if rdate is not None and hasattr(rdate, 'params') and rdate.params.get('VALUE') == 'PERIOD': raise UpdateFailed( '`RDATE;VALUE=PERIOD` is currently not supported by khal. ' 'Therefore event {} from calendar {} will not be shown in khal.\n' 'Please post exemplary events (please remove any private data) ' 'to https://github.com/pimutils/khal/issues/152 .' .format(href, calendar) )
def check_support(vevent: icalendar.cal.Event, href: str, calendar: str): """test if all icalendar features used in this event are supported, raise `UpdateFailed` otherwise. :param vevent: event to test :param href: href of this event, only used for logging """ rec_id = vevent.get(RECURRENCE_ID) if rec_id is not None and rec_id.params.get('RANGE') == THISANDPRIOR: raise UpdateFailed( 'The parameter `THISANDPRIOR` is not (and will not be) ' 'supported by khal (as applications supporting the latest ' f'standard MUST NOT create those. Therefore event {href} from ' f'calendar {calendar} will not be shown in khal' ) rdate = vevent.get('RDATE') if rdate is not None and hasattr(rdate, 'params') and rdate.params.get('VALUE') == 'PERIOD': raise UpdateFailed( '`RDATE;VALUE=PERIOD` is currently not supported by khal. ' 'Therefore event {} from calendar {} will not be shown in khal.\n' 'Please post exemplary events (please remove any private data) ' 'to https://github.com/pimutils/khal/issues/152 .' .format(href, calendar) )
32,494
def pagination(response: dict, args: Dict[str, Any]) -> Tuple: """ Executing Manual Pagination (using the page and page size arguments) or Automatic Pagination (display a number of total results). Args: response (dict): API response. page (int, optional): Page number of paginated results. page_size (int, optional): Number of ip-list per page. limit (int, optional): The maximum number of records to retrieve. Returns: dict: output and pagination message for Command Results. """ page = arg_to_number(args.get('page')) page_size = arg_to_number(args.get('page_size')) limit = arg_to_number(args.get('limit')) validate_pagination_arguments(page, page_size, limit) output = response if page and page_size: if page_size < len(response): first_item = page_size * (page - 1) output = response[first_item:first_item + page_size] else: output = response[:page_size] pagination_message = f'Showing page {page} out of others that may exist. \n Current page size: {page_size}' else: output = response[:limit] pagination_message = f'Showing {len(output)} rows out of {len(response)}.' return output, pagination_message
def pagination(response: dict, args: Dict[str, Any]) -> Tuple: """ Executing Manual Pagination (using the page and page size arguments) or Automatic Pagination (display a number of total results). Args: response (dict): API response. page (int, optional): Page number of paginated results. page_size (int, optional): Number of ip-list per page. limit (int, optional): The maximum number of records to retrieve. Returns: dict: output and pagination message for Command Results. """ page = arg_to_number(args.get('page')) page_size = arg_to_number(args.get('page_size')) limit = arg_to_number(args.get('limit')) validate_pagination_arguments(page, page_size, limit) output = response if page and page_size: if page_size < len(response): first_item = page_size * (page - 1) output = response[first_item:first_item + page_size] else: output = response[:page_size] pagination_message = f'Showing page {page}. \n Current page size: {page_size}' else: output = response[:limit] pagination_message = f'Showing {len(output)} rows out of {len(response)}.' return output, pagination_message
17,705
def _parse_overrides_from_cmdline(cmdlineargs): """parse config overrides provided in command line Might exit(3) the entire process if value is not assigned""" # this expression is deliberately loose as gitconfig offers # quite some flexibility -- this is just meant to catch stupid # errors: we need a section, a variable, and a value at minimum # otherwise we break our own config parsing helpers # https://github.com/datalad/datalad/issues/3451 assign_expr = re.compile(r'[^\s]+\.[^\s]+=[\S]+') unset_expr = re.compile(r':[^\s]+\.[^\s=]+') noassign = [ o for o in cmdlineargs.cfg_overrides if not (assign_expr.match(o) or unset_expr.match(o)) ] if noassign: lgr.error( "Configuration override without section/variable " "or unset marker or value assignment " "(must be '[:]section.variable[=value]'): %s", noassign) sys.exit(3) overrides = dict( [o[1:], None] if o.startswith(':') else o.split('=', 1) for o in cmdlineargs.cfg_overrides ) return overrides
def _parse_overrides_from_cmdline(cmdlineargs): """parse config overrides provided in command line Might exit(3) the entire process if value is not assigned""" # this expression is deliberately loose as gitconfig offers # quite some flexibility -- this is just meant to catch stupid # errors: we need a section, a variable, and a value at minimum # otherwise we break our own config parsing helpers # https://github.com/datalad/datalad/issues/3451 assign_expr = re.compile(r'[^\s]+\.[^\s]+=[\S]+') unset_expr = re.compile(r':[^\s]+\.[^\s=]+') noassign = [ o for o in cmdlineargs.cfg_overrides if not (assign_expr.match(o) or unset_expr.match(o)) ] if noassign: lgr.error( "Configuration override without section/variable " "or unset marker or value assignment " "(must be '(:section.variable|section.variable=value)'): %s", noassign) sys.exit(3) overrides = dict( [o[1:], None] if o.startswith(':') else o.split('=', 1) for o in cmdlineargs.cfg_overrides ) return overrides
46,174
def is_colinear(points):
    """Determines if a list of 2D points are colinear

    Parameters
    -------
    points : np.ndarray
        Nx2 array of points to be tested for colinearity

    Returns
    -------
    val : bool
        True if all points are colinear, False otherwise.
    """
    if len(points) < 3:
        return True

    for p in points[2:]:
        if orientation(points[0], points[1], p) != 0:
            return False

    return True
def is_colinear(points):
    """Determines if a list of 2D points are colinear

    Parameters
    -------
    points : (N, 2) array
        Nx2 array of points to be tested for colinearity

    Returns
    -------
    val : bool
        True if all points are colinear, False otherwise.
    """
    if len(points) < 3:
        return True

    for p in points[2:]:
        if orientation(points[0], points[1], p) != 0:
            return False

    return True
47,901
def read_yaml(file: Union[str, Path], *args, ordered=True, **kwargs): with get_path(file).open() as content: loader = yaml.SafeLoader return yaml.load(content, *args, Loader=loader, **kwargs)
def read_yaml(file: Union[str, Path], *args, ordered=True, **kwargs): with get_path(file).open() as content: return yaml.safe_load(content, *args, **kwargs)
35,263
def dist(x1, x2=None, metric='sqeuclidean'): """Compute distance between samples in x1 and x2 .. note:: This function is backend-compatible and will work on arrays from all compatible backends. Parameters ---------- x1 : array-like, shape (n1,d) matrix with n1 samples of size d x2 : array-like, shape (n2,d), optional matrix with n2 samples of size d (if None then x2=x1) metric : str | callable, optional 'sqeuclidean' or 'euclidean' on all backends. On numpy the function also accepts from the scipy.spatial.distance.cdist function : 'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'wminkowski', 'yule'. Returns ------- M : array-like (n1,n2) distance matrix computed with given metric """ if x2 is None: x2 = x1 if metric == "sqeuclidean": return euclidean_distances(x1, x2, squared=True) elif metric == "euclidean": return euclidean_distances(x1, x2, squared=False) else: if not get_backend(x1, x2).__name__ == 'numpy': raise NotImplementedError() else: return cdist(x1, x2, metric=metric)
def dist(x1, x2=None, metric='sqeuclidean'): """Compute distance between samples in x1 and x2 .. note:: This function is backend-compatible and will work on arrays from all compatible backends. Parameters ---------- x1 : array-like, shape (n1,d) matrix with n1 samples of size d x2 : array-like, shape (n2,d), optional matrix with n2 samples of size d (if None then x2=x1) metric : str | callable, optional 'sqeuclidean' or 'euclidean' on all backends. On numpy the function also accepts from the scipy.spatial.distance.cdist function : 'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'wminkowski', 'yule'. Returns ------- M : array-like, shape (n1, n2) distance matrix computed with given metric """ if x2 is None: x2 = x1 if metric == "sqeuclidean": return euclidean_distances(x1, x2, squared=True) elif metric == "euclidean": return euclidean_distances(x1, x2, squared=False) else: if not get_backend(x1, x2).__name__ == 'numpy': raise NotImplementedError() else: return cdist(x1, x2, metric=metric)
17,335
def test_counting_scheduler(): data = da.from_array(np.random.RandomState(0).randn(4, 6), chunks=(2, 2)) sched = CountingScheduler(0) with raises_regex(RuntimeError, "To many computes"): with _set_dask_scheduler(sched): data.compute() assert sched.total_computes == 1
def test_counting_scheduler(): data = da.from_array(np.random.RandomState(0).randn(4, 6), chunks=(2, 2)) sched = CountingScheduler(max_computes=0) with raises_regex(RuntimeError, "To many computes"): with _set_dask_scheduler(sched): data.compute() assert sched.total_computes == 1
40,541
def get_custom_locations_oid(cmd, cl_oid): try: sp_graph_client = get_graph_client_service_principals(cmd.cli_ctx) sub_filters = [] sub_filters.append("displayName eq '{}'".format("Custom Locations RP")) result = list(sp_graph_client.list(filter=(' and '.join(sub_filters)))) if len(result) != 0: if cl_oid is not None and cl_oid != result[0].object_id: logger.warning("The 'Custom-locations' OID passed is different from the actual OID({}) of the Custom Locations RP app. Proceeding with the correct one...".format(result[0].object_id)) return result[0].object_id # Using the fetched OID if cl_oid is None: logger.warning("Unable to fetch oid of 'custom-locations' app. Proceeding without enabling the feature.") telemetry.set_exception(exception='Unable to fetch oid of custom locations app.', fault_type=consts.Custom_Locations_OID_Fetch_Fault_Type, summary='Unable to fetch oid for custom locations app.') return "" else: return cl_oid except Exception as e: log_string = "Unable to fetch oid of 'custom-locations' app. " telemetry.set_exception(exception=e, fault_type=consts.Custom_Locations_OID_Fetch_Fault_Type, summary='Unable to fetch oid for custom locations app.') if cl_oid: log_string += "Proceeding with the OID passed to enable the 'custom-locations' feature." logger.warning(log_string) return cl_oid log_string += "Proceeding without enabling the feature. " + str(e) logger.warning(log_string) return ""
def get_custom_locations_oid(cmd, cl_oid): try: sp_graph_client = get_graph_client_service_principals(cmd.cli_ctx) sub_filters = [] sub_filters.append("displayName eq '{}'".format("Custom Locations RP")) result = list(sp_graph_client.list(filter=(' and '.join(sub_filters)))) if len(result) != 0: if cl_oid is not None and cl_oid != result[0].object_id: logger.warning("The 'Custom-locations' OID passed is different from the actual OID({}) of the Custom Locations RP app. Proceeding with the correct one...".format(result[0].object_id)) return result[0].object_id # Using the fetched OID if cl_oid is None: logger.warning("Failed to enable Custom Locations feature on the cluster. Unable to fetch Object ID of Azure AD application used by Azure Arc service. Try enabling the feature by passing the --custom-locations-object-id parameter directly. Learn more at https://aka.ms/CustomLocationsObjectID") telemetry.set_exception(exception='Unable to fetch oid of custom locations app.', fault_type=consts.Custom_Locations_OID_Fetch_Fault_Type, summary='Unable to fetch oid for custom locations app.') return "" else: return cl_oid except Exception as e: log_string = "Unable to fetch oid of 'custom-locations' app. " telemetry.set_exception(exception=e, fault_type=consts.Custom_Locations_OID_Fetch_Fault_Type, summary='Unable to fetch oid for custom locations app.') if cl_oid: log_string += "Proceeding with the OID passed to enable the 'custom-locations' feature." logger.warning(log_string) return cl_oid log_string += "Proceeding without enabling the feature. " + str(e) logger.warning(log_string) return ""
33,999
def function(func): raise DeprecationWarning( "wrapping {} with tune.function() is no " "longer needed".format(func) )
def function(func): raise DeprecationWarning( "wrapping {} with tune.function() is no longer needed".format(func) )
4,251
def _set_sfreq(ft_struct): """Set the sample frequency.""" try: sfreq = ft_struct['fsample'] except KeyError: try: time = ft_struct['time'] except KeyError: raise ValueError('No Source for sfreq found') else: t1, t2 = float(time[0]), float(time[1]) sfreq = 1 / abs(t1 - t2) try: sfreq = float(sfreq) except TypeError: warn('FieldTrip structure contained multiple sample rates, trying the ' f'first of:\n{sfreq}') sfreq = float(sfreq.ravel()[0]) return sfreq
def _set_sfreq(ft_struct): """Set the sample frequency.""" try: sfreq = ft_struct['fsample'] except KeyError: try: time = ft_struct['time'] except KeyError: raise ValueError('No Source for sfreq found') else: t1, t2 = float(time[0]), float(time[1]) sfreq = 1 / abs(t1 - t2) try: sfreq = float(sfreq) except TypeError: warn('FieldTrip structure contained multiple sample rates, trying the ' f'first of:\n{sfreq} Hz') sfreq = float(sfreq.ravel()[0]) return sfreq
57,160
def open_new_tab_in_browser_if_possible(url): """Opens the given URL in a new browser tab, if possible.""" if USER_PREFERENCES['open_new_tab_in_browser'] is None: print( '\nDo you want the url to be opened in the browser? ' 'Confirm by entering y/ye/yes.') USER_PREFERENCES['open_new_tab_in_browser'] = input() if USER_PREFERENCES['open_new_tab_in_browser'] not in ['y', 'ye', 'yes']: print('Please open the following link in browser: %s' % url) return browser_cmds = ['brave', 'chromium-browser', 'google-chrome', 'firefox'] print( 'Please choose your default browser from the list using a number. ' 'It will be given a preference over other available options.') for index, browser in enumerate(browser_cmds): print('%s). %s' % (index + 1, browser)) default_index = int(input().strip()) - 1 ordered_browser_cmds = ( [browser_cmds[default_index]] + browser_cmds[:default_index] + browser_cmds[default_index + 1:]) for cmd in ordered_browser_cmds: if subprocess.call(['which', cmd]) == 0: subprocess.check_call([cmd, url]) return print('******************************************************************') print( 'WARNING: Unable to open browser. Please manually open the following') print('URL in a browser window, then press Enter to confirm.') print('') print(' %s' % url) print('') print('NOTE: To get rid of this message, open scripts/common.py and fix') print('the function open_new_tab_in_browser_if_possible() to work on your') print('system.') input()
def open_new_tab_in_browser_if_possible(url): """Opens the given URL in a new browser tab, if possible.""" if USER_PREFERENCES['open_new_tab_in_browser'] is None: print( '\nDo you want the url to be opened in the browser? ' 'Confirm by entering y/ye/yes.') USER_PREFERENCES['open_new_tab_in_browser'] = input() if USER_PREFERENCES['open_new_tab_in_browser'] not in ['y', 'ye', 'yes']: print('Please open the following link in browser: %s' % url) return browser_cmds = ['brave', 'chromium-browser', 'google-chrome', 'firefox'] print( 'Please choose your default browser from the list using a number. ' 'It will be given a preference over other available options.' ) for index, browser in enumerate(browser_cmds): print('%s). %s' % (index + 1, browser)) default_index = int(input().strip()) - 1 ordered_browser_cmds = ( [browser_cmds[default_index]] + browser_cmds[:default_index] + browser_cmds[default_index + 1:]) for cmd in ordered_browser_cmds: if subprocess.call(['which', cmd]) == 0: subprocess.check_call([cmd, url]) return print('******************************************************************') print( 'WARNING: Unable to open browser. Please manually open the following') print('URL in a browser window, then press Enter to confirm.') print('') print(' %s' % url) print('') print('NOTE: To get rid of this message, open scripts/common.py and fix') print('the function open_new_tab_in_browser_if_possible() to work on your') print('system.') input()
36,026
def test_entry_point_validation(setup_codes): """Verify that when an `entry_point` is defined in the constructor, it is respected in the validation.""" entity_01, entity_02, entity_03 = setup_codes param = CodeParamType(entry_point='arithmetic.add') identifier = '{}'.format(entity_02.pk) result = param.convert(identifier, None, None) assert result.uuid == entity_02.uuid with pytest.raises(click.BadParameter): identifier = '{}'.format(entity_03.pk) result = param.convert(identifier, None, None)
def test_entry_point_validation(setup_codes): """Verify that when an `entry_point` is defined in the constructor, it is respected in the validation.""" _, entity_02, entity_03 = setup_codes param = CodeParamType(entry_point='arithmetic.add') identifier = '{}'.format(entity_02.pk) result = param.convert(identifier, None, None) assert result.uuid == entity_02.uuid with pytest.raises(click.BadParameter): identifier = '{}'.format(entity_03.pk) result = param.convert(identifier, None, None)
38,260
def read_bool_setting(settings_obj: sublime.Settings, key: str, default: bool) -> bool: val = settings_obj.get(key) if isinstance(val, bool): return val else: return default
def read_bool_setting(settings_obj: sublime.Settings, key: str, default: bool) -> bool: val = settings_obj.get(key) return val if isinstance(val, bool) else default
13,262
def pure_func(arg1, arg2): ...
def pure_func(arg1, arg2): pass
25,612
def _broadcasting_elementwise_op(op, a, b): r""" Apply binary operation `op` to every pair of in input tensors `a` and `b`. :param op: binary operator on tensors, e.g. tf.add, tf.substract :param a: tf.Tensor, shape [n_1, ..., n_a] :param b: tf.Tensor, shape [m_1, ..., m_b] :return: tf.Tensor, shape [n_1, ..., n_a, m_1, ..., m_b] """ flatres = op(tf.reshape(a, [-1, 1]), tf.reshape(b, [1, -1])) return tf.reshape(flatres, tf.concat([tf.shape(a), tf.shape(b)], 0))
def _broadcasting_elementwise_op(op, a, b): r""" Apply binary operation `op` to every pair in tensors `a` and `b`. :param op: binary operator on tensors, e.g. tf.add, tf.substract :param a: tf.Tensor, shape [n_1, ..., n_a] :param b: tf.Tensor, shape [m_1, ..., m_b] :return: tf.Tensor, shape [n_1, ..., n_a, m_1, ..., m_b] """ flatres = op(tf.reshape(a, [-1, 1]), tf.reshape(b, [1, -1])) return tf.reshape(flatres, tf.concat([tf.shape(a), tf.shape(b)], 0))
20,000
def test_plantcv_transform_find_color_card_bad_background_input(): # Load rgb image rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG)) with pytest.raises(RuntimeError): pcv.params.debug = None _, _, _ = pcv.transform.find_color_card(img=rgb_img, background='lite')
def test_plantcv_transform_find_color_card_bad_background_input(): # Load rgb image rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG)) with pytest.raises(RuntimeError): pcv.params.debug = None _, _, _ = pcv.transform.find_color_card(rgb_img=rgb_img, background='lite')
17,043
def save_json( filename: str, data: list | dict, private: bool = False, *, encoder: type[json.JSONEncoder] | None = None, atomic_writes: bool = False, ) -> None: """Save JSON data to a file. Returns True on success. """ dump: Callable[[Any], Any] try: if encoder: # For backwards compatibility, if they pass in the # default json encoder we use _orjson_default_encoder # which is the orjson equivalent to the default encoder. if encoder == DefaultHASSJSONEncoder: dump = _orjson_default_encoder json_data = _orjson_default_encoder(data) # If they pass a custom encoder that is not the # DefaultHASSJSONEncoder, we use the slow path of json.dumps else: dump = json.dumps json_data = json.dumps(data, indent=2, cls=encoder) else: dump = _orjson_encoder json_data = _orjson_encoder(data) except TypeError as error: msg = f"Failed to serialize to JSON: {filename}. Bad data at {format_unserializable_data(find_paths_unserializable_data(data, dump=dump))}" _LOGGER.error(msg) raise SerializationError(msg) from error if atomic_writes: write_utf8_file_atomic(filename, json_data, private) else: write_utf8_file(filename, json_data, private)
def save_json( filename: str, data: list | dict, private: bool = False, *, encoder: type[json.JSONEncoder] | None = None, atomic_writes: bool = False, ) -> None: """Save JSON data to a file. Returns True on success. """ dump: Callable[[Any], Any] try: if encoder: # For backwards compatibility, if they pass in the # default json encoder we use _orjson_default_encoder # which is the orjson equivalent to the default encoder. if encoder is DefaultHASSJSONEncoder: dump = _orjson_default_encoder json_data = _orjson_default_encoder(data) # If they pass a custom encoder that is not the # DefaultHASSJSONEncoder, we use the slow path of json.dumps else: dump = json.dumps json_data = json.dumps(data, indent=2, cls=encoder) else: dump = _orjson_encoder json_data = _orjson_encoder(data) except TypeError as error: msg = f"Failed to serialize to JSON: {filename}. Bad data at {format_unserializable_data(find_paths_unserializable_data(data, dump=dump))}" _LOGGER.error(msg) raise SerializationError(msg) from error if atomic_writes: write_utf8_file_atomic(filename, json_data, private) else: write_utf8_file(filename, json_data, private)
12,789
def build_merkle_tree(fileinfodict): """ Create a Merkle tree from the snapshot fileinfo and writes it to individual snapshot files Returns the root and leaves """ # We will build the merkle tree starting with the leaf nodes. Each # leaf contains snapshot information for a single metadata file. leaves = [] nodes = [] for name, contents in sorted(fileinfodict.items()): if name.endswith(".json"): name = os.path.splitext(name)[0] leaves.append(Leaf(name, contents)) # Starting with the leaves, combine pairs of nodes to build the tree. # For each pair of nodes, set the first to a left child and the second # as a right child. Add the resulting parent node to new_nodes. On # the next iteration, pair the nodes in new_nodes. In order to handle # an odd number of nodes on any iteration, if this is the last node # in an odd numbered list (there is no next node), add this node to # new_nodes. End the loop when there is one remaining current_node # This last node will be the root of the tree. current_nodes = leaves while(len(current_nodes) > 1): new_nodes = [] for i in range(0, len(current_nodes), 2): # If there are an odd number of nodes and this is the last # node, add this node to the next level. if i + 1 >= len(current_nodes): new_nodes.append(current_nodes[i]) # Otherwise, use the next two nodes to build a new node. else: n = InternalNode(current_nodes[i], current_nodes[i+1]) # Add this node to the next level, and to a list of all nodes new_nodes.append(n) nodes.append(n) current_nodes = new_nodes # The only node remaining in current_nodes will be the root node. root = current_nodes[0] # Return the root node and the leaves. The root hash must be used along with the # path to verify the tree. The root hash should be securely sent to # each client. To do so, we will add it to the timestamp metadata. # The leaves will be used to find the path to each leaf and send # this path to the client for verification return root, leaves
def _build_merkle_tree(fileinfodict): """ Create a Merkle tree from the snapshot fileinfo and writes it to individual snapshot files Returns the root and leaves """ # We will build the merkle tree starting with the leaf nodes. Each # leaf contains snapshot information for a single metadata file. leaves = [] nodes = [] for name, contents in sorted(fileinfodict.items()): if name.endswith(".json"): name = os.path.splitext(name)[0] leaves.append(Leaf(name, contents)) # Starting with the leaves, combine pairs of nodes to build the tree. # For each pair of nodes, set the first to a left child and the second # as a right child. Add the resulting parent node to new_nodes. On # the next iteration, pair the nodes in new_nodes. In order to handle # an odd number of nodes on any iteration, if this is the last node # in an odd numbered list (there is no next node), add this node to # new_nodes. End the loop when there is one remaining current_node # This last node will be the root of the tree. current_nodes = leaves while(len(current_nodes) > 1): new_nodes = [] for i in range(0, len(current_nodes), 2): # If there are an odd number of nodes and this is the last # node, add this node to the next level. if i + 1 >= len(current_nodes): new_nodes.append(current_nodes[i]) # Otherwise, use the next two nodes to build a new node. else: n = InternalNode(current_nodes[i], current_nodes[i+1]) # Add this node to the next level, and to a list of all nodes new_nodes.append(n) nodes.append(n) current_nodes = new_nodes # The only node remaining in current_nodes will be the root node. root = current_nodes[0] # Return the root node and the leaves. The root hash must be used along with the # path to verify the tree. The root hash should be securely sent to # each client. To do so, we will add it to the timestamp metadata. # The leaves will be used to find the path to each leaf and send # this path to the client for verification return root, leaves
38,293
def _read_fluid_file_descriptor(fname): """ Read a file descriptor and returns the array of the fields found. """ # Mapping mapping = [ ('density', 'Density'), ('velocity_x', 'x-velocity'), ('velocity_y', 'y-velocity'), ('velocity_z', 'z-velocity'), ('pressure', 'Pressure'), ('metallicity', 'Metallicity'), ] #Magnetic field file descriptors magnetic=np.array([['B_{0}_{1}'.format(dim,side) for side in ['left','right']] for dim in ['x','y','z']]).ravel() # Convert in dictionary mapping = {k: v for k, v in mapping} with open(fname, 'r') as f: line = f.readline() tmp = VERSION_RE.match(line) mylog.debug('Reading fluid file descriptor %s.' % fname) if not tmp: return [] version = int(tmp.group(1)) if version == 1: # Skip one line (containing the headers) line = f.readline() fields = [] for i, line in enumerate(f.readlines()): tmp = VAR_DESC_RE.match(line) if not tmp: raise YTFileNotParseable(fname, i+1) # ivar = tmp.group(1) varname = tmp.group(2) dtype = tmp.group(3) if varname in mapping: varname = mapping[varname] elif varname in magnetic: varname = varname else: varname = 'hydro_%s' % varname fields.append((varname, dtype)) else: mylog.error('Version %s', version) raise YTParticleOutputFormatNotImplemented() return fields
def _read_fluid_file_descriptor(fname): """ Read a file descriptor and returns the array of the fields found. """ # Mapping mapping = [ ('density', 'Density'), ('velocity_x', 'x-velocity'), ('velocity_y', 'y-velocity'), ('velocity_z', 'z-velocity'), ('pressure', 'Pressure'), ('metallicity', 'Metallicity'), ] #Magnetic field file descriptors magnetic = ['B_{0}_{1}'.format(dim,side) for side in ['left','right'] for dim in ['x','y','z']] # Convert in dictionary mapping = {k: v for k, v in mapping} with open(fname, 'r') as f: line = f.readline() tmp = VERSION_RE.match(line) mylog.debug('Reading fluid file descriptor %s.' % fname) if not tmp: return [] version = int(tmp.group(1)) if version == 1: # Skip one line (containing the headers) line = f.readline() fields = [] for i, line in enumerate(f.readlines()): tmp = VAR_DESC_RE.match(line) if not tmp: raise YTFileNotParseable(fname, i+1) # ivar = tmp.group(1) varname = tmp.group(2) dtype = tmp.group(3) if varname in mapping: varname = mapping[varname] elif varname in magnetic: varname = varname else: varname = 'hydro_%s' % varname fields.append((varname, dtype)) else: mylog.error('Version %s', version) raise YTParticleOutputFormatNotImplemented() return fields
2,201
def test_nowarn_if_metric_no_bool(): # make sure no conversion warning is raised if # metric isn't boolean, no matter what the data type is pairwise_metric = 'minkowski' X_bool = np.random.randint(2, size=(5, 2), dtype=np.bool_) X_num = np.random.randint(2, size=(5, 2), dtype=np.int32) with pytest.warns(None) as warn_record: # fit boolean data OPTICS(metric=pairwise_metric).fit(X_bool) # fit numeric data OPTICS(metric=pairwise_metric).fit(X_num) assert len(warn_record) == 0
def test_nowarn_if_metric_no_bool(): # make sure no conversion warning is raised if # metric isn't boolean, no matter what the data type is pairwise_metric = 'minkowski' X_bool = np.random.randint(2, size=(5, 2), dtype=bool) X_num = np.random.randint(2, size=(5, 2), dtype=np.int32) with pytest.warns(None) as warn_record: # fit boolean data OPTICS(metric=pairwise_metric).fit(X_bool) # fit numeric data OPTICS(metric=pairwise_metric).fit(X_num) assert len(warn_record) == 0
48,470
def get_best_parsable_locale(module, preferences=None):
    '''
        Attempts to return the best possible locale for parsing output in english
        useful for scraping output with i18n tools

        :param module: an AnsibleModule instance
        :param preferences: A list of preferred locales, in order of preference
        :returns: The first matched preferred locale or 'C' which is the default
    '''

    found = 'C'  # default posix, its ascii but always there
    if preferences is None:
        # new posix standard or english cause those are messages core team expects
        # yes, last 2 are same but some systems are weird
        preferences = ['C.utf8', 'en_US.utf8', 'C', 'POSIX']

    rc, out, err = module.run_command(['locale', '-a'])
    if rc == 0:
        if out:
            available = out.strip().splitlines()
        else:
            module.warn("No output from locale, defaulting to C, rc=%s: %s" % (rc, to_native(err)))
    else:
        module.warn("Unable to get locale information, defaulting to C, rc=%s: %s" % (rc, to_native(err)))

    if available:
        for pref in preferences:
            if pref in available:
                found = pref
                break

    return found
def get_best_parsable_locale(module, preferences=None):
    '''
        Attempts to return the best possible locale for parsing output in english
        useful for scraping output with i18n tools

        :param module: an AnsibleModule instance
        :param preferences: A list of preferred locales, in order of preference
        :returns: The first matched preferred locale or 'C' which is the default
    '''

    found = 'C'  # default posix, its ascii but always there
    if preferences is None:
        # new POSIX standard or English cause those are messages core team expects
        # yes, the last 2 are the same but some systems are weird
        preferences = ['C.utf8', 'en_US.utf8', 'C', 'POSIX']

    rc, out, err = module.run_command(['locale', '-a'])
    if rc == 0:
        if out:
            available = out.strip().splitlines()
        else:
            module.warn("No output from locale, defaulting to C, rc=%s: %s" % (rc, to_native(err)))
    else:
        module.warn("Unable to get locale information, defaulting to C, rc=%s: %s" % (rc, to_native(err)))

    if available:
        for pref in preferences:
            if pref in available:
                found = pref
                break

    return found
22,465
def get_update_permission_payload(payload: Dict[str, Any]) -> UpdateDatasetPermissionsPayload: """Coverts the generic payload dictionary into a UpdateDatasetPermissionsPayload model with custom parsing. This is an attempt on supporting multiple aliases for the permissions params.""" # There are several allowed names for the same role list parameter, i.e.: `access`, `access_ids`, `access_ids[]` # The `access_ids[]` name is not pydantic friendly, so this will be modelled as an alias but we can only set one alias # TODO: Maybe we should choose only one way/naming and deprecate the others? payload["access_ids"] = payload.get("access_ids[]") or payload.get("access") payload["manage_ids"] = payload.get("manage_ids[]") or payload.get("manage") payload["modify_ids"] = payload.get("modify_ids[]") or payload.get("modify") update_payload = UpdateDatasetPermissionsPayload(**payload) return update_payload
def get_update_permission_payload(payload: Dict[str, Any]) -> UpdateDatasetPermissionsPayload: """Converts the generic payload dictionary into a UpdateDatasetPermissionsPayload model with custom parsing. This is an attempt on supporting multiple aliases for the permissions params.""" # There are several allowed names for the same role list parameter, i.e.: `access`, `access_ids`, `access_ids[]` # The `access_ids[]` name is not pydantic friendly, so this will be modelled as an alias but we can only set one alias # TODO: Maybe we should choose only one way/naming and deprecate the others? payload["access_ids"] = payload.get("access_ids[]") or payload.get("access") payload["manage_ids"] = payload.get("manage_ids[]") or payload.get("manage") payload["modify_ids"] = payload.get("modify_ids[]") or payload.get("modify") update_payload = UpdateDatasetPermissionsPayload(**payload) return update_payload
57,019
def _pseudonymize_blog_posts_models(pending_deletion_request): """Pseudonymize the blog post models for the user with user_id. Args: pending_deletion_request: PendingDeletionRequest. The pending deletion request object to be saved in the datastore. """ user_id = pending_deletion_request.user_id # We want to preserve the same pseudonymous user ID on all the models # related to one blog post. So we collect all the users' blog # post models and blog post summary models then # we generate a pseudonymous user ID and replace the user ID # with that pseudonymous user ID in all the models. blog_post_model_class = blog_models.BlogPostModel blog_post_models = blog_post_model_class.query( datastore_services.any_of( blog_post_model_class.author_id == user_id) ).fetch() blogpost_ids = set([model.id for model in blog_post_models]) blog_post_summary_model_class = blog_models.BlogPostSummaryModel blog_post_summary_models = blog_post_summary_model_class.query( datastore_services.any_of( blog_post_summary_model_class.author_id == user_id) ).fetch() blogpost_ids |= set([model.id for model in blog_post_summary_models]) _save_pseudonymizable_entity_mappings_to_different_pseudonyms( pending_deletion_request, models.NAMES.blog, blogpost_ids) # TODO(sll):Add wipeout for BlogPostsRightsModel after adding # service layer. @transaction_services.run_in_transaction_wrapper def _pseudonymize_models_transactional( blog_posts_related_models, pseudonymized_id): """Pseudonymize user ID fields in the models. This function is run in a transaction, with the maximum number of blog_posts_related_models being MAX_NUMBER_OF_OPS_IN_TRANSACTION. Args: blog_posts_related_models: list(BaseModel). Models whose user IDs should be pseudonymized. pseudonymized_id: str. New pseudonymized user ID to be used for the models. """ blog_post_models = [ model for model in blog_posts_related_models if isinstance(model, blog_post_model_class)] for blog_post_model in blog_post_models: if blog_post_model.author_id == user_id: blog_post_model.author_id = pseudonymized_id blog_post_model.update_timestamps() blog_post_summary_models = [ model for model in blog_posts_related_models if isinstance(model, blog_post_summary_model_class)] for blog_post_summary in blog_post_summary_models: blog_post_summary.author_id = pseudonymized_id blog_post_summary.update_timestamps() datastore_services.put_multi( blog_post_models + blog_post_summary_models) blog_posts_ids_to_pids = ( pending_deletion_request.pseudonymizable_entity_mappings[ models.NAMES.blog.value]) for blogpost_id, pseudonymized_id in blog_posts_ids_to_pids.items(): blog_posts_related_models = [ model for model in blog_post_models if model.id == blogpost_id ] + [ model for model in blog_post_summary_models if model.id == blogpost_id ] for i in python_utils.RANGE( 0, len(blog_posts_related_models), feconf.MAX_NUMBER_OF_OPS_IN_TRANSACTION): _pseudonymize_models_transactional( blog_posts_related_models[ i:i + feconf.MAX_NUMBER_OF_OPS_IN_TRANSACTION], pseudonymized_id)
def _pseudonymize_blog_posts_models(pending_deletion_request): """Pseudonymize the blog post models for the user with user_id. Args: pending_deletion_request: PendingDeletionRequest. The pending deletion request object to be saved in the datastore. """ user_id = pending_deletion_request.user_id # We want to preserve the same pseudonymous user ID on all the models # related to one blog post. So we collect all the users' blog # post models and blog post summary models then # we generate a pseudonymous user ID and replace the user ID # with that pseudonymous user ID in all the models. blog_post_model_class = blog_models.BlogPostModel blog_post_models = blog_post_model_class.query( datastore_services.any_of( blog_post_model_class.author_id == user_id) ).fetch() blogpost_ids = set([model.id for model in blog_post_models]) blog_post_summary_model_class = blog_models.BlogPostSummaryModel blog_post_summary_models = blog_post_summary_model_class.query( datastore_services.any_of( blog_post_summary_model_class.author_id == user_id) ).fetch() blogpost_ids |= set([model.id for model in blog_post_summary_models]) _save_pseudonymizable_entity_mappings_to_different_pseudonyms( pending_deletion_request, models.NAMES.blog, blogpost_ids) # TODO(sll):Add wipeout for BlogPostsRightsModel after adding # service layer. @transaction_services.run_in_transaction_wrapper def _pseudonymize_models_transactional( blog_posts_related_models, pseudonymized_id): """Pseudonymize user ID fields in the models. This function is run in a transaction, with the maximum number of blog_posts_related_models being MAX_NUMBER_OF_OPS_IN_TRANSACTION. Args: blog_posts_related_models: list(BaseModel). Models whose user IDs should be pseudonymized. pseudonymized_id: str. New pseudonymized user ID to be used for the models. """ blog_post_models = [ model for model in blog_posts_related_models if isinstance(model, blog_post_model_class)] for blog_post_model in blog_post_models: if blog_post_model.author_id == user_id: blog_post_model.author_id = pseudonymized_id blog_post_model.update_timestamps() blog_post_summary_models = [ model for model in blog_posts_related_models if isinstance(model, blog_post_summary_model_class)] for blog_post_summary in blog_post_summary_models: blog_post_summary.author_id = pseudonymized_id blog_post_summary.update_timestamps() datastore_services.put_multi( blog_post_models + blog_post_summary_models) blog_posts_ids_to_pids = ( pending_deletion_request.pseudonymizable_entity_mappings[ models.NAMES.blog.value]) for blogpost_id, pseudonymized_id in blog_posts_ids_to_pids.items(): blog_posts_related_models = [ model for model in blog_post_models if model.id == blogpost_id ] + [ model for model in blog_post_summary_models if model.id == blogpost_id ] transaction_slices = utils.grouper( blog_posts_related_models, feconf.MAX_NUMBER_OF_OPS_IN_TRANSACTION) for transaction_slice in transaction_slices: _pseudonymize_models_transactional( [m for m in transaction_slice if m is not None], pseudonymized_id)
42,644
def asset_from_cryptocom(crtptocom_name: str) -> Asset: """May raise: - DeserializationError - UnsupportedAsset - UnknownAsset """ if not isinstance(crtptocom_name, str): raise DeserializationError( f'Got non-string type {type(crtptocom_name)} for cryptocom asset', ) symbol = CRYPTOCOM_TO_WORLD.get(crtptocom_name, crtptocom_name) return symbol_to_asset_or_token(symbol)
def asset_from_cryptocom(cryptocom_name: str) -> Asset:
    """May raise:
    - DeserializationError
    - UnsupportedAsset
    - UnknownAsset
    """
    if not isinstance(cryptocom_name, str):
        raise DeserializationError(
            f'Got non-string type {type(cryptocom_name)} for cryptocom asset',
        )

    symbol = CRYPTOCOM_TO_WORLD.get(cryptocom_name, cryptocom_name)
    return symbol_to_asset_or_token(symbol)
44,833
def main(): parser = argparse.ArgumentParser() parser.add_argument("--host") parser.add_argument("--token") parser.add_argument("--user") args = parser.parse_args() os.environ["DATABRICKS_HOST"] = args.host os.environ["DATABRICKS_TOKEN"] = args.token mlflow.set_tracking_uri("databricks") experiment_name = f"/Users/harutaka.kawamura@databricks.com/{uuid.uuid4().hex}" experiment_id = mlflow.create_experiment(experiment_name) mlflow.set_experiment(experiment_id=experiment_id) mlflow.sklearn.autolog() num_runs = 5 print(f"Logging {num_runs} runs in {args.host}#/mlflow/experiments/{experiment_id}") for i in range(num_runs): with mlflow.start_run() as run: print(f"Logging run:", run.info.run_id, f"{i + 1} / {num_runs} ") LinearRegression().fit(*load_iris(as_frame=True, return_X_y=True))
def main(): parser = argparse.ArgumentParser() parser.add_argument("--host") parser.add_argument("--token") parser.add_argument("--user") args = parser.parse_args() os.environ["DATABRICKS_HOST"] = args.host os.environ["DATABRICKS_TOKEN"] = args.token mlflow.set_tracking_uri("databricks") experiment_name = f"/Users/{args.user}@databricks.com/{uuid.uuid4().hex}" experiment_id = mlflow.create_experiment(experiment_name) mlflow.set_experiment(experiment_id=experiment_id) mlflow.sklearn.autolog() num_runs = 5 print(f"Logging {num_runs} runs in {args.host}#/mlflow/experiments/{experiment_id}") for i in range(num_runs): with mlflow.start_run() as run: print(f"Logging run:", run.info.run_id, f"{i + 1} / {num_runs} ") LinearRegression().fit(*load_iris(as_frame=True, return_X_y=True))
33,537
def test_report_batch_item_failures_invalid_result_json_batch_fails( create_lambda_function, lambda_client, sqs_client, sqs_create_queue, sqs_queue_arn, lambda_su_role, snapshot, cleanups, ): # create queue used in the lambda to send invocation results to (to verify lambda was invoked) destination_queue_name = f"destination-queue-{short_uid()}" destination_url = sqs_create_queue(QueueName=destination_queue_name) snapshot.match( "get_destination_queue_url", sqs_client.get_queue_url(QueueName=destination_queue_name) ) # timeout in seconds, used for both the lambda and the queue visibility timeout. # increase to 10 if testing against AWS fails. retry_timeout = 4 retries = 2 # set up lambda function function_name = f"failing-lambda-{short_uid()}" create_lambda_function( func_name=function_name, handler_file=LAMBDA_SQS_BATCH_ITEM_FAILURE_FILE, runtime=LAMBDA_RUNTIME_PYTHON38, role=lambda_su_role, timeout=retry_timeout, # timeout needs to be <= than visibility timeout envvars={ "DESTINATION_QUEUE_URL": destination_url, "OVERWRITE_RESULT": '{"batchItemFailures": [{"foo":"notvalid"}]}', }, ) # create dlq for event source queue event_dlq_url = sqs_create_queue(QueueName=f"event-dlq-{short_uid()}") event_dlq_arn = sqs_queue_arn(event_dlq_url) # create event source queue event_source_url = sqs_create_queue( QueueName=f"source-queue-{short_uid()}", Attributes={ # the visibility timeout is implicitly also the time between retries "VisibilityTimeout": str(retry_timeout), "RedrivePolicy": json.dumps( {"deadLetterTargetArn": event_dlq_arn, "maxReceiveCount": retries} ), }, ) event_source_arn = sqs_queue_arn(event_source_url) # wire everything with the event source mapping mapping_uuid = lambda_client.create_event_source_mapping( EventSourceArn=event_source_arn, FunctionName=function_name, BatchSize=10, MaximumBatchingWindowInSeconds=0, FunctionResponseTypes=["ReportBatchItemFailures"], )["UUID"] cleanups.append(lambda: lambda_client.delete_event_source_mapping(UUID=mapping_uuid)) _await_event_source_mapping_enabled(lambda_client, mapping_uuid) # trigger the lambda, the message content doesn't matter because the whole batch should be treated as failure sqs_client.send_message( QueueUrl=event_source_url, MessageBody=json.dumps({"message": 1, "fail_attempts": 0}), ) # now wait for the first invocation result which is expected to have processed message 1 we wait half the retry # interval to wait long enough for the message to appear, but short enough to check that the DLQ is empty after # the first attempt. first_invocation = sqs_client.receive_message( QueueUrl=destination_url, WaitTimeSeconds=15, MaxNumberOfMessages=1 ) assert "Messages" in first_invocation snapshot.match("first_invocation", first_invocation) # now wait for the second invocation result which second_invocation = sqs_client.receive_message( QueueUrl=destination_url, WaitTimeSeconds=15, MaxNumberOfMessages=1 ) assert "Messages" in second_invocation # hack to make snapshot work snapshot.match("second_invocation", second_invocation) # now check that the messages was placed in the DLQ dlq_response = sqs_client.receive_message(QueueUrl=event_dlq_url, WaitTimeSeconds=15) assert "Messages" in dlq_response snapshot.match("dlq_response", dlq_response)
def test_report_batch_item_failures_invalid_result_json_batch_fails( create_lambda_function, lambda_client, sqs_client, sqs_create_queue, sqs_queue_arn, lambda_su_role, snapshot, cleanups, ): # create queue used in the lambda to send invocation results to (to verify lambda was invoked) destination_queue_name = f"destination-queue-{short_uid()}" destination_url = sqs_create_queue(QueueName=destination_queue_name) snapshot.match( "get_destination_queue_url", sqs_client.get_queue_url(QueueName=destination_queue_name) ) # timeout in seconds, used for both the lambda and the queue visibility timeout. # increase to 10 if testing against AWS fails. retry_timeout = 4 retries = 2 # set up lambda function function_name = f"failing-lambda-{short_uid()}" create_lambda_function( func_name=function_name, handler_file=LAMBDA_SQS_BATCH_ITEM_FAILURE_FILE, runtime=LAMBDA_RUNTIME_PYTHON38, role=lambda_su_role, timeout=retry_timeout, # timeout needs to be <= than visibility timeout envvars={ "DESTINATION_QUEUE_URL": destination_url, "OVERWRITE_RESULT": '{"batchItemFailures": [{"foo":"notvalid"}]}', }, ) # create dlq for event source queue event_dlq_url = sqs_create_queue(QueueName=f"event-dlq-{short_uid()}") event_dlq_arn = sqs_queue_arn(event_dlq_url) # create event source queue event_source_url = sqs_create_queue( QueueName=f"source-queue-{short_uid()}", Attributes={ # the visibility timeout is implicitly also the time between retries "VisibilityTimeout": str(retry_timeout), "RedrivePolicy": json.dumps( {"deadLetterTargetArn": event_dlq_arn, "maxReceiveCount": retries} ), }, ) event_source_arn = sqs_queue_arn(event_source_url) # wire everything with the event source mapping mapping_uuid = lambda_client.create_event_source_mapping( EventSourceArn=event_source_arn, FunctionName=function_name, BatchSize=10, MaximumBatchingWindowInSeconds=0, FunctionResponseTypes=["ReportBatchItemFailures"], )["UUID"] cleanups.append(lambda: lambda_client.delete_event_source_mapping(UUID=mapping_uuid)) _await_event_source_mapping_enabled(lambda_client, mapping_uuid) # trigger the lambda, the message content doesn't matter because the whole batch should be treated as failure sqs_client.send_message( QueueUrl=event_source_url, MessageBody=json.dumps({"message": 1, "fail_attempts": 0}), ) # now wait for the first invocation result which is expected to have processed message 1 we wait half the retry # interval to wait long enough for the message to appear, but short enough to check that the DLQ is empty after # the first attempt. first_invocation = sqs_client.receive_message( QueueUrl=destination_url, WaitTimeSeconds=15, MaxNumberOfMessages=1 ) assert "Messages" in first_invocation snapshot.match("first_invocation", first_invocation) # now wait for the second invocation result which (?) second_invocation = sqs_client.receive_message( QueueUrl=destination_url, WaitTimeSeconds=15, MaxNumberOfMessages=1 ) assert "Messages" in second_invocation # hack to make snapshot work snapshot.match("second_invocation", second_invocation) # now check that the messages was placed in the DLQ dlq_response = sqs_client.receive_message(QueueUrl=event_dlq_url, WaitTimeSeconds=15) assert "Messages" in dlq_response snapshot.match("dlq_response", dlq_response)
56,047
def main(): args = parse_args() # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. accelerator = Accelerator() logger.info(accelerator.state) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() # set up weights and biases if available if is_wandb_available() and args.wandb: import wandb wandb.init(project=args.output_dir.split("/")[-1]) else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.push_to_hub: if args.hub_model_id is None: repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token) else: repo_name = args.hub_model_id repo = Repository(args.output_dir, clone_from=repo_name) elif args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) accelerator.wait_for_everyone() # Load dataset # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. # TODO support datasets from local folders dataset = load_dataset(args.dataset_name, cache_dir=args.cache_dir) # Rename column names to standardized names (only "image" and "label" need to be present) if "pixel_values" in dataset["train"].column_names: dataset = dataset.rename_columns({"pixel_values": "image"}) if "annotation" in dataset["train"].column_names: dataset = dataset.rename_columns({"annotation": "label"}) # If we don't have a validation split, split off a percentage of train as validation. args.train_val_split = None if "validation" in dataset.keys() else args.train_val_split if isinstance(args.train_val_split, float) and args.train_val_split > 0.0: split = dataset["train"].train_test_split(args.train_val_split) dataset["train"] = split["train"] dataset["validation"] = split["test"] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. if args.dataset_name == "scene_parse_150": repo_id = "datasets/huggingface/label-files" filename = "ade20k-id2label.json" num_labels = 150 else: repo_id = f"datasets/{args.dataset_name}" filename = "id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) id2label = {int(k): v for k, v in id2label.items()} label2id = {v: k for k, v in id2label.items()} num_labels = len(id2label) # Load pretrained model and feature extractor config = AutoConfig.from_pretrained( args.model_name_or_path, num_labels=num_labels, id2label=id2label, label2id=label2id ) feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_name_or_path) model = AutoModelForSemanticSegmentation.from_pretrained( args.model_name_or_path, config=config, ) # Preprocessing the datasets # Define torchvision transforms to be applied to each image + target. 
# Not that straightforward in torchvision: https://github.com/pytorch/vision/issues/9 # Currently based on official torchvision references: https://github.com/pytorch/vision/blob/main/references/segmentation/transforms.py _train_transforms = Compose( [ ReduceLabels() if args.reduce_labels else Identity(), RandomCrop(size=feature_extractor.size), RandomHorizontalFlip(flip_prob=0.5), PILToTensor(), ConvertImageDtype(torch.float), Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std), ] ) # Define torchvision transform to be applied to each image. # jitter = ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.1) _val_transforms = Compose( [ ReduceLabels() if args.reduce_labels else Identity(), Resize(size=(feature_extractor.size, feature_extractor.size)), PILToTensor(), ConvertImageDtype(torch.float), Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std), ] ) def train_transforms(example_batch): pixel_values = [] labels = [] for image, target in zip(example_batch["image"], example_batch["label"]): image, target = _train_transforms(image.convert("RGB"), target) pixel_values.append(image) labels.append(target) encoding = dict() encoding["pixel_values"] = torch.stack(pixel_values) encoding["labels"] = torch.stack(labels) return encoding def val_transforms(example_batch): pixel_values = [] labels = [] for image, target in zip(example_batch["image"], example_batch["label"]): image, target = _val_transforms(image.convert("RGB"), target) pixel_values.append(image) labels.append(target) encoding = dict() encoding["pixel_values"] = torch.stack(pixel_values) encoding["labels"] = torch.stack(labels) return encoding with accelerator.main_process_first(): train_dataset = dataset["train"].with_transform(train_transforms) eval_dataset = dataset["validation"].with_transform(val_transforms) train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size ) eval_dataloader = DataLoader( eval_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size ) # Optimizer optimizer = torch.optim.AdamW( list(model.parameters()), lr=args.learning_rate, betas=[args.adam_beta1, args.adam_beta2], eps=args.adam_epsilon, ) # Prepare everything with our `accelerator`. model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader ) # Scheduler and math around the number of training steps. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch else: args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps, ) # Instantiate metric metric = load_metric("mean_iou") # Train! total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") logger.info(f" Total train batch size (w. 
parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") # Only show the progress bar once on each machine. progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) completed_steps = 0 for epoch in range(args.num_train_epochs): model.train() for step, batch in enumerate(train_dataloader): outputs = model(**batch) loss = outputs.loss loss = loss / args.gradient_accumulation_steps accelerator.backward(loss) if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) completed_steps += 1 if completed_steps >= args.max_train_steps: break # Log all results if (step + 1) % (args.gradient_accumulation_steps * args.logging_steps) == 0: loss.detach() if accelerator.state.num_processes > 1: loss = accelerator.gather(loss).sum() / accelerator.num_processes train_logs = { "loss": loss, "lr": torch.tensor(optimizer.param_groups[0]["lr"]), } # Evaluate (gather required) with torch.no_grad(): upsampled_logits = torch.nn.functional.interpolate( outputs.logits, size=batch["labels"].shape[-2:], mode="bilinear", align_corners=False ) predictions = upsampled_logits.argmax(dim=1) metric.add_batch( predictions=accelerator.gather(predictions), references=accelerator.gather(batch["labels"]), ) train_metrics = metric.compute( num_labels=len(id2label), ignore_index=255, reduce_labels=False, # we've already reduced the labels before ) train_logs["mean_iou"] = train_metrics["mean_iou"] train_logs["mean_accuracy"] = train_metrics["mean_accuracy"] train_logs["overall_accuracy"] = train_metrics["overall_accuracy"] log_str = "" for k, v in train_logs.items(): if isinstance(v, torch.Tensor): log_str += "| {}: {:.3e}".format(k, v.item()) else: log_str += "| {}: {:.3e}".format(k, v) if accelerator.is_local_main_process: progress_bar.write(log_str) if is_wandb_available() and args.wandb: wandb.log(train_logs) # Save model every `args.saving_steps` steps if (step + 1) % (args.gradient_accumulation_steps * args.saving_steps) == 0: if (args.push_to_hub and epoch < args.num_train_epochs - 1) or args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save) if (args.push_to_hub and epoch < args.num_train_epochs - 1) and accelerator.is_main_process: repo.push_to_hub( commit_message=f"Training in progress step {completed_steps}", blocking=False, auto_lfs_prune=True, ) logger.info("***** Running evaluation *****") model.eval() for step, batch in enumerate(tqdm(eval_dataloader, disable=not accelerator.is_local_main_process)): outputs = model(**batch) upsampled_logits = torch.nn.functional.interpolate( outputs.logits, size=batch["labels"].shape[-2:], mode="bilinear", align_corners=False ) predictions = upsampled_logits.argmax(dim=1) metric.add_batch( predictions=accelerator.gather(predictions), references=accelerator.gather(batch["labels"]), ) eval_metrics = metric.compute( num_labels=len(id2label), ignore_index=255, reduce_labels=False, # we've already reduced the labels before ) logger.info(f"epoch {epoch}: {eval_metrics}") if args.push_to_hub and epoch < args.num_train_epochs - 1: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(args.output_dir, 
save_function=accelerator.save) if accelerator.is_main_process: feature_extractor.save_pretrained(args.output_dir) repo.push_to_hub( commit_message=f"Training in progress epoch {epoch}", blocking=False, auto_lfs_prune=True ) if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save) if accelerator.is_main_process: feature_extractor.save_pretrained(args.output_dir) if args.push_to_hub: repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)
def main(): args = parse_args() # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. accelerator = Accelerator() logger.info(accelerator.state) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() # set up weights and biases if available if is_wandb_available() and args.wandb: import wandb wandb.init(project=args.output_dir.split("/")[-1]) else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.push_to_hub: if args.hub_model_id is None: repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token) else: repo_name = args.hub_model_id repo = Repository(args.output_dir, clone_from=repo_name) elif args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) accelerator.wait_for_everyone() # Load dataset # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. # TODO support datasets from local folders dataset = load_dataset(args.dataset_name, cache_dir=args.cache_dir) # Rename column names to standardized names (only "image" and "label" need to be present) if "pixel_values" in dataset["train"].column_names: dataset = dataset.rename_columns({"pixel_values": "image"}) if "annotation" in dataset["train"].column_names: dataset = dataset.rename_columns({"annotation": "label"}) # If we don't have a validation split, split off a percentage of train as validation. args.train_val_split = None if "validation" in dataset.keys() else args.train_val_split if isinstance(args.train_val_split, float) and args.train_val_split > 0.0: split = dataset["train"].train_test_split(args.train_val_split) dataset["train"] = split["train"] dataset["validation"] = split["test"] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. if args.dataset_name == "scene_parse_150": repo_id = "datasets/huggingface/label-files" filename = "ade20k-id2label.json" num_labels = 150 else: repo_id = f"datasets/{args.dataset_name}" filename = "id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) id2label = {int(k): v for k, v in id2label.items()} label2id = {v: k for k, v in id2label.items()} num_labels = len(id2label) # Load pretrained model and feature extractor config = AutoConfig.from_pretrained( args.model_name_or_path, num_labels=num_labels, id2label=id2label, label2id=label2id ) feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_name_or_path) model = AutoModelForSemanticSegmentation.from_pretrained( args.model_name_or_path, config=config, ) # Preprocessing the datasets # Define torchvision transforms to be applied to each image + target. 
# Not that straightforward in torchvision: https://github.com/pytorch/vision/issues/9 # Currently based on official torchvision references: https://github.com/pytorch/vision/blob/main/references/segmentation/transforms.py _train_transforms = Compose( [ ReduceLabels() if args.reduce_labels else Identity(), RandomCrop(size=feature_extractor.size), RandomHorizontalFlip(flip_prob=0.5), PILToTensor(), ConvertImageDtype(torch.float), Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std), ] ) # Define torchvision transform to be applied to each image. # jitter = ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.1) _val_transforms = Compose( [ ReduceLabels() if args.reduce_labels else Identity(), Resize(size=(feature_extractor.size, feature_extractor.size)), PILToTensor(), ConvertImageDtype(torch.float), Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std), ] ) def train_transforms(example_batch): pixel_values = [] labels = [] for image, target in zip(example_batch["image"], example_batch["label"]): image, target = _train_transforms(image.convert("RGB"), target) pixel_values.append(image) labels.append(target) encoding = dict() encoding["pixel_values"] = torch.stack(pixel_values) encoding["labels"] = torch.stack(labels) return encoding def val_transforms(example_batch): pixel_values = [] labels = [] for image, target in zip(example_batch["image"], example_batch["label"]): image, target = _val_transforms(image.convert("RGB"), target) pixel_values.append(image) labels.append(target) encoding = dict() encoding["pixel_values"] = torch.stack(pixel_values) encoding["labels"] = torch.stack(labels) return encoding with accelerator.main_process_first(): train_dataset = dataset["train"].with_transform(train_transforms) eval_dataset = dataset["validation"].with_transform(val_transforms) train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size ) eval_dataloader = DataLoader( eval_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size ) # Optimizer optimizer = torch.optim.AdamW( list(model.parameters()), lr=args.learning_rate, betas=[args.adam_beta1, args.adam_beta2], eps=args.adam_epsilon, ) # Prepare everything with our `accelerator`. model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader ) # Scheduler and math around the number of training steps. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch else: args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps, ) # Instantiate metric metric = load_metric("mean_iou") # Train! total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") logger.info(f" Total train batch size (w. 
parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") # Only show the progress bar once on each machine. progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) completed_steps = 0 for epoch in range(args.num_train_epochs): model.train() for step, batch in enumerate(train_dataloader): outputs = model(**batch) loss = outputs.loss loss = loss / args.gradient_accumulation_steps accelerator.backward(loss) if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) completed_steps += 1 if completed_steps >= args.max_train_steps: break # Log all results if (step + 1) % (args.gradient_accumulation_steps * args.logging_steps) == 0: loss.detach() if accelerator.state.num_processes > 1: loss = accelerator.gather(loss).sum() / accelerator.num_processes train_logs = { "loss": loss, "lr": torch.tensor(optimizer.param_groups[0]["lr"]), } # Evaluate (gather required) with torch.no_grad(): upsampled_logits = torch.nn.functional.interpolate( outputs.logits, size=batch["labels"].shape[-2:], mode="bilinear", align_corners=False ) predictions = upsampled_logits.argmax(dim=1) metric.add_batch( predictions=accelerator.gather(predictions), references=accelerator.gather(batch["labels"]), ) train_metrics = metric.compute( num_labels=len(id2label), ignore_index=255, reduce_labels=False, # we've already reduced the labels before ) train_logs["mean_iou"] = train_metrics["mean_iou"] train_logs["mean_accuracy"] = train_metrics["mean_accuracy"] train_logs["overall_accuracy"] = train_metrics["overall_accuracy"] log_str = "" for k, v in train_logs.items(): if isinstance(v, torch.Tensor): log_str += "| {}: {:.3e}".format(k, v.item()) else: log_str += "| {}: {:.3e}".format(k, v) if accelerator.is_local_main_process: progress_bar.write(log_str) if args.wandb: accelerator.log(train_logs) # Save model every `args.saving_steps` steps if (step + 1) % (args.gradient_accumulation_steps * args.saving_steps) == 0: if (args.push_to_hub and epoch < args.num_train_epochs - 1) or args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save) if (args.push_to_hub and epoch < args.num_train_epochs - 1) and accelerator.is_main_process: repo.push_to_hub( commit_message=f"Training in progress step {completed_steps}", blocking=False, auto_lfs_prune=True, ) logger.info("***** Running evaluation *****") model.eval() for step, batch in enumerate(tqdm(eval_dataloader, disable=not accelerator.is_local_main_process)): outputs = model(**batch) upsampled_logits = torch.nn.functional.interpolate( outputs.logits, size=batch["labels"].shape[-2:], mode="bilinear", align_corners=False ) predictions = upsampled_logits.argmax(dim=1) metric.add_batch( predictions=accelerator.gather(predictions), references=accelerator.gather(batch["labels"]), ) eval_metrics = metric.compute( num_labels=len(id2label), ignore_index=255, reduce_labels=False, # we've already reduced the labels before ) logger.info(f"epoch {epoch}: {eval_metrics}") if args.push_to_hub and epoch < args.num_train_epochs - 1: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(args.output_dir, 
save_function=accelerator.save) if accelerator.is_main_process: feature_extractor.save_pretrained(args.output_dir) repo.push_to_hub( commit_message=f"Training in progress epoch {epoch}", blocking=False, auto_lfs_prune=True ) if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save) if accelerator.is_main_process: feature_extractor.save_pretrained(args.output_dir) if args.push_to_hub: repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)
54,571
def _is_an_unambiguous_user_argument(argument: str) -> bool: """Check if the provided argument is a user mention, user id, or username.""" has_id_or_mention = bool(IDConverter()._get_id_match(argument) or RE_USER_MENTION.match(argument)) if not has_id_or_mention: if argument[0] == '@': argument = argument[1:] # Check to see if the author passed a username (a discriminator exists) if len(argument) > 5 and argument[-5] == '#': return True return has_id_or_mention
def _is_an_unambiguous_user_argument(argument: str) -> bool:
    """Check if the provided argument is a user mention, user id, or username."""
    has_id_or_mention = bool(IDConverter()._get_id_match(argument) or RE_USER_MENTION.match(argument))

    if has_id_or_mention:
        return has_id_or_mention

    if argument[0] == '@':
        argument = argument[1:]

    # Check to see if the author passed a username (a discriminator exists)
    if len(argument) > 5 and argument[-5] == '#':
        return True

    # No mention, id, or discriminated username was found.
    return False
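For illustration, here is a tiny standalone version of the discriminator heuristic used above; the helper name and the sample usernames are made up for this sketch and are not part of the original code:

```python
def _looks_like_discriminated_username(argument: str) -> bool:
    # Mirrors the check above: strip a leading '@', then require 'name#1234'.
    if argument.startswith('@'):
        argument = argument[1:]
    return len(argument) > 5 and argument[-5] == '#'


assert _looks_like_discriminated_username('@lemon#0001')
assert _looks_like_discriminated_username('lemon#0001')
assert not _looks_like_discriminated_username('lemon')
assert not _looks_like_discriminated_username('#0001')  # too short to carry a name
```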
48,005
def main(): args = build_argparser().parse_args() cap = open_images_capture(args.input, args.loop) log.info('OpenVINO Inference Engine') log.info('\tbuild: {}'.format(get_version())) ie = IECore() plugin_config = get_user_config(args.device, args.num_streams, args.num_threads) model = MonoDepthModel(ie, args.model) log.info('Reading model {}'.format(args.model)) log_blobs_info(model) pipeline = AsyncPipeline(ie, model, plugin_config, device=args.device, max_num_requests=args.num_infer_requests) log.info('The model {} is loaded to {}'.format(args.model, args.device)) log_runtime_settings(pipeline.exec_net, args.device) next_frame_id = 0 next_frame_id_to_show = 0 metrics = PerformanceMetrics() presenter = None output_transform = None video_writer = cv2.VideoWriter() while True: if pipeline.is_ready(): # Get new image/frame start_time = perf_counter() frame = cap.read() if frame is None: if next_frame_id == 0: raise ValueError("Can't read an image from the input") break if next_frame_id == 0: output_transform = OutputTransform(frame.shape[:2], args.output_resolution) if args.output_resolution: output_resolution = output_transform.new_resolution else: output_resolution = (frame.shape[1], frame.shape[0]) presenter = monitors.Presenter(args.utilization_monitors, 55, (round(output_resolution[0] / 4), round(output_resolution[1] / 8))) if args.output and not video_writer.open(args.output, cv2.VideoWriter_fourcc(*'MJPG'), cap.fps(), output_resolution): raise RuntimeError("Can't open video writer") # Submit for inference pipeline.submit_data(frame, next_frame_id, {'start_time': start_time}) next_frame_id += 1 else: # Wait for empty request pipeline.await_any() if pipeline.callback_exceptions: raise pipeline.callback_exceptions[0] # Process all completed requests results = pipeline.get_result(next_frame_id_to_show) if results: depth_map, frame_meta = results depth_map = apply_color_map(depth_map) start_time = frame_meta['start_time'] presenter.drawGraphs(depth_map) metrics.update(start_time, depth_map) if video_writer.isOpened() and (args.output_limit <= 0 or next_frame_id_to_show <= args.output_limit-1): video_writer.write(depth_map) next_frame_id_to_show += 1 if not args.no_show: cv2.imshow(DEMO_NAME, depth_map) key = cv2.waitKey(1) if key == 27 or key == 'q' or key == 'Q': break presenter.handleKey(key) pipeline.await_all() # Process completed requests for next_frame_id_to_show in range(next_frame_id_to_show, next_frame_id): results = pipeline.get_result(next_frame_id_to_show) while results is None: results = pipeline.get_result(next_frame_id_to_show) depth_map, frame_meta = results depth_map = apply_color_map(depth_map) start_time = frame_meta['start_time'] presenter.drawGraphs(depth_map) metrics.update(start_time, depth_map) if video_writer.isOpened() and (args.output_limit <= 0 or next_frame_id_to_show <= args.output_limit-1): video_writer.write(depth_map) if not args.no_show: cv2.imshow(DEMO_NAME, depth_map) key = cv2.waitKey(1) metrics.log_total() print(presenter.reportMeans())
def main(): args = build_argparser().parse_args() cap = open_images_capture(args.input, args.loop) log.info('OpenVINO Inference Engine') log.info('\tbuild: {}'.format(get_version())) ie = IECore() plugin_config = get_user_config(args.device, args.num_streams, args.num_threads) model = MonoDepthModel(ie, args.model) log.info('Reading model {}'.format(args.model)) log_blobs_info(model) pipeline = AsyncPipeline(ie, model, plugin_config, device=args.device, max_num_requests=args.num_infer_requests) log.info('The model {} is loaded to {}'.format(args.model, args.device)) log_runtime_settings(pipeline.exec_net, set(parse_devices(args.device))) next_frame_id = 0 next_frame_id_to_show = 0 metrics = PerformanceMetrics() presenter = None output_transform = None video_writer = cv2.VideoWriter() while True: if pipeline.is_ready(): # Get new image/frame start_time = perf_counter() frame = cap.read() if frame is None: if next_frame_id == 0: raise ValueError("Can't read an image from the input") break if next_frame_id == 0: output_transform = OutputTransform(frame.shape[:2], args.output_resolution) if args.output_resolution: output_resolution = output_transform.new_resolution else: output_resolution = (frame.shape[1], frame.shape[0]) presenter = monitors.Presenter(args.utilization_monitors, 55, (round(output_resolution[0] / 4), round(output_resolution[1] / 8))) if args.output and not video_writer.open(args.output, cv2.VideoWriter_fourcc(*'MJPG'), cap.fps(), output_resolution): raise RuntimeError("Can't open video writer") # Submit for inference pipeline.submit_data(frame, next_frame_id, {'start_time': start_time}) next_frame_id += 1 else: # Wait for empty request pipeline.await_any() if pipeline.callback_exceptions: raise pipeline.callback_exceptions[0] # Process all completed requests results = pipeline.get_result(next_frame_id_to_show) if results: depth_map, frame_meta = results depth_map = apply_color_map(depth_map) start_time = frame_meta['start_time'] presenter.drawGraphs(depth_map) metrics.update(start_time, depth_map) if video_writer.isOpened() and (args.output_limit <= 0 or next_frame_id_to_show <= args.output_limit-1): video_writer.write(depth_map) next_frame_id_to_show += 1 if not args.no_show: cv2.imshow(DEMO_NAME, depth_map) key = cv2.waitKey(1) if key == 27 or key == 'q' or key == 'Q': break presenter.handleKey(key) pipeline.await_all() # Process completed requests for next_frame_id_to_show in range(next_frame_id_to_show, next_frame_id): results = pipeline.get_result(next_frame_id_to_show) while results is None: results = pipeline.get_result(next_frame_id_to_show) depth_map, frame_meta = results depth_map = apply_color_map(depth_map) start_time = frame_meta['start_time'] presenter.drawGraphs(depth_map) metrics.update(start_time, depth_map) if video_writer.isOpened() and (args.output_limit <= 0 or next_frame_id_to_show <= args.output_limit-1): video_writer.write(depth_map) if not args.no_show: cv2.imshow(DEMO_NAME, depth_map) key = cv2.waitKey(1) metrics.log_total() print(presenter.reportMeans())
59,443
def _parse_common_args(backend, qobj_id, qobj_header, shots, memory, max_credits, seed_simulator, init_qubits, rep_delay, **run_config): """Resolve the various types of args allowed to the assemble() function through duck typing, overriding args, etc. Refer to the assemble() docstring for details on what types of inputs are allowed. Here the args are resolved by converting them to standard instances, and prioritizing them in case a run option is passed through multiple args (explicitly setting an arg has more priority than the arg set by backend) Returns: RunConfig: a run config, which is a standardized object that configures the qobj and determines the runtime environment. Raises: QiskitError: if the memory arg is True and the backend does not support memory. Also if shots exceeds max_shots for the configured backend. TypeError: if the type of shots is not int. """ # grab relevant info from backend if it exists backend_config = None if backend: backend_config = backend.configuration() # check for memory flag applied to backend that does not support memory if memory and not backend_config.memory: raise QiskitError("memory not supported by backend {}" .format(backend_config.backend_name)) # an identifier for the Qobj qobj_id = qobj_id or str(uuid.uuid4()) # The header that goes at the top of the Qobj (and later Result) # we process it as dict, then write entries that are not None to a QobjHeader object qobj_header = qobj_header or {} if isinstance(qobj_header, QobjHeader): qobj_header = qobj_header.to_dict() backend_name = getattr(backend_config, 'backend_name', None) backend_version = getattr(backend_config, 'backend_version', None) qobj_header = {**dict(backend_name=backend_name, backend_version=backend_version), **qobj_header} qobj_header = QobjHeader(**{k: v for k, v in qobj_header.items() if v is not None}) max_shots = getattr(backend_config, 'max_shots', None) if shots is None: if max_shots: shots = min(1024, max_shots) else: shots = 1024 elif not isinstance(shots, int): raise TypeError( "Argument 'shots' should be of type 'int'") elif max_shots and max_shots < shots: raise QiskitError( 'Number of shots specified: %s exceeds max_shots property of the ' 'backend: %s.' % (shots, max_shots)) dynamic_reprate_enabled = getattr(backend_config, 'dynamic_reprate_enabled', False) if dynamic_reprate_enabled: default_rep_delay = getattr(backend_config, "default_rep_delay", None) rep_delay_range = getattr(backend_config, "rep_delay_range", None) rep_delay = _parse_rep_delay(rep_delay, default_rep_delay, rep_delay_range) else: if rep_delay is not None: rep_delay = None warnings.warn( "Dynamic rep rates not supported on this backend, cannot use rep_delay.", RuntimeWarning, ) # create run configuration and populate run_config_dict = dict(shots=shots, memory=memory, max_credits=max_credits, seed_simulator=seed_simulator, init_qubits=init_qubits, rep_delay=rep_delay, **run_config) return qobj_id, qobj_header, run_config_dict
def _parse_common_args(backend, qobj_id, qobj_header, shots,
                       memory, max_credits, seed_simulator, init_qubits,
                       rep_delay, **run_config):
    """Resolve the various types of args allowed to the assemble() function through
    duck typing, overriding args, etc. Refer to the assemble() docstring for details on
    what types of inputs are allowed.

    Here the args are resolved by converting them to standard instances, and prioritizing
    them in case a run option is passed through multiple args (explicitly setting an arg
    has more priority than the arg set by backend)

    Returns:
        RunConfig: a run config, which is a standardized object that configures the qobj
            and determines the runtime environment.

    Raises:
        QiskitError: if the memory arg is True and the backend does not support memory.
            Also if shots exceeds max_shots for the configured backend, or if the type
            of shots is not int.
    """
    # grab relevant info from backend if it exists
    backend_config = None
    if backend:
        backend_config = backend.configuration()
        # check for memory flag applied to backend that does not support memory
        if memory and not backend_config.memory:
            raise QiskitError("memory not supported by backend {}"
                              .format(backend_config.backend_name))

    # an identifier for the Qobj
    qobj_id = qobj_id or str(uuid.uuid4())

    # The header that goes at the top of the Qobj (and later Result)
    # we process it as dict, then write entries that are not None to a QobjHeader object
    qobj_header = qobj_header or {}
    if isinstance(qobj_header, QobjHeader):
        qobj_header = qobj_header.to_dict()
    backend_name = getattr(backend_config, 'backend_name', None)
    backend_version = getattr(backend_config, 'backend_version', None)
    qobj_header = {**dict(backend_name=backend_name, backend_version=backend_version),
                   **qobj_header}
    qobj_header = QobjHeader(**{k: v for k, v in qobj_header.items() if v is not None})

    max_shots = getattr(backend_config, 'max_shots', None)
    if shots is None:
        if max_shots:
            shots = min(1024, max_shots)
        else:
            shots = 1024
    elif not isinstance(shots, int):
        raise QiskitError(
            "Argument 'shots' should be of type 'int'"
        )
    elif max_shots and max_shots < shots:
        raise QiskitError(
            'Number of shots specified: %s exceeds max_shots property of the '
            'backend: %s.' % (shots, max_shots))

    dynamic_reprate_enabled = getattr(backend_config, 'dynamic_reprate_enabled', False)
    if dynamic_reprate_enabled:
        default_rep_delay = getattr(backend_config, "default_rep_delay", None)
        rep_delay_range = getattr(backend_config, "rep_delay_range", None)
        rep_delay = _parse_rep_delay(rep_delay, default_rep_delay, rep_delay_range)
    else:
        if rep_delay is not None:
            rep_delay = None
            warnings.warn(
                "Dynamic rep rates not supported on this backend, cannot use rep_delay.",
                RuntimeWarning,
            )

    # create run configuration and populate
    run_config_dict = dict(shots=shots,
                           memory=memory,
                           max_credits=max_credits,
                           seed_simulator=seed_simulator,
                           init_qubits=init_qubits,
                           rep_delay=rep_delay,
                           **run_config)

    return qobj_id, qobj_header, run_config_dict
31,895
def main(): incident_id = demisto.args()['id'] if 'id' in demisto.args() else demisto.incidents()[0]['id'] key = demisto.args()['key'] value = demisto.args()['value'] append = demisto.args()['append'] error_unfinished = argToBoolean(demisto.args().get('errorUnfinished', "false")) args = {'key': key, 'value': value, 'append': append} res = demisto.executeCommand( 'executeCommandAt', { 'incidents': incident_id, 'command': 'Set', 'arguments': args, } ) if error_unfinished: result_string = res[-1].get('Contents', "") result_string = result_string.strip('.') numbers = [int(s) for s in result_string.split() if s.isdigit()] if len(set(numbers)) > 1: # check if the all the numbers are the same. Supposed to be 2 numbers. # if the numbers are the same, Set succeed on all of the incidents. return_error("Not all incidents were set.\n" + result_string) demisto.results(res)
def main(): incident_id = demisto.args()['id'] if 'id' in demisto.args() else demisto.incidents()[0]['id'] key = demisto.args()['key'] value = demisto.args()['value'] append = demisto.args()['append'] error_unfinished = argToBoolean(demisto.args().get('errorUnfinished', "false")) args = {'key': key, 'value': value, 'append': append} res = demisto.executeCommand( 'executeCommandAt', { 'incidents': incident_id, 'command': 'Set', 'arguments': args, } ) if error_unfinished: result_string = res[-1].get('Contents', "") result_string = result_string.strip('.') numbers = [int(s) for s in result_string.split() if s.isdigit()] if len(set(numbers)) > 1: # check if all the numbers are the same. Supposed to be 2 numbers. # if the numbers are the same, Set succeed on all of the incidents. return_error("Not all incidents were set.\n" + result_string) demisto.results(res)
1,270
def dynamic_subimport(name, vars): mod = types.ModuleType(name) mod.__dict__.update(vars) return mod
def dynamic_subimport(name, vars):
    # module_from_spec() expects a ModuleSpec, not a bare name, so build a
    # loader-less spec for the synthetic module first.
    spec = importlib.util.spec_from_loader(name, loader=None)
    mod = importlib.util.module_from_spec(spec)
    mod.__dict__.update(vars)
    return mod
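For reference, a minimal self-contained sketch of the spec-based construction used above; the helper name, the `sys.modules` registration, and the explicit `import importlib.util` are assumptions for this example rather than part of the original snippet:

```python
import importlib.util
import sys


def make_synthetic_module(name, namespace):
    # Build a loader-less spec, materialise the module object, then inject
    # the provided namespace into its __dict__.
    spec = importlib.util.spec_from_loader(name, loader=None)
    mod = importlib.util.module_from_spec(spec)
    mod.__dict__.update(namespace)
    sys.modules[name] = mod  # optional: make it importable by name afterwards
    return mod


demo = make_synthetic_module("demo_generated", {"answer": 42})
print(demo.answer)  # 42
```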
34,314
def add_no_plot_param( parser: argparse.ArgumentParser, default: bool = False, required: bool = False, ): parser.add_argument( "--no-plot", action="store_true", default=default, help=f"Don't render plots of confusion matrix and histogram", required=required, )
def add_no_plot_param(
    parser: argparse.ArgumentParser,
    default: bool = False,
    required: bool = False,
):
    parser.add_argument(
        "--no-plot",
        action="store_true",
        default=default,
        help="Don't render plots using matplotlib",
        required=required,
    )
44,810
def test_client_can_be_serialized_with_pickle(tmpdir): """ Verifies that instances of `MlflowClient` can be serialized using pickle, even if the underlying Tracking and Model Registry stores used by the client are not serializable using pickle """ class MockUnpickleableTrackingStore(SqlAlchemyTrackingStore): pass class MockUnpickleableModelRegistryStore(SqlAlchemyModelRegistryStore): pass backend_store_path = os.path.join(str(tmpdir), "test.db") artifact_store_path = os.path.join(str(tmpdir), "artifacts") mock_tracking_store = MockUnpickleableTrackingStore( "sqlite:///" + backend_store_path, artifact_store_path ) mock_model_registry_store = MockUnpickleableModelRegistryStore( "sqlite:///" + backend_store_path ) # Verify that the mock stores cannot be pickled because they are defined within a function # (i.e. the test function) with pytest.raises(AttributeError): pickle.dumps(mock_tracking_store) with pytest.raises(AttributeError): pickle.dumps(mock_model_registry_store) _tracking_store_registry.register("pickle", lambda *args, **kwargs: mock_tracking_store) _model_registry_store_registry.register( "pickle", lambda *args, **kwargs: mock_model_registry_store ) # Create an MlflowClient with the store that cannot be pickled, perform # tracking & model registry operations, and verify that the client can still be pickled client = MlflowClient("pickle://foo") client.create_experiment("test_experiment") client.create_registered_model("test_model") pickle.dumps(client)
def test_client_can_be_serialized_with_pickle(tmpdir): """ Verifies that instances of `MlflowClient` can be serialized using pickle, even if the underlying Tracking and Model Registry stores used by the client are not serializable using pickle """ class MockUnpickleableTrackingStore(SqlAlchemyTrackingStore): pass class MockUnpickleableModelRegistryStore(SqlAlchemyModelRegistryStore): pass backend_store_path = os.path.join(str(tmpdir), "test.db") artifact_store_path = os.path.join(str(tmpdir), "artifacts") mock_tracking_store = MockUnpickleableTrackingStore( "sqlite:///" + backend_store_path, artifact_store_path ) mock_model_registry_store = MockUnpickleableModelRegistryStore( "sqlite:///" + backend_store_path ) # Verify that the mock stores cannot be pickled because they are defined within a function # (i.e. the test function) with pytest.raises(AttributeError, match="<locals>.MockUnpickleableTrackingStore'"): pickle.dumps(mock_tracking_store) with pytest.raises(AttributeError): pickle.dumps(mock_model_registry_store) _tracking_store_registry.register("pickle", lambda *args, **kwargs: mock_tracking_store) _model_registry_store_registry.register( "pickle", lambda *args, **kwargs: mock_model_registry_store ) # Create an MlflowClient with the store that cannot be pickled, perform # tracking & model registry operations, and verify that the client can still be pickled client = MlflowClient("pickle://foo") client.create_experiment("test_experiment") client.create_registered_model("test_model") pickle.dumps(client)
2,828
def chi2_kernel(X, Y=None, gamma=1.0): """Compute the exponential chi-squared kernel X and Y. The chi-squared kernel is computed between each pair of rows in X and Y. X and Y have to be non-negative. This kernel is most commonly applied to histograms. The chi-squared kernel is given by:: k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)]) It can be interpreted as a weighted difference per entry. Read more in the :ref:`User Guide <chi2_kernel>`. Parameters ---------- X : array-like of shape (n_samples_X, n_features) Input array/matrix X. Y : ndarray of shape (n_samples_Y, n_features), default=None Input array/matrix Y. gamma : float, default=1 Scaling parameter of the chi2 kernel. Returns ------- kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y) Returns the exponential chi-squared kernel X and Y. See Also -------- additive_chi2_kernel : The additive version of this kernel. sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation to the additive version of this kernel. References ---------- * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C. Local features and kernels for classification of texture and object categories: A comprehensive study International Journal of Computer Vision 2007 https://hal.archives-ouvertes.fr/hal-00171412/document """ K = additive_chi2_kernel(X, Y) K *= gamma return np.exp(K, K)
def chi2_kernel(X, Y=None, gamma=1.0): """Compute the exponential chi-squared kernel X and Y. The chi-squared kernel is computed between each pair of rows in X and Y. X and Y have to be non-negative. This kernel is most commonly applied to histograms. The chi-squared kernel is given by:: k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)]) It can be interpreted as a weighted difference per entry. Read more in the :ref:`User Guide <chi2_kernel>`. Parameters ---------- X : array-like of shape (n_samples_X, n_features) Input array/matrix X. Y : ndarray of shape (n_samples_Y, n_features), default=None Input array/matrix Y. gamma : float, default=1 Scaling parameter of the chi2 kernel. Returns ------- kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y) The kernel matrix. See Also -------- additive_chi2_kernel : The additive version of this kernel. sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation to the additive version of this kernel. References ---------- * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C. Local features and kernels for classification of texture and object categories: A comprehensive study International Journal of Computer Vision 2007 https://hal.archives-ouvertes.fr/hal-00171412/document """ K = additive_chi2_kernel(X, Y) K *= gamma return np.exp(K, K)
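To make the documented formula concrete, here is a small reference implementation transcribed directly from `k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])`; it is an illustrative sketch rather than scikit-learn's vectorized code, and the zero-denominator convention is an assumption:

```python
import numpy as np


def chi2_kernel_reference(X, Y, gamma=1.0):
    """Naive chi2 kernel, written straight from the docstring formula."""
    X = np.asarray(X, dtype=float)
    Y = np.asarray(Y, dtype=float)
    K = np.empty((X.shape[0], Y.shape[0]))
    for i, x in enumerate(X):
        for j, y in enumerate(Y):
            num = (x - y) ** 2
            denom = x + y
            # Assumed convention: entries with x + y == 0 contribute nothing.
            terms = np.divide(num, denom, out=np.zeros_like(num), where=denom > 0)
            K[i, j] = np.exp(-gamma * terms.sum())
    return K


X = np.array([[1.0, 0.0, 2.0], [0.5, 0.5, 0.0]])
print(chi2_kernel_reference(X, X, gamma=0.5))
```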
32,208
def generic_command(client: MsGraphClient, args: Dict[str, Any]) -> Union[CommandResults, str]: request_body = args.get('request_body') results: dict if request_body and isinstance(request_body, str): try: request_body = json.loads(request_body) except json.decoder.JSONDecodeError as e: raise ValueError(f'Invalid request body - {str(e)}') http_method = args.get('http_method', 'GET') response = client.generic_request( resource=args.get('resource', ''), http_method=http_method, api_version=args.get('api_version', 'v1.0'), odata=args.get('odata', ''), request_body=request_body, ) if not response: results = { 'readable_output': 'The API query ran successfully and returned no content.', } else: results = {'raw_response': response} if argToBoolean(args.get('populate_context', 'true')): results['outputs'] = get_response_outputs(response) if results['outputs'] is True: return 'The API query ran successfully and returned no content.' results['outputs_prefix'] = 'MicrosoftGraph' return CommandResults(**results) # type: ignore[arg-type]
def generic_command(client: MsGraphClient, args: Dict[str, Any]) -> CommandResults:
    request_body = args.get('request_body')
    results: dict

    if request_body and isinstance(request_body, str):
        try:
            request_body = json.loads(request_body)
        except json.decoder.JSONDecodeError as e:
            raise ValueError(f'Invalid request body - {str(e)}')

    http_method = args.get('http_method', 'GET')
    response = client.generic_request(
        resource=args.get('resource', ''),
        http_method=http_method,
        api_version=args.get('api_version', 'v1.0'),
        odata=args.get('odata', ''),
        request_body=request_body,
    )

    if not response:
        results = {
            'readable_output': 'The API query ran successfully and returned no content.',
        }
    else:
        results = {'raw_response': response}
        if argToBoolean(args.get('populate_context', 'true')):
            results['outputs'] = get_response_outputs(response)
            if results['outputs'] is True:
                # Keep the return type consistent with the annotation above.
                return CommandResults(
                    readable_output='The API query ran successfully and returned no content.',
                )
            results['outputs_prefix'] = 'MicrosoftGraph'

    return CommandResults(**results)  # type: ignore[arg-type]
2,980
def _interpolate_scipy_wrapper( x, y, new_x, method, fill_value=None, bounds_error=False, order=None, **kwargs ): """ Passed off to scipy.interpolate.interp1d. method is scipy's kind. Returns an array interpolated at new_x. Add any new methods to the list in _clean_interp_method. """ extra = f"{method} interpolation requires SciPy." import_optional_dependency("scipy", extra=extra) from scipy import interpolate new_x = np.asarray(new_x) # ignores some kwargs that could be passed along. alt_methods = { "barycentric": interpolate.barycentric_interpolate, "krogh": interpolate.krogh_interpolate, "from_derivatives": _from_derivatives, "piecewise_polynomial": _from_derivatives, } if getattr(x, "is_all_dates", False): # GH 5975, scipy.interp1d can't handle datetime64s x, new_x = x._values.astype("i8"), new_x.astype("i8") if method == "pchip": try: alt_methods["pchip"] = interpolate.pchip_interpolate except AttributeError: raise ImportError( "Your version of Scipy does not support PCHIP interpolation." ) elif method == "akima": alt_methods["akima"] = _akima_interpolate interp1d_methods = [ "nearest", "zero", "slinear", "quadratic", "cubic", "polynomial", ] if method in interp1d_methods: if method == "polynomial": method = order terp = interpolate.interp1d( x, y, kind=method, fill_value=fill_value, bounds_error=bounds_error ) new_y = terp(new_x) elif method == "spline": # GH #10633, #24014 if isna(order) or (order <= 0): raise ValueError( "order needs to be specified and greater than 0; " f"got order: {order}" ) terp = interpolate.UnivariateSpline(x, y, k=order, **kwargs) new_y = terp(new_x) else: # GH 7295: need to be able to write for some reason # in some circumstances: check all three if not x.flags.writeable: x = x.copy() if not y.flags.writeable: y = y.copy() if not new_x.flags.writeable: new_x = new_x.copy() method = alt_methods[method] new_y = method(x, y, new_x, **kwargs) return new_y
def _interpolate_scipy_wrapper( x, y, new_x, method, fill_value=None, bounds_error=False, order=None, **kwargs ): """ Passed off to scipy.interpolate.interp1d. method is scipy's kind. Returns an array interpolated at new_x. Add any new methods to the list in _clean_interp_method. """ extra = f"{method} interpolation requires SciPy." import_optional_dependency("scipy", extra=extra) from scipy import interpolate new_x = np.asarray(new_x) # ignores some kwargs that could be passed along. alt_methods = { "barycentric": interpolate.barycentric_interpolate, "krogh": interpolate.krogh_interpolate, "from_derivatives": _from_derivatives, "piecewise_polynomial": _from_derivatives, } if getattr(x, "is_all_dates", False): # GH 5975, scipy.interp1d can't handle datetime64s x, new_x = x._values.astype("i8"), new_x.astype("i8") if method == "pchip": try: alt_methods["pchip"] = interpolate.pchip_interpolate except AttributeError: raise ImportError( "Your version of Scipy does not support PCHIP interpolation." ) elif method == "akima": alt_methods["akima"] = _akima_interpolate interp1d_methods = [ "nearest", "zero", "slinear", "quadratic", "cubic", "polynomial", ] if method in interp1d_methods: if method == "polynomial": method = order terp = interpolate.interp1d( x, y, kind=method, fill_value=fill_value, bounds_error=bounds_error ) new_y = terp(new_x) elif method == "spline": # GH #10633, #24014 if isna(order) or (order <= 0): raise ValueError( f"order needs to be specified and greater than 0; got order: {order}" ) terp = interpolate.UnivariateSpline(x, y, k=order, **kwargs) new_y = terp(new_x) else: # GH 7295: need to be able to write for some reason # in some circumstances: check all three if not x.flags.writeable: x = x.copy() if not y.flags.writeable: y = y.copy() if not new_x.flags.writeable: new_x = new_x.copy() method = alt_methods[method] new_y = method(x, y, new_x, **kwargs) return new_y
48,050
def test_expression_grammar(nested_assy): nested_assy.constrain( "TOP@faces@>Z", "SECOND/BOTTOM@vertices@>X and >Y and >Z", "Plane" )
def test_expression_grammar(nested_assy): nested_assy.constrain( "TOP@faces@>Z", "SECOND/BOTTOM@vertices@>X and >Y and >Z", "Point" )
32,627
def status_blacklist(client: Client, args: dict) -> CommandResults: """ Returns url blacklisting information and reputation. """ domain = args["domain"] if not domain: raise ValueError('domain is missing') start = client._api_request(domain=domain, request_type="GET", operation="blacklist") readable_output = f"Blacklisted URI's of {domain}" if start.get('errorstr') != 'success': readable_output = f'Failed to get the report for domain {domain}' result = CommandResults( readable_output=readable_output, outputs_prefix="", outputs_key_field='QutteraWebsiteMalwareScanning.domain', outputs=start ) return_results(result)
def status_blacklist(client: Client, args: dict) -> CommandResults: """ Returns url blacklisting information and reputation. """ domain = args.get('domain') if not domain: raise ValueError('domain is missing') start = client._api_request(domain=domain, request_type="GET", operation="blacklist") readable_output = f"Blacklisted URI's of {domain}" if start.get('errorstr') != 'success': readable_output = f'Failed to get the report for domain {domain}' result = CommandResults( readable_output=readable_output, outputs_prefix="", outputs_key_field='QutteraWebsiteMalwareScanning.domain', outputs=start ) return_results(result)
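The change in this pair swaps bracket indexing for dict.get, so a missing key reaches the explicit 'domain is missing' check instead of raising KeyError first. A tiny illustration of the difference:

args = {}

try:
    domain = args["domain"]          # bracket access fails before any custom validation runs
except KeyError:
    print("KeyError escapes as-is")

domain = args.get("domain")          # .get returns None for a missing key
if not domain:
    print("domain is missing")       # the intended, descriptive error path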
57,971
def main(): params = demisto.params() base_url = params.get('url') verify_ssl = not params.get('insecure', False) proxy = params.get('proxy') credentials = params.get('credentials') api_params = { 'key': params.get('api_key') or params.get('key_and_token').get('identifier', ''), 'api_token': params.get('api_token') or params.get('key_and_token').get('password', '') } api_params.update(demisto.args()) threshold = int(api_params.get('threshold', params.get('threshold', 70))) if not credentials or not credentials.get('identifier') or not credentials.get('password'): credentials = {} if not api_params.get('key') or not api_params.get('api_token'): api_params = {} if not ((params.get('api_key') and params.get('api_token')) or api_params or credentials): raise DemistoException('Please fill the credentials in the integration params' ' - api key and token or username and password') client = Client(base_url, api_params, verify=verify_ssl, proxy=proxy, credentials=credentials, threshold=threshold) command = demisto.command() demisto.debug(f'Command being called is {command}') # Switch case commands = { 'test-module': Client.test_module_command, 'file': Client.file, f'{INTEGRATION_COMMAND_NAME}-check-status': Client.check_status, f'{INTEGRATION_COMMAND_NAME}-get-report': Client.get_report, f'{INTEGRATION_COMMAND_NAME}-get-task-list': Client.get_task_list, f'{INTEGRATION_COMMAND_NAME}-upload-file': Client.upload_file, f'{INTEGRATION_COMMAND_NAME}-upload-url': Client.upload_url } try: if command in commands: readable_output, outputs, raw_response = commands[command](client) return_outputs(readable_output, outputs, raw_response) else: raise DemistoException(f'{demisto.command()} is not a command') # Log exceptions except Exception as every_error: err_msg = f'Error in {INTEGRATION_NAME} Integration [{every_error}]' return_error(err_msg, error=every_error)
def main(): params = demisto.params() base_url = params.get('url') verify_ssl = not params.get('insecure', False) proxy = params.get('proxy') credentials = params.get('credentials') api_params = { 'key': params.get('api_key') or params.get('key_and_token').get('identifier', ''), 'api_token': params.get('api_token') or params.get('key_and_token').get('password', '') } api_params.update(demisto.args()) threshold = int(api_params.get('threshold', params.get('threshold', 70))) if not credentials or not credentials.get('identifier') or not credentials.get('password'): credentials = {} if not (api_params.get('key') or api_params.get('api_token')): api_params = {} if not (api_params or credentials): raise DemistoException('Please fill the credentials in the integration params' ' - api key and token or username and password') client = Client(base_url, api_params, verify=verify_ssl, proxy=proxy, credentials=credentials, threshold=threshold) command = demisto.command() demisto.debug(f'Command being called is {command}') # Switch case commands = { 'test-module': Client.test_module_command, 'file': Client.file, f'{INTEGRATION_COMMAND_NAME}-check-status': Client.check_status, f'{INTEGRATION_COMMAND_NAME}-get-report': Client.get_report, f'{INTEGRATION_COMMAND_NAME}-get-task-list': Client.get_task_list, f'{INTEGRATION_COMMAND_NAME}-upload-file': Client.upload_file, f'{INTEGRATION_COMMAND_NAME}-upload-url': Client.upload_url } try: if command in commands: readable_output, outputs, raw_response = commands[command](client) return_outputs(readable_output, outputs, raw_response) else: raise DemistoException(f'{demisto.command()} is not a command') # Log exceptions except Exception as every_error: err_msg = f'Error in {INTEGRATION_NAME} Integration [{every_error}]' return_error(err_msg, error=every_error)
44,970
def test_deploy_flow_run_sleeps_until_start_time(monkeypatch, cloud_api): gql_return = MagicMock( return_value=MagicMock(data=MagicMock(write_run_logs=MagicMock(success=True))) ) client = MagicMock() client.return_value.write_run_logs = gql_return monkeypatch.setattr("prefect.agent.agent.Client", MagicMock(return_value=client)) sleep = MagicMock() monkeypatch.setattr("time.sleep", sleep) dt = pendulum.now() agent = Agent() agent.deploy_flow = MagicMock() agent._deploy_flow_run( flow_run=GraphQLResult( { "id": "id", "serialized_state": Scheduled( start_time=dt.add(seconds=10) ).serialize(), "scheduled_start_time": str(dt.add(seconds=10)), "version": 1, "task_runs": [ GraphQLResult( { "id": "id", "version": 1, "serialized_state": Scheduled( start_time=dt.add(seconds=10) ).serialize(), } ) ], } ) ) sleep_time = sleep.call_args[0][0] assert 10 >= sleep_time > 9 agent.deploy_flow.assert_called_once()
def test_deploy_flow_run_sleeps_until_start_time(monkeypatch, cloud_api): gql_return = MagicMock( return_value=MagicMock(data=MagicMock(write_run_logs=MagicMock(success=True))) ) client = MagicMock() client.return_value.write_run_logs = gql_return monkeypatch.setattr("prefect.agent.agent.Client", MagicMock(return_value=client)) sleep = MagicMock() monkeypatch.setattr("time.sleep", sleep) dt = pendulum.now() agent = Agent() agent.deploy_flow = MagicMock() agent._deploy_flow_run( flow_run=GraphQLResult( { "id": "id", "serialized_state": Scheduled( start_time=dt.add(seconds=10) ).serialize(), "scheduled_start_time": str(dt), "version": 1, "task_runs": [ GraphQLResult( { "id": "id", "version": 1, "serialized_state": Scheduled( start_time=dt.add(seconds=10) ).serialize(), } ) ], } ) ) sleep_time = sleep.call_args[0][0] assert 10 >= sleep_time > 9 agent.deploy_flow.assert_called_once()
35,335
def test__get_file_path(mapdl): fname = 'dummy.txt' with open(fname, 'w') as fid: fid.write("Dummy file for testing") assert fname == mapdl._get_file_path(fname) os.remove(fname)
def test_get_file_path(mapdl): fname = 'dummy.txt' with open(fname, 'w') as fid: fid.write("Dummy file for testing") assert fname == mapdl._get_file_path(fname) os.remove(fname)
39,681
def main(): module = KatelloEntityAnsibleModule( argument_spec=dict( name=dict(), product=dict(), label=dict(), repositories=dict(type='list', elements='dict'), all_repositories=dict(type='bool', default=False), state=dict(default='enabled', choices=['disabled', 'enabled']), ), required_one_of=[['label', 'name']], required_if=[ ['all_repositories', False, ['repositories']], ], ) module_params = module.clean_params() module.connect() module_params, scope = module.handle_organization_param(module_params) record_data = {} if 'product' in module_params: module_params['product'] = module.find_resource_by_name('products', name=module_params['product'], params=scope, thin=True) scope['product_id'] = module_params['product']['id'] record_data['product'] = module_params['product'] if 'label' in module_params: search = 'label="{0}"'.format(module_params['label']) repo_set = module.find_resource('repository_sets', search=search, params=scope) record_data['label'] = module_params['label'] else: repo_set = module.find_resource_by_name('repository_sets', name=module_params['name'], params=scope) record_data['name'] = module_params['name'] repo_set_scope = {'id': repo_set['id'], 'product_id': repo_set['product']['id']} repo_set_scope.update(scope) available_repos = module.resource_action('repository_sets', 'available_repositories', params=repo_set_scope, ignore_check_mode=True) available_repos = available_repos['results'] current_repos = repo_set['repositories'] if not module_params['all_repositories']: desired_repos = get_desired_repos(module_params['repositories'], available_repos) else: desired_repos = available_repos.copy() available_repo_names = set(map(lambda repo: repo['repo_name'], available_repos)) current_repo_names = set(map(lambda repo: repo['name'], current_repos)) desired_repo_names = set(map(lambda repo: repo['repo_name'], desired_repos)) if len(desired_repo_names - available_repo_names) > 0: module.fail_json(msg="Desired repositories are not available on the repository set {0}. Desired: {1} Available: {2}" .format(module_params['name'], desired_repo_names, available_repo_names)) if module.state == 'enabled': for repo in desired_repo_names - current_repo_names: repo_to_enable = next((r for r in available_repos if r['repo_name'] == repo)) repo_change_params = repo_to_enable['substitutions'].copy() repo_change_params.update(repo_set_scope) record_repository_set_state(module, record_data, repo, 'disabled', 'enabled') module.resource_action('repository_sets', 'enable', params=repo_change_params) elif module.state == 'disabled': for repo in current_repo_names & desired_repo_names: repo_to_disable = next((r for r in available_repos if r['repo_name'] == repo)) repo_change_params = repo_to_disable['substitutions'].copy() repo_change_params.update(repo_set_scope) record_repository_set_state(module, record_data, repo, 'enabled', 'disabled') module.resource_action('repository_sets', 'disable', params=repo_change_params) module.exit_json()
def main(): module = KatelloEntityAnsibleModule( argument_spec=dict( name=dict(), product=dict(), label=dict(), repositories=dict(type='list', elements='dict'), all_repositories=dict(type='bool', default=False), state=dict(default='enabled', choices=['disabled', 'enabled']), ), required_one_of=[['label', 'name']], required_if=[ ['all_repositories', False, ['repositories']], ], ) module_params = module.clean_params() module.connect() module_params, scope = module.handle_organization_param(module_params) record_data = {} if 'product' in module_params: module_params['product'] = module.find_resource_by_name('products', name=module_params['product'], params=scope, thin=True) scope['product_id'] = module_params['product']['id'] record_data['product'] = module_params['product'] if 'label' in module_params: search = 'label="{0}"'.format(module_params['label']) repo_set = module.find_resource('repository_sets', search=search, params=scope) record_data['label'] = module_params['label'] else: repo_set = module.find_resource_by_name('repository_sets', name=module_params['name'], params=scope) record_data['name'] = module_params['name'] repo_set_scope = {'id': repo_set['id'], 'product_id': repo_set['product']['id']} repo_set_scope.update(scope) available_repos = module.resource_action('repository_sets', 'available_repositories', params=repo_set_scope, ignore_check_mode=True) available_repos = available_repos['results'] current_repos = repo_set['repositories'] if not module_params['all_repositories']: desired_repos = get_desired_repos(module_params['repositories'], available_repos) else: desired_repos = available_repos[:] available_repo_names = set(map(lambda repo: repo['repo_name'], available_repos)) current_repo_names = set(map(lambda repo: repo['name'], current_repos)) desired_repo_names = set(map(lambda repo: repo['repo_name'], desired_repos)) if len(desired_repo_names - available_repo_names) > 0: module.fail_json(msg="Desired repositories are not available on the repository set {0}. Desired: {1} Available: {2}" .format(module_params['name'], desired_repo_names, available_repo_names)) if module.state == 'enabled': for repo in desired_repo_names - current_repo_names: repo_to_enable = next((r for r in available_repos if r['repo_name'] == repo)) repo_change_params = repo_to_enable['substitutions'].copy() repo_change_params.update(repo_set_scope) record_repository_set_state(module, record_data, repo, 'disabled', 'enabled') module.resource_action('repository_sets', 'enable', params=repo_change_params) elif module.state == 'disabled': for repo in current_repo_names & desired_repo_names: repo_to_disable = next((r for r in available_repos if r['repo_name'] == repo)) repo_change_params = repo_to_disable['substitutions'].copy() repo_change_params.update(repo_set_scope) record_repository_set_state(module, record_data, repo, 'enabled', 'disabled') module.resource_action('repository_sets', 'disable', params=repo_change_params) module.exit_json()
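The only behavioural-looking change in this pair is available_repos.copy() versus available_repos[:]; both produce a shallow copy of the list. A quick check of that equivalence with invented repo data:

repos = [{"repo_name": "base"}, {"repo_name": "extras"}]

a = repos.copy()   # list.copy(), Python 3.3+
b = repos[:]       # slice copy, works on older interpreters as well

print(a == repos, a is repos)              # True False
print(b == repos, b is repos)              # True False
print(a[0] is repos[0], b[0] is repos[0])  # True True -- shallow: same element objects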
20,475
def fill_link_tracker_code(env): tracker_codes = env['link.tracker.code'].search([('code', '=', None)]) for tracker in tracker_codes: tracker.code = tracker.get_random_code_string()
def fill_link_tracker_code(env): tracker_codes = env['link.tracker.code'].search([('code', '=', False)]) for tracker in tracker_codes: tracker.code = tracker.get_random_code_string()
22,177
def render_pdf(event, positions, opt): from PyPDF2 import PdfMerger, PdfReader, PdfWriter, Transformation from PyPDF2.generic import RectangleObject Renderer._register_fonts() renderermap = { bi.item_id: _renderer(event, bi.layout) for bi in BadgeItem.objects.select_related('layout').filter(item__event=event) } try: default_renderer = _renderer(event, event.badge_layouts.get(default=True)) except BadgeLayout.DoesNotExist: default_renderer = None op_renderers = [(op, renderermap.get(op.item_id, default_renderer)) for op in positions if renderermap.get(op.item_id, default_renderer)] if not len(op_renderers): raise OrderError(_("None of the selected products is configured to print badges.")) # render each badge on its own page first merger = PdfMerger() merger.add_metadata({ '/Title': 'Badges', '/Creator': 'pretix', }) for op, renderer in op_renderers: buffer = BytesIO() page = canvas.Canvas(buffer, pagesize=pagesizes.A4) with language(op.order.locale, op.order.event.settings.region): renderer.draw_page(page, op.order, op) if opt['pagesize']: page.setPageSize(opt['pagesize']) page.save() buffer = renderer.render_background(buffer, _('Badge')) merger.append(ContentFile(buffer.read())) outbuffer = BytesIO() merger.write(outbuffer) outbuffer.seek(0) badges_per_page = opt['cols'] * opt['rows'] if (badges_per_page == 1): # no need to place multiple badges on one page return outbuffer # place n-up badges/pages per page badges_pdf = PdfReader(outbuffer) nup_pdf = PdfWriter() nup_page = None for i, page in enumerate(badges_pdf.pages): di = i % badges_per_page if (di == 0): nup_page = nup_pdf.add_blank_page( width=opt['pagesize'][0], height=opt['pagesize'][1], ) tx = opt['margins'][3] + (di % opt['cols']) * opt['offsets'][0] ty = opt['margins'][2] + (opt['rows'] - 1 - (di // opt['cols'])) * opt['offsets'][1] page.add_transformation(Transformation().translate(tx, ty)) page.mediabox = RectangleObject(( page.mediabox.left.as_numeric() + tx, page.mediabox.bottom.as_numeric() + ty, page.mediabox.right.as_numeric() + tx, page.mediabox.top.as_numeric() + ty )) page.trimbox = page.mediabox nup_page.merge_page(page) outbuffer = BytesIO() nup_pdf.write(outbuffer) outbuffer.seek(0) return outbuffer
def render_pdf(event, positions, opt): from PyPDF2 import PdfMerger, PdfReader, PdfWriter, Transformation from PyPDF2.generic import RectangleObject Renderer._register_fonts() renderermap = { bi.item_id: _renderer(event, bi.layout) for bi in BadgeItem.objects.select_related('layout').filter(item__event=event) } try: default_renderer = _renderer(event, event.badge_layouts.get(default=True)) except BadgeLayout.DoesNotExist: default_renderer = None op_renderers = [(op, renderermap.get(op.item_id, default_renderer)) for op in positions if renderermap.get(op.item_id, default_renderer)] if not len(op_renderers): raise OrderError(_("None of the selected products is configured to print badges.")) # render each badge on its own page first merger = PdfMerger() merger.add_metadata({ '/Title': 'Badges', '/Creator': 'pretix', }) for op, renderer in op_renderers: buffer = BytesIO() page = canvas.Canvas(buffer, pagesize=pagesizes.A4) with language(op.order.locale, op.order.event.settings.region): renderer.draw_page(page, op.order, op) if opt['pagesize']: page.setPageSize(opt['pagesize']) page.save() buffer = renderer.render_background(buffer, _('Badge')) merger.append(ContentFile(buffer.read())) outbuffer = BytesIO() merger.write(outbuffer) outbuffer.seek(0) badges_per_page = opt['cols'] * opt['rows'] if (badges_per_page == 1): # no need to place multiple badges on one page return outbuffer # place n-up badges/pages per page badges_pdf = PdfReader(outbuffer) nup_pdf = PdfWriter() nup_page = None for i, page in enumerate(badges_pdf.pages): di = i % badges_per_page if di == 0: nup_page = nup_pdf.add_blank_page( width=opt['pagesize'][0], height=opt['pagesize'][1], ) tx = opt['margins'][3] + (di % opt['cols']) * opt['offsets'][0] ty = opt['margins'][2] + (opt['rows'] - 1 - (di // opt['cols'])) * opt['offsets'][1] page.add_transformation(Transformation().translate(tx, ty)) page.mediabox = RectangleObject(( page.mediabox.left.as_numeric() + tx, page.mediabox.bottom.as_numeric() + ty, page.mediabox.right.as_numeric() + tx, page.mediabox.top.as_numeric() + ty )) page.trimbox = page.mediabox nup_page.merge_page(page) outbuffer = BytesIO() nup_pdf.write(outbuffer) outbuffer.seek(0) return outbuffer
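The n-up loop above translates each badge page to a grid position before merging it onto a blank output page; index 3 of opt['margins'] is read as the left margin and index 2 as the bottom margin. A sketch of just that placement arithmetic for a hypothetical 2x3 grid (margin and offset values invented):

cols, rows = 2, 3
margins = (10, 10, 10, 10)   # hypothetical; the function above reads [3] as left and [2] as bottom
offsets = (300, 260)         # hypothetical horizontal / vertical step between badges, in points

for i in range(cols * rows):
    di = i % (cols * rows)
    tx = margins[3] + (di % cols) * offsets[0]
    ty = margins[2] + (rows - 1 - (di // cols)) * offsets[1]
    # PDF origin is bottom-left, so the first badge lands in the top-left corner
    print(f"badge {i}: translate by ({tx}, {ty})")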
41,523
def test_get_workspace_data_bad_model(workspace_factory, caplog, mocker): w = workspace_factory() m = w.model() # the iconic fragrance of an expected failure mocker.patch( "pyhf.mixins._ChannelSummaryMixin.channels", new_callable=mocker.PropertyMock, return_value=["channel"], ) with caplog.at_level(logging.INFO, 'pyhf.pdf'): with pytest.raises(KeyError): assert w.data(m) assert 'Invalid channel' in caplog.text
def test_get_workspace_data_bad_model(workspace_factory, caplog, mocker): w = workspace_factory() m = w.model() # the iconic fragrance of an expected failure mocker.patch( "pyhf.mixins._ChannelSummaryMixin.channels", new_callable=mocker.PropertyMock, return_value=["chanel"], ) with caplog.at_level(logging.INFO, 'pyhf.pdf'): with pytest.raises(KeyError): assert w.data(m) assert 'Invalid channel' in caplog.text
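The patch in this pair replaces a read-only property via new_callable=PropertyMock. An illustration with unittest.mock directly (pytest-mock's mocker.patch wraps the same machinery); the Model class here is invented:

from unittest.mock import PropertyMock, patch

class Model:
    @property
    def channels(self):
        return ["SR", "CR"]

with patch.object(Model, "channels", new_callable=PropertyMock, return_value=["bad_channel"]):
    print(Model().channels)   # ['bad_channel'] -- property replaced while the patch is active

print(Model().channels)       # ['SR', 'CR'] -- restored afterwards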
33,025
def install_build_src(args: CommandLineArguments, root: str, do_run_build_script: bool, for_cache: bool) -> None: if (not do_run_build_script or args.build_script is None) and not args.prepare_script: return if for_cache: return with complete_step('Copying in build script and sources'): if args.build_script is not None: copy_file(args.build_script, os.path.join(root, "root", os.path.basename(args.build_script))) if args.build_sources is not None: target = os.path.join(root, "root/src") if args.source_file_transfer in (SourceFileTransfer.copy_git_others, SourceFileTransfer.copy_git_cached, SourceFileTransfer.copy_git_more): copy_git_files(args.build_sources, target, source_file_transfer=args.source_file_transfer) elif args.source_file_transfer == SourceFileTransfer.copy_all: ignore = shutil.ignore_patterns('.git', '.mkosi-*', '*.cache-pre-dev', '*.cache-pre-inst', os.path.basename(args.output_dir)+"/" if args.output_dir else "mkosi.output/", # NOQA: E501 os.path.basename(args.cache_path)+"/" if args.cache_path else "mkosi.cache/", # NOQA: E501 os.path.basename(args.build_dir)+"/" if args.build_dir else "mkosi.builddir/") # NOQA: E501 shutil.copytree(args.build_sources, target, symlinks=True, ignore=ignore)
def install_build_src(args: CommandLineArguments, root: str, do_run_build_script: bool, for_cache: bool) -> None: if not args.prepare_script is None and (not do_run_build_script or args.build_script is None): return if for_cache: return with complete_step('Copying in build script and sources'): if args.build_script is not None: copy_file(args.build_script, os.path.join(root, "root", os.path.basename(args.build_script))) if args.build_sources is not None: target = os.path.join(root, "root/src") if args.source_file_transfer in (SourceFileTransfer.copy_git_others, SourceFileTransfer.copy_git_cached, SourceFileTransfer.copy_git_more): copy_git_files(args.build_sources, target, source_file_transfer=args.source_file_transfer) elif args.source_file_transfer == SourceFileTransfer.copy_all: ignore = shutil.ignore_patterns('.git', '.mkosi-*', '*.cache-pre-dev', '*.cache-pre-inst', os.path.basename(args.output_dir)+"/" if args.output_dir else "mkosi.output/", # NOQA: E501 os.path.basename(args.cache_path)+"/" if args.cache_path else "mkosi.cache/", # NOQA: E501 os.path.basename(args.build_dir)+"/" if args.build_dir else "mkosi.builddir/") # NOQA: E501 shutil.copytree(args.build_sources, target, symlinks=True, ignore=ignore)
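The copy_all branch above relies on shutil.ignore_patterns to skip VCS metadata and cache directories during copytree. A self-contained sketch of that mechanism using throwaway temp directories:

import os
import shutil
import tempfile

src = tempfile.mkdtemp()
dst = os.path.join(tempfile.mkdtemp(), "copy")

os.makedirs(os.path.join(src, ".git"))
open(os.path.join(src, "main.c"), "w").close()
open(os.path.join(src, "scratch.cache-pre-dev"), "w").close()

# ignore_patterns returns a callable that copytree consults for every directory it visits
ignore = shutil.ignore_patterns(".git", "*.cache-pre-dev")
shutil.copytree(src, dst, symlinks=True, ignore=ignore)

print(sorted(os.listdir(dst)))   # ['main.c'] -- ignored names never get copied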
448
def _bulk_case_upload_api(request, domain): try: upload_file = request.FILES["file"] case_type = request.POST["case_type"] if not upload_file or not case_type: raise Exception except Exception: raise ImporterError("Invalid POST request. " "Both 'file' and 'case_type' are required") search_field = request.POST.get('search_field', 'case_id') create_new_cases = request.POST.get('create_new_cases') == 'on' if search_field == 'case_id': default_search_column = 'case_id' elif search_field == 'external_id': default_search_column = 'external_id' else: raise ImporterError("Illegal value for search_field: %s" % search_field) search_column = request.POST.get('search_column', default_search_column) name_column = request.POST.get('name_column', 'name') upload_comment = request.POST.get('comment') case_upload, context = _process_file_and_get_upload(upload_file, request, domain) case_upload.check_file() with case_upload.get_spreadsheet() as spreadsheet: columns = spreadsheet.get_header_columns() excel_fields = columns # hide search column and matching case fields from the update list if search_column in excel_fields: excel_fields.remove(search_column) custom_fields = [] case_fields = [] #Create the field arrays for the importer in the same format #as the "Step 2" Web UI from the manual process for f in excel_fields: if f == name_column: custom_fields.append("") case_fields.append("name") else: custom_fields.append(f) case_fields.append("") config = importer_util.ImporterConfig( couch_user_id=request.couch_user._id, excel_fields=excel_fields, case_fields=case_fields, custom_fields=custom_fields, search_column=search_column, case_type=case_type, search_field=search_field, create_new_cases=create_new_cases) case_upload.trigger_upload(domain, config, comment=upload_comment) upload_id = case_upload.upload_id status_url = "{}{}".format( get_url_base(), reverse('case_importer_upload_status', args=(domain, upload_id)) ) return json_response({"code": 200, "message": "success", "status_url": status_url})
def _bulk_case_upload_api(request, domain): try: upload_file = request.FILES["file"] case_type = request.POST["case_type"] if not upload_file or not case_type: raise Exception except Exception: raise ImporterError("Invalid POST request. " "Both 'file' and 'case_type' are required") search_field = request.POST.get('search_field', 'case_id') create_new_cases = request.POST.get('create_new_cases') == 'on' if search_field == 'case_id': default_search_column = 'case_id' elif search_field == 'external_id': default_search_column = 'external_id' else: raise ImporterError("Illegal value for search_field: %s" % search_field) search_column = request.POST.get('search_column', default_search_column) name_column = request.POST.get('name_column', 'name') upload_comment = request.POST.get('comment') case_upload, context = _process_file_and_get_upload(upload_file, request, domain) case_upload.check_file() with case_upload.get_spreadsheet() as spreadsheet: columns = spreadsheet.get_header_columns() excel_fields = columns # hide search column and matching case fields from the update list if search_column in excel_fields: excel_fields.remove(search_column) custom_fields = [] case_fields = [] #Create the field arrays for the importer in the same format #as the "Step 2" Web UI from the manual process for f in excel_fields: if f == name_column: custom_fields.append("") case_fields.append("name") else: custom_fields.append(f) case_fields.append("") config = importer_util.ImporterConfig( couch_user_id=request.couch_user._id, excel_fields=excel_fields, case_fields=case_fields, custom_fields=custom_fields, search_column=search_column, case_type=case_type, search_field=search_field, create_new_cases=create_new_cases) case_upload.trigger_upload(domain, config, comment=upload_comment) upload_id = case_upload.upload_id status_url = absolute_reverse('case_importer_upload_status', args=(domain, upload_id)) return json_response({"code": 200, "message": "success", "status_url": status_url})
15,041
def build_resources( translation_cache: Dict[str, Dict[str, Any]], components: Set[str], category: Optional[str], ) -> Dict[str, Dict[str, Any]]: """Build the resources response for the given components.""" # Build response resources: Dict[str, Dict[str, Any]] = {} for component in components: if "." not in component: domain = component else: domain = component.split(".", 1)[0] domain_resources = resources.setdefault(domain, {}) # Add the translations for this component to the domain resources. # Since clients cannot determine which platform an entity belongs to, # all translations for a domain will be returned together. if category is None: domain_resources.update(translation_cache[component]) continue new_value = translation_cache[component].get(category) if new_value is None: continue if isinstance(new_value, dict): domain_resources.setdefault(category, {}).update( translation_cache[component][category] ) else: domain_resources[category] = translation_cache[component][category] return {"component": resources}
def build_resources( translation_cache: Dict[str, Dict[str, Any]], components: Set[str], category: Optional[str], ) -> Dict[str, Dict[str, Any]]: """Build the resources response for the given components.""" # Build response resources: Dict[str, Dict[str, Any]] = {} for component in components: if "." not in component: domain = component else: domain = component.split(".", 1)[0] domain_resources = resources.setdefault(domain, {}) # Add the translations for this component to the domain resources. # Since clients cannot determine which platform an entity belongs to, # all translations for a domain will be returned together. if category is None: domain_resources.update(translation_cache[component]) continue new_value = translation_cache[component].get(category) if new_value is None: continue if isinstance(new_value, dict): domain_resources.setdefault(category, {}).update( translation_cache[component][category] ) else: domain_resources[category] = new_value return {"component": resources}
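The fix in this pair stores the already-fetched new_value instead of re-indexing the cache; the surrounding merge relies on dict.setdefault so nested categories accumulate rather than overwrite. A sketch of that merge with invented translation data:

resources = {}
translation_cache = {
    "light": {"state": {"on": "On", "off": "Off"}},
    "light.hue": {"state": {"unavailable": "Unavailable"}},
}

for component, translations in translation_cache.items():
    domain = component.split(".", 1)[0]
    domain_resources = resources.setdefault(domain, {})
    new_value = translations.get("state")
    if isinstance(new_value, dict):
        # merge nested categories instead of replacing what earlier components added
        domain_resources.setdefault("state", {}).update(new_value)
    elif new_value is not None:
        domain_resources["state"] = new_value

print(resources)
# {'light': {'state': {'on': 'On', 'off': 'Off', 'unavailable': 'Unavailable'}}}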
29,902
def main(argsl=None, # type: List[str] args=None, # type: argparse.Namespace job_order_object=None, # type: MutableMapping[Text, Any] stdin=sys.stdin, # type: IO[Any] stdout=None, # type: Union[TextIO, StreamWriter] stderr=sys.stderr, # type: IO[Any] versionfunc=versionstring, # type: Callable[[], Text] logger_handler=None, # custom_schema_callback=None, # type: Callable[[], None] executor=None, # type: Callable[..., Tuple[Dict[Text, Any], Text]] loadingContext=None, # type: LoadingContext runtimeContext=None # type: RuntimeContext ): # type: (...) -> int if not stdout: # force UTF-8 even if the console is configured differently if (hasattr(sys.stdout, "encoding") # type: ignore and sys.stdout.encoding != 'UTF-8'): # type: ignore if PY3 and hasattr(sys.stdout, "detach"): stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8') else: stdout = getwriter('utf-8')(sys.stdout) # type: ignore else: stdout = cast(TextIO, sys.stdout) # type: ignore _logger.removeHandler(defaultStreamHandler) if logger_handler is not None: stderr_handler = logger_handler else: stderr_handler = logging.StreamHandler(stderr) _logger.addHandler(stderr_handler) # pre-declared for finally block workflowobj = None prov_log_handler = None # type: Optional[logging.StreamHandler] try: if args is None: if argsl is None: argsl = sys.argv[1:] args = arg_parser().parse_args(argsl) if runtimeContext is None: runtimeContext = RuntimeContext(vars(args)) else: runtimeContext = runtimeContext.copy() # If on Windows platform, a default Docker Container is used if not # explicitely provided by user if onWindows() and not runtimeContext.default_container: # This docker image is a minimal alpine image with bash installed # (size 6 mb). source: https://github.com/frol/docker-alpine-bash runtimeContext.default_container = windows_default_container_id # If caller parsed its own arguments, it may not include every # cwltool option, so fill in defaults to avoid crashing when # dereferencing them in args. 
for key, val in iteritems(get_default_args()): if not hasattr(args, key): setattr(args, key, val) # Configure logging rdflib_logger = logging.getLogger("rdflib.term") rdflib_logger.addHandler(stderr_handler) rdflib_logger.setLevel(logging.ERROR) if args.quiet: # Silence STDERR, not an eventual provenance log file stderr_handler.setLevel(logging.WARN) if runtimeContext.debug: # Increase to debug for both stderr and provenance log file _logger.setLevel(logging.DEBUG) rdflib_logger.setLevel(logging.DEBUG) formatter = None # type: Optional[logging.Formatter] if args.timestamps: formatter = logging.Formatter("[%(asctime)s] %(message)s", "%Y-%m-%d %H:%M:%S") stderr_handler.setFormatter(formatter) ## if args.version: print(versionfunc()) return 0 _logger.info(versionfunc()) if args.print_supported_versions: print("\n".join(supported_cwl_versions(args.enable_dev))) return 0 if not args.workflow: if os.path.isfile("CWLFile"): setattr(args, "workflow", "CWLFile") else: _logger.error("") _logger.error("CWL document required, no input file was provided") arg_parser().print_help() return 1 if args.relax_path_checks: command_line_tool.ACCEPTLIST_RE = command_line_tool.ACCEPTLIST_EN_RELAXED_RE if args.ga4gh_tool_registries: ga4gh_tool_registries[:] = args.ga4gh_tool_registries if not args.enable_ga4gh_tool_registry: del ga4gh_tool_registries[:] if custom_schema_callback is not None: custom_schema_callback() elif args.enable_ext: res = pkg_resources.resource_stream(__name__, 'extensions.yml') use_custom_schema("v1.0", "http://commonwl.org/cwltool", res.read()) res.close() else: use_standard_schema("v1.0") if args.provenance: if not args.compute_checksum: _logger.error("--provenance incompatible with --no-compute-checksum") return 1 ro = ResearchObject( temp_prefix_ro=args.tmpdir_prefix, orcid=args.orcid, full_name=args.cwl_full_name) runtimeContext.research_obj = ro log_file_io = ro.open_log_file_for_activity(ro.engine_uuid) prov_log_handler = logging.StreamHandler(log_file_io) class ProvLogFormatter(logging.Formatter): """Enforce ISO8601 with both T and Z.""" def __init__(self): # type: () -> None super(ProvLogFormatter, self).__init__( "[%(asctime)sZ] %(message)s") def formatTime(self, record, datefmt=None): # type: (logging.LogRecord, str) -> str record_time = time.gmtime(record.created) formatted_time = time.strftime("%Y-%m-%dT%H:%M:%S", record_time) with_msecs = "%s,%03d" % (formatted_time, record.msecs) return with_msecs prov_log_handler.setFormatter(ProvLogFormatter()) _logger.addHandler(prov_log_handler) _logger.debug(u"[provenance] Logging to %s", log_file_io) if argsl is not None: # Log cwltool command line options to provenance file _logger.info("[cwltool] %s %s", sys.argv[0], u" ".join(argsl)) _logger.debug(u"[cwltool] Arguments: %s", args) if loadingContext is None: loadingContext = LoadingContext(vars(args)) else: loadingContext = loadingContext.copy() loadingContext.research_obj = runtimeContext.research_obj loadingContext.disable_js_validation = \ args.disable_js_validation or (not args.do_validate) loadingContext.construct_tool_object = getdefault( loadingContext.construct_tool_object, workflow.default_make_tool) loadingContext.resolver = getdefault(loadingContext.resolver, tool_resolver) uri, tool_file_uri = resolve_tool_uri( args.workflow, resolver=loadingContext.resolver, fetcher_constructor=loadingContext.fetcher_constructor) try_again_msg = "" if args.debug else ", try again with --debug for more information" try: job_order_object, input_basedir, jobloader = load_job_order( args, 
stdin, loadingContext.fetcher_constructor, loadingContext.overrides_list, tool_file_uri) if args.overrides: loadingContext.overrides_list.extend(load_overrides( file_uri(os.path.abspath(args.overrides)), tool_file_uri)) document_loader, workflowobj, uri = fetch_document( uri, resolver=loadingContext.resolver, fetcher_constructor=loadingContext.fetcher_constructor) if args.print_deps: printdeps(workflowobj, document_loader, stdout, args.relative_deps, uri) return 0 document_loader, avsc_names, processobj, metadata, uri \ = validate_document(document_loader, workflowobj, uri, loadingContext.overrides_list, {}, enable_dev=loadingContext.enable_dev, strict=loadingContext.strict, preprocess_only=(args.print_pre or args.pack), fetcher_constructor=loadingContext.fetcher_constructor, skip_schemas=args.skip_schemas, do_validate=loadingContext.do_validate) if args.pack: stdout.write(print_pack(document_loader, processobj, uri, metadata)) return 0 if args.provenance and runtimeContext.research_obj: # Can't really be combined with args.pack at same time runtimeContext.research_obj.packed_workflow( print_pack(document_loader, processobj, uri, metadata)) if args.print_pre: stdout.write(json_dumps(processobj, indent=4, sort_keys=True, separators=(',', ': '))) return 0 loadingContext.overrides_list.extend(metadata.get("cwltool:overrides", [])) tool = make_tool(document_loader, avsc_names, metadata, uri, loadingContext) if args.make_template: def my_represent_none(self, data): # pylint: disable=unused-argument """Force clean representation of 'null'.""" return self.represent_scalar(u'tag:yaml.org,2002:null', u'null') yaml.RoundTripRepresenter.add_representer(type(None), my_represent_none) yaml.round_trip_dump( generate_input_template(tool), sys.stdout, default_flow_style=False, indent=4, block_seq_indent=2) return 0 if args.validate: print("{} is valid CWL.".format(args.workflow)) return 0 if args.print_rdf: stdout.write(printrdf(tool, document_loader.ctx, args.rdf_serializer)) return 0 if args.print_dot: printdot(tool, document_loader.ctx, stdout) return 0 if args.print_targets: for f in ("outputs", "steps", "inputs"): if tool.tool[f]: _logger.info("%s%s targets:", f[0].upper(), f[1:-1]) stdout.write(" "+"\n ".join([shortname(t["id"]) for t in tool.tool[f]])+"\n") return 0 if args.target: if isinstance(tool, Workflow): url = urlparse(tool.tool["id"]) if url.fragment: extracted = get_subgraph([tool.tool["id"] + "/" + r for r in args.target], tool) else: extracted = get_subgraph([document_loader.fetcher.urljoin(tool.tool["id"], "#" + r) for r in args.target], tool) else: _logger.error("Can only use --target on Workflows") return 1 del document_loader.idx[extracted["id"]] tool = make_tool(document_loader, avsc_names, metadata, cast(CommentedMap, cmap(extracted)), loadingContext) if args.print_subgraph: if "name" in tool.tool: del tool.tool["name"] stdout.write(json_dumps(tool.tool, indent=4, sort_keys=True, separators=(',', ': '))) return 0 except (validate.ValidationException) as exc: _logger.error(u"Tool definition failed validation:\n%s", exc, exc_info=args.debug) return 1 except (RuntimeError, WorkflowException) as exc: _logger.error(u"Tool definition failed initialization:\n%s", exc, exc_info=args.debug) return 1 except Exception as exc: _logger.error( u"I'm sorry, I couldn't load this CWL file%s.\nThe error was: %s", try_again_msg, exc if not args.debug else "", exc_info=args.debug) return 1 if isinstance(tool, int): return tool # If on MacOS platform, TMPDIR must be set to be under one of the # shared 
volumes in Docker for Mac # More info: https://dockstore.org/docs/faq if sys.platform == "darwin": default_mac_path = "/private/tmp/docker_tmp" if runtimeContext.tmp_outdir_prefix == DEFAULT_TMP_PREFIX: runtimeContext.tmp_outdir_prefix = default_mac_path for dirprefix in ("tmpdir_prefix", "tmp_outdir_prefix", "cachedir"): if getattr(runtimeContext, dirprefix) and getattr(runtimeContext, dirprefix) != DEFAULT_TMP_PREFIX: sl = "/" if getattr(runtimeContext, dirprefix).endswith("/") or dirprefix == "cachedir" \ else "" setattr(runtimeContext, dirprefix, os.path.abspath(getattr(runtimeContext, dirprefix)) + sl) if not os.path.exists(os.path.dirname(getattr(runtimeContext, dirprefix))): try: os.makedirs(os.path.dirname(getattr(runtimeContext, dirprefix))) except Exception as e: _logger.error("Failed to create directory: %s", e) return 1 if args.cachedir: if args.move_outputs == "move": runtimeContext.move_outputs = "copy" runtimeContext.tmp_outdir_prefix = args.cachedir runtimeContext.secret_store = getdefault(runtimeContext.secret_store, SecretStore()) runtimeContext.make_fs_access = getdefault(runtimeContext.make_fs_access, StdFsAccess) try: initialized_job_order_object = init_job_order( job_order_object, args, tool, jobloader, stdout, print_input_deps=args.print_input_deps, relative_deps=args.relative_deps, make_fs_access=runtimeContext.make_fs_access, input_basedir=input_basedir, secret_store=runtimeContext.secret_store) except SystemExit as err: return err.code if not executor: if args.parallel: executor = MultithreadedJobExecutor() runtimeContext.select_resources = executor.select_resources else: executor = SingleJobExecutor() assert executor is not None try: runtimeContext.basedir = input_basedir del args.workflow del args.job_order conf_file = getattr(args, "beta_dependency_resolvers_configuration", None) # Text use_conda_dependencies = getattr(args, "beta_conda_dependencies", None) # Text if conf_file or use_conda_dependencies: runtimeContext.job_script_provider = DependenciesConfiguration(args) else: runtimeContext.find_default_container = functools.partial( find_default_container, default_container=runtimeContext.default_container, use_biocontainers=args.beta_use_biocontainers) (out, status) = executor(tool, initialized_job_order_object, runtimeContext, logger=_logger) if out is not None: if runtimeContext.research_obj is not None: runtimeContext.research_obj.create_job( out, None, True) def loc_to_path(obj): for field in ("path", "nameext", "nameroot", "dirname"): if field in obj: del obj[field] if obj["location"].startswith("file://"): obj["path"] = uri_file_path(obj["location"]) visit_class(out, ("File", "Directory"), loc_to_path) # Unsetting the Generation from final output object visit_class(out, ("File", ), MutationManager().unset_generation) if isinstance(out, string_types): stdout.write(out) else: stdout.write(json_dumps(out, indent=4, # type: ignore ensure_ascii=False)) stdout.write("\n") if hasattr(stdout, "flush"): stdout.flush() # type: ignore if status != "success": _logger.warning(u"Final process status is %s", status) return 1 _logger.info(u"Final process status is %s", status) return 0 except (validate.ValidationException) as exc: _logger.error(u"Input object failed validation:\n%s", exc, exc_info=args.debug) return 1 except UnsupportedRequirement as exc: _logger.error( u"Workflow or tool uses unsupported feature:\n%s", exc, exc_info=args.debug) return 33 except WorkflowException as exc: _logger.error( u"Workflow error%s:\n%s", try_again_msg, 
strip_dup_lineno(Text(exc)), exc_info=args.debug) return 1 except Exception as exc: # pylint: disable=broad-except _logger.error( u"Unhandled error%s:\n %s", try_again_msg, exc, exc_info=args.debug) return 1 finally: if args and runtimeContext and runtimeContext.research_obj \ and workflowobj: research_obj = runtimeContext.research_obj prov_dependencies = prov_deps(workflowobj, document_loader, uri) research_obj.generate_snapshot(prov_dependencies) if prov_log_handler is not None: # Stop logging so we won't half-log adding ourself to RO _logger.debug(u"[provenance] Closing provenance log file %s", prov_log_handler) _logger.removeHandler(prov_log_handler) # Ensure last log lines are written out prov_log_handler.flush() # Underlying WritableBagFile will add the tagfile to the manifest prov_log_handler.stream.close() prov_log_handler.close() research_obj.close(args.provenance) _logger.removeHandler(stderr_handler) _logger.addHandler(defaultStreamHandler)
def main(argsl=None, # type: List[str] args=None, # type: argparse.Namespace job_order_object=None, # type: MutableMapping[Text, Any] stdin=sys.stdin, # type: IO[Any] stdout=None, # type: Union[TextIO, StreamWriter] stderr=sys.stderr, # type: IO[Any] versionfunc=versionstring, # type: Callable[[], Text] logger_handler=None, # custom_schema_callback=None, # type: Callable[[], None] executor=None, # type: Callable[..., Tuple[Dict[Text, Any], Text]] loadingContext=None, # type: LoadingContext runtimeContext=None # type: RuntimeContext ): # type: (...) -> int if not stdout: # force UTF-8 even if the console is configured differently if (hasattr(sys.stdout, "encoding") # type: ignore and sys.stdout.encoding != 'UTF-8'): # type: ignore if PY3 and hasattr(sys.stdout, "detach"): stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8') else: stdout = getwriter('utf-8')(sys.stdout) # type: ignore else: stdout = cast(TextIO, sys.stdout) # type: ignore _logger.removeHandler(defaultStreamHandler) if logger_handler is not None: stderr_handler = logger_handler else: stderr_handler = logging.StreamHandler(stderr) _logger.addHandler(stderr_handler) # pre-declared for finally block workflowobj = None prov_log_handler = None # type: Optional[logging.StreamHandler] try: if args is None: if argsl is None: argsl = sys.argv[1:] args = arg_parser().parse_args(argsl) if runtimeContext is None: runtimeContext = RuntimeContext(vars(args)) else: runtimeContext = runtimeContext.copy() # If on Windows platform, a default Docker Container is used if not # explicitely provided by user if onWindows() and not runtimeContext.default_container: # This docker image is a minimal alpine image with bash installed # (size 6 mb). source: https://github.com/frol/docker-alpine-bash runtimeContext.default_container = windows_default_container_id # If caller parsed its own arguments, it may not include every # cwltool option, so fill in defaults to avoid crashing when # dereferencing them in args. 
for key, val in iteritems(get_default_args()): if not hasattr(args, key): setattr(args, key, val) # Configure logging rdflib_logger = logging.getLogger("rdflib.term") rdflib_logger.addHandler(stderr_handler) rdflib_logger.setLevel(logging.ERROR) if args.quiet: # Silence STDERR, not an eventual provenance log file stderr_handler.setLevel(logging.WARN) if runtimeContext.debug: # Increase to debug for both stderr and provenance log file _logger.setLevel(logging.DEBUG) rdflib_logger.setLevel(logging.DEBUG) formatter = None # type: Optional[logging.Formatter] if args.timestamps: formatter = logging.Formatter("[%(asctime)s] %(message)s", "%Y-%m-%d %H:%M:%S") stderr_handler.setFormatter(formatter) ## if args.version: print(versionfunc()) return 0 _logger.info(versionfunc()) if args.print_supported_versions: print("\n".join(supported_cwl_versions(args.enable_dev))) return 0 if not args.workflow: if os.path.isfile("CWLFile"): setattr(args, "workflow", "CWLFile") else: _logger.error("") _logger.error("CWL document required, no input file was provided") arg_parser().print_help() return 1 if args.relax_path_checks: command_line_tool.ACCEPTLIST_RE = command_line_tool.ACCEPTLIST_EN_RELAXED_RE if args.ga4gh_tool_registries: ga4gh_tool_registries[:] = args.ga4gh_tool_registries if not args.enable_ga4gh_tool_registry: del ga4gh_tool_registries[:] if custom_schema_callback is not None: custom_schema_callback() elif args.enable_ext: res = pkg_resources.resource_stream(__name__, 'extensions.yml') use_custom_schema("v1.0", "http://commonwl.org/cwltool", res.read()) res.close() else: use_standard_schema("v1.0") if args.provenance: if not args.compute_checksum: _logger.error("--provenance incompatible with --no-compute-checksum") return 1 ro = ResearchObject( temp_prefix_ro=args.tmpdir_prefix, orcid=args.orcid, full_name=args.cwl_full_name) runtimeContext.research_obj = ro log_file_io = ro.open_log_file_for_activity(ro.engine_uuid) prov_log_handler = logging.StreamHandler(log_file_io) class ProvLogFormatter(logging.Formatter): """Enforce ISO8601 with both T and Z.""" def __init__(self): # type: () -> None super(ProvLogFormatter, self).__init__( "[%(asctime)sZ] %(message)s") def formatTime(self, record, datefmt=None): # type: (logging.LogRecord, str) -> str record_time = time.gmtime(record.created) formatted_time = time.strftime("%Y-%m-%dT%H:%M:%S", record_time) with_msecs = "%s,%03d" % (formatted_time, record.msecs) return with_msecs prov_log_handler.setFormatter(ProvLogFormatter()) _logger.addHandler(prov_log_handler) _logger.debug(u"[provenance] Logging to %s", log_file_io) if argsl is not None: # Log cwltool command line options to provenance file _logger.info("[cwltool] %s %s", sys.argv[0], u" ".join(argsl)) _logger.debug(u"[cwltool] Arguments: %s", args) if loadingContext is None: loadingContext = LoadingContext(vars(args)) else: loadingContext = loadingContext.copy() loadingContext.research_obj = runtimeContext.research_obj loadingContext.disable_js_validation = \ args.disable_js_validation or (not args.do_validate) loadingContext.construct_tool_object = getdefault( loadingContext.construct_tool_object, workflow.default_make_tool) loadingContext.resolver = getdefault(loadingContext.resolver, tool_resolver) uri, tool_file_uri = resolve_tool_uri( args.workflow, resolver=loadingContext.resolver, fetcher_constructor=loadingContext.fetcher_constructor) try_again_msg = "" if args.debug else ", try again with --debug for more information" try: job_order_object, input_basedir, jobloader = load_job_order( args, 
stdin, loadingContext.fetcher_constructor, loadingContext.overrides_list, tool_file_uri) if args.overrides: loadingContext.overrides_list.extend(load_overrides( file_uri(os.path.abspath(args.overrides)), tool_file_uri)) document_loader, workflowobj, uri = fetch_document( uri, resolver=loadingContext.resolver, fetcher_constructor=loadingContext.fetcher_constructor) if args.print_deps: printdeps(workflowobj, document_loader, stdout, args.relative_deps, uri) return 0 document_loader, avsc_names, processobj, metadata, uri \ = validate_document(document_loader, workflowobj, uri, loadingContext.overrides_list, {}, enable_dev=loadingContext.enable_dev, strict=loadingContext.strict, preprocess_only=(args.print_pre or args.pack), fetcher_constructor=loadingContext.fetcher_constructor, skip_schemas=args.skip_schemas, do_validate=loadingContext.do_validate) if args.pack: stdout.write(print_pack(document_loader, processobj, uri, metadata)) return 0 if args.provenance and runtimeContext.research_obj: # Can't really be combined with args.pack at same time runtimeContext.research_obj.packed_workflow( print_pack(document_loader, processobj, uri, metadata)) if args.print_pre: stdout.write(json_dumps(processobj, indent=4, sort_keys=True, separators=(',', ': '))) return 0 loadingContext.overrides_list.extend(metadata.get("cwltool:overrides", [])) tool = make_tool(document_loader, avsc_names, metadata, uri, loadingContext) if args.make_template: def my_represent_none(self, data): # pylint: disable=unused-argument """Force clean representation of 'null'.""" return self.represent_scalar(u'tag:yaml.org,2002:null', u'null') yaml.RoundTripRepresenter.add_representer(type(None), my_represent_none) yaml.round_trip_dump( generate_input_template(tool), sys.stdout, default_flow_style=False, indent=4, block_seq_indent=2) return 0 if args.validate: print("{} is valid CWL.".format(args.workflow)) return 0 if args.print_rdf: stdout.write(printrdf(tool, document_loader.ctx, args.rdf_serializer)) return 0 if args.print_dot: printdot(tool, document_loader.ctx, stdout) return 0 if args.print_targets: for f in ("outputs", "steps", "inputs"): if tool.tool[f]: _logger.info("%s%s targets:", f[0].upper(), f[1:-1]) stdout.write(" "+"\n ".join([shortname(t["id"]) for t in tool.tool[f]])+"\n") return 0 if args.target: if isinstance(tool, Workflow): url = urllib.parse.urlparse(tool.tool["id"]) if url.fragment: extracted = get_subgraph([tool.tool["id"] + "/" + r for r in args.target], tool) else: extracted = get_subgraph([document_loader.fetcher.urljoin(tool.tool["id"], "#" + r) for r in args.target], tool) else: _logger.error("Can only use --target on Workflows") return 1 del document_loader.idx[extracted["id"]] tool = make_tool(document_loader, avsc_names, metadata, cast(CommentedMap, cmap(extracted)), loadingContext) if args.print_subgraph: if "name" in tool.tool: del tool.tool["name"] stdout.write(json_dumps(tool.tool, indent=4, sort_keys=True, separators=(',', ': '))) return 0 except (validate.ValidationException) as exc: _logger.error(u"Tool definition failed validation:\n%s", exc, exc_info=args.debug) return 1 except (RuntimeError, WorkflowException) as exc: _logger.error(u"Tool definition failed initialization:\n%s", exc, exc_info=args.debug) return 1 except Exception as exc: _logger.error( u"I'm sorry, I couldn't load this CWL file%s.\nThe error was: %s", try_again_msg, exc if not args.debug else "", exc_info=args.debug) return 1 if isinstance(tool, int): return tool # If on MacOS platform, TMPDIR must be set to be under one of 
the # shared volumes in Docker for Mac # More info: https://dockstore.org/docs/faq if sys.platform == "darwin": default_mac_path = "/private/tmp/docker_tmp" if runtimeContext.tmp_outdir_prefix == DEFAULT_TMP_PREFIX: runtimeContext.tmp_outdir_prefix = default_mac_path for dirprefix in ("tmpdir_prefix", "tmp_outdir_prefix", "cachedir"): if getattr(runtimeContext, dirprefix) and getattr(runtimeContext, dirprefix) != DEFAULT_TMP_PREFIX: sl = "/" if getattr(runtimeContext, dirprefix).endswith("/") or dirprefix == "cachedir" \ else "" setattr(runtimeContext, dirprefix, os.path.abspath(getattr(runtimeContext, dirprefix)) + sl) if not os.path.exists(os.path.dirname(getattr(runtimeContext, dirprefix))): try: os.makedirs(os.path.dirname(getattr(runtimeContext, dirprefix))) except Exception as e: _logger.error("Failed to create directory: %s", e) return 1 if args.cachedir: if args.move_outputs == "move": runtimeContext.move_outputs = "copy" runtimeContext.tmp_outdir_prefix = args.cachedir runtimeContext.secret_store = getdefault(runtimeContext.secret_store, SecretStore()) runtimeContext.make_fs_access = getdefault(runtimeContext.make_fs_access, StdFsAccess) try: initialized_job_order_object = init_job_order( job_order_object, args, tool, jobloader, stdout, print_input_deps=args.print_input_deps, relative_deps=args.relative_deps, make_fs_access=runtimeContext.make_fs_access, input_basedir=input_basedir, secret_store=runtimeContext.secret_store) except SystemExit as err: return err.code if not executor: if args.parallel: executor = MultithreadedJobExecutor() runtimeContext.select_resources = executor.select_resources else: executor = SingleJobExecutor() assert executor is not None try: runtimeContext.basedir = input_basedir del args.workflow del args.job_order conf_file = getattr(args, "beta_dependency_resolvers_configuration", None) # Text use_conda_dependencies = getattr(args, "beta_conda_dependencies", None) # Text if conf_file or use_conda_dependencies: runtimeContext.job_script_provider = DependenciesConfiguration(args) else: runtimeContext.find_default_container = functools.partial( find_default_container, default_container=runtimeContext.default_container, use_biocontainers=args.beta_use_biocontainers) (out, status) = executor(tool, initialized_job_order_object, runtimeContext, logger=_logger) if out is not None: if runtimeContext.research_obj is not None: runtimeContext.research_obj.create_job( out, None, True) def loc_to_path(obj): for field in ("path", "nameext", "nameroot", "dirname"): if field in obj: del obj[field] if obj["location"].startswith("file://"): obj["path"] = uri_file_path(obj["location"]) visit_class(out, ("File", "Directory"), loc_to_path) # Unsetting the Generation from final output object visit_class(out, ("File", ), MutationManager().unset_generation) if isinstance(out, string_types): stdout.write(out) else: stdout.write(json_dumps(out, indent=4, # type: ignore ensure_ascii=False)) stdout.write("\n") if hasattr(stdout, "flush"): stdout.flush() # type: ignore if status != "success": _logger.warning(u"Final process status is %s", status) return 1 _logger.info(u"Final process status is %s", status) return 0 except (validate.ValidationException) as exc: _logger.error(u"Input object failed validation:\n%s", exc, exc_info=args.debug) return 1 except UnsupportedRequirement as exc: _logger.error( u"Workflow or tool uses unsupported feature:\n%s", exc, exc_info=args.debug) return 33 except WorkflowException as exc: _logger.error( u"Workflow error%s:\n%s", try_again_msg, 
strip_dup_lineno(Text(exc)), exc_info=args.debug) return 1 except Exception as exc: # pylint: disable=broad-except _logger.error( u"Unhandled error%s:\n %s", try_again_msg, exc, exc_info=args.debug) return 1 finally: if args and runtimeContext and runtimeContext.research_obj \ and workflowobj: research_obj = runtimeContext.research_obj prov_dependencies = prov_deps(workflowobj, document_loader, uri) research_obj.generate_snapshot(prov_dependencies) if prov_log_handler is not None: # Stop logging so we won't half-log adding ourself to RO _logger.debug(u"[provenance] Closing provenance log file %s", prov_log_handler) _logger.removeHandler(prov_log_handler) # Ensure last log lines are written out prov_log_handler.flush() # Underlying WritableBagFile will add the tagfile to the manifest prov_log_handler.stream.close() prov_log_handler.close() research_obj.close(args.provenance) _logger.removeHandler(stderr_handler) _logger.addHandler(defaultStreamHandler)
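The ProvLogFormatter defined inside the function above forces UTC ISO8601 timestamps with a trailing Z by overriding logging.Formatter.formatTime. The same idea as a standalone sketch:

import logging
import time

class ISO8601Formatter(logging.Formatter):
    """Format log timestamps as UTC ISO8601 with both 'T' and a trailing 'Z'."""

    def __init__(self):
        super().__init__("[%(asctime)sZ] %(message)s")

    def formatTime(self, record, datefmt=None):
        record_time = time.gmtime(record.created)                    # gmtime -> UTC
        formatted = time.strftime("%Y-%m-%dT%H:%M:%S", record_time)
        return "%s,%03d" % (formatted, record.msecs)                 # append milliseconds

handler = logging.StreamHandler()
handler.setFormatter(ISO8601Formatter())
log = logging.getLogger("demo")
log.addHandler(handler)
log.setLevel(logging.INFO)
log.info("hello")   # e.g. [2018-01-01T12:00:00,123Z] hello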
53,136
def test_not_eksfargate(aggregator, instance): os.environ["DD_KUBERNETES_KUBELET_NODENAME"] = "foo" check = EksFargateCheck('eks_fargate', {}, [{}]) check.check(instance) assert "eks.fargate.pods.running" not in aggregator._metrics
def test_not_eksfargate(aggregator, instance): os.environ["DD_KUBERNETES_KUBELET_NODENAME"] = "foo" check = EksFargateCheck('eks_fargate', {}, [{}]) check.check(instance) assert aggregator.assert_all_metrics_covered()
46,016
def match_adalam( desc1: Tensor, desc2: Tensor, lafs1: Tensor, lafs2: Tensor, config: Dict = get_adalam_default_config(), hw1: Optional[Tensor] = None, hw2: Optional[Tensor] = None, dm: Optional[Tensor] = None, ) -> Tuple[Tensor, Tensor]: """Function, which performs descriptor matching, followed by AdaLAM filtering (see :cite:`AdaLAM2020` for more details) If the distance matrix dm is not provided, :py:func:`torch.cdist` is used. Args: desc1: Batch of descriptors of a shape :math:`(B1, D)`. desc2: Batch of descriptors of a shape :math:`(B2, D)`. lafs1: LAFs of a shape :math:`(1, B1, 2, 3)`. lafs2: LAFs of a shape :math:`(1, B1, 2, 3)`. config: dict with AdaLAM config dm: Tensor containing the distances from each descriptor in desc1 to each descriptor in desc2, shape of :math:`(B1, B2)`. Return: - Descriptor distance of matching descriptors, shape of :math:`(B3, 1)`. - Long tensor indexes of matching descriptors in desc1 and desc2. Shape: :math:`(B3, 2)`, where 0 <= B3 <= B1. """ KORNIA_CHECK_SHAPE(desc1, ["B", "DIM"]) KORNIA_CHECK_SHAPE(desc2, ["B", "DIM"]) adalam_object = AdalamFilter(config) idxs = adalam_object.match_and_filter( get_laf_center(lafs1).reshape(-1, 2), get_laf_center(lafs2).reshape(-1, 2), desc1, desc2, hw1, hw2, get_laf_orientation(lafs1).reshape(-1), get_laf_orientation(lafs2).reshape(-1), get_laf_scale(lafs1).reshape(-1), get_laf_scale(lafs2).reshape(-1), ) quality = None return quality, idxs
def match_adalam( desc1: Tensor, desc2: Tensor, lafs1: Tensor, lafs2: Tensor, config: Dict = get_adalam_default_config(), hw1: Optional[Tensor] = None, hw2: Optional[Tensor] = None, dm: Optional[Tensor] = None, ) -> Tuple[Tensor, Tensor]: """Function, which performs descriptor matching, followed by AdaLAM filtering (see :cite:`AdaLAM2020` for more details) If the distance matrix dm is not provided, :py:func:`torch.cdist` is used. Args: desc1: Batch of descriptors of a shape :math:`(B1, D)`. desc2: Batch of descriptors of a shape :math:`(B2, D)`. lafs1: LAFs of a shape :math:`(B1, 2, 3)`. lafs2: LAFs of a shape :math:`(1, B1, 2, 3)`. config: dict with AdaLAM config dm: Tensor containing the distances from each descriptor in desc1 to each descriptor in desc2, shape of :math:`(B1, B2)`. Return: - Descriptor distance of matching descriptors, shape of :math:`(B3, 1)`. - Long tensor indexes of matching descriptors in desc1 and desc2. Shape: :math:`(B3, 2)`, where 0 <= B3 <= B1. """ KORNIA_CHECK_SHAPE(desc1, ["B", "DIM"]) KORNIA_CHECK_SHAPE(desc2, ["B", "DIM"]) adalam_object = AdalamFilter(config) idxs = adalam_object.match_and_filter( get_laf_center(lafs1).reshape(-1, 2), get_laf_center(lafs2).reshape(-1, 2), desc1, desc2, hw1, hw2, get_laf_orientation(lafs1).reshape(-1), get_laf_orientation(lafs2).reshape(-1), get_laf_scale(lafs1).reshape(-1), get_laf_scale(lafs2).reshape(-1), ) quality = None return quality, idxs
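match_adalam flattens keypoint centers, orientations and scales out of the LAF tensors before handing them to the AdaLAM filter. A small shape check of those helpers, assuming they are importable from kornia.feature as in current kornia releases:

import torch
from kornia.feature import get_laf_center, get_laf_orientation, get_laf_scale

lafs = torch.rand(1, 5, 2, 3)            # (1, B, 2, 3), matching the docstring above

print(get_laf_center(lafs).shape)        # torch.Size([1, 5, 2])    -- keypoint xy
print(get_laf_orientation(lafs).shape)   # torch.Size([1, 5, 1])    -- angle in degrees
print(get_laf_scale(lafs).shape)         # torch.Size([1, 5, 1, 1]) -- isotropic scale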
45,140
def include_object( object: sqlalchemy.schema.SchemaItem, name: str, type_: str, reflected: bool, compare_to: sqlalchemy.schema.SchemaItem, ) -> bool: """ Determines whether or not alembic should include an object when autogenerating database migrations. Args: object: a sqlalchemy.schema.SchemaItem object such as a sqlalchemy.schema.Table, sqlalchemy.schema.Column, sqlalchemy.schema.Index sqlalchemy.schema.UniqueConstraint, or sqlalchemy.schema.ForeignKeyConstraint object. name: the name of the object. This is typically available via object.name. type: a string describing the type of object; currently "table", "column", "index", "unique_constraint", or "foreign_key_constraint" reflected: True if the given object was produced based on table reflection, False if it's from a local .MetaData object. compare_to: the object being compared against, if available, else None. Returns: bool: whether or not the specified object should be included in autogenerated migration code. """ # because of the dynamic inheritance pattern used by orion database, # it is difficult to get alembic to resolve references to indexes on inherited models # # to keep autogenerated migration code clean, we ignore the following indexes: # * functional indexes (ending in 'desc', 'asc'), if an index with the same name already exists # * triagram indexes that already exist # * case_insensitive indexes that already exist if type_ == "index": if not reflected and any([name.endswith(suffix) for suffix in {"asc", "desc"}]): return compare_to is None or object.name != compare_to.name elif reflected and ( name.startswith("gin") or name.endswith("case_insensitive") ): return False return True
def include_object( object: sqlalchemy.schema.SchemaItem, name: str, type_: str, reflected: bool, compare_to: sqlalchemy.schema.SchemaItem, ) -> bool: """ Determines whether or not alembic should include an object when autogenerating database migrations. Args: object: a sqlalchemy.schema.SchemaItem object such as a sqlalchemy.schema.Table, sqlalchemy.schema.Column, sqlalchemy.schema.Index sqlalchemy.schema.UniqueConstraint, or sqlalchemy.schema.ForeignKeyConstraint object. name: the name of the object. This is typically available via object.name. type: a string describing the type of object; currently "table", "column", "index", "unique_constraint", or "foreign_key_constraint" reflected: True if the given object was produced based on table reflection, False if it's from a local .MetaData object. compare_to: the object being compared against, if available, else None. Returns: bool: whether or not the specified object should be included in autogenerated migration code. """ # because of the dynamic inheritance pattern used by orion database, # it is difficult to get alembic to resolve references to indexes on inherited models # # to keep autogenerated migration code clean, we ignore the following indexes: # * functional indexes (ending in 'desc', 'asc'), if an index with the same name already exists # * trigram indexes that already exist # * case_insensitive indexes that already exist if type_ == "index": if not reflected and any([name.endswith(suffix) for suffix in {"asc", "desc"}]): return compare_to is None or object.name != compare_to.name elif reflected and ( name.startswith("gin") or name.endswith("case_insensitive") ): return False return True
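For context, a filter like this is normally handed to Alembic via the `include_object=` keyword of `context.configure` in `env.py`. The sketch below shows that wiring under stated assumptions: the in-memory engine and empty `MetaData` stand in for the project's real connectable and model metadata, and the call only does useful work inside an actual Alembic migration run.

import sqlalchemy
from alembic import context  # the proxy Alembic provides inside env.py

target_metadata = sqlalchemy.MetaData()  # placeholder for the project's model metadata

def run_migrations_online() -> None:
    connectable = sqlalchemy.create_engine("sqlite:///:memory:")  # illustrative engine
    with connectable.connect() as connection:
        context.configure(
            connection=connection,
            target_metadata=target_metadata,
            include_object=include_object,  # the filter defined above
        )
        with context.begin_transaction():
            context.run_migrations()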
37,605
def call( target: Optional[Union[circuit.QuantumCircuit, Schedule, ScheduleBlock]] = None, name: Optional[str] = None, channels: Optional[List[chans.Channel]] = None, value_dict: Optional[Dict[ParameterValueType, ParameterValueType]] = None, **kw_params: ParameterValueType, ): """Call the subroutine within the currently active builder context with arbitrary parameters which will be assigned to the target program. .. note:: If the ``target`` program is instance of schedule or quantum cirucit, it will be assigned as :class:`~qiskit.pulse.instructions.Call` instruction. Otherwise :class:`~qiskit.pulse.instructions.Reference` instruction is added and ``target`` is separately registered to the references. Examples: 1. Call with substantial program. .. code-block:: python from qiskit import circuit, pulse, schedule, transpile from qiskit.test.mock import FakeOpenPulse2Q backend = FakeOpenPulse2Q() qc = circuit.QuantumCircuit(2) qc.cx(0, 1) qc_transpiled = transpile(qc, optimization_level=3) sched = schedule(qc_transpiled, backend) with pulse.build(backend) as pulse_prog: pulse.call(sched) pulse.call(qc) This function can optionally take parameter dictionary with the parameterized target program. .. code-block:: python from qiskit import circuit, pulse amp = circuit.Parameter('amp') with pulse.build() as subroutine: pulse.play(pulse.Gaussian(160, amp, 40), pulse.DriveChannel(0)) with pulse.build() as main_prog: pulse.call(subroutine, amp=0.1) pulse.call(subroutine, amp=0.3) If there is any parameter name collision, you can distinguish them by specifying each parameter object as a python dictionary. Otherwise ``amp1`` and ``amp2`` will be updated with the same value. .. code-block:: python from qiskit import circuit, pulse amp1 = circuit.Parameter('amp') amp2 = circuit.Parameter('amp') with pulse.build() as subroutine: pulse.play(pulse.Gaussian(160, amp1, 40), pulse.DriveChannel(0)) pulse.play(pulse.Gaussian(160, amp2, 40), pulse.DriveChannel(1)) with pulse.build() as main_prog: pulse.call(subroutine, value_dict={amp1: 0.1, amp2: 0.2}) 2. Call with unassigned program. .. code-block:: python qiskit import pulse with pulse.build() as main_prog: ref_key = "my_subroutine" pulse.call(name=ref_key, channels=[pulse.DriveChannel(0)]) with pulse.build() as subroutine: pulse.play(pulse.Gaussian(160, 0.1, 40), pulse.DriveChannel(0)) main_prog.assign_reference(ref_key=ref_key, schedule=subroutine) When you call without actual program, you can assign the program afterwards through the :meth:`ScheduleBlock.assign_reference` method. Args: target: Target circuit or pulse schedule to call. If this program is not provided, both ``name`` and ``channels`` should be provided instead. name: Name of subroutine if defined. channels: Optional. Channels associated to the subroutine. value_dict: Optional. Local scoped parameters assigned to the subroutine. If this dictionary is provided, the ``target`` program is copied and then stored in the main built schedule with having parameters assigned. This dictionary is keyed on the :class:`~.Parameter` object, thus parameter name collision can be avoided. This option is valid only when the subroutine is called with ``target``. kw_params: Alternative way to provide local scoped parameters. Since this is keyed on the string parameter name, the parameters having the same name are all updated together. If you want to avoid name collision, use ``value_dict`` with :class:`~.Parameter` object instead. Raises: exceptions.PulseError: If the input ``target`` type is not supported. 
exceptions.PulseError: Target program is empty and name and channels are not both provided. exceptions.PulseError: Subroutine is called by name and channels but local scoped parameters are also provided. """ if target is None: if value_dict is not None or any(kw_params): raise exceptions.PulseError( "Parameters are provided without target program. " "These parameters cannot be assigned." ) if name is None or channels is None: raise exceptions.PulseError( "Subroutine name and channels are not both provided. " "Please call subroutine with target program, or both name and channels." ) _active_builder().append_reference(reference_key=name, channels=channels) else: if not isinstance(target, (circuit.QuantumCircuit, Schedule, ScheduleBlock)): raise exceptions.PulseError( f'Target of type "{target.__class__.__name__}" is not supported.' ) _active_builder().call_subroutine( subroutine=target, name=name, value_dict=value_dict, **kw_params )
def call( target: Optional[Union[circuit.QuantumCircuit, Schedule, ScheduleBlock]] = None, name: Optional[str] = None, channels: Optional[List[chans.Channel]] = None, value_dict: Optional[Dict[ParameterValueType, ParameterValueType]] = None, **kw_params: ParameterValueType, ): """Call the subroutine within the currently active builder context with arbitrary parameters which will be assigned to the target program. .. note:: If the ``target`` program is instance of schedule or quantum cirucit, it will be assigned as :class:`~qiskit.pulse.instructions.Call` instruction. Otherwise :class:`~qiskit.pulse.instructions.Reference` instruction is added and ``target`` is separately registered to the references. Examples: 1. Call with substantial program. .. code-block:: python from qiskit import circuit, pulse, schedule, transpile from qiskit.test.mock import FakeOpenPulse2Q backend = FakeOpenPulse2Q() qc = circuit.QuantumCircuit(2) qc.cx(0, 1) qc_transpiled = transpile(qc, optimization_level=3) sched = schedule(qc_transpiled, backend) with pulse.build(backend) as pulse_prog: pulse.call(sched) pulse.call(qc) This function can optionally take parameter dictionary with the parameterized target program. .. code-block:: python from qiskit import circuit, pulse amp = circuit.Parameter('amp') with pulse.build() as subroutine: pulse.play(pulse.Gaussian(160, amp, 40), pulse.DriveChannel(0)) with pulse.build() as main_prog: pulse.call(subroutine, amp=0.1) pulse.call(subroutine, amp=0.3) If there is any parameter name collision, you can distinguish them by specifying each parameter object as a python dictionary. Otherwise ``amp1`` and ``amp2`` will be updated with the same value. .. code-block:: python from qiskit import circuit, pulse amp1 = circuit.Parameter('amp') amp2 = circuit.Parameter('amp') with pulse.build() as subroutine: pulse.play(pulse.Gaussian(160, amp1, 40), pulse.DriveChannel(0)) pulse.play(pulse.Gaussian(160, amp2, 40), pulse.DriveChannel(1)) with pulse.build() as main_prog: pulse.call(subroutine, value_dict={amp1: 0.1, amp2: 0.2}) 2. Call with unassigned program. .. code-block:: python qiskit import pulse with pulse.build() as main_prog: ref_key = "my_subroutine" pulse.call(name=ref_key, channels=[pulse.DriveChannel(0)]) with pulse.build() as subroutine: pulse.play(pulse.Gaussian(160, 0.1, 40), pulse.DriveChannel(0)) main_prog.assign_reference(ref_key=ref_key, schedule=subroutine) When you call without actual program, you can assign the program afterwards through the :meth:`ScheduleBlock.assign_reference` method. Args: target: Target circuit or pulse schedule to call. If this program is not provided, both ``name`` and ``channels`` must be provided. name: Name of subroutine if defined. channels: Optional. Channels associated to the subroutine. value_dict: Optional. Local scoped parameters assigned to the subroutine. If this dictionary is provided, the ``target`` program is copied and then stored in the main built schedule with having parameters assigned. This dictionary is keyed on the :class:`~.Parameter` object, thus parameter name collision can be avoided. This option is valid only when the subroutine is called with ``target``. kw_params: Alternative way to provide local scoped parameters. Since this is keyed on the string parameter name, the parameters having the same name are all updated together. If you want to avoid name collision, use ``value_dict`` with :class:`~.Parameter` object instead. Raises: exceptions.PulseError: If the input ``target`` type is not supported. 
exceptions.PulseError: Target program is empty and name and channels are not both provided. exceptions.PulseError: Subroutine is called by name and channels but local scoped parameters are also provided. """ if target is None: if value_dict is not None or any(kw_params): raise exceptions.PulseError( "Parameters are provided without target program. " "These parameters cannot be assigned." ) if name is None or channels is None: raise exceptions.PulseError( "Subroutine name and channels are not both provided. " "Please call subroutine with target program, or both name and channels." ) _active_builder().append_reference(reference_key=name, channels=channels) else: if not isinstance(target, (circuit.QuantumCircuit, Schedule, ScheduleBlock)): raise exceptions.PulseError( f'Target of type "{target.__class__.__name__}" is not supported.' ) _active_builder().call_subroutine( subroutine=target, name=name, value_dict=value_dict, **kw_params )
2,379
def available_if(check, err_msg_template=None): """An attribute that is available only if check returns a truthy value Parameters ---------- check : callable When passed the object with the decorated method, this should return a truthy value if the attribute is available, and either return False or raise an AttributeError if not available. err_msg_template : str An error message template for an AttributeError raised if `check` returns a falsy value. `err_msg_template` can have the following placeholders for formatting: - owner: The name of the class that owns the decorated method. - attribute_name: The name of the decorated method. If not passed, defaults to `"This {owner} has no attribute {attribute_name}"`. Examples -------- >>> from sklearn.utils.metaestimators import available_if >>> class HelloIfEven: ... def __init__(self, x): ... self.x = x ... ... def _x_is_even(self): ... return self.x % 2 == 0 ... ... @available_if( ... _x_is_even, ... err_msg_template="{attribute_name} is not available for this {owner}", ... ) ... def say_hello(self): ... print("Hello") ... >>> obj = HelloIfEven(1) >>> hasattr(obj, "say_hello") False >>> obj.say_hello() Traceback (most recent call last): ... AttributeError: 'say_hello' is not available for this 'HelloIfEven' >>> obj.x = 2 >>> hasattr(obj, "say_hello") True >>> obj.say_hello() Hello """ return lambda fn: _AvailableIfDescriptor( fn, check, attribute_name=fn.__name__, err_msg_template=err_msg_template, )
def available_if(check, *, err_msg_template=None): """An attribute that is available only if check returns a truthy value Parameters ---------- check : callable When passed the object with the decorated method, this should return a truthy value if the attribute is available, and either return False or raise an AttributeError if not available. err_msg_template : str An error message template for an AttributeError raised if `check` returns a falsy value. `err_msg_template` can have the following placeholders for formatting: - owner: The name of the class that owns the decorated method. - attribute_name: The name of the decorated method. If not passed, defaults to `"This {owner} has no attribute {attribute_name}"`. Examples -------- >>> from sklearn.utils.metaestimators import available_if >>> class HelloIfEven: ... def __init__(self, x): ... self.x = x ... ... def _x_is_even(self): ... return self.x % 2 == 0 ... ... @available_if( ... _x_is_even, ... err_msg_template="{attribute_name} is not available for this {owner}", ... ) ... def say_hello(self): ... print("Hello") ... >>> obj = HelloIfEven(1) >>> hasattr(obj, "say_hello") False >>> obj.say_hello() Traceback (most recent call last): ... AttributeError: 'say_hello' is not available for this 'HelloIfEven' >>> obj.x = 2 >>> hasattr(obj, "say_hello") True >>> obj.say_hello() Hello """ return lambda fn: _AvailableIfDescriptor( fn, check, attribute_name=fn.__name__, err_msg_template=err_msg_template, )
2,518
def d2_pinball_loss_score( y_true, y_pred, *, sample_weight=None, alpha=0.5, multioutput="uniform_average" ): """ :math:`D^2` regression score function, \ fraction of pinball loss deviance explained. Best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A model that always uses the empirical median of `y_true` as constant prediction, disregarding the input features, gets a :math:`D^2` score of 0.0. Read more in the :ref:`User Guide <d2_score>`. .. versionadded:: 1.1 Parameters ---------- y_true : array-like of shape (n_samples,) or (n_samples, n_outputs) Ground truth (correct) target values. y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs) Estimated target values. sample_weight : array-like of shape (n_samples,), optional Sample weights. alpha : float, default=0.5 Pinball loss quantile parameter, determines the slope of the pinball_loss. Equivalent to `d2_absolute_error_score` when `alpha=0.5`. multioutput : {'raw_values', 'uniform_average'} or array-like of shape \ (n_outputs,), default='uniform_average' Defines aggregating of multiple output values. Array-like value defines weights used to average scores. 'raw_values' : Returns a full set of errors in case of multioutput input. 'uniform_average' : Scores of all outputs are averaged with uniform weight. Returns ------- score : float or ndarray of floats The :math:`D^2` score with a pinball loss deviance or ndarray of scores if `multioutput='raw_values'`. Notes ----- This is not a symmetric function. Like :math:`R^2`, :math:`D^2` score may be negative (it need not actually be the square of a quantity D). This metric is not well-defined for single samples and will return a NaN value if n_samples is less than two. References ---------- .. [1] Eq. (3.11) of Hastie, Trevor J., Robert Tibshirani and Martin J. Wainwright. "Statistical Learning with Sparsity: The Lasso and Generalizations." (2015). https://trevorhastie.github.io Examples -------- >>> from sklearn.metrics import d2_pinball_loss_score >>> y_true = [1, 2, 3] >>> y_pred = [1, 3, 3] >>> d2_pinball_loss_score(y_true, y_pred) 0.5 >>> d2_pinball_loss_score(y_true, y_pred, alpha=0.9) 0.772... >>> d2_pinball_loss_score(y_true, y_pred, alpha=0.1) -1.045... >>> d2_pinball_loss_score(y_true, y_true, alpha=0.1) 1.0 """ y_type, y_true, y_pred, multioutput = _check_reg_targets( y_true, y_pred, multioutput ) check_consistent_length(y_true, y_pred, sample_weight) if _num_samples(y_pred) < 2: msg = "D^2 score is not well-defined with less than two samples." 
warnings.warn(msg, UndefinedMetricWarning) return float("nan") numerator = mean_pinball_loss( y_true, y_pred, sample_weight=sample_weight, alpha=alpha, multioutput="raw_values", ) if sample_weight is None: y_quantile = [np.percentile(y_true, q=alpha * 100, axis=0)] * len(y_true) else: sample_weight = _check_sample_weight(sample_weight, y_true) y_quantile = [ _weighted_percentile( y_true, sample_weight=sample_weight, percentile=alpha * 100 ) ] * len(y_true) denominator = mean_pinball_loss( y_true, y_quantile, sample_weight=sample_weight, alpha=alpha, multioutput="raw_values", ) nonzero_numerator = numerator != 0 nonzero_denominator = denominator != 0 valid_score = nonzero_numerator & nonzero_denominator output_scores = np.ones(y_true.shape[1]) output_scores[valid_score] = 1 - (numerator[valid_score] / denominator[valid_score]) output_scores[nonzero_numerator & ~nonzero_denominator] = 0.0 if isinstance(multioutput, str): if multioutput == "raw_values": # return scores individually return output_scores elif multioutput == "uniform_average": # passing None as weights to np.average results in uniform mean avg_weights = None else: raise ValueError( "multioutput is expected to be 'raw_values' " "or 'uniform_average' but we got %r" " instead." % multioutput ) else: avg_weights = multioutput return np.average(output_scores, weights=avg_weights)
def d2_pinball_loss_score( y_true, y_pred, *, sample_weight=None, alpha=0.5, multioutput="uniform_average" ): """ :math:`D^2` regression score function, fraction of pinball loss explained. Best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A model that always uses the empirical median of `y_true` as constant prediction, disregarding the input features, gets a :math:`D^2` score of 0.0. Read more in the :ref:`User Guide <d2_score>`. .. versionadded:: 1.1 Parameters ---------- y_true : array-like of shape (n_samples,) or (n_samples, n_outputs) Ground truth (correct) target values. y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs) Estimated target values. sample_weight : array-like of shape (n_samples,), optional Sample weights. alpha : float, default=0.5 Pinball loss quantile parameter, determines the slope of the pinball_loss. Equivalent to `d2_absolute_error_score` when `alpha=0.5`. multioutput : {'raw_values', 'uniform_average'} or array-like of shape \ (n_outputs,), default='uniform_average' Defines aggregating of multiple output values. Array-like value defines weights used to average scores. 'raw_values' : Returns a full set of errors in case of multioutput input. 'uniform_average' : Scores of all outputs are averaged with uniform weight. Returns ------- score : float or ndarray of floats The :math:`D^2` score with a pinball loss deviance or ndarray of scores if `multioutput='raw_values'`. Notes ----- This is not a symmetric function. Like :math:`R^2`, :math:`D^2` score may be negative (it need not actually be the square of a quantity D). This metric is not well-defined for single samples and will return a NaN value if n_samples is less than two. References ---------- .. [1] Eq. (3.11) of Hastie, Trevor J., Robert Tibshirani and Martin J. Wainwright. "Statistical Learning with Sparsity: The Lasso and Generalizations." (2015). https://trevorhastie.github.io Examples -------- >>> from sklearn.metrics import d2_pinball_loss_score >>> y_true = [1, 2, 3] >>> y_pred = [1, 3, 3] >>> d2_pinball_loss_score(y_true, y_pred) 0.5 >>> d2_pinball_loss_score(y_true, y_pred, alpha=0.9) 0.772... >>> d2_pinball_loss_score(y_true, y_pred, alpha=0.1) -1.045... >>> d2_pinball_loss_score(y_true, y_true, alpha=0.1) 1.0 """ y_type, y_true, y_pred, multioutput = _check_reg_targets( y_true, y_pred, multioutput ) check_consistent_length(y_true, y_pred, sample_weight) if _num_samples(y_pred) < 2: msg = "D^2 score is not well-defined with less than two samples." 
warnings.warn(msg, UndefinedMetricWarning) return float("nan") numerator = mean_pinball_loss( y_true, y_pred, sample_weight=sample_weight, alpha=alpha, multioutput="raw_values", ) if sample_weight is None: y_quantile = [np.percentile(y_true, q=alpha * 100, axis=0)] * len(y_true) else: sample_weight = _check_sample_weight(sample_weight, y_true) y_quantile = [ _weighted_percentile( y_true, sample_weight=sample_weight, percentile=alpha * 100 ) ] * len(y_true) denominator = mean_pinball_loss( y_true, y_quantile, sample_weight=sample_weight, alpha=alpha, multioutput="raw_values", ) nonzero_numerator = numerator != 0 nonzero_denominator = denominator != 0 valid_score = nonzero_numerator & nonzero_denominator output_scores = np.ones(y_true.shape[1]) output_scores[valid_score] = 1 - (numerator[valid_score] / denominator[valid_score]) output_scores[nonzero_numerator & ~nonzero_denominator] = 0.0 if isinstance(multioutput, str): if multioutput == "raw_values": # return scores individually return output_scores elif multioutput == "uniform_average": # passing None as weights to np.average results in uniform mean avg_weights = None else: raise ValueError( "multioutput is expected to be 'raw_values' " "or 'uniform_average' but we got %r" " instead." % multioutput ) else: avg_weights = multioutput return np.average(output_scores, weights=avg_weights)
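A worked check of the docstring example (`y_true=[1, 2, 3]`, `y_pred=[1, 3, 3]`, `alpha=0.5`), using only the quantity the function computes: the pinball loss of the prediction relative to the pinball loss of the constant median baseline. NumPy is the only assumed dependency.

import numpy as np

y_true, y_pred, alpha = np.array([1, 2, 3]), np.array([1, 3, 3]), 0.5

def pinball(y, q):
    # Mean pinball loss: alpha * underprediction + (1 - alpha) * overprediction.
    return np.mean(alpha * np.maximum(y - q, 0) + (1 - alpha) * np.maximum(q - y, 0))

numerator = pinball(y_true, y_pred)                                      # 0.5 * (0 + 1 + 0) / 3 = 1/6
denominator = pinball(y_true, np.full_like(y_true, np.median(y_true)))   # 0.5 * (1 + 0 + 1) / 3 = 1/3
print(1 - numerator / denominator)  # 0.5, matching the doctest above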
54,612
def deprecated(end_version, *, reason="", replaced_by=""):
    """
    Deprecate a function or method and raise a `DeprecationWarning`.

    The `@deprecated` decorator is used to deprecate functions and methods. Several cases are supported. For
    example, one can use it to deprecate a function that has become redundant or to rename a function. The
    following code examples provide different use cases of how to use the decorator.

    .. code-block:: python

        @deprecated("0.1.5", replaced_by="sum")
        def simple_addition(a, b):
            return a + b

    :param end_version: Release version of removal.
    :type end_version: `str`
    :param reason: Additional deprecation reason.
    :type reason: `str`
    :param replaced_by: Function that replaces deprecated function.
    :type replaced_by: `str`
    """

    def decorator(function):
        reason_msg = "\n" + reason if reason else reason
        replaced_msg = f" It will be replaced by '{replaced_by}'." if replaced_by else replaced_by
        deprecated_msg = (
            f"Function '{function.__name__}' is deprecated and will be removed in future release {end_version}."
        )

        @wraps(function)
        def wrapper(*args, **kwargs):
            warnings.simplefilter("always", category=DeprecationWarning)
            warnings.warn(deprecated_msg + replaced_msg + reason_msg, category=DeprecationWarning, stacklevel=2)
            warnings.simplefilter("default", category=DeprecationWarning)
            return function(*args, **kwargs)

        return wrapper

    return decorator
# ------------------------------------------------------------------------------------------------- DEPRECATION


def deprecated(end_version, *, reason="", replaced_by=""):
    """
    Deprecate a function or method and raise a `DeprecationWarning`.

    The `@deprecated` decorator is used to deprecate functions and methods. Several cases are supported. For
    example, one can use it to deprecate a function that has become redundant or to rename a function. The
    following code examples provide different use cases of how to use the decorator.

    .. code-block:: python

        @deprecated("0.1.5", replaced_by="sum")
        def simple_addition(a, b):
            return a + b

    :param end_version: Release version of removal.
    :type end_version: `str`
    :param reason: Additional deprecation reason.
    :type reason: `str`
    :param replaced_by: Function that replaces deprecated function.
    :type replaced_by: `str`
    """

    def decorator(function):
        reason_msg = "\n" + reason if reason else reason
        replaced_msg = f" It will be replaced by '{replaced_by}'." if replaced_by else replaced_by
        deprecated_msg = (
            f"Function '{function.__name__}' is deprecated and will be removed in future release {end_version}."
        )

        @wraps(function)
        def wrapper(*args, **kwargs):
            warnings.simplefilter("always", category=DeprecationWarning)
            warnings.warn(deprecated_msg + replaced_msg + reason_msg, category=DeprecationWarning, stacklevel=2)
            warnings.simplefilter("default", category=DeprecationWarning)
            return function(*args, **kwargs)

        return wrapper

    return decorator
32,453
def get_last_run(events: list, last_run: dict) -> dict: # type: ignore """ Args: events (list): list of the event from the api last_run (dict): the dictionary containing the last run times for the event types Returns: A dictionary with the times for the next run """ alerts = get_sorted_events_by_type(events, event_type='alert') audit_events = get_sorted_events_by_type(events, event_type='audit') applications_events = get_sorted_events_by_type(events, event_type='application') network_events = get_sorted_events_by_type(events, event_type='network') if not alerts: alerts_time = last_run['alert'] else: alerts_time = alerts[-1]['timestamp'] if not applications_events: applications_time = last_run['application'] else: applications_time = applications_events[-1]['timestamp'] if not audit_events: audit_time = last_run['audit'] else: audit_time = audit_events[-1]['timestamp'] if not network_events: network_time = last_run['network'] else: network_time = network_events[-1]['timestamp'] return {'alert': alerts_time, 'application': applications_time, 'audit': audit_time, 'network': network_time}
def create_last_run(events: list, last_run: dict) -> dict: # type: ignore """ Args: events (list): list of the event from the api last_run (dict): the dictionary containing the last run times for the event types Returns: A dictionary with the times for the next run """ alerts = get_sorted_events_by_type(events, event_type='alert') audit_events = get_sorted_events_by_type(events, event_type='audit') applications_events = get_sorted_events_by_type(events, event_type='application') network_events = get_sorted_events_by_type(events, event_type='network') if not alerts: alerts_time = last_run['alert'] else: alerts_time = alerts[-1]['timestamp'] if not applications_events: applications_time = last_run['application'] else: applications_time = applications_events[-1]['timestamp'] if not audit_events: audit_time = last_run['audit'] else: audit_time = audit_events[-1]['timestamp'] if not network_events: network_time = last_run['network'] else: network_time = network_events[-1]['timestamp'] return {'alert': alerts_time, 'application': applications_time, 'audit': audit_time, 'network': network_time}
58,758
def _mx_npx_reshape(inputs, attrs): shape = attrs.get_int_tuple("newshape") reverse = attrs.get_bool("reverse", False) shape_list = list(shape) new_shape_list = [] if -3 not in shape_list: for num in shape_list: if num > 0 or num == -1: new_shape_list.append(num) elif num == -2: new_shape_list.append(0) elif num == -4: new_shape_list.append(-2) elif num == -5: new_shape_list.append(-3) elif num == -6: new_shape_list.append(-4) else: raise tvm.error.OpAttributeInvalid("Shape dimension %d is not supported" % num) shape = tuple(new_shape_list) if reverse: return _op.reverse_reshape(inputs[0], newshape=shape) return _op.reshape(inputs[0], newshape=shape) else: old_shape = get_tuple_shape(_infer_type(inputs[0]).checked_type.shape) new_shape = [] if reverse: old_shape = old_shape[::-1] shape_list = shape_list[::-1] ptr = 0 unknown_axis = None src_ptr = 0 while src_ptr < len(shape_list): ele = shape_list[src_ptr] src_ptr += 1 if ele > 0: new_shape.append(ele) ptr += 1 elif ele == -1: new_shape.append(-1) assert unknown_axis is None, "Can only have one unknown axis." unknown_axis = len(new_shape) ptr += 1 elif ele == -2: new_shape.append(old_shape[ptr]) ptr += 1 elif ele == -3: assert old_shape[ptr] == 1 ptr += 1 elif ele == -4: new_shape += old_shape[ptr:] break elif ele == -5: new_shape.append(old_shape[ptr] * old_shape[ptr + 1]) ptr += 2 elif ele == -6: # Split axis lhs = shape_list[src_ptr] rhs = shape_list[src_ptr + 1] src_ptr += 2 assert not (lhs == -1 and rhs == -1) if lhs == -1: assert old_shape[ptr] % rhs == 0 lhs = old_shape[ptr] // rhs if rhs == -1: assert old_shape[ptr] % lhs == 0 rhs = old_shape[ptr] // lhs new_shape.append(lhs) new_shape.append(rhs) ptr += 1 else: raise tvm.error.OpAttributeInvalid("Shape dimension %d is not supported" % ele) if reverse: new_shape = new_shape[::-1] return _op.reshape(inputs[0], newshape=new_shape)
def _mx_npx_reshape(inputs, attrs): shape = attrs.get_int_tuple("newshape") reverse = attrs.get_bool("reverse", False) shape_list = list(shape) new_shape_list = [] if -3 not in shape_list: for num in shape_list: if num > 0 or num == -1: new_shape_list.append(num) elif num in [-2, -4, -5, -6]: new_shape_list.append(num + 2) else: raise tvm.error.OpAttributeInvalid("Shape dimension %d is not supported" % num) shape = tuple(new_shape_list) if reverse: return _op.reverse_reshape(inputs[0], newshape=shape) return _op.reshape(inputs[0], newshape=shape) else: old_shape = get_tuple_shape(_infer_type(inputs[0]).checked_type.shape) new_shape = [] if reverse: old_shape = old_shape[::-1] shape_list = shape_list[::-1] ptr = 0 unknown_axis = None src_ptr = 0 while src_ptr < len(shape_list): ele = shape_list[src_ptr] src_ptr += 1 if ele > 0: new_shape.append(ele) ptr += 1 elif ele == -1: new_shape.append(-1) assert unknown_axis is None, "Can only have one unknown axis." unknown_axis = len(new_shape) ptr += 1 elif ele == -2: new_shape.append(old_shape[ptr]) ptr += 1 elif ele == -3: assert old_shape[ptr] == 1 ptr += 1 elif ele == -4: new_shape += old_shape[ptr:] break elif ele == -5: new_shape.append(old_shape[ptr] * old_shape[ptr + 1]) ptr += 2 elif ele == -6: # Split axis lhs = shape_list[src_ptr] rhs = shape_list[src_ptr + 1] src_ptr += 2 assert not (lhs == -1 and rhs == -1) if lhs == -1: assert old_shape[ptr] % rhs == 0 lhs = old_shape[ptr] // rhs if rhs == -1: assert old_shape[ptr] % lhs == 0 rhs = old_shape[ptr] // lhs new_shape.append(lhs) new_shape.append(rhs) ptr += 1 else: raise tvm.error.OpAttributeInvalid("Shape dimension %d is not supported" % ele) if reverse: new_shape = new_shape[::-1] return _op.reshape(inputs[0], newshape=new_shape)
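A worked example of the special codes handled by the loop above (the concrete shapes are illustrative assumptions): `-2` copies the corresponding input dimension, `-5` merges two adjacent input dimensions, and `-6, a, b` splits one input dimension into `(a, b)`.

import numpy as np

x = np.zeros((2, 3, 4, 5))
# newshape = (-2, -5, -6, 1, 5) translates, per the loop above, to:
#   -2        -> keep dim 0              -> 2
#   -5        -> merge dims 1 and 2      -> 3 * 4 = 12
#   -6, 1, 5  -> split dim 3 into (1, 5) -> 1, 5
new_shape = [2, 3 * 4, 1, 5]
assert x.reshape(new_shape).shape == (2, 12, 1, 5)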
32,566
def main(): ip_list = [] domain_list = [] hash_list = [] d_args = demisto.args() entry_id = d_args['entryID'] if 'entryID' in d_args else None file_name = d_args['file'] if 'file' in d_args else None # file arg deprecated parse_ip = int(d_args['ips']) if 'ips' in d_args else -1 parse_domain = int(d_args['domains']) if 'domains' in d_args else -1 parse_hash = int(d_args['hashes']) if 'hashes' in d_args else -1 parse_all = True if d_args['parseAll'] == 'yes' else False if parse_ip == -1 and parse_domain == -1 and parse_hash == -1 and not parse_all: return_error('Select a field to extract or set parseAll=yes to parse the whole CSV file') if file_name is None and entry_id is None: return_error('Please provide entryID.') if entry_id is None: # search entry by file name try: entry = get_entry_by_file_name(file_name) entry_id = entry['ID'] except ValueError as e: return_error(e) res = demisto.getFilePath(entry_id) if not res: return_error("Entry {} not found".format(entry_id)) file_path = res['path'] file_name = res['name'] if not file_name.lower().endswith('.csv'): return_error( '"{}" is not in csv format. Please ensure the file is in correct format and has a ".csv" extension'.format( file_name)) if parse_all: all_csv = [] with open(file_path) as f: records = unicode_dict_reader(f) # `records` is a list contains CSV rows (without headers) # so if it doesn't exists - it can be empty or one-lined CSV if records: for row in records: all_csv.append(row) else: # Can be one-line csv f.seek(0) line = f.read() all_csv = line.split(',') output = { 'ParseCSV.ParsedCSV': all_csv } if is_one_dimension_list(all_csv): human_readable = tableToMarkdown(file_name, all_csv, headers=["CSV list"]) else: human_readable = tableToMarkdown(file_name, all_csv) demisto.results({ "Type": entryTypes["note"], "ContentsFormat": formats["json"], "ReadableContentsFormat": formats["markdown"], "Contents": all_csv, "EntryContext": output, "HumanReadable": human_readable }) elif not (parse_ip == -1 and parse_domain == -1 and parse_hash == -1): # if need to parse ips/domains/hashes, keep the script running if sum(1 for line in open(file_path)) <= 1: # checks if there are less than one line return_error('No data to parse. CSV file might be empty or one-lined. try the `ParseAll=yes` argument.') with open(file_path, 'rU') as f: has_header = csv.Sniffer().has_header(f.read(1024)) f.seek(0) csv_data = csv.reader(f) if has_header: next(csv_data) md = '### Parsed Data Table\n' + ('IPs |' if 'ips' in d_args else '') + ( 'Domains |' if 'domains' in d_args else '') + ('Hashes |' if 'hashes' in d_args else '') + '\n' md += ('- |' if 'ips' in d_args else '') + ('- |' if 'domains' in d_args else '') + ( '- |' if 'hashes' in d_args else '') + '\n' content = '' for row in csv_data: content += ','.join(row) + '\n' if parse_ip != -1: md += (row[parse_ip] + '|' if row[parse_ip] else ' |') is_ip = re.search(r'([0-9]{1,3}\.){3}[0-9]{1,3}', row[parse_ip]) is_valid = is_ip_valid(row[parse_ip]) if is_ip and is_valid: ip_list.append(row[parse_ip]) if parse_domain != -1: md += (row[parse_domain] + '|' if row[parse_domain] else ' |') has_dot = '.' 
in row[parse_domain] no_spaces = ' ' not in row[parse_domain] if has_dot and no_spaces: domain_list.append(row[parse_domain]) if parse_hash != -1: md += (row[parse_hash] + '|' if row[parse_hash] else ' |') is_hash = re.search(r'[0-9A-Fa-f]{32,128}', row[parse_hash]) if is_hash: hash_list.append(row[parse_hash]) md += '\n' context = {} # type: dict if ip_list: old_ip_list = list(demisto.get(demisto.context(), 'ips')) if demisto.get(demisto.context(), 'ips') else [] ip_list = list(set(ip_list) - set(old_ip_list)) if len(ip_list) > 0: context["IP"] = [] for ip in ip_list: context["IP"].append({"Address": ip}) if domain_list: old_domain_list = list(demisto.get(demisto.context(), 'domains')) if demisto.get(demisto.context(), 'domains') else [] domain_list = list(set(domain_list) - set(old_domain_list)) if len(domain_list) > 0: context["Domain"] = [] for domain in domain_list: context["Domain"].append({"Name": domain}) if hash_list: old_hash_list = list(demisto.get(demisto.context(), 'hashes')) if demisto.get(demisto.context(), 'hashes') else [] hash_list = list(set(hash_list) - set(old_hash_list)) if len(hash_list) > 0: context["File"] = [] for hash_string in hash_list: if len(hash_string) == 32: context["File"].append({"MD5": hash_string}) if len(hash_string) == 64: context["File"].append({"SHA256": hash_string}) if len(hash_string) == 40: context["File"].append({"SHA1": hash_string}) demisto.results({ "Type": entryTypes["note"], "ContentsFormat": formats["text"], "Contents": content, "HumanReadable": md, "EntryContext": context })
def main(): ip_list = [] domain_list = [] hash_list = [] d_args = demisto.args() entry_id = d_args['entryID'] if 'entryID' in d_args else None file_name = d_args['file'] if 'file' in d_args else None # file arg deprecated parse_ip = int(d_args['ips']) if 'ips' in d_args else -1 parse_domain = int(d_args['domains']) if 'domains' in d_args else -1 parse_hash = int(d_args['hashes']) if 'hashes' in d_args else -1 parse_all = True if d_args['parseAll'] == 'yes' else False if parse_ip == -1 and parse_domain == -1 and parse_hash == -1 and not parse_all: return_error('Select a field to extract or set parseAll=yes to parse the whole CSV file') if file_name is None and entry_id is None: return_error('Please provide entryID.') if entry_id is None: # search entry by file name try: entry = get_entry_by_file_name(file_name) entry_id = entry['ID'] except ValueError as e: return_error(str(e)) res = demisto.getFilePath(entry_id) if not res: return_error("Entry {} not found".format(entry_id)) file_path = res['path'] file_name = res['name'] if not file_name.lower().endswith('.csv'): return_error( '"{}" is not in csv format. Please ensure the file is in correct format and has a ".csv" extension'.format( file_name)) if parse_all: all_csv = [] with open(file_path) as f: records = unicode_dict_reader(f) # `records` is a list contains CSV rows (without headers) # so if it doesn't exists - it can be empty or one-lined CSV if records: for row in records: all_csv.append(row) else: # Can be one-line csv f.seek(0) line = f.read() all_csv = line.split(',') output = { 'ParseCSV.ParsedCSV': all_csv } if is_one_dimension_list(all_csv): human_readable = tableToMarkdown(file_name, all_csv, headers=["CSV list"]) else: human_readable = tableToMarkdown(file_name, all_csv) demisto.results({ "Type": entryTypes["note"], "ContentsFormat": formats["json"], "ReadableContentsFormat": formats["markdown"], "Contents": all_csv, "EntryContext": output, "HumanReadable": human_readable }) elif not (parse_ip == -1 and parse_domain == -1 and parse_hash == -1): # if need to parse ips/domains/hashes, keep the script running if sum(1 for line in open(file_path)) <= 1: # checks if there are less than one line return_error('No data to parse. CSV file might be empty or one-lined. try the `ParseAll=yes` argument.') with open(file_path, 'rU') as f: has_header = csv.Sniffer().has_header(f.read(1024)) f.seek(0) csv_data = csv.reader(f) if has_header: next(csv_data) md = '### Parsed Data Table\n' + ('IPs |' if 'ips' in d_args else '') + ( 'Domains |' if 'domains' in d_args else '') + ('Hashes |' if 'hashes' in d_args else '') + '\n' md += ('- |' if 'ips' in d_args else '') + ('- |' if 'domains' in d_args else '') + ( '- |' if 'hashes' in d_args else '') + '\n' content = '' for row in csv_data: content += ','.join(row) + '\n' if parse_ip != -1: md += (row[parse_ip] + '|' if row[parse_ip] else ' |') is_ip = re.search(r'([0-9]{1,3}\.){3}[0-9]{1,3}', row[parse_ip]) is_valid = is_ip_valid(row[parse_ip]) if is_ip and is_valid: ip_list.append(row[parse_ip]) if parse_domain != -1: md += (row[parse_domain] + '|' if row[parse_domain] else ' |') has_dot = '.' 
in row[parse_domain] no_spaces = ' ' not in row[parse_domain] if has_dot and no_spaces: domain_list.append(row[parse_domain]) if parse_hash != -1: md += (row[parse_hash] + '|' if row[parse_hash] else ' |') is_hash = re.search(r'[0-9A-Fa-f]{32,128}', row[parse_hash]) if is_hash: hash_list.append(row[parse_hash]) md += '\n' context = {} # type: dict if ip_list: old_ip_list = list(demisto.get(demisto.context(), 'ips')) if demisto.get(demisto.context(), 'ips') else [] ip_list = list(set(ip_list) - set(old_ip_list)) if len(ip_list) > 0: context["IP"] = [] for ip in ip_list: context["IP"].append({"Address": ip}) if domain_list: old_domain_list = list(demisto.get(demisto.context(), 'domains')) if demisto.get(demisto.context(), 'domains') else [] domain_list = list(set(domain_list) - set(old_domain_list)) if len(domain_list) > 0: context["Domain"] = [] for domain in domain_list: context["Domain"].append({"Name": domain}) if hash_list: old_hash_list = list(demisto.get(demisto.context(), 'hashes')) if demisto.get(demisto.context(), 'hashes') else [] hash_list = list(set(hash_list) - set(old_hash_list)) if len(hash_list) > 0: context["File"] = [] for hash_string in hash_list: if len(hash_string) == 32: context["File"].append({"MD5": hash_string}) if len(hash_string) == 64: context["File"].append({"SHA256": hash_string}) if len(hash_string) == 40: context["File"].append({"SHA1": hash_string}) demisto.results({ "Type": entryTypes["note"], "ContentsFormat": formats["text"], "Contents": content, "HumanReadable": md, "EntryContext": context })
42,941
def rbfkernel(R, sigma): r"""This function generates a radial basis function (RBF) kernel matrix. The elements of the RBF kernel are computed as: .. math:: K_{i,j} = e^{-\|\bf{r}_i-\bf{r}_j\|^2/(2*\sigma^2)}, where :math:`\bf{r}_i` is the coordinates of point :math:`i` and :math:`\sigma` is a constant. **Example usage:** >>> R = array([[0, 1], [1, 0], [0, 0], [1, 1]]) >>> sigma = 1.0 >>> rbfkernel (R, sigma) array([[1. , 0.36787944, 0.60653066, 0.60653066], [0.36787944, 1. , 0.60653066, 0.60653066], [0.60653066, 0.60653066, 1. , 0.36787944], [0.60653066, 0.60653066, 0.36787944, 1. ]]) Args: R (array): coordinates of the points. sigma (float): a constant. Returns: K (array): the kernel matrix. """ K = np.exp(-(cdist(R, R)) ** 2 / 2 / sigma ** 2) return K
def rbfkernel(R, sigma):
    r"""This function generates a radial basis function (RBF) kernel matrix.

    The elements of the RBF kernel are computed as:

    .. math::

        K_{i,j} = e^{-\|\bf{r}_i-\bf{r}_j\|^2/(2*\sigma^2)},

    where :math:`\bf{r}_i` is the coordinates of point :math:`i` and :math:`\sigma` is a kernel parameter that
    determines the scale of the kernel. Points that are much further than a distance :math:`\sigma` from each
    other lead to small entries of the kernel matrix, whereas points much closer than :math:`\sigma` generate
    large entries.

    **Example usage:**

    >>> R = array([[0, 1], [1, 0], [0, 0], [1, 1]])
    >>> sigma = 1.0
    >>> rbfkernel (R, sigma)
    array([[1.        , 0.36787944, 0.60653066, 0.60653066],
           [0.36787944, 1.        , 0.60653066, 0.60653066],
           [0.60653066, 0.60653066, 1.        , 0.36787944],
           [0.60653066, 0.60653066, 0.36787944, 1.        ]])

    Args:
        R (array): coordinates of the points.
        sigma (float): a constant.

    Returns:
        K (array): the kernel matrix.
    """
    K = np.exp(-(cdist(R, R)) ** 2 / 2 / sigma ** 2)
    return K
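A quick cross-check of the formula above (a sketch assuming SciPy and scikit-learn are available): scikit-learn's `rbf_kernel` computes `exp(-gamma * ||r_i - r_j||^2)`, so `gamma = 1 / (2 * sigma**2)` should reproduce the matrix from the docstring example.

import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics.pairwise import rbf_kernel

R = np.array([[0, 1], [1, 0], [0, 0], [1, 1]])
sigma = 1.0
K_manual = np.exp(-cdist(R, R) ** 2 / (2 * sigma ** 2))   # the formula used by rbfkernel above
K_sklearn = rbf_kernel(R, gamma=1.0 / (2 * sigma ** 2))
assert np.allclose(K_manual, K_sklearn)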
24,318
def test_ssh(aggregator): check = CheckSSH('ssh_check', {}, [common.INSTANCES['main']]) nb_threads = threading.active_count() check.check(None) for sc in aggregator.service_checks(CheckSSH.SSH_SERVICE_CHECK_NAME): assert sc.status == CheckSSH.OK for tag in sc.tags: assert tag in ('instance:io.netgarage.org-22', 'optional:tag1') # Check that we've closed all connections, if not we're leaking threads common.wait_for_threads() assert nb_threads == threading.active_count()
def test_ssh(aggregator): check = CheckSSH('ssh_check', {}, [common.INSTANCES['main']]) nb_threads = threading.active_count() check.check(common.INSTANCES['main']) for sc in aggregator.service_checks(CheckSSH.SSH_SERVICE_CHECK_NAME): assert sc.status == CheckSSH.OK for tag in sc.tags: assert tag in ('instance:io.netgarage.org-22', 'optional:tag1') # Check that we've closed all connections, if not we're leaking threads common.wait_for_threads() assert nb_threads == threading.active_count()
7,875
def test_get_elements(): # test that zero elements exist on creation m = openmc.Material() assert len(m.get_elements()) == 0 # test addition of a single element m.add_element('Li', 0.2) assert len(m.get_elements()) == 1 assert 'Li' in m.get_elements() # test that adding the same element m.add_element('Li', 0.3) assert len(m.get_elements()) == 1 assert 'Li' in m.get_elements() # test adding another element m.add_element('Si', 0.3) assert len(m.get_elements()) == 2 assert 'Si' in m.get_elements() # test adding a third element m.add_element('O', 0.4) assert len(m.get_elements()) == 3 # test removal of nuclides m.remove_nuclide('O16') m.remove_nuclide('O17') assert 'O' not in m.get_elements() assert 'Si' in m.get_elements() assert 'Li' in m.get_elements() assert len(m.get_elements()) == 2
def test_get_elements(): # test that zero elements exist on creation m = openmc.Material() assert len(m.get_elements()) == 0 # test addition of a single element m.add_element('Li', 0.2) assert len(m.get_elements()) == 1 assert 'Li' in m.get_elements() # test that adding the same element m.add_element('Li', 0.3) assert len(m.get_elements()) == 1 assert 'Li' in m.get_elements() # test adding another element m.add_element('Si', 0.3) assert len(m.get_elements()) == 2 assert 'Si' in m.get_elements() # test adding a third element m.add_element('O', 0.4) assert len(m.get_elements()) == 3 assert m.get_elements() == ["Li", "O", "Si"] # test removal of nuclides m.remove_nuclide('O16') m.remove_nuclide('O17') assert 'O' not in m.get_elements() assert 'Si' in m.get_elements() assert 'Li' in m.get_elements() assert len(m.get_elements()) == 2
20,622
def get_accepted_mimetype(accept_header, choices=None): """Return the preferred mimetype from an Accept header If `choices` is given, return the first match, otherwise return the first accepted item Return None if choices is given and no match is found, or nothing is specified. """ for (mime, params, q) in _parse_accept_header(accept_header): if choices: if mime in choices: return mime else: continue else: return mime return None
def get_accepted_mimetype(accept_header, choices=None): """Return the preferred mimetype from an Accept header If `choices` is given, return the first match, otherwise return the first accepted item Return `None` if choices is given and no match is found, or nothing is specified. """ for (mime, params, q) in _parse_accept_header(accept_header): if choices: if mime in choices: return mime else: continue else: return mime return None
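An illustrative call, hedged: the exact output depends on `_parse_accept_header` (not shown here), which is assumed to yield entries in the client's preference order, so the results sketched in the comments are indicative rather than guaranteed.

accept = "text/html, application/json;q=0.9, */*;q=0.1"

# With no choices, the first accepted entry is returned.
print(get_accepted_mimetype(accept))
# With choices, the first accepted entry that is also in choices is returned, else None.
print(get_accepted_mimetype(accept, choices={"application/json", "text/csv"}))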
55,354
def reflection_mat2D(data):
    """apply 2D reflection"""
    assert data.shape[-1] == 2, 'data is not 2D'
    theta = np.radians(np.random.randint(0, 360))
    ref = np.array([[np.cos(2 * theta), np.sin(2 * theta)],
                    [np.sin(2 * theta), -np.cos(2 * theta)]])
    return tf.matmul(data, ref)
def reflection_mat2D(data):
    """apply 2D reflection"""
    assert data.shape[-1] == 2, 'data is not 2D'
    theta = np.radians(np.random.randint(0, 360))
    ref = np.array([[np.cos(2 * theta), np.sin(2 * theta)],
                    [np.sin(2 * theta), -np.cos(2 * theta)]])
    return tf.matmul(data, ref)
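A small property check for the matrix used above (NumPy-only sketch; the angle is arbitrary): a 2D reflection is orthogonal, has determinant -1, and is its own inverse.

import numpy as np

theta = np.radians(30)
ref = np.array([[np.cos(2 * theta), np.sin(2 * theta)],
                [np.sin(2 * theta), -np.cos(2 * theta)]])

assert np.allclose(ref @ ref.T, np.eye(2))   # orthogonal
assert np.isclose(np.linalg.det(ref), -1.0)  # determinant -1: a reflection, not a rotation
assert np.allclose(ref @ ref, np.eye(2))     # involutive: reflecting twice restores the input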
31,731
def get_employee_identity_analysis_genome_data_command(client, args): email_address = str(args.get('email_address', '')) response = client.get_employee_identity_analysis_genome_data_request(email_address) headers = [ 'description', 'key', 'name', 'values', ] markdown = tableToMarkdown( f"Analysis of {email_address}", response.get('histograms', []), headers=headers) command_results = CommandResults( readable_output=markdown, outputs_prefix='AbnormalSecurity.EmployeeIdentityDetails', outputs_key_field='', outputs=response, raw_response=response ) return command_results
def get_employee_identity_analysis_genome_data_command(client, args): email_address = str(args.get('email_address', '')) response = client.get_employee_identity_analysis_genome_data_request(email_address) headers = [ 'description', 'key', 'name', 'values', ] markdown = tableToMarkdown( f"Analysis of {email_address}", response.get('histograms', []), headers=headers) command_results = CommandResults( readable_output=markdown, outputs_prefix='AbnormalSecurity.Employee', outputs_key_field='', outputs=response, raw_response=response ) return command_results