Dataset columns: code (string, 4 to 4.48k characters), docstring (string, 1 to 6.45k characters), _id (string, 24 characters).
def __setitem__(self, key, val): <NEW_LINE> <INDENT> if key not in MobsterConfigDict.CONFIG_DEFAULTS.keys(): <NEW_LINE> <INDENT> raise KeyError('Invalid config key {0}'.format(key)) <NEW_LINE> <DEDENT> return dict.__setitem__(self, key, val)
Do not allow assignment to keys not contained in the defaults
625941b68e05c05ec3eea191
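A minimal sketch of how this guarded __setitem__ behaves, assuming a hypothetical CONFIG_DEFAULTS with two keys (the real defaults are not part of this sample):

class MobsterConfigDict(dict):
    CONFIG_DEFAULTS = {'host': 'localhost', 'port': 8080}  # hypothetical defaults

    def __setitem__(self, key, val):
        if key not in MobsterConfigDict.CONFIG_DEFAULTS.keys():
            raise KeyError('Invalid config key {0}'.format(key))
        return dict.__setitem__(self, key, val)

cfg = MobsterConfigDict()
cfg['port'] = 9090          # accepted: 'port' is a known default
try:
    cfg['prot'] = 9090      # typo: rejected
except KeyError as err:
    print(err)              # 'Invalid config key prot'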
def filter_metadata(self, metadata): <NEW_LINE> <INDENT> rslt = {} <NEW_LINE> for elt in metadata: <NEW_LINE> <INDENT> if not os.path.dirname(elt) and elt.endswith('spatial_meta.txt'): <NEW_LINE> <INDENT> spatial_meta = metadata[elt] <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> raise MetadataError('The spatial metadata is unexpectedly missing') <NEW_LINE> <DEDENT> rslt['ccf_spatial'] = {k : v for k, v in spatial_meta.items()} <NEW_LINE> return rslt
This extracts the metadata which is actually desired downstream from the bulk of the metadata which has been collected.
625941b699cbb53fe6792a07
def _get_lines(dec_init_pairs, main_lines, dummy_vars, safe, include_cond_bufwrite): <NEW_LINE> <INDENT> setup_lines = _get_setup_lines(dec_init_pairs) <NEW_LINE> lines = setup_lines + main_lines <NEW_LINE> body_tags = [Tag.BODY for _ in lines] <NEW_LINE> if include_cond_bufwrite: <NEW_LINE> <INDENT> query_tag = Tag.BUFWRITE_COND_SAFE if safe else Tag.BUFWRITE_COND_UNSAFE <NEW_LINE> body_tags[-1] = query_tag <NEW_LINE> <DEDENT> min_num_dummies = 0 if include_cond_bufwrite else MIN_NUM_DUMMIES_TAUTONLY <NEW_LINE> num_dummies = random.randrange(min_num_dummies, MAX_NUM_DUMMIES + 1) <NEW_LINE> lines, body_tags = _insert_dummies( setup_lines, main_lines, dummy_vars, num_dummies, body_tags, include_cond_bufwrite) <NEW_LINE> return lines, body_tags
Create full body lines with setup, main content, and dummy interaction Args: dec_init_pairs (list of tuple) main_lines (list of str): lines that use the declared vars dummy_vars (list of str): variable names available for dummy use safe (bool): whether the query line access is safe (for tags) or None, if no conditional query line should be added include_cond_bufwrite (bool): whether to include the control flow-sensitive buffer write Returns: lines (list of str) body_tags (list of Tag instances): tags for each body line
625941b663b5f9789fde6f05
def load_dataset(path: str) -> pd.DataFrame: <NEW_LINE> <INDENT> with open(path) as f: <NEW_LINE> <INDENT> data = [] <NEW_LINE> problems = 0 <NEW_LINE> for line_num, line in enumerate(f): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> cleaned_line = re.sub(r"(?<={|\s)'|'(?=,|:|})", '"', line) <NEW_LINE> cleaned_line = re.sub( r"(?<!{)(?<!,\s|:\s)\"(?!,|:|})", '\\"', cleaned_line ) <NEW_LINE> cleaned_line = cleaned_line.replace("\\'", "'") <NEW_LINE> data_dict = json.loads(cleaned_line) <NEW_LINE> for k in data_dict.keys(): <NEW_LINE> <INDENT> assert len(k) < 20 <NEW_LINE> <DEDENT> data.append(data_dict) <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> problems += 1 <NEW_LINE> <DEDENT> <DEDENT> assess_problematic_entries(n_problems=problems, data=data) <NEW_LINE> return pd.DataFrame(data)
Load a dataset from a pseudo-JSON format into a dataframe using regular expressions. Args: path (str): location where file is stored Returns: The dataset read into a dataframe
625941b66fb2d068a760eec1
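To make the quote-repair step concrete, here is a standalone sketch of the first substitution on one pseudo-JSON line (assess_problematic_entries from the sample is not needed for this):

import json
import re

line = "{'name': 'Alice', 'city': 'Boston'}"
# Single quotes that delimit keys or values become double quotes so json.loads accepts them.
cleaned = re.sub(r"(?<={|\s)'|'(?=,|:|})", '"', line)
print(json.loads(cleaned))   # {'name': 'Alice', 'city': 'Boston'}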
def getPorts(self): <NEW_LINE> <INDENT> if platform.startswith('win'): <NEW_LINE> <INDENT> return ['COM%s' % (i + 1) for i in range(256)] <NEW_LINE> <DEDENT> elif platform.startswith('linux') or platform.startswith('cygwin'): <NEW_LINE> <INDENT> return glob.glob('/dev/tty[A-Za-z]*') <NEW_LINE> <DEDENT> elif platform.startswith('darwin'): <NEW_LINE> <INDENT> return glob.glob('/dev/tty.*') <NEW_LINE> <DEDENT> return None
Lists serial port names :returns: A list of the serial ports available on the system
625941b67b25080760e3927b
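The sample depends on module-level imports that are not shown; a self-contained sketch of the same platform dispatch (port names only, with no check that each port can actually be opened):

import glob
from sys import platform  # a string such as 'linux', 'win32', or 'darwin'

def get_ports():
    if platform.startswith('win'):
        return ['COM%s' % (i + 1) for i in range(256)]
    elif platform.startswith('linux') or platform.startswith('cygwin'):
        return glob.glob('/dev/tty[A-Za-z]*')
    elif platform.startswith('darwin'):
        return glob.glob('/dev/tty.*')
    return None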
def handleSample(self, sample): <NEW_LINE> <INDENT> if not self.subscribers: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> s = sseMsg([sample.counter, sample.eeg, sample.accelerometer], "sensorData") <NEW_LINE> dropouts = [] <NEW_LINE> for subscriber in self.subscribers: <NEW_LINE> <INDENT> if not subscriber.transport.disconnected: <NEW_LINE> <INDENT> subscriber.write(s) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> dropouts.append(subscriber) <NEW_LINE> <DEDENT> <DEDENT> for dropout in dropouts: <NEW_LINE> <INDENT> self.subscribers.remove(dropout)
:type sample: txopenbci.control.RawSample
625941b6046cf37aa974cb6b
def __len__(self): <NEW_LINE> <INDENT> return len(self.atoms)
Number of atoms in container.
625941b650485f2cf553cbb9
def set_play_mode(self, mode): <NEW_LINE> <INDENT> self.current_play_mode = getattr(self.play_modes, mode) <NEW_LINE> self._update_play_queue_order()
Set the playback mode. :param mode: current valid values are "NORMAL" and "SHUFFLE"
625941b6f548e778e58cd39c
def position(n, m1, m2, m3, m4, m5, m6): <NEW_LINE> <INDENT> command("P", n, m1, m2, m3, m4, m5, m6)
Save the specified position in slot n (1-100)
625941b67cff6e4e811177a6
def decode_bytes(rawdata: bytes) -> str: <NEW_LINE> <INDENT> if rawdata: <NEW_LINE> <INDENT> encoding = chardet.detect(rawdata)['encoding'] <NEW_LINE> return markdown(rawdata.decode(encoding)) <NEW_LINE> <DEDENT> return ''
Returns a decoded string from byte data formatted for markdown.
625941b691af0d3eaac9b834
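A usage sketch, assuming the chardet and markdown packages that the sample imports implicitly:

import chardet
from markdown import markdown

def decode_bytes(rawdata: bytes) -> str:
    if rawdata:
        encoding = chardet.detect(rawdata)['encoding']
        return markdown(rawdata.decode(encoding))
    return ''

print(decode_bytes('# Hello **world**'.encode('utf-8')))
# <h1>Hello <strong>world</strong></h1>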
@cli.command() <NEW_LINE> @click.argument('filename') <NEW_LINE> def pagenumbers(filename): <NEW_LINE> <INDENT> ombpdf.pagenumbers.main(get_doc(filename))
Show page numbers in a PDF.
625941b692d797404e303faa
def forecast_weights_by_instrument_csv_to_yaml(filename_input, filename_output): <NEW_LINE> <INDENT> data = pd.read_csv(filename_input) <NEW_LINE> data_instruments = list(data.columns) <NEW_LINE> forecast_header_column = data_instruments[0] <NEW_LINE> data_instruments = data_instruments[1:] <NEW_LINE> rule_names = data[forecast_header_column].values <NEW_LINE> my_config = {} <NEW_LINE> for instrument in data_instruments: <NEW_LINE> <INDENT> data_weights = data[instrument].values <NEW_LINE> my_config[instrument] = dict( [ (rule_name, float(weight)) for (rule_name, weight) in zip(rule_names, data_weights) ] ) <NEW_LINE> <DEDENT> my_config_nested = dict(forecast_weights=my_config) <NEW_LINE> with open(filename_output, "w") as outfile: <NEW_LINE> <INDENT> outfile.write(yaml.dump(my_config_nested, default_flow_style=False)) <NEW_LINE> <DEDENT> return my_config
Read in a configuration csv file containing forecast weights, different for each instrument, and output as yaml :param filename_input: full path and filename :param filename_output: full path and filename :return: data written to yaml
625941b6de87d2750b85fbae
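A worked example with hypothetical rule and instrument names: given a CSV of the expected shape,

rule_name,SP500,US10
ewmac,0.6,0.4
carry,0.4,0.6

the function writes a YAML file nesting one weight mapping per instrument (yaml.dump sorts keys alphabetically by default):

forecast_weights:
  SP500:
    carry: 0.4
    ewmac: 0.6
  US10:
    carry: 0.6
    ewmac: 0.4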
def test_music_scope_preview_open_close(self): <NEW_LINE> <INDENT> scope = self.unity.dash.reveal_music_scope() <NEW_LINE> self.addCleanup(self.unity.dash.ensure_hidden) <NEW_LINE> category = scope.get_category_by_name("Songs") <NEW_LINE> if category is None or not category.is_visible: <NEW_LINE> <INDENT> self.skipTest("This scope is probably empty") <NEW_LINE> <DEDENT> self.assertThat(lambda: len(category.get_results()), Eventually(GreaterThan(0), timeout=20)) <NEW_LINE> results = category.get_results() <NEW_LINE> result = results[0] <NEW_LINE> result.preview() <NEW_LINE> self.assertThat(self.unity.dash.preview_displaying, Eventually(Equals(True))) <NEW_LINE> self.keyboard.press_and_release("Escape") <NEW_LINE> self.assertThat(self.unity.dash.preview_displaying, Eventually(Equals(False)))
Opening the preview of a music scope result must show it; pressing Escape must close it.
625941b666656f66f7cbbfca
def main_window() -> MainWindow: <NEW_LINE> <INDENT> global _qt_win <NEW_LINE> assert _qt_win <NEW_LINE> return _qt_win
Return the actor-global Qt window.
625941b63c8af77a43ae35be
def objectText(self): <NEW_LINE> <INDENT> return self.objText
Return the object's text. :return: the object text :rtype: str
625941b6f7d966606f6a9e29
def calculate_values(self, workbook, worksheet, project_object): <NEW_LINE> <INDENT> header = workbook.add_format({'bold': True, 'bg_color': '#316AC5', 'font_color': 'white', 'text_wrap': True, 'border': 1, 'font_size': 8}) <NEW_LINE> calculation = workbook.add_format({'bg_color': '#FFF2CC', 'text_wrap': True, 'border': 1, 'font_size': 8}) <NEW_LINE> worksheet.write('W2', 'Optimistic (hours)' if self.data_type == DataType.ABSOLUTE else 'Optimistic (%)', header) <NEW_LINE> worksheet.write('X2', 'Most probable (hours)' if self.data_type == DataType.ABSOLUTE else 'Most probable (%)', header) <NEW_LINE> worksheet.write('Y2', 'Pessimistic (hours)' if self.data_type == DataType.ABSOLUTE else 'Pessimistic (%)', header) <NEW_LINE> counter = 2 <NEW_LINE> for activity in project_object.activities: <NEW_LINE> <INDENT> ra = activity.risk_analysis <NEW_LINE> if ra: <NEW_LINE> <INDENT> if ra.distribution_type == DistributionType.MANUAL and ra.distribution_units == ManualDistributionUnit.ABSOLUTE and self.data_type == DataType.RELATIVE: <NEW_LINE> <INDENT> dur = self.get_hours(activity.baseline_schedule.duration, project_object.agenda) <NEW_LINE> if dur != 0: <NEW_LINE> <INDENT> worksheet.write(counter, 22, int((ra.optimistic_duration/dur)*100), calculation) <NEW_LINE> worksheet.write(counter, 23, int((ra.probable_duration/dur)*100), calculation) <NEW_LINE> worksheet.write(counter, 24, int((ra.pessimistic_duration/dur)*100), calculation) <NEW_LINE> <DEDENT> <DEDENT> elif (ra.distribution_type == DistributionType.STANDARD or (ra.distribution_type == DistributionType.MANUAL and ra.distribution_units == ManualDistributionUnit.RELATIVE)) and self.data_type == DataType.ABSOLUTE: <NEW_LINE> <INDENT> dur = self.get_hours(activity.baseline_schedule.duration, project_object.agenda) <NEW_LINE> if dur != 0: <NEW_LINE> <INDENT> worksheet.write(counter, 22, int((ra.optimistic_duration/100.0)*dur), calculation) <NEW_LINE> worksheet.write(counter, 23, int((ra.probable_duration/100.0)*dur), calculation) <NEW_LINE> worksheet.write(counter, 24, int((ra.pessimistic_duration/100.0)*dur), calculation) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> worksheet.write(counter, 22, ra.optimistic_duration, calculation) <NEW_LINE> worksheet.write(counter, 23, ra.probable_duration, calculation) <NEW_LINE> worksheet.write(counter, 24, ra.pessimistic_duration, calculation) <NEW_LINE> <DEDENT> <DEDENT> counter += 1
Calculate all values according to data_type and write them to columns W,X,Y :param workbook: Workbook :param worksheet: Worksheet :param project_object: ProjectObject
625941b6cb5e8a47e48b78d0
def p_boolean_primary_is_not_null(p): <NEW_LINE> <INDENT> p[0] = ('IS NOT NULL', p[1])
boolean_primary : boolean_primary IS NOT NULL
625941b6d99f1b3c44c673b8
def _get_installed_version(self, name): <NEW_LINE> <INDENT> metadata_location = os.path.join( self._home, "plugins", name, "META-INF", "MANIFEST.MF" ) <NEW_LINE> if not os.path.exists(metadata_location): <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> with open(metadata_location, "r") as metadata: <NEW_LINE> <INDENT> version = [ line for line in metadata.readlines() if line.startswith("Plugin-Version: ") ][0][15:].strip() <NEW_LINE> log.debug("Detected installed version {version}".format(version=version)) <NEW_LINE> return version
Finds the version of the plugin that is currently installed, if any. :param str name: The name of the plugin to check. :returns: The installed version or None if the plugin is not installed.
625941b6f8510a7c17cf9525
def mtFeatureExtractionToFile(fileName, midTermSize, midTermStep, shortTermSize, shortTermStep, outPutFile, storeStFeatures=False, storeToCSV=False, PLOT=False): <NEW_LINE> <INDENT> [Fs, x] = audioBasicIO.readAudioFile(fileName) <NEW_LINE> x = audioBasicIO.stereo2mono(x) <NEW_LINE> if storeStFeatures: <NEW_LINE> <INDENT> [mtF, stF] = mtFeatureExtraction(x, Fs, round(Fs * midTermSize), round(Fs * midTermStep), round(Fs * shortTermSize), round(Fs * shortTermStep)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> [mtF, _] = mtFeatureExtraction(x, Fs, round(Fs*midTermSize), round(Fs * midTermStep), round(Fs * shortTermSize), round(Fs * shortTermStep)) <NEW_LINE> <DEDENT> numpy.save(outPutFile, mtF) <NEW_LINE> if PLOT: <NEW_LINE> <INDENT> print("Mid-term numpy file: " + outPutFile + ".npy saved") <NEW_LINE> <DEDENT> if storeToCSV: <NEW_LINE> <INDENT> numpy.savetxt(outPutFile+".csv", mtF.T, delimiter=",") <NEW_LINE> if PLOT: <NEW_LINE> <INDENT> print("Mid-term CSV file: " + outPutFile + ".csv saved") <NEW_LINE> <DEDENT> <DEDENT> if storeStFeatures: <NEW_LINE> <INDENT> numpy.save(outPutFile+"_st", stF) <NEW_LINE> if PLOT: <NEW_LINE> <INDENT> print("Short-term numpy file: " + outPutFile + "_st.npy saved") <NEW_LINE> <DEDENT> if storeToCSV: <NEW_LINE> <INDENT> numpy.savetxt(outPutFile+"_st.csv", stF.T, delimiter=",") <NEW_LINE> if PLOT: <NEW_LINE> <INDENT> print("Short-term CSV file: " + outPutFile + "_st.csv saved")
This function is used as a wrapper to: a) read the content of a WAV file b) perform mid-term feature extraction on that signal c) write the mid-term feature sequences to a numpy file
625941b63c8af77a43ae35bf
def rocking_curve(detector, motor, read_field, coarse_step, fine_step, bounds=None, average=None, fine_space=5, initial_guess=None, position_field='user_readback', show_plot=True): <NEW_LINE> <INDENT> if not bounds: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> bounds = motor.limits <NEW_LINE> logger.debug("Bounds were not specified, the area " "between %s and %s will searched", bounds[0], bounds[1]) <NEW_LINE> <DEDENT> except AttributeError as exc: <NEW_LINE> <INDENT> raise UndefinedBounds("Bounds are not defined by motor {} or " "plan".format(motor.name)) from exc <NEW_LINE> <DEDENT> <DEDENT> if show_plot: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> model = yield from maximize_lorentz(detector, motor, read_field, step_size=coarse_step, bounds=bounds, average=average, position_field=position_field, initial_guess=initial_guess) <NEW_LINE> <DEDENT> except ValueError as exc: <NEW_LINE> <INDENT> raise ValueError("Unable to find a proper maximum value" "during rough scan") from exc <NEW_LINE> <DEDENT> center = model.result.values['center'] <NEW_LINE> bounds = (max(center - fine_space, bounds[0]), min(center + fine_space, bounds[1])) <NEW_LINE> logger.info("Rough scan of region yielded maximum of %s, " "performing fine scan from %s to %s ...", center, bounds[0], bounds[1]) <NEW_LINE> if show_plot: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> fit = yield from maximize_lorentz(detector, motor, read_field, step_size=fine_step, bounds=bounds, average=average, position_field=position_field, initial_guess=model.result.values) <NEW_LINE> <DEDENT> except ValueError as exc: <NEW_LINE> <INDENT> raise ValueError("Unable to find a proper maximum value" "during fine scan") from exc <NEW_LINE> <DEDENT> if show_plot: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> return fit
Travel to the maxima of a bell curve The rocking curve scan is two repeated calls of :func:`.maximize_lorentz`. The first is a rough step scan which searches the area given by ``bounds`` using ``coarse_step``, the idea is that this will populate the model enough such that we can do a more accurate scan of a smaller region of the search space. Once the rough scan is completed, the maxima of the fit is used as the center of the new fine scan that probes a region of space with a region twice as large as the ``fine_space`` parameter. After this, the motor is translated to the calculated maxima of the model Parameters ---------- detector : obj The object to be read during the plan motor : obj The object to be moved via the plan. read_field : str Field of detector to maximize coarse_step : float Step size for the initial rough scan fine_step : float Step size for the fine scan bounds : tuple, optional Bounds for the original rough scan. If not provided, the soft limits of the motor are used average : int, optional Number of shots to average at each step fine_space : float, optional The amount to scan on either side of the rough scan result. Note that the rocking_curve will never tell the secondary scan to travel outside of the ``bounds``, so this region may be truncated position_field : str, optional Motor field that will have the Lorentzian relationship with the given signal initial_guess : dict, optional Initial guess to the Lorentz model parameters of `sigma` `center` `amplitude` show_plot : bool, optional Create a plot displaying the progress of the `rocking_curve`
625941b6fbf16365ca6f5fdc
def unzip(source_filename, dest_dir): <NEW_LINE> <INDENT> with zipfile.ZipFile(source_filename) as zf: <NEW_LINE> <INDENT> for member in zf.infolist(): <NEW_LINE> <INDENT> words = member.filename.split('/') <NEW_LINE> path = dest_dir <NEW_LINE> for word in words[:-1]: <NEW_LINE> <INDENT> drive, word = os.path.splitdrive(word) <NEW_LINE> head, word = os.path.split(word) <NEW_LINE> if word in (os.curdir, os.pardir, ''): continue <NEW_LINE> path = os.path.join(path, word) <NEW_LINE> <DEDENT> zf.extract(member, path)
Taken from: http://stackoverflow.com/questions/12886768/how-to-unzip-file-in-python-on-all-oses
625941b6d486a94d0b98df6f
def token_evaluator(dataset, label=None, writer=None, mapper=None, config=defaults): <NEW_LINE> <INDENT> if config.token_level_eval: <NEW_LINE> <INDENT> evaluator = TokenLevelEvaluator <NEW_LINE> <DEDENT> elif is_iob_tagging(unique(dataset.tokens.target_strs)): <NEW_LINE> <INDENT> evaluator = ConllEvaluator <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> evaluator = TokenLevelEvaluator <NEW_LINE> <DEDENT> info('using {} for {}'.format(evaluator.__name__, dataset.name)) <NEW_LINE> callbacks = [] <NEW_LINE> callbacks.append(Predictor(dataset.tokens)) <NEW_LINE> callbacks.append(evaluator(dataset, label=label, writer=writer)) <NEW_LINE> if mapper is not None: <NEW_LINE> <INDENT> callbacks.append(PredictionMapper(dataset.sentences, mapper)) <NEW_LINE> callbacks.append(evaluator(dataset, label=label, writer=writer)) <NEW_LINE> <DEDENT> return CallbackChain(callbacks)
Return appropriate evaluator callback for dataset.
625941b6baa26c4b54cb0f44
def __unicode__(self): <NEW_LINE> <INDENT> return str(self.id)
Unicode representation of the asigna sistema object
625941b61f037a2d8b94601f
def test_EOF(self): <NEW_LINE> <INDENT> tst = self.create() <NEW_LINE> self.assertTrue(tst.onecmd("EOF"))
Test that the EOF command exits the console
625941b6bf627c535bc12ff7
def show_decr(myfile): <NEW_LINE> <INDENT> ctext = "" <NEW_LINE> with open(myfile, "r") as f: <NEW_LINE> <INDENT> iv = base64.b64decode(f.readline().strip()) <NEW_LINE> for line in f: <NEW_LINE> <INDENT> ctext += line <NEW_LINE> <DEDENT> <DEDENT> print("Decrypting..") <NEW_LINE> key = getpass.getpass() <NEW_LINE> ctx2 = pyelliptic.Cipher(key, iv, 0, ciphername='bf-cfb') <NEW_LINE> del key <NEW_LINE> try: <NEW_LINE> <INDENT> out = str(ctx2.ciphering(base64.b64decode(ctext)), 'utf8') <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> return("Error") <NEW_LINE> <DEDENT> return out
Decrypt the contents of myfile, prompting for the key, and return the plaintext (or 'Error' on failure).
625941b673bcbd0ca4b2be9e
def _handle_get_root(self, path_match, data): <NEW_LINE> <INDENT> write = lambda txt: self.wfile.write((txt + "\n").encode("UTF-8")) <NEW_LINE> self.send_response(HTTP_OK) <NEW_LINE> self.send_header('Content-type', 'text/html; charset=utf-8') <NEW_LINE> self.end_headers() <NEW_LINE> if self.server.development: <NEW_LINE> <INDENT> app_url = "polymer/splash-login.html" <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> app_url = "frontend-{}.html".format(frontend.VERSION) <NEW_LINE> <DEDENT> write(("<!doctype html>" "<html>" "<head><title>Home Assistant</title>" "<meta name='mobile-web-app-capable' content='yes'>" "<link rel='shortcut icon' href='/static/favicon.ico' />" "<link rel='icon' type='image/png' " " href='/static/favicon-192x192.png' sizes='192x192'>" "<meta name='viewport' content='width=device-width, " " user-scalable=no, initial-scale=1.0, " " minimum-scale=1.0, maximum-scale=1.0' />" "<meta name='theme-color' content='#03a9f4'>" "</head>" "<body fullbleed>" "<h3 id='init' align='center'>Initializing Home Assistant</h3>" "<script" " src='/static/webcomponents.min.js'></script>" "<link rel='import' href='/static/{}' />" "<splash-login auth='{}'></splash-login>" "</body></html>").format(app_url, data.get('api_password', '')))
Renders the debug interface.
625941b6d164cc6175782b6e
def duplicate(self, new_host=None): <NEW_LINE> <INDENT> _host = new_host or self._host <NEW_LINE> return StoryUWGProperties(_host)
Get a copy of this object. new_host: A new Story object that hosts these properties. If None, the properties will be duplicated with the same host.
625941b699cbb53fe6792a08
def pids_active(pids_computer): <NEW_LINE> <INDENT> pid_valid = {} <NEW_LINE> for pid in pids_computer: <NEW_LINE> <INDENT> data = None <NEW_LINE> try: <NEW_LINE> <INDENT> process = psutil.Process(pid) <NEW_LINE> data = {"pid": process.pid, "status": process.status(), "percent_cpu_used": process.cpu_percent(interval=0.0), "percent_memory_used": process.memory_percent()} <NEW_LINE> <DEDENT> except (psutil.ZombieProcess, psutil.AccessDenied, psutil.NoSuchProcess): <NEW_LINE> <INDENT> data = None <NEW_LINE> <DEDENT> if data is not None: <NEW_LINE> <INDENT> pid_valid[process.name()] = data <NEW_LINE> <DEDENT> <DEDENT> return pid_valid
Find the PIDs running on the computer and return the valid ones.
625941b6293b9510aa2c30ba
def __init__(self,idx): <NEW_LINE> <INDENT> self.idx = str(idx) <NEW_LINE> try: <NEW_LINE> <INDENT> r = re.compile(r'^([A-Z]+)([0-9]+)$').match(idx) <NEW_LINE> self.column = r.group(1) <NEW_LINE> self.row = int(r.group(2)) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> self.column = None <NEW_LINE> self.row = None <NEW_LINE> if str(idx).isalpha(): <NEW_LINE> <INDENT> self.column = idx <NEW_LINE> <DEDENT> elif str(idx).isdigit(): <NEW_LINE> <INDENT> self.row = int(idx)
Create a new CellIndex instance Arguments idx: cell index e.g. 'A1', 'BZ112'
625941b68a349b6b435e7f95
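A sketch of the three index forms the constructor accepts, given the class above:

idx = CellIndex('BZ112')
print(idx.column, idx.row)     # BZ 112
print(CellIndex('A').column)   # A    (column-only index)
print(CellIndex('7').row)      # 7    (row-only index)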
def is_masked(self): <NEW_LINE> <INDENT> unmasked = portage.db[portage.root]["porttree"].dbapi.xmatch( "match-visible", self.cpv ) <NEW_LINE> return self.cpv not in unmasked
Returns True if this package is masked against installation. @note: We blindly assume that the package actually exists on disk.
625941b615fb5d323cde092a
def handle_draw(self, canvas): <NEW_LINE> <INDENT> pass
Pseudo-abstract method for drawing
625941b624f1403a9260098b
def __init__(self,in_channels,weight_init=Kaiming_Normal(),bias_init=Zeros(),use_bias=False): <NEW_LINE> <INDENT> super(SelfAttention,self).__init__() <NEW_LINE> self.q = Conv2d(in_channels,in_channels//8,kernel_size=1,weight_init=weight_init,bias_init=bias_init,bias=use_bias) <NEW_LINE> self.k = Conv2d(in_channels,in_channels//8,kernel_size=1,weight_init=weight_init,bias_init=bias_init,bias=use_bias) <NEW_LINE> self.v = Conv2d(in_channels,in_channels,kernel_size=1,weight_init=weight_init,bias_init=bias_init,bias=use_bias) <NEW_LINE> self.softmax = nn.Softmax(dim=-1) <NEW_LINE> self.atten_weight = nn.Parameter(torch.tensor([0.0]))
:param in_channels: number of channels in the input feature map :param weight_init: initializer for the convolution weights :param bias_init: initializer for the convolution biases :param use_bias: whether the 1x1 convolutions use a bias term
625941b6aad79263cf39085b
def export_decide(source_data, report_file, year): <NEW_LINE> <INDENT> return bool(append_to_file(report_file, reports.decide(source_data, year)))
Append actual report to report file. Check out reports.py for content of actual report. @param source_data string Name of the file that contains source data @param report_file string Name of the file to be created @param year int Year parameter for inside function @return bool True if successful, otherwise False
625941b6507cdc57c6306af3
def token_endpoint(self, authn="", **kwargs): <NEW_LINE> <INDENT> _sdb = self.sdb <NEW_LINE> logger.debug("- token -") <NEW_LINE> body = kwargs["request"] <NEW_LINE> logger.debug("body: %s" % sanitize(body)) <NEW_LINE> areq = AccessTokenRequest().deserialize(body, "urlencoded") <NEW_LINE> try: <NEW_LINE> <INDENT> self.client_authn(self, areq, authn) <NEW_LINE> <DEDENT> except FailedAuthentication as err: <NEW_LINE> <INDENT> logger.error(err) <NEW_LINE> err = TokenErrorResponse(error="unauthorized_client", error_description="%s" % err) <NEW_LINE> return Response(err.to_json(), content="application/json", status="401 Unauthorized") <NEW_LINE> <DEDENT> logger.debug("AccessTokenRequest: %s" % sanitize(areq)) <NEW_LINE> try: <NEW_LINE> <INDENT> assert areq["grant_type"] == "authorization_code" <NEW_LINE> <DEDENT> except AssertionError: <NEW_LINE> <INDENT> err = TokenErrorResponse(error="invalid_request", error_description="Wrong grant type") <NEW_LINE> return Response(err.to_json(), content="application/json", status="401 Unauthorized") <NEW_LINE> <DEDENT> _info = _sdb[areq["code"]] <NEW_LINE> resp = self.token_scope_check(areq, _info) <NEW_LINE> if resp: <NEW_LINE> <INDENT> return resp <NEW_LINE> <DEDENT> if "redirect_uri" in _info: <NEW_LINE> <INDENT> assert areq["redirect_uri"] == _info["redirect_uri"] <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> _tinfo = _sdb.upgrade_to_token(areq["code"], issue_refresh=True) <NEW_LINE> <DEDENT> except AccessCodeUsed: <NEW_LINE> <INDENT> err = TokenErrorResponse(error="invalid_grant", error_description="Access grant used") <NEW_LINE> return Response(err.to_json(), content="application/json", status="401 Unauthorized") <NEW_LINE> <DEDENT> logger.debug("_tinfo: %s" % sanitize(_tinfo)) <NEW_LINE> atr = AccessTokenResponse(**by_schema(AccessTokenResponse, **_tinfo)) <NEW_LINE> logger.debug("AccessTokenResponse: %s" % sanitize(atr)) <NEW_LINE> return Response(atr.to_json(), content="application/json", headers=OAUTH2_NOCACHE_HEADERS)
This is where clients come to get their access tokens
625941b6e64d504609d74661
def max_sentence_length(self, text): <NEW_LINE> <INDENT> all_sentences = self.sentence_split(text) <NEW_LINE> try: <NEW_LINE> <INDENT> return max(list(map(len, all_sentences))) <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> print(e) <NEW_LINE> return 0
Compute and return the length of the longest sentence in the text.
625941b6de87d2750b85fbaf
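sentence_split is defined elsewhere on the class; a standalone sketch with a naive regex splitter (an assumption) makes the behaviour concrete:

import re

def max_sentence_length(text):
    # Naive stand-in for the sample's self.sentence_split.
    sentences = re.split(r'(?<=[.!?])\s+', text.strip())
    try:
        return max(map(len, sentences))
    except ValueError as e:
        print(e)
        return 0

print(max_sentence_length('Short. A much longer sentence here.'))   # 28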
def intersect_with(self, other: "CountRange") -> "CountRange": <NEW_LINE> <INDENT> if self.is_empty() or other.is_empty(): <NEW_LINE> <INDENT> return CountRange.never() <NEW_LINE> <DEDENT> start = self.start <NEW_LINE> if other.start is not None and (self.start is None or self.start < other.start): <NEW_LINE> <INDENT> start = other.start <NEW_LINE> <DEDENT> end = self.end <NEW_LINE> if other.end is not None and (self.end is None or self.end > other.end): <NEW_LINE> <INDENT> end = other.end <NEW_LINE> <DEDENT> inclusivity = CountRange.EXCLUSIVE <NEW_LINE> if start is None or (start in self and start in other): <NEW_LINE> <INDENT> inclusivity |= CountRange.INCLUDE_START <NEW_LINE> <DEDENT> if end is None or (end in self and end in other): <NEW_LINE> <INDENT> inclusivity |= CountRange.INCLUDE_END <NEW_LINE> <DEDENT> if start is not None and end is not None and start > end: <NEW_LINE> <INDENT> return CountRange.never() <NEW_LINE> <DEDENT> return CountRange(start, end, inclusivity)
Return a range which represents the intersection of this range with another
625941b6cb5e8a47e48b78d1
def parse(self,content,location,storage,baseurl, searchurl): <NEW_LINE> <INDENT> js = json.loads(content, 'utf-8')['xsearch'] <NEW_LINE> totalhits = str(js['records']) <NEW_LINE> for result in js['list']: <NEW_LINE> <INDENT> title = self.htmlCode(result['title']) if result.has_key('title') else '' <NEW_LINE> author = self.htmlCode(result['creator']) if result.has_key('creator') else '' <NEW_LINE> type = self.htmlCode(result['type']) if result.has_key('type') else '' <NEW_LINE> year = self.htmlCode(result['date']) if result.has_key('date') else '' <NEW_LINE> url = self.htmlCode(result['identifier']) if result.has_key('identifier') else '' <NEW_LINE> storage.append(MediaItem(title, location, author, type, year, url)) <NEW_LINE> <DEDENT> return (str(len(storage)), totalhits)
Parse content, add any contained items to storage and return number of items found as a string Arguments content -- (html-)content to parse location -- library location storage -- list to which MediaItems will be added as they are found in content baseurl -- base url to media content searchurl -- search url
625941b699fddb7c1c9de1b4
def _get_link_name(path, basepath, ident, tag): <NEW_LINE> <INDENT> path = os.path.relpath(path, basepath) <NEW_LINE> path = path.replace('/', '-') <NEW_LINE> path = path.replace('_', '-') <NEW_LINE> p = re.compile('Se(\d+)-') <NEW_LINE> m = p.findall(path) <NEW_LINE> series = '00' <NEW_LINE> if m: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> series = '{0:02d}'.format(int(m[0])) <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> <DEDENT> filename = '{exam}_{tag}_{series}_{tail}'.format(exam=str(ident), tag=tag, series=series, tail=path) <NEW_LINE> return(filename)
Take the path to the file, and mangle it so that we have a unique "description" to use in the final name. For instance, the path: data/RESOURCES/DTI_CMH_H001_01_01/A/B/C/sprl.nii will get mangled like so: 1. Strip off 'data/RESOURCES/DTI_CMH_H001_01_01/' 2. Convert all / to dashes - 3. Convert all _ to dashes - the result is the string: A-B-C-sprl.nii Example: >>> _get_link_name('data/RESOURCES/DTI_CMH_H001_01_01/A/B/C/sprl.nii', 'data/RESOURCES/DTI_CMH_H001_01_01/', datman.scanid.parse('DTI_CMH_H001_01_01'), 'SPRL') 'DTI_CMH_H001_01_01_SPRL_00_A-B-C-sprl.nii'
625941b6cc40096d61595774
def create_table(): <NEW_LINE> <INDENT> print("Create last_name table") <NEW_LINE> query = "CREATE TABLE last_name (id INT(11) NOT NULL PRIMARY KEY, name VARCHAR(255))" <NEW_LINE> connector = get_database_connection() <NEW_LINE> cursor = connector.cursor() <NEW_LINE> cursor.execute(query) <NEW_LINE> connector.commit()
It creates the table last_name, with id as primary key and name as the other column
625941b60c0af96317bb800a
def raises(error): <NEW_LINE> <INDENT> if error: <NEW_LINE> <INDENT> if isinstance(error, Exception): <NEW_LINE> <INDENT> match = str(error) <NEW_LINE> error = error.__class__ <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> match = None <NEW_LINE> <DEDENT> return pytest.raises(error, match=match) <NEW_LINE> <DEDENT> @contextmanager <NEW_LINE> def not_raises(): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> yield <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> raise e <NEW_LINE> <DEDENT> <DEDENT> return not_raises()
Wrapper around pytest.raises to support None.
625941b61d351010ab85593f
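Usage sketch for the raises helper above: an exception instance asserts type and message, a class asserts type only, and a falsy value asserts that nothing is raised (pytest is assumed, per the docstring):

import pytest

with raises(ValueError('bad input')):
    raise ValueError('bad input')   # passes: type and message both match

with raises(ValueError):
    raise ValueError('anything')    # passes: type matches

with raises(None):
    pass                            # passes: no exception raised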
@bp.route('/<int:id>') <NEW_LINE> @login_required <NEW_LINE> @permission_required(Permission.READ_PROJECT) <NEW_LINE> def project(user_project): <NEW_LINE> <INDENT> project = user_project.project <NEW_LINE> return render_template( 'project.html', title=project.title, current_user_project=user_project, member_user_projects=project.user_projects.order_by(UserProject.timestamp), open_issues=project.issues.filter_by(status='Open').order_by(Issue.timestamp), in_progress_issues=project.issues.filter_by( status='In Progress', priority='High' ) .order_by(Issue.timestamp) .union_all( project.issues.filter_by(status='In Progress', priority='Medium').order_by( Issue.timestamp ) ) .union_all( project.issues.filter_by(status='In Progress', priority='Low').order_by( Issue.timestamp ) ), resolved_issues=project.issues.filter_by(status='Resolved').order_by( Issue.resolved_timestamp ), closed_issues=project.issues.filter_by(status='Closed').order_by( Issue.closed_timestamp ), )
Renders the project page.
625941b6aad79263cf39085c
def getlabel(self, name): <NEW_LINE> <INDENT> if not self.label: <NEW_LINE> <INDENT> return u"" <NEW_LINE> <DEDENT> lid=makeid(name) <NEW_LINE> if self.required and "required" not in self.label_attr.get('class',''): <NEW_LINE> <INDENT> self.label_attr['class']+=" required" <NEW_LINE> <DEDENT> attrs="" <NEW_LINE> for key,val in self.label_attr.items(): <NEW_LINE> <INDENT> attrs+='%s="%s" ' % (key, val) <NEW_LINE> <DEDENT> return u"""<label for="%s" %s>%s</label>""" % (lid, attrs, self.label)
Generate the label in html
625941b6e1aae11d1e749ad4
def __init__(self, data, labels, k): <NEW_LINE> <INDENT> self.data = np.hstack((data, np.expand_dims(labels, axis=1))) <NEW_LINE> self.n = len(self.data) <NEW_LINE> self.w = len(self.data[0]) - 1 <NEW_LINE> self.k = min(k, self.n) <NEW_LINE> self.head = self.create_tree(0, len(data), 0)
:param data: training set data :param labels: training set labels :param k: number of nodes taking part in the majority vote
625941b6e1aae11d1e749ad5
def new_evaluation(current_colour_choices): <NEW_LINE> <INDENT> rightly_positioned, permutated = get_evaluation() <NEW_LINE> if rightly_positioned == number_of_positions: <NEW_LINE> <INDENT> return(current_colour_choices, (rightly_positioned, permutated)) <NEW_LINE> <DEDENT> if not answer_ok((rightly_positioned, permutated)): <NEW_LINE> <INDENT> return(current_colour_choices, (-1, permutated)) <NEW_LINE> <DEDENT> guesses.append((current_colour_choices, (rightly_positioned, permutated))) <NEW_LINE> view_guesses() <NEW_LINE> current_colour_choices = create_new_guess() <NEW_LINE> if not current_colour_choices: <NEW_LINE> <INDENT> return(current_colour_choices, (-1, permutated)) <NEW_LINE> <DEDENT> return(current_colour_choices, (rightly_positioned, permutated))
This function gets an evaluation of the current guess, checks the consistency of this evaluation, adds the guess together with the evaluation to the list of guesses, shows the previous guesses and creates a new guess
625941b68a43f66fc4b53e8b
def dalecv2(clab, cf, cr, cw, cl, cs, theta_min, f_auto, f_fol, f_roo, clspan, theta_woo, theta_roo, theta_lit, theta_som, Theta, ceff, d_onset, f_lab, cronset, d_fall, crfall, clma, dC, x): <NEW_LINE> <INDENT> phi_on = phi_onset(d_onset, cronset, dC, x) <NEW_LINE> phi_off = phi_fall(d_fall, crfall, clspan, dC, x) <NEW_LINE> gpp = acm(cf, clma, ceff, dC, x) <NEW_LINE> temp = temp_term(Theta, dC, x) <NEW_LINE> clab2 = (1 - phi_on)*clab + (1-f_auto)*(1-f_fol)*f_lab*gpp <NEW_LINE> cf2 = (1 - phi_off)*cf + phi_on*clab + (1-f_auto)*f_fol*gpp <NEW_LINE> cr2 = (1 - theta_roo)*cr + (1-f_auto)*(1-f_fol)*(1-f_lab)*f_roo*gpp <NEW_LINE> cw2 = (1 - theta_woo)*cw + (1-f_auto)*(1-f_fol)*(1-f_lab)*(1-f_roo)*gpp <NEW_LINE> cl2 = (1-(theta_lit+theta_min)*temp)*cl + theta_roo*cr + phi_off*cf <NEW_LINE> cs2 = (1 - theta_som*temp)*cs + theta_woo*cw + theta_min*temp*cl <NEW_LINE> return np.array((clab2, cf2, cr2, cw2, cl2, cs2, theta_min, f_auto, f_fol, f_roo, clspan, theta_woo, theta_roo, theta_lit, theta_som, Theta, ceff, d_onset, f_lab, cronset, d_fall, crfall, clma))
DALECV2 carbon balance model ------------------------------- evolves carbon pools to the next time step, taking the 6 carbon pool values and 17 parameters at time t and evolving them to time t+1. Function also requires a dataClass (dC) and a time step x.
625941b6fff4ab517eb2f25a
def dimensions_compatible(dimensions, other_dimensions): <NEW_LINE> <INDENT> for dim, other_dim in zip(dimensions, other_dimensions): <NEW_LINE> <INDENT> if dim != other_dim: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> <DEDENT> return True
Check to see if two dimensions share the same common coordinates. Note that this will only compare the dimensions up to the length of the shortest list. :param dimensions: dimension list :param other_dimensions: other dimension list
625941b657b8e32f524832c1
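Because zip truncates to the shorter list, a dimension list is compatible with any prefix of itself:

print(dimensions_compatible(['time', 'lat', 'lon'], ['time', 'lat']))   # True
print(dimensions_compatible(['time', 'lat'], ['time', 'lon']))          # False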
def _get_password(self) -> str: <NEW_LINE> <INDENT> return self._password
Return the hashed version of the password.
625941b6046cf37aa974cb6c
@pytest.mark.django_db(transaction=False) <NEW_LINE> def test_assignment_db_queries(): <NEW_LINE> <INDENT> for index in range(10): <NEW_LINE> <INDENT> Assignment.objects.create(title=f"assignment{index}", open_posts=1) <NEW_LINE> <DEDENT> assert count_queries(Assignment.get_elements) == 15
Tests that only the following db queries are done: * 1 request to get the list of all assignments, * 1 request to get all related users, * 1 request to get the agenda item, * 1 request to get the polls, * 1 request to get the tags and * 10 requests to fetch each related user again. TODO: The last requests are a bug.
625941b656b00c62f0f1447e
def create_updates(loss, params, update_algo, learning_rate, momentum=None): <NEW_LINE> <INDENT> if update_algo == 'sgd': <NEW_LINE> <INDENT> return lasagne.updates.sgd(loss, params=params, learning_rate=learning_rate) <NEW_LINE> <DEDENT> elif update_algo == 'momentum': <NEW_LINE> <INDENT> return lasagne.updates.momentum(loss, params=params, learning_rate=learning_rate, momentum=momentum) <NEW_LINE> <DEDENT> elif update_algo == 'nesterov': <NEW_LINE> <INDENT> return lasagne.updates.nesterov_momentum(loss, params=params, learning_rate=learning_rate, momentum=momentum) <NEW_LINE> <DEDENT> elif update_algo == 'adadelta': <NEW_LINE> <INDENT> return lasagne.updates.adadelta(loss, params=params, learning_rate=learning_rate) <NEW_LINE> <DEDENT> elif update_algo == 'adam': <NEW_LINE> <INDENT> return lasagne.updates.adam(loss, params=params, learning_rate=learning_rate) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError('unknown update algorithm: %s' % update_algo)
create updates for training :param loss: loss for gradient :param params: parameters for update :param update_algo: update algorithm :param learning_rate: learning rate :param momentum: momentum :return: updates
625941b6377c676e91271fcc
def random_translation(min, max, prng=DEFAULT_PRNG): <NEW_LINE> <INDENT> return translation(_random_vector(min, max, prng))
Construct a random 2D translation between min and max. Args min: a 2D vector with the minimum translation for each dimension max: a 2D vector with the maximum translation for each dimension prng: the pseudo-random number generator to use. Returns a homogeneous 3 by 3 translation matrix
625941b696565a6dacc8f4f6
def tsne(X=np.array([]), no_dims=2, initial_dims=50, perplexity=30.0): <NEW_LINE> <INDENT> if isinstance(no_dims, float): <NEW_LINE> <INDENT> print("Error: array X should have type float.") <NEW_LINE> return -1 <NEW_LINE> <DEDENT> if round(no_dims) != no_dims: <NEW_LINE> <INDENT> print("Error: number of dimensions should be an integer.") <NEW_LINE> return -1 <NEW_LINE> <DEDENT> X = pca(X, initial_dims).real <NEW_LINE> (n, d) = X.shape <NEW_LINE> max_iter = 1000 <NEW_LINE> initial_momentum = 0.5 <NEW_LINE> final_momentum = 0.8 <NEW_LINE> eta = 500 <NEW_LINE> min_gain = 0.01 <NEW_LINE> Y = np.random.randn(n, no_dims) <NEW_LINE> dY = np.zeros((n, no_dims)) <NEW_LINE> iY = np.zeros((n, no_dims)) <NEW_LINE> gains = np.ones((n, no_dims)) <NEW_LINE> P = x2p(X, 1e-5, perplexity) <NEW_LINE> P = P + np.transpose(P) <NEW_LINE> P = P / np.sum(P) <NEW_LINE> P = P * 4. <NEW_LINE> P = np.maximum(P, 1e-12) <NEW_LINE> for iter in range(max_iter): <NEW_LINE> <INDENT> sum_Y = np.sum(np.square(Y), 1) <NEW_LINE> num = -2. * np.dot(Y, Y.T) <NEW_LINE> num = 1. / (1. + np.add(np.add(num, sum_Y).T, sum_Y)) <NEW_LINE> num[range(n), range(n)] = 0. <NEW_LINE> Q = num / np.sum(num) <NEW_LINE> Q = np.maximum(Q, 1e-12) <NEW_LINE> PQ = P - Q <NEW_LINE> for i in range(n): <NEW_LINE> <INDENT> dY[i, :] = np.sum(np.tile(PQ[:, i] * num[:, i], (no_dims, 1)).T * (Y[i, :] - Y), 0) <NEW_LINE> <DEDENT> if iter < 20: <NEW_LINE> <INDENT> momentum = initial_momentum <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> momentum = final_momentum <NEW_LINE> <DEDENT> gains = (gains + 0.2) * ((dY > 0.) != (iY > 0.)) + (gains * 0.8) * ((dY > 0.) == (iY > 0.)) <NEW_LINE> gains[gains < min_gain] = min_gain <NEW_LINE> iY = momentum * iY - eta * (gains * dY) <NEW_LINE> Y = Y + iY <NEW_LINE> Y = Y - np.tile(np.mean(Y, 0), (n, 1)) <NEW_LINE> if (iter + 1) % 10 == 0: <NEW_LINE> <INDENT> C = np.sum(P * np.log(P / Q)) <NEW_LINE> print("Iteration %d: error is %f" % (iter + 1, C)) <NEW_LINE> <DEDENT> if iter == 100: <NEW_LINE> <INDENT> P = P / 4. <NEW_LINE> <DEDENT> <DEDENT> return Y
Runs t-SNE on the dataset in the NxD array X to reduce its dimensionality to no_dims dimensions. The syntax of the function is `Y = tsne.tsne(X, no_dims, perplexity)`, where X is an NxD NumPy array.
625941b67b180e01f3dc4628
def GetContentServerAddress(self): <NEW_LINE> <INDENT> _name = self.__cfg.get("contentserver", "name") <NEW_LINE> _ip = self.__cfg.get("contentserver", "ip") <NEW_LINE> _port = self.__cfg.get("contentserver", "port") <NEW_LINE> return _name,_ip, _port
Get the content server address: name, IP and port.
625941b6ac7a0e7691ed3efb
def _create_custom_package_docs(package): <NEW_LINE> <INDENT> project = SphinxProject( CUSTOM_PACKAGES_PATH / package, CUSTOM_PACKAGES_DOCS_PATH / package) <NEW_LINE> if project.project_exists(): <NEW_LINE> <INDENT> logger.log_message( 'Sphinx project already exists for custom' + ' package "{0}".'.format(package)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> project.create('Unknown') <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> logger.log_message( 'An error occurred while creating Sphinx project for ' + 'custom package "{0}".'.format(package)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> logger.log_message( 'Sphinx project has been created for' + ' custom package "{0}".'.format(package))
Create a Sphinx project for a custom package.
625941b632920d7e50b27fed
def RouteChat(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None): <NEW_LINE> <INDENT> raise NotImplementedError()
A Bidirectional streaming RPC. Accepts a stream of RouteNotes sent while a route is being traversed, while receiving other RouteNotes (e.g. from other users).
625941b6187af65679ca4f3e
def getGraph(self, name): <NEW_LINE> <INDENT> for graph in self.mdi.windowList(): <NEW_LINE> <INDENT> if qt.safe_str(graph.caption())== name: <NEW_LINE> <INDENT> return graph <NEW_LINE> <DEDENT> <DEDENT> return None
Return the GraphWindow instance indexed by name, or None if not found
625941b60a50d4780f666cb0
def __init__(self, mesg): <NEW_LINE> <INDENT> super(RBError, self).__init__(mesg) <NEW_LINE> self.mesg = mesg
Create new RBError object with given error message.
625941b6379a373c97cfa96c
def resizeEvent(self, event): <NEW_LINE> <INDENT> self.fitInView(QtCore.QRectF(0, 0, 1264, 714))
This function handles window resize events so that we always see the song rescaled accordingly; to achieve this we redefine the viewer's internal resolution. event: the event that triggered this resize
625941b6e64d504609d74662
def add_control_delete_user(self, user_id): <NEW_LINE> <INDENT> self["@controls"]["flight-booking-system:delete"] = { "href": api.url_for(User, user_id=user_id), "title": "Delete this user", "method": "DELETE" }
Adds the control to delete a user. :param str user_id: The id of the user to remove
625941b67b25080760e3927c
def list_all_rating(self): <NEW_LINE> <INDENT> return self.get_queryset().list_all_rating()
Get all companies with a rating using an optimized query.
625941b6283ffb24f3c5572e
def assert_variables_replaced(paths): <NEW_LINE> <INDENT> for path in paths: <NEW_LINE> <INDENT> if is_binary(path): <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> with open(path, 'r') as f: <NEW_LINE> <INDENT> contents = f.read() <NEW_LINE> <DEDENT> match = RE_OBJ.search(contents) <NEW_LINE> msg = 'cookiecutter variable not replaced in {0} at {1}' <NEW_LINE> assert match is None, msg.format(path, match.start())
Method to check that all paths have correct substitutions.
625941b6090684286d50eb01
def __init__(self, name=None, cube_func=None, coord_values=None, **kwargs): <NEW_LINE> <INDENT> if not (name is None or isinstance(name, basestring)): <NEW_LINE> <INDENT> raise TypeError('name must be None or string, got %r' % name) <NEW_LINE> <DEDENT> if not (cube_func is None or callable(cube_func)): <NEW_LINE> <INDENT> raise TypeError('cube_func must be None or callable, got %r' % cube_func) <NEW_LINE> <DEDENT> if not (coord_values is None or isinstance(coord_values, collections.Mapping)): <NEW_LINE> <INDENT> raise TypeError('coord_values must be None or a collections.Mapping, got %r' % coord_values) <NEW_LINE> <DEDENT> coord_values = coord_values or {} <NEW_LINE> duplicate_keys = coord_values.viewkeys() & kwargs.viewkeys() <NEW_LINE> if duplicate_keys: <NEW_LINE> <INDENT> raise ValueError('Duplicate coordinate conditions specified for: %s' % list(duplicate_keys)) <NEW_LINE> <DEDENT> self._name = name <NEW_LINE> self._cube_func = cube_func <NEW_LINE> self._coord_values = coord_values.copy() <NEW_LINE> self._coord_values.update(kwargs) <NEW_LINE> self._coord_constraints = [] <NEW_LINE> for coord_name, coord_thing in self._coord_values.items(): <NEW_LINE> <INDENT> self._coord_constraints.append(_CoordConstraint(coord_name, coord_thing))
Creates a new instance of a Constraint which can be used for filtering cube loading or cube list extraction. Args: * name: string or None If a string, it is used as the name to match against Cube.name(). * cube_func: callable or None If a callable, it must accept a Cube as its first and only argument and return either True or False. * coord_values: dict or None If a dict, it must map coordinate name to the condition on the associated coordinate. * `**kwargs`: The remaining keyword arguments are converted to coordinate constraints. The name of the argument gives the name of a coordinate, and the value of the argument is the condition to meet on that coordinate:: Constraint(model_level=10) Coordinate level constraints can be of several types: * **string, int or float** - the value of the coordinate to match. e.g. ``model_level=10`` * **list of values** - the possible values that the coordinate may have to match. e.g. ``model_level=[10, 12]`` * **callable** - a function which accepts a :class:`iris.coords.Cell` instance as its first and only argument returning True or False if the value of the Cell is desired. e.g. ``model_level=lambda cell: 5 < cell < 10`` The :ref:`user guide <loading_iris_cubes>` covers much of cube constraining in detail, however an example which uses all of the features of this class is given here for completeness:: Constraint(name='air_potential_temperature', cube_func=lambda cube: cube.units == 'kelvin', coord_values={'latitude':lambda cell: 0 < cell < 90}, model_level=[10, 12]) & Constraint(ensemble_member=2) Constraint filtering is performed at the cell level. For further details on how cell comparisons are performed see :class:`iris.coords.Cell`.
625941b66e29344779a62437
@click.command() <NEW_LINE> def main(): <NEW_LINE> <INDENT> config_path = get_config_path(ensure_exists=True) <NEW_LINE> click.launch(config_path)
Edit the configuration. Launches the config folder for modifying settings.
625941b630c21e258bdfa2be
def get_touched(self): <NEW_LINE> <INDENT> data = SPI.command(CMD_CAP_TOUCHED, returned=2) <NEW_LINE> return (data[0] << 8) | data[1]
Returns the values of the touch registers, each bit corresponds to one electrode. :return: bitmask of the touch registers :rtype: int
625941b64428ac0f6e5ba613
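A sketch of turning the returned bitmask into touched electrode indices (the register value here is hypothetical; the SPI transport belongs to the hardware and is not reproduced):

touched = 0b0000010000000101   # hypothetical register value
electrodes = [i for i in range(12) if touched & (1 << i)]
print(electrodes)              # [0, 2, 10]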
def GetDataType(self): <NEW_LINE> <INDENT> choice = self.__dtype.GetSelection() <NEW_LINE> return self.__dtypeChoices[choice][1]
Returns the currently selected data type setting as a ``numpy.dtype``, one of ``uint8``, ``int16``, ``int32``, ``float32``, or ``float64``.
625941b67047854f462a122f
def random_position(): <NEW_LINE> <INDENT> return _Position( random_number(screen.left, screen.right), random_number(screen.bottom, screen.top) )
Returns a random position on the screen. A position has an `x` and `y` e.g.: position = play.random_position() sprite.x = position.x sprite.y = position.y or equivalently: sprite.go_to(play.random_position())
625941b6d18da76e235322f3
def _create_binary_tree(self): <NEW_LINE> <INDENT> vocab_size = len(self.wv.vocab) <NEW_LINE> logger.info("constructing a huffman tree from %i words" % vocab_size) <NEW_LINE> heap = list(self.wv.vocab.values()) <NEW_LINE> heapq.heapify(heap) <NEW_LINE> for i in range(vocab_size - 1): <NEW_LINE> <INDENT> min1, min2 = heapq.heappop(heap), heapq.heappop(heap) <NEW_LINE> heapq.heappush(heap, Vocab(count=min1.count + min2.count, index=i + vocab_size, left=min1, right=min2)) <NEW_LINE> <DEDENT> if heap: <NEW_LINE> <INDENT> max_depth, stack = 0, [(heap[0], [], [])] <NEW_LINE> while stack: <NEW_LINE> <INDENT> node, codes, points = stack.pop() <NEW_LINE> if node.index < vocab_size: <NEW_LINE> <INDENT> node.code, node.point = codes, points <NEW_LINE> max_depth = max(len(codes), max_depth) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> points = np.array(list(points) + [node.index - vocab_size], dtype=int) <NEW_LINE> stack.append((node.left, np.array(list(codes) + [0], dtype=int), points)) <NEW_LINE> stack.append((node.right, np.array(list(codes) + [1], dtype=int), points)) <NEW_LINE> <DEDENT> <DEDENT> logger.info("built huffman tree with maximum node depth %i" % max_depth)
Create a binary Huffman tree for the hs model using stored vocabulary word counts. Frequent words will have shorter binary codes.
625941b699fddb7c1c9de1b5
def _response(data, code = 200, serialize = True): <NEW_LINE> <INDENT> r = HttpResponse() <NEW_LINE> r.status_code = code <NEW_LINE> if serialize: <NEW_LINE> <INDENT> if isinstance(data, models.query.QuerySet) or (isinstance(data, list) and data and isinstance(data[0], models.query.QuerySet)): <NEW_LINE> <INDENT> data = serializers.serialize('json', data) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> data = json.dumps(data) <NEW_LINE> <DEDENT> <DEDENT> r.content = data <NEW_LINE> r.content_type = 'application/json;charset=utf-8' <NEW_LINE> return r
Return a successful normal HttpResponse (code 200). Serializes by default any object passed.
625941b60a50d4780f666cb1
def get_name(self): <NEW_LINE> <INDENT> return self._name
Gets the nickname
625941b6baa26c4b54cb0f45
def calcDMPlus(self, ohlc): <NEW_LINE> <INDENT> if len(ohlc) < 2: <NEW_LINE> <INDENT> return 0 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> HM = ohlc[-1][1] - ohlc[-2][1] <NEW_LINE> LM = ohlc[-1][2] - ohlc[-2][2] <NEW_LINE> if HM > LM and HM > 0: <NEW_LINE> <INDENT> return HM <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return 0
calcDMPlus(self, ohlc) -> int calculate the current DM+ value Returns ------- DM+ : int the current DM+ value
625941b6eab8aa0e5d26d980
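A worked example on two OHLC bars (each row is [open, high, low, close]; `indicator` stands for an instance of the class defining calcDMPlus):

ohlc = [[10, 12, 9, 11],
        [11, 14, 10, 13]]
# HM = 14 - 12 = 2, LM = 10 - 9 = 1; HM > LM and HM > 0, so DM+ = 2
print(indicator.calcDMPlus(ohlc))   # 2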
def __init__(self): <NEW_LINE> <INDENT> super(Policy, self).__init__()
Initialize the Module
625941b6091ae35668666d88
def show_toast(self, title="Notification", msg="Here comes the message", icon_path=None, duration=5, threaded=False, callback_on_click=None, **kwargs): <NEW_LINE> <INDENT> if not threaded: <NEW_LINE> <INDENT> self._show_toast(title, msg, icon_path, duration, callback_on_click, **kwargs) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if self.notification_active(): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> self._thread = threading.Thread(target=self._show_toast, args=( title, msg, icon_path, duration, callback_on_click ), **kwargs) <NEW_LINE> self._thread.start() <NEW_LINE> <DEDENT> return True
Notification settings. :title: notification title :msg: notification message :icon_path: path to the .ico file to custom notification :duration: delay in seconds before notification self-destruction
625941b6956e5f7376d70c9d
def _post_json(self, url, data, **kw): <NEW_LINE> <INDENT> data = json.dumps(data) <NEW_LINE> headers = kw.pop('headers', {}) <NEW_LINE> headers.setdefault('Content-Type', 'application/json') <NEW_LINE> headers.setdefault('Accept', 'application/json') <NEW_LINE> kw['headers'] = headers <NEW_LINE> kw['data'] = data <NEW_LINE> return self._post(url, **kw)
Makes a POST request, setting Authorization and Content-Type headers by default
625941b6e5267d203edcdac3
def destroy(self, request, pk: str = None): <NEW_LINE> <INDENT> item = get_object_or_404(DownloadedSuite, id=pk) <NEW_LINE> delete_info = item.delete() <NEW_LINE> return Response(json.dumps(delete_info), status=status.HTTP_202_ACCEPTED)
Delete one downloaded suite; its local files will be deleted by a celery crontab task.
625941b6be7bc26dc91cd428
def _generate_episode(self): <NEW_LINE> <INDENT> env_info = self.env.reset(train_mode=True)[self.brain_name] <NEW_LINE> state = torch.Tensor(env_info.vector_observations).cpu() <NEW_LINE> episode = [] <NEW_LINE> episode_rewards = np.zeros(self.num_agents) <NEW_LINE> while True: <NEW_LINE> <INDENT> action, log_prob, value = self.network(state) <NEW_LINE> env_info = self.env.step(action.cpu().detach().numpy())[self.brain_name] <NEW_LINE> reward = env_info.rewards <NEW_LINE> done = np.array(env_info.local_done) <NEW_LINE> episode_rewards += reward <NEW_LINE> episode.append([state, value.detach(), action.detach(), log_prob.detach(), reward, 1 - done]) <NEW_LINE> state = torch.Tensor(env_info.vector_observations).cpu() <NEW_LINE> if np.any(done): <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> <DEDENT> _, _, last_value = self.network(state) <NEW_LINE> episode.append([state, last_value, None, None, None, None]) <NEW_LINE> return episode, last_value, episode_rewards
Generate an episode until reaching a terminal state with any of the parallel agents.
625941b699cbb53fe6792a09
def clean_files(self, matches): <NEW_LINE> <INDENT> notrecognized = [] <NEW_LINE> errors = [] <NEW_LINE> dir_path = self.get_path() <NEW_LINE> for match in matches: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> parsed = self.file_format_cls.load( os.path.join(dir_path, match), ) <NEW_LINE> if not self.file_format_cls.is_valid(parsed): <NEW_LINE> <INDENT> errors.append('%s: %s' % ( match, _('File does not seem to be valid!') )) <NEW_LINE> <DEDENT> <DEDENT> except ValueError: <NEW_LINE> <INDENT> notrecognized.append(match) <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> errors.append('%s: %s' % (match, str(e))) <NEW_LINE> <DEDENT> <DEDENT> if len(notrecognized) > 0: <NEW_LINE> <INDENT> msg = ( _('Format of %d matched files could not be recognized.') % len(notrecognized) ) <NEW_LINE> raise ValidationError('%s\n%s' % ( msg, '\n'.join(notrecognized) )) <NEW_LINE> <DEDENT> if len(errors) > 0: <NEW_LINE> <INDENT> raise ValidationError('%s\n%s' % ( (_('Failed to parse %d matched files!') % len(errors)), '\n'.join(errors) ))
Validates whether we can parse translation files.
625941b6b5575c28eb68de1f
def compact(self): <NEW_LINE> <INDENT> compacted = OrderedDict() <NEW_LINE> for k, v in self._data.items(): <NEW_LINE> <INDENT> for k2, v2 in v.items(): <NEW_LINE> <INDENT> for k3, v3 in v2.items(): <NEW_LINE> <INDENT> if v3 > 0: <NEW_LINE> <INDENT> ( compacted.setdefault(k, {}) .setdefault(k2, Counter()) .update({k3: v3}) ) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> self._data = compacted
Compacts self._data by removing message types with values of zero or less. Links having all of their Counter values with values zero or less will be removed completely as well.
625941b66aa9bd52df036bc4
def hdfs_set_owner(server, username, path, **args): <NEW_LINE> <INDENT> response = _namenode_request(server, username, 'PUT', path, 'SETOWNER', args) <NEW_LINE> content = response.read() <NEW_LINE> _check_code(response.status, content)
Set the owner of a file or directory.
625941b6f548e778e58cd39e
def setUp(self): <NEW_LINE> <INDENT> super(TestModuleI18nService, self).setUp() <NEW_LINE> self.test_language = 'dummy language' <NEW_LINE> self.request = mock.Mock() <NEW_LINE> self.course = CourseFactory.create() <NEW_LINE> self.field_data = mock.Mock() <NEW_LINE> self.descriptor = ItemFactory(category="pure", parent=self.course) <NEW_LINE> self.runtime = _preview_module_system( self.request, self.descriptor, self.field_data, ) <NEW_LINE> self.addCleanup(translation.deactivate)
Setting up tests
625941b630bbd722463cbbe5
@task(check_environment) <NEW_LINE> def rollback_cron(ctx): <NEW_LINE> <INDENT> deployment_name = ctx.config['kubernetes']['apps']['cron']['deployment_name'] <NEW_LINE> rollback(ctx, deployment_name)
Undo a cron deployment
625941b6498bea3a759b98d4
def get_deprovisioning_job_status_with_http_info(self, id, **kwargs): <NEW_LINE> <INDENT> all_params = ['id'] <NEW_LINE> all_params.append('async_req') <NEW_LINE> all_params.append('_return_http_data_only') <NEW_LINE> all_params.append('_preload_content') <NEW_LINE> all_params.append('_request_timeout') <NEW_LINE> params = locals() <NEW_LINE> for key, val in six.iteritems(params['kwargs']): <NEW_LINE> <INDENT> if key not in all_params: <NEW_LINE> <INDENT> raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_deprovisioning_job_status" % key ) <NEW_LINE> <DEDENT> params[key] = val <NEW_LINE> <DEDENT> del params['kwargs'] <NEW_LINE> if ('id' not in params or params['id'] is None): <NEW_LINE> <INDENT> raise ValueError("Missing the required parameter `id` when calling `get_deprovisioning_job_status`") <NEW_LINE> <DEDENT> collection_formats = {} <NEW_LINE> path_params = {} <NEW_LINE> if 'id' in params: <NEW_LINE> <INDENT> path_params['id'] = params['id'] <NEW_LINE> <DEDENT> query_params = [] <NEW_LINE> header_params = {} <NEW_LINE> form_params = [] <NEW_LINE> local_var_files = {} <NEW_LINE> body_params = None <NEW_LINE> header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) <NEW_LINE> auth_settings = ['BasicAuth'] <NEW_LINE> return self.api_client.call_api( '/zero-touch/deprovisioning-jobs/{id}/status', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='JobStatusJson', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
Get the status of the deprovisioning job with the given id # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_deprovisioning_job_status_with_http_info(id, async_req=True) >>> result = thread.get() :param async_req bool :param int id: The id of the deprovisioning job (required) :return: JobStatusJson If the method is called asynchronously, returns the request thread.
625941b691af0d3eaac9b836
def make_bond(self, atom1, atom2): <NEW_LINE> <INDENT> if atom1 == atom2: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> if atom1 not in atom2.bonded_atoms: <NEW_LINE> <INDENT> atom2.bonded_atoms.append(atom1) <NEW_LINE> <DEDENT> if atom2 not in atom1.bonded_atoms: <NEW_LINE> <INDENT> atom1.bonded_atoms.append(atom2) <NEW_LINE> <DEDENT> return
Makes a bond between atom1 and atom2
625941b6de87d2750b85fbb0
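A small usage sketch; the stand-in Atom class (anything carrying a bonded_atoms list) and the mol instance are hypothetical:
>>> class Atom:
...     def __init__(self):
...         self.bonded_atoms = []
>>> a1, a2 = Atom(), Atom()
>>> mol.make_bond(a1, a2)
>>> a1 in a2.bonded_atoms and a2 in a1.bonded_atoms
True
>>> mol.make_bond(a1, a2)
>>> len(a1.bonded_atoms)  # repeated calls never duplicate entries
1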
def move(self, disk_count, source_peg, destination_peg, intermediate_peg, callback=(lambda *args, **kwargs: None)): <NEW_LINE> <INDENT> assert disk_count > 0 <NEW_LINE> if disk_count > 1: <NEW_LINE> <INDENT> self.move(disk_count - 1, source_peg, intermediate_peg, destination_peg, callback) <NEW_LINE> <DEDENT> destination_peg.push(source_peg.pop()) <NEW_LINE> callback([source_peg, destination_peg, intermediate_peg]) <NEW_LINE> if disk_count > 1: <NEW_LINE> <INDENT> self.move(disk_count - 1, intermediate_peg, destination_peg, source_peg, callback)
Moves the specified count of disks from the source peg to the destination peg. :param disk_count: The count of disks to move; must be positive. :type disk_count: int :param source_peg: The peg containing the disks to move. :type source_peg: Peg :param destination_peg: The peg to which the disks will be moved. :type destination_peg: Peg :param intermediate_peg: The peg to be used to facilitate the move according to the game rules. :type intermediate_peg: Peg :param callback: The optional callback to be invoked *after* each disk is moved. The callback will receive a sequence of all pegs in no particular order.
625941b61f5feb6acb0c4977
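A usage sketch; Peg is assumed to be stack-like (push/pop at the top), so a thin list subclass stands in for it, and solver is whatever object carries move():
>>> class Peg(list):
...     push = list.append
>>> src, dst, tmp = Peg([3, 2, 1]), Peg(), Peg()
>>> moves = []
>>> solver.move(3, src, dst, tmp, callback=lambda pegs: moves.append(1))
>>> list(dst)
[3, 2, 1]
>>> len(moves)  # n disks always take 2**n - 1 single-disk moves
7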
def _calc_checksum(self, secret): <NEW_LINE> <INDENT> return str_to_uascii( hashlib.sha256(mysql_aes_encrypt(self.salt, secret)).hexdigest() )
Calculate the checksum string. :param secret: The secret key. :returns: The checksum.
625941b6be8e80087fb20a71
def logger(name=__name__, output=None, uuid=False, timestamp=False): <NEW_LINE> <INDENT> processors = [] <NEW_LINE> if output == 'json': <NEW_LINE> <INDENT> processors.append(structlog.processors.JSONRenderer()) <NEW_LINE> <DEDENT> if uuid: <NEW_LINE> <INDENT> processors.append(add_unique_id) <NEW_LINE> <DEDENT> if timestamp: <NEW_LINE> <INDENT> processors.append(add_timestamp) <NEW_LINE> <DEDENT> return structlog.wrap_logger( logbook.Logger(name), processors=processors )
Configure and return a new logger for hivy modules
625941b6d486a94d0b98df71
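A usage sketch, assuming add_unique_id and add_timestamp are structlog processors defined elsewhere in the same module:
>>> log = logger(__name__, output='json', uuid=True, timestamp=True)
>>> log.info('service started', port=8080)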
def select_frequent_set(self, chains): <NEW_LINE> <INDENT> for c in chains: <NEW_LINE> <INDENT> for item, count in chains[c]: <NEW_LINE> <INDENT> depth = max(count) <NEW_LINE> max_freq_item = tuple((i, item[i]) for i in range(len(count)) if count[i] == depth) <NEW_LINE> if depth and 0 < len(max_freq_item) <= self.max_size: <NEW_LINE> <INDENT> self.select_frequent_subset(max_freq_item, c, chains)
Extract frequent itemsets from chains and calculate their frequency. The results are stored in self.frequent_set. Args: chains: a list of (value, count).
625941b6d7e4931a7ee9dd3e
def __repr__(self): <NEW_LINE> <INDENT> return (f'<Koekje({self.id}, {self.count}, {self.author},' f' {self.created_at})>')
Represent as <Koekje(id, count, author, created_at)>.
625941b61f037a2d8b946021
def get_info(self): <NEW_LINE> <INDENT> self.browser.get(self.url.format(1)) <NEW_LINE> try: <NEW_LINE> <INDENT> all_page_num = self.browser.find_elements_by_class_name("page")[-1].text <NEW_LINE> end = int(all_page_num.strip()) + 1 <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> logging.error("Failed to get the total page count; exiting") <NEW_LINE> sys.exit() <NEW_LINE> <DEDENT> for i in range(1, end): <NEW_LINE> <INDENT> url = self.url.format(i) <NEW_LINE> self.browser.get(url) <NEW_LINE> self.my_wait("list_dl") <NEW_LINE> dls = self.browser.find_elements_by_class_name("list_dl") <NEW_LINE> for dl in dls: <NEW_LINE> <INDENT> dl_list = dl.text.split('\n') <NEW_LINE> if len(dl_list) == 7: <NEW_LINE> <INDENT> href = dl.find_elements_by_tag_name("a")[0] <NEW_LINE> dl_list.append(href.get_attribute('href')) <NEW_LINE> dl_list.append(url) <NEW_LINE> yield dl_list
Parse the HTML.
625941b69f2886367277a6b4
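A usage sketch; the Spider class name is hypothetical — any object providing self.browser, self.url and my_wait() in this shape would do. Each yielded row holds the seven text fields plus the detail link and the page URL:
>>> spider = Spider()
>>> for row in spider.get_info():
...     detail_href, page_url = row[-2], row[-1]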
def response_to_twill(self, response): <NEW_LINE> <INDENT> path = response.request.get('PATH_INFO') <NEW_LINE> url = path and SITE + path.lstrip('/') or path <NEW_LINE> headers_msg = '\n'.join('%s: %s' % (k, v) for k, v in response.items()) <NEW_LINE> headers_msg = StringIO(headers_msg) <NEW_LINE> headers = httplib.HTTPMessage(headers_msg) <NEW_LINE> io_response = StringIO(response.content) <NEW_LINE> urllib_response = addinfourl(io_response, headers, url, response.status_code) <NEW_LINE> urllib_response._headers = headers <NEW_LINE> urllib_response._url = url <NEW_LINE> urllib_response.msg = u'OK' <NEW_LINE> urllib_response.seek = urllib_response.fp.seek <NEW_LINE> self.get_browser()._browser._set_response(urllib_response, False) <NEW_LINE> self.get_browser().result = ResultWrapper(response.status_code, url, response.content) <NEW_LINE> self._apply_xhtml()
Wrap Django response to work with Twill.
625941b6d164cc6175782b70
def create_homepage_section_with_http_info(self, **kwargs): <NEW_LINE> <INDENT> all_params = ['body', 'fields'] <NEW_LINE> all_params.append('callback') <NEW_LINE> all_params.append('_return_http_data_only') <NEW_LINE> all_params.append('_preload_content') <NEW_LINE> all_params.append('_request_timeout') <NEW_LINE> params = locals() <NEW_LINE> for key, val in iteritems(params['kwargs']): <NEW_LINE> <INDENT> if key not in all_params: <NEW_LINE> <INDENT> raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_homepage_section" % key ) <NEW_LINE> <DEDENT> params[key] = val <NEW_LINE> <DEDENT> del params['kwargs'] <NEW_LINE> collection_formats = {} <NEW_LINE> resource_path = '/homepage_sections'.replace('{format}', 'json') <NEW_LINE> path_params = {} <NEW_LINE> query_params = {} <NEW_LINE> if 'fields' in params: <NEW_LINE> <INDENT> query_params['fields'] = params['fields'] <NEW_LINE> <DEDENT> header_params = {} <NEW_LINE> form_params = [] <NEW_LINE> local_var_files = {} <NEW_LINE> body_params = None <NEW_LINE> if 'body' in params: <NEW_LINE> <INDENT> body_params = params['body'] <NEW_LINE> <DEDENT> header_params['Accept'] = self.api_client.select_header_accept(['application/json']) <NEW_LINE> header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json']) <NEW_LINE> auth_settings = [] <NEW_LINE> return self.api_client.call_api(resource_path, 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='HomepageSection', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
Create Homepage section ### Create a new homepage section. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.create_homepage_section_with_http_info(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param HomepageSection body: Homepage section :param str fields: Requested fields. :return: HomepageSection If the method is called asynchronously, returns the request thread.
625941b6293b9510aa2c30bc
def is_nonnegative_integer_equivalent_number(expr): <NEW_LINE> <INDENT> from abjad.tools import mathtools <NEW_LINE> return mathtools.is_integer_equivalent_number(expr) and 0 <= expr
Is true when `expr` is a nonnegative integer-equivalent number. Otherwise false: :: >>> mathtools.is_nonnegative_integer_equivalent_number(Duration(4, 2)) True Returns boolean.
625941b615fb5d323cde092c
def p_template_param_list2 (self, p): <NEW_LINE> <INDENT> p[1].append(p[3]) <NEW_LINE> p[0] = p[1]
template_param_list : template_param_list COMMA template_param
625941b66aa9bd52df036bc5
def __init__( self, De, Di, y0for1, populations, gamma_func_kwargs, Pmn_func_kwargs, alpha_I, alpha_E, protect=False, protect_args=None, score_type="mse" ): <NEW_LINE> <INDENT> super().__init__( De, Di, y0for1, populations, gamma_func_kwargs, Pmn_func_kwargs, alpha_I, alpha_E, protect, protect_args, score_type ) <NEW_LINE> if self.protect and self.protect_args is None: <NEW_LINE> <INDENT> raise ValueError("protect_args must not be None!") <NEW_LINE> <DEDENT> self.gamma_func = utils.GammaFunc1(**self.gamma_func_kwargs) <NEW_LINE> self.Pmn_func = utils.PmnFunc(**self.Pmn_func_kwargs) <NEW_LINE> self.num_regions = len(self.populations) <NEW_LINE> self.theta = 1 / self.De <NEW_LINE> self.beta = 1 / self.Di <NEW_LINE> I0 = np.zeros(self.num_regions) <NEW_LINE> I0[0] = self.y0for1 <NEW_LINE> self.y0 = np.r_[ self.populations - I0, np.zeros(self.num_regions), I0, np.zeros(self.num_regions) ] <NEW_LINE> self.I_flow_func = lambda t: t < self.protect_args["t0"]
An SEIR model based on a complex population-migration network. Arguments: De {float} -- mean incubation period Di {float} -- mean infectious period y0for1 {float} -- number of infected (I) individuals in region 1 at time t0 populations {ndarray} -- population of each region gamma_func_kwargs {dict} -- parameters for instantiating the GammaFunc function, describing how the population outflow ratio changes over time Pmn_func_kwargs {dict} -- parameters for instantiating the PmnFunc function, describing how the migration rates from each region to the other regions change alpha_I {float} -- transmission-rate coefficient of infectious individuals alpha_E {float} -- transmission-rate coefficient of exposed individuals Keyword Arguments: protect {bool} -- whether to include control measures (default: {False}) protect_args {dict} -- parameters required by the control-measure function, other than the time t (default: {None}) score_type {str} -- type of score to use, mse or nll (default: {"mse"}) Raises: ValueError: [description] ValueError: [description] NotImplementedError: [description] Returns: NetSEIR instance -- used for subsequent fitting and prediction
625941b6442bda511e8be249
@mock.patch('time.sleep') <NEW_LINE> @mock.patch('library.cv_server_provision.switch_info') <NEW_LINE> def test_configlet_update_task_no_task(mock_info, mock_sleep): <NEW_LINE> <INDENT> module = mock.Mock() <NEW_LINE> mock_info.side_effect = [dict(), dict(), dict()] <NEW_LINE> result = cv_server_provision.configlet_update_task(module) <NEW_LINE> assert_is_none(result) <NEW_LINE> assert_equals(mock_sleep.call_count, 3) <NEW_LINE> assert_equals(mock_info.call_count, 3)
Test configlet_update_task does not get task after three tries.
625941b631939e2706e4cc93
def clsfy_triangle(side1, side2, side3): <NEW_LINE> <INDENT> if isinstance(side1, int) and isinstance(side2, int) and isinstance(side3, int) and side1 > 0 and side2 > 0 and side3 > 0: <NEW_LINE> <INDENT> if side1 + side2 > side3 and side2 + side3 > side1 and side1 + side3 > side2: <NEW_LINE> <INDENT> if side1 == side2 == side3: <NEW_LINE> <INDENT> return "Equilateral" <NEW_LINE> <DEDENT> elif side1 == side2 or side1 == side3 or side2 == side3: <NEW_LINE> <INDENT> return "Isosceles" <NEW_LINE> <DEDENT> elif side1*side1 + side2*side2 == side3*side3 or side1*side1 + side3*side3 == side2*side2 or side2*side2 + side3*side3 == side1*side1: <NEW_LINE> <INDENT> return "Right" <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return "Scalene" <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> return "NotATriangle" <NEW_LINE> <DEDENT> <DEDENT> return "InvalidInput"
:param side1: int -- length of the first side :param side2: int -- length of the second side :param side3: int -- length of the third side :return: string -- one of 'Equilateral', 'Isosceles', 'Right', 'Scalene', 'NotATriangle' or 'InvalidInput'
625941b666656f66f7cbbfcd
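Doctest-style examples covering each branch:
>>> clsfy_triangle(3, 3, 3)
'Equilateral'
>>> clsfy_triangle(5, 5, 8)
'Isosceles'
>>> clsfy_triangle(3, 4, 5)
'Right'
>>> clsfy_triangle(4, 5, 6)
'Scalene'
>>> clsfy_triangle(1, 2, 10)
'NotATriangle'
>>> clsfy_triangle(0, 4, 5)
'InvalidInput'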
def patch_RPCManager(mocker) -> MagicMock: <NEW_LINE> <INDENT> mocker.patch('freqtrade.freqtradebot.RPCManager._init', MagicMock()) <NEW_LINE> rpc_mock = mocker.patch('freqtrade.freqtradebot.RPCManager.send_msg', MagicMock()) <NEW_LINE> return rpc_mock
This function mocks the RPC manager to avoid repeating this code in almost every test :param mocker: mocker to patch the RPCManager class :return: RPCManager.send_msg MagicMock to track if this method is called
625941b676e4537e8c35149a
def train_model(data, model, learning_rate=0.001, batch_size=16, num_steps=1000, shuffle=True): <NEW_LINE> <INDENT> copy_dataset = np.concatenate((data['image'], data['label']), axis=1) <NEW_LINE> for i in range(num_steps): <NEW_LINE> <INDENT> if shuffle: <NEW_LINE> <INDENT> np.random.shuffle(copy_dataset) <NEW_LINE> <DEDENT> number_example = len(copy_dataset) <NEW_LINE> number_batch = number_example // batch_size <NEW_LINE> if (number_example % batch_size != 0): <NEW_LINE> <INDENT> number_batch += 1 <NEW_LINE> <DEDENT> batches = np.array_split(copy_dataset, number_batch) <NEW_LINE> idx = randint(0, number_batch - 1) <NEW_LINE> select_batch = batches[idx] <NEW_LINE> batch_x = select_batch[:, 0:-1] <NEW_LINE> batch_y = select_batch[:, -1] <NEW_LINE> batch_y = batch_y.reshape(batch_y.shape[0], 1) <NEW_LINE> update_step(batch_x, batch_y, model, learning_rate) <NEW_LINE> <DEDENT> return model
Implements the training loop of stochastic gradient descent. Performs stochastic gradient descent with the indicated batch_size. If shuffle is true: Shuffle data at every epoch, including the 0th epoch. If the number of examples is not divisible by batch_size, the last batch will simply be the remaining examples. Args: data(dict): Data from utils.data_tools.preprocess_data. model(LinearModel): Initialized linear model. learning_rate(float): Learning rate of your choice batch_size(int): Batch size of your choice. num_steps(int): Number of steps to run the update. shuffle(bool): Whether to shuffle data at every epoch. Returns: model(LinearModel): Returns a trained model.
625941b6de87d2750b85fbb1
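A usage sketch on toy data; LinearModel and the update_step helper are assumed to come from the surrounding codebase, and the constructor signature is hypothetical:
>>> data = {'image': np.random.rand(100, 8),
...         'label': np.random.randint(0, 2, size=(100, 1))}
>>> model = LinearModel(ndims=8)
>>> model = train_model(data, model, learning_rate=0.01,
...                     batch_size=16, num_steps=500)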
def isEquivalent(*args, **kwargs): <NEW_LINE> <INDENT> pass
Test for equivalence of two matrices, within a tolerance.
625941b626238365f5f0ec8c
def merge_tree( cwd, ref1, ref2, base=None, user=None, password=None, ignore_retcode=False, output_encoding=None, ): <NEW_LINE> <INDENT> cwd = _expand_path(cwd, user) <NEW_LINE> command = ["git", "merge-tree"] <NEW_LINE> if base is None: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> base = merge_base(cwd, refs=[ref1, ref2], output_encoding=output_encoding) <NEW_LINE> <DEDENT> except (SaltInvocationError, CommandExecutionError): <NEW_LINE> <INDENT> raise CommandExecutionError( "Unable to determine merge base for {0} and {1}".format(ref1, ref2) ) <NEW_LINE> <DEDENT> <DEDENT> command.extend([base, ref1, ref2]) <NEW_LINE> return _git_run( command, cwd=cwd, user=user, password=password, ignore_retcode=ignore_retcode, output_encoding=output_encoding, )["stdout"]
.. versionadded:: 2015.8.0 Interface to `git-merge-tree(1)`_, shows the merge results and conflicts from a 3-way merge without touching the index. cwd The path to the git checkout ref1 First ref/commit to compare ref2 Second ref/commit to compare base The base tree to use for the 3-way-merge. If not provided, then :py:func:`git.merge_base <salt.modules.git.merge_base>` will be invoked on ``ref1`` and ``ref2`` to determine the merge base to use. user User under which to run the git command. By default, the command is run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 ignore_retcode : False If ``True``, do not log an error to the minion log if the git command returns a nonzero exit status. output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 .. _`git-merge-tree(1)`: http://git-scm.com/docs/git-merge-tree CLI Examples: .. code-block:: bash salt myminion git.merge_tree /path/to/repo HEAD upstream/dev salt myminion git.merge_tree /path/to/repo HEAD upstream/dev base=aaf3c3d
625941b6e5267d203edcdac4
def crop_and_resize(image, boxes, size): <NEW_LINE> <INDENT> box_ind = keras.backend.zeros_like(boxes, "int32") <NEW_LINE> box_ind = box_ind[..., 0] <NEW_LINE> box_ind = keras.backend.reshape(box_ind, [-1]) <NEW_LINE> boxes = keras.backend.reshape(boxes, [-1, 4]) <NEW_LINE> return tensorflow.image.crop_and_resize(image, boxes, box_ind, size)
Crop the image given boxes and resize with bilinear interpolation. # Parameters image: Input image of shape (1, image_height, image_width, depth) boxes: Regions of interest of shape (1, num_boxes, 4), each row [y1, x1, y2, x2] size: Fixed size [h, w], e.g. [7, 7], for the output slices. # Returns 4D Tensor (number of regions, slice_height, slice_width, channels)
625941b6cc40096d61595776
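A usage sketch; boxes use the normalized [y1, x1, y2, x2] coordinates expected by tf.image.crop_and_resize, and the placeholder style assumes a TF 1.x graph-mode backend:
>>> image = keras.backend.placeholder((1, 224, 224, 3))
>>> boxes = keras.backend.constant([[[0.1, 0.1, 0.5, 0.5]]])
>>> slices = crop_and_resize(image, boxes, size=[7, 7])  # -> shape (1, 7, 7, 3)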
def test_ap_wpa2_psk_ext_key_id_ptk_rekey_sta0(dev, apdev): <NEW_LINE> <INDENT> run_ap_wpa2_psk_ext_key_id_ptk_rekey_sta(dev, apdev, 0)
Extended Key ID and PTK rekey by station (Ext Key ID disabled on AP)
625941b61d351010ab855941