Dataset columns:
  code       string (lengths 4 – 4.48k)
  docstring  string (lengths 1 – 6.45k)
  _id        string (lengths 24 – 24)
def receive():
    while True:
        try:
            msg = client_socket.recv(BUFSIZ).decode("utf8")
            msg_list.insert(tkinter.END, msg)
        except OSError:
            break
Handle receiving of messages.
625941b556ac1b37e6263fe5
@login_required
def start_process_view(request):
    next_users = None
    current_user_position_id = request.user.position_id
    if current_user_position_id < 3:
        next_users = MyUser.objects.filter(position_id=current_user_position_id + 1).all()
    if next_users is None:
        messages.add_message(request, messages.INFO, "You need not create process!")
        return HttpResponseRedirect(reverse('OAsysterm:index'))
    else:
        if request.method == 'POST':
            contets_md = markdown.markdown(request.POST.get('contents'))
            Processes.objects.create(
                theme=request.POST.get('theme'),
                contents=contets_md,
                next_approver=request.POST.get('nextuser'),
                level=request.POST.get('level'),
                author=request.user
            )
            return HttpResponseRedirect(reverse('OAsysterm:index'))
    return render(request, template_name='main/startprocess.html', context={'next_users': next_users})
Start a process. There are two cases: if the current user's position is Boss, no process needs to be started; otherwise, starting a process requires selecting the next user who is one position level above the current user.
625941b515fb5d323cde090d
def validate_authorization_request(self):
    return validate_code_authorization_request(self)
The client constructs the request URI by adding the following parameters to the query component of the authorization endpoint URI using the "application/x-www-form-urlencoded" format. Per `Section 4.1.1`_.

response_type
    REQUIRED. Value MUST be set to "code".
client_id
    REQUIRED. The client identifier as described in Section 2.2.
redirect_uri
    OPTIONAL. As described in Section 3.1.2.
scope
    OPTIONAL. The scope of the access request as described by Section 3.3.
state
    RECOMMENDED. An opaque value used by the client to maintain state between the request and callback. The authorization server includes this value when redirecting the user-agent back to the client. The parameter SHOULD be used for preventing cross-site request forgery as described in Section 10.12.

The client directs the resource owner to the constructed URI using an HTTP redirection response, or by other means available to it via the user-agent. For example, the client directs the user-agent to make the following HTTP request using TLS (with extra line breaks for display purposes only):

.. code-block:: http

    GET /authorize?response_type=code&client_id=s6BhdRkqt3&state=xyz
        &redirect_uri=https%3A%2F%2Fclient%2Eexample%2Ecom%2Fcb HTTP/1.1
    Host: server.example.com

The authorization server validates the request to ensure that all required parameters are present and valid. If the request is valid, the authorization server authenticates the resource owner and obtains an authorization decision (by asking the resource owner or by establishing approval via other means).

.. _`Section 4.1.1`: https://tools.ietf.org/html/rfc6749#section-4.1.1
625941b5e8904600ed9f1d2d
def make_instance(self, include_optional):
    if include_optional:
        return StatusGroupInnerResult(
            status='None',
            total=56
        )
    else:
        return StatusGroupInnerResult()
Test StatusGroupInnerResult. include_optional is a boolean; when False only required params are included, when True both required and optional params are included.
625941b5462c4b4f79d1d4d5
def delete_project(self, id):
    url = "{}/api/projects/{}".format(self.base_url, id)
    response = self.client.delete(url)
    return response
DELETE /api/projects/<id>
625941b5009cb60464c631c2
def _block_index_(ii, block_number, length, mode='sequential'):
    block_length = int(numpy.round(length / block_number))
    index = numpy.arange(length)
    if (mode == 'sequential') | (mode is None):
        pass
    elif mode == 'random':
        random.shuffle(index)
    elif mode == 'equidistant':
        index = numpy.mod(numpy.arange(length) * block_length, length)
    else:
        raise ValueError('Indexer type not recognized! Use: sequential/random/equidistant')
    first = ii * block_length
    last = min((length + 1, (ii + 1) * block_length))
    return index[first:last]
Create a slice for a projection block
625941b55e10d32532c5ed34
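A minimal usage sketch for the block indexer above, assuming `numpy`, the standard `random` module, and the `_block_index_` function are already in scope as in the source:

    # With length=10 and block_number=3, block_length rounds to 3;
    # block ii=1 therefore covers indices 3..5 in sequential mode.
    idx = _block_index_(1, block_number=3, length=10, mode='sequential')
    print(idx)  # [3 4 5]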
def displayable(inputstr, strip=True):
    return clean(str(inputstr), strip, rem_indents=False, encoding_errors="backslashreplace")
Make a string displayable with minimal loss of information.
625941b566656f66f7cbbfaf
def findKey(lis, key):
    ret = []
    for (fst, snd, trd) in lis:
        if fst == key or snd == key:
            ret.append(trd)
    return ret
In a list of triples, searches for key as the first or second element and returns the third element.
625941b5b7558d58953c4d21
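A quick example of the lookup above, using hypothetical data:

    triples = [(1, 'a', 'x'), (2, 'b', 'y'), ('a', 3, 'z')]
    print(findKey(triples, 'a'))  # ['x', 'z'] -- matched as second, then first element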
@pytest.mark.skip(reason="Now seems to raise TypeError: readonly attribute?")
def test_printer_none_encoding(monkeypatch):
    monkeypatch.setattr("sys.stdout.encoding", None)
    p = Printer()
Test that printer works even if sys.stdout.encoding is set to None. This previously caused a very confusing error.
625941b596565a6dacc8f4da
def load_model_weights(self, model_name, weights_file):
    net = getattr(self, f"net_{model_name}")
    print(f"loading the model {model_name} from {weights_file}")
    state_dict = torch.load(weights_file, map_location=self.device)
    if hasattr(state_dict, "_metadata"):
        del state_dict._metadata
    net.load_state_dict(state_dict)
    return self
Loads the weights for a single model Args: model_name: name of the model to load parameters into weights_file: path to weights file
625941b59c8ee82313fbb57a
def do_cluster_state(self, args):
    params = (
        ('pretty', ''),
    )
    try:
        response = requests.get('http://{}:{}/_cluster/state'.format(self.host, self.port), params=params)
        if not response:
            print(response.status_code)
            sys.stdout.buffer.write(response.content)
        else:
            sys.stdout.buffer.write(response.content)
    except HTTPError as http_err:
        print('HTTP error occurred: {}'.format(http_err))
    except Exception as err:
        print("Other error occurred: {}".format(err))
Show cluster state
625941b515fb5d323cde090e
def clone(self, table_restrictions=None, clone_factory=None):
    clone_factory = clone_factory or PanDatFactory
    from ticdat import TicDatFactory
    no_copy_predicate_kwargs_maker = clone_factory == TicDatFactory
    if hasattr(clone_factory, "create_from_full_schema"):
        clone_factory = clone_factory.create_from_full_schema
    full_schema = utils.clone_a_anchillary_info_schema(self.schema(include_ancillary_info=True),
                                                       table_restrictions)
    rtn = clone_factory(full_schema)
    for tbl, row_predicates in self._data_row_predicates.items():
        if table_restrictions is None or tbl in table_restrictions:
            for pn, rpi in row_predicates.items():
                if not (rpi.predicate_kwargs_maker and no_copy_predicate_kwargs_maker):
                    rtn.add_data_row_predicate(tbl, predicate=rpi.predicate, predicate_name=pn,
                                               predicate_kwargs_maker=rpi.predicate_kwargs_maker,
                                               predicate_failure_response=rpi.predicate_failure_response)
    return rtn
clones the PanDatFactory :param table_restrictions : if None, then argument is ignored. Otherwise, a container listing the tables to keep in the clone. Tables outside table_restrictions are removed from the clone. :param clone_factory : optional. Defaults to PanDatFactory. Can also be TicDatFactory. Can also be a function, in which case it should behave similarly to create_from_full_schema. If clone_factory=TicDatFactory, the row predicates that use predicate_kwargs_maker won't be copied over. :return: a clone of the PanDatFactory. Returned object will be based on clone_factory, if provided.
625941b5283ffb24f3c55712
def action(a, b):
    return multVect(a, b)
This function returns the action of a matrix on a vector :param a: Vector :param b: Matrix :return: Array
625941b6091ae35668666d6b
def create_mgmt_port(self, tenant_id, hostname, mgmt_sec_grp):
    mgmt_net_id = cfg.CONF.lbaas_settings.management_network
    neutron = self.get_neutron_client()
    port = {
        "port": {
            "admin_state_up": True,
            "name": "mgmt-%s" % hostname,
            "network_id": mgmt_net_id,
            "security_groups": [mgmt_sec_grp],
            "tenant_id": tenant_id
        }
    }
    mgmt_port = neutron.create_port(port)
    return mgmt_port
Creates a port for management traffic.
625941b6b5575c28eb68de02
def setUp(self):
    self.config_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), '..', '..', 'data', 'client_tests'))
    if os.path.isdir(self.config_dir):
        shutil.rmtree(os.path.join(self.config_dir))
    os.makedirs(os.path.join(self.config_dir))
    config_data_file = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'data', 'resourceclient.conf')
    self.server_bin = os.path.abspath(
        os.path.join(os.path.dirname(__file__), '..', '..', '..', 'bin', 'sb-resourceserver'))
    resource_config = _create_resource_configs()
    _create_server_config(self.config_dir, resource_config)
    config_file = _create_client_config(self.config_dir)
    self.config = Config()
    self.config.init_default_config(config_data_file)
    self.config.load(config_file)
Set up the test case. Nothing to do yet.
625941b6d18da76e235322d6
def build_record_sample_form(app_name):
    sample_information = {}
    sample_information['species'] = get_species()
    sample_information['lab_requested'] = get_lab_requested()
    sample_information['sampleType'] = get_sample_type(app_name)
    sample_information['sample_project'] = get_defined_sample_projects(app_name)
    sample_information['sample_project'].insert(0, 'None')
    return sample_information
Description: The function collects the stored information on species, sample origin and sample type for use in the selected form. Input: app_name. Functions: get_species, get_lab_requested, and get_sample_type, all located in this file. Variables: sample_information: dictionary to collect the information. Return: sample_information
625941b6796e427e537b03c7
def make_tree_dag(self, request):
    with transaction.atomic():
        assert self.path_type == LP_SEQUENCE
        self.select_for_update()
        nodes = self.get_ordered_nodes()
        edges = PathEdge.objects.filter(parent__path=self)
        for edge in edges:
            edge.delete()
        self.path_type = LP_DAG
        self.save()
        root = nodes[0]
        max_order = 0
        for node in nodes[1:]:
            max_order += 10
            self.add_edge(root, node, request, order=max_order)
        return root
convert from LP_COLLECTION to LP_DAG, making all other nodes children of the root node and adding explicit ordering to edges
625941b62c8b7c6e89b355c9
def _show_menu(self, actions, menu_args):
    if not actions:
        return
    for i in (0, -1):
        if actions[i] is None:
            actions.pop(i)
    if not actions:
        return
    menu = self._temp_menu = gtk.Menu()
    f = lambda widget, cb, *args: cb(*args)
    for x in actions:
        if x is None:
            item = gtk.SeparatorMenuItem()
        else:
            name, tooltip, cb, *cb_args = x
            item = gtk.ImageMenuItem(name)
            if name.startswith('gtk-'):
                item.set_use_stock(True)
            elif '_' in name:
                item.set_use_underline(True)
            if tooltip is not None:
                item.set_tooltip_text(tooltip)
            item.connect('activate', f, cb, *cb_args)
        menu.append(item)
    menu.show_all()
    menu.popup(*menu_args)
Create and display a popup menu.
625941b666673b3332b91e9c
def Ts(Ref, Tmin=None, Tmax=None, show=False, axis=None, *args, **kwargs):
    plt = PropsPlot(Ref, 'Ts', smin=Tmin, smax=Tmax, axis=axis, *args, **kwargs)
    plt._draw_graph()
    if show:
        plt.show()
    else:
        plt._draw_graph()
    return plt.axis
Deprecated. Use :py:func:`CoolProps.Plots.PropsPlot`
625941b6fb3f5b602dac3494
def encrypted_dict(self):
    keyword_encrypt_dict = {letter: keyletter
                            for letter, keyletter in zip(self.alphabet_list, self.encrypted_list)}
    return keyword_encrypt_dict
Dict comprehension so the dict's values and keys can be entered easily.
625941b615baa723493c3d77
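A hedged sketch of how the method above could be exercised; the `Cipher` holder is a hypothetical stand-in for the class the snippet belongs to, providing only the two attributes the method zips together:

    class Cipher:
        def __init__(self, alphabet_list, encrypted_list):
            self.alphabet_list = alphabet_list      # plaintext alphabet
            self.encrypted_list = encrypted_list    # keyword-shuffled alphabet

        encrypted_dict = encrypted_dict  # borrow the method defined above

    c = Cipher(list('abc'), list('xyz'))
    print(c.encrypted_dict())  # {'a': 'x', 'b': 'y', 'c': 'z'}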
def sched_move(self, track):
    assert (not self.busy)
    self.busy = True
    self.cart.start_moving(track.dst)
    s.enter(track.cost, 0, self.perform_move)
schedule a move
625941b6498bea3a759b98b8
def __init__(self, ent):
    self.ent = ent
Creation.
625941b6507cdc57c6306ad8
def make_query_dict_interactively():
    query_dict = make_query_dict()
    query_dict["all"] = []
    query_dict["exact"] = []
    query_dict["none"] = []
    query_dict["language"] = []
    query_dict["region"] = []
    query_dict["date"] = []
    query_dict["site"] = []
    return query_dict
Fill a query dictionary asking questions to the user.
625941b621a7993f00bc7aee
def create_unit_procedure(
        self, name, start_batch_tag, stop_batch_tag, order, start_rule, stop_rule, asset_list):
    from quartic_sdk.core.entity_helpers.entity_factory import EntityFactory
    try:
        start_rule.validate_rule_raw_json()
        stop_rule.validate_rule_raw_json()
        unit_procedure_request_body = {
            "name": name,
            "start_batch_tag": start_batch_tag.id,
            "stop_batch_tag": stop_batch_tag.id,
            "step_type": self.UNIT_PROCEDURE,
            "procedure": self.id,
            "order": order,
            "start_rule": start_rule.rule_schema(),
            "stop_rule": stop_rule.rule_schema(),
            "assets": asset_list
        }
        unit_procedure_creation_response = self.api_helper.call_api(
            Constants.PROCEDURE_STEPS,
            method_type=Constants.API_POST,
            body=unit_procedure_request_body
        ).json()
        return EntityFactory(Constants.PROCEDURE_STEP_ENTITY, unit_procedure_creation_response, self.api_helper)
    except HTTPError as exception:
        raise Exception(f'Exception in creating Unit Procedure: {exception.response.content.decode()}')
This method is used to create UnitProcedure inside a particular Procedure :param name: UnitProcedure Name :param start_batch_tag: Tag Object :param stop_batch_tag: Tag Object :param order: sequence in which we want to add child nodes inside parent(procedure) node :param start_rule: Rule (Util Class) Object :param stop_rule: Rule (Util Class) Object :param asset_list: List containing asset ids :return: UnitProcedure(ProcedureStep Entity) Object
625941b6dc8b845886cb533a
def columnarize(instances):
    columns = collections.defaultdict(list)
    for instance in instances:
        for k, v in six.iteritems(instance):
            columns[k].append(v)
    return columns
Columnarize inputs. Each line in the input is a dictionary of input names to the value for that input (a single instance). For each input "column", this method appends each of the input values to a list. The result is a dict mapping input names to a batch of input data. This can be directly used as the feed dict during prediction. For example,

    instances = [{"a": [1.0, 2.0], "b": "a"},
                 {"a": [3.0, 4.0], "b": "c"},
                 {"a": [5.0, 6.0], "b": "e"}]
    batch = prediction_server_lib.columnarize(instances)
    assert batch == {"a": [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
                     "b": ["a", "c", "e"]}

Arguments: instances: (list of dict) where the dictionaries map input names to the values for those inputs. Returns: A dictionary mapping input names to values, as described above.
625941b60fa83653e4656dc4
def number_class(self, context=None):
    if self.is_snan():
        return 'sNaN'
    else:
        if self.is_qnan():
            return 'NaN'
        inf = self._isinfinity()
        if inf == 1:
            return '+Infinity'
        if inf == -1:
            return '-Infinity'
        if self.is_zero():
            if self._sign:
                return '-Zero'
            else:
                return '+Zero'
        if context is None:
            context = getcontext()
        if self.is_subnormal(context=context):
            if self._sign:
                return '-Subnormal'
            else:
                return '+Subnormal'
        if self._sign:
            return '-Normal'
        return '+Normal'
        return
Returns an indication of the class of self. The class is one of the following strings: sNaN NaN -Infinity -Normal -Subnormal -Zero +Zero +Subnormal +Normal +Infinity
625941b65fc7496912cc378b
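The method above mirrors the classification exposed by Python's standard decimal module, which can serve as a reference when checking its behavior:

    from decimal import Decimal

    print(Decimal('1.5').number_class())        # +Normal
    print(Decimal('-0').number_class())         # -Zero
    print(Decimal('-Infinity').number_class())  # -Infinity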
@nb.jit
def plotyz(sol, S=100):
    N = int(S / (sol["t"][1] - sol["t"][0]))
    s = plt.figure(figsize=(8, 6))
    a = plt.axes()
    yvals = sol["y"]
    zvals = sol["z"]
    a.plot(yvals[N:], zvals[N:], color="green")
    a.set_xlabel("y values")
    a.set_ylabel("z values")
    a.set_title("z vs y")
    plt.show()
plotyz. Args: sol - the pandas dataframe. Returns: plots z vs y.
625941b68a349b6b435e7f7a
def testIndicateReschedule(self):
    missed_future_date = date(2200, 5, 1)
    original_visit = Visit.objects.get(te_visit_id='01-123456789')
    self.assertTrue(original_visit.date < missed_future_date)
    visit = self.importer.update_local_missed_visit(
        self.user, self.clinic,
        create_instance(MissedVisit, {
            'key_id': '01-123456789',
            'te_id': '01-12345',
            'missed_date': '%s 00:00:00' % missed_future_date
        })
    )
    visit = reload_record(visit)
    self.assertEqual(visit.history.latest().get_history_type_display(), 'Changed')
    self.assertEquals(visit.status, 'r')
    self.assertEquals(visit.date, date(2200, 5, 1))
reschedule a visit
625941b67047854f462a1213
def _create_main_architecture(self, x_input: 'Input') -> 'Model':
    y = Dense(
        self.main_layers[1],
        activation=self.activation,
        name="hidden_layer_{}".format(1),
        kernel_initializer=RandomUniform(
            minval=-self.s, maxval=self.s, seed=self.seed
        ),
        bias_initializer=RandomUniform(
            minval=-self.s, maxval=self.s, seed=self._update_seed(self.seed, 1)
        ),
        kernel_regularizer=l2(self.l2reg),
        bias_regularizer=l2(self.l2reg),
        trainable=not self.RSN
    )(x_input)
    if self.dropout_prob != 0:
        y = Dropout(self.dropout_prob)(y)
    for i, n in enumerate(self.main_layers[2:-1]):
        y = Dense(
            n,
            activation=self.activation,
            name="hidden_layer_{}".format(i + 2),
            kernel_initializer=RandomUniform(
                minval=-self.s, maxval=self.s, seed=self._update_seed(self.seed, 2 * i + 2)
            ),
            bias_initializer=RandomUniform(
                minval=-self.s, maxval=self.s, seed=self._update_seed(self.seed, 2 * i + 3)
            ),
            kernel_regularizer=l2(self.l2reg),
            bias_regularizer=l2(self.l2reg),
            trainable=not self.RSN
        )(y)
    if self.dropout_prob != 0:
        y = Dropout(self.dropout_prob)(y)
    y_output = Dense(
        self.main_layers[-1],
        activation='linear',
        name="output_layer",
        kernel_initializer=RandomUniform(
            minval=-self.s, maxval=self.s, seed=self._update_seed(self.seed, -1)
        ),
        bias_initializer=RandomUniform(
            minval=-self.s, maxval=self.s, seed=self._update_seed(self.seed, -2)
        ),
        kernel_regularizer=l2(self.l2reg),
        bias_regularizer=l2(self.l2reg)
    )(y)
    return y, y_output
Creates the model structure for the main net, which is responsible for learning and predicting the y value. :param x_input: Input layer :return: constructed network model of the main net
625941b6090684286d50eae5
def test_unexpected_add_service_event(self):
    event = {
        'type': 'ADDED',
        'object': V1Service(api_version='v1',
                            kind='Service',
                            metadata=V1ObjectMeta(name='someservice',
                                                  namespace='othernamespace'),
                            spec=V1ServiceSpec(cluster_ip='10.0.0.5')),
    }
    watcher = hostess.Watcher(env=self.envvars)
    log = logging.getLogger()
    log.setLevel(logging.INFO)
    with self.assertLogs(log, level='DEBUG') as context_manager:
        watcher.handle_service_event(event)
        self.assertIn('DEBUG:hostess.watcher:Ignoring event for {} in {}'.format(
            'someservice', 'othernamespace'), context_manager.output)
Test receiving a normal ADDED event for a service we are not expecting.
625941b64c3428357757c131
def numEdges(graph):
    return gr.numEdges(graph)
Returns the number of edges in the graph `graph`. Args: graph: the graph on which the operation is executed. Returns: the number of edges of the graph. Raises: Exception
625941b60383005118ecf3ea
def cat_to_json(item):
    return {
        'id': item.id,
        'name': item.name,
        'path': item.path,
        'icon': item.icon,
        'parentId': item.parent_id
    }
Turn a category object into a JSON-serializable dict.
625941b6e64d504609d74646
def __populate_levels():
    for n in _names:
        i = eval(n)
        _levels[n] = i
        _level_names[i] = n
Fills in the _levels and _level_names dictionaries.
625941b68da39b475bd64d7c
def apply_transformation(X, T):
    if X.shape[1] != 2 and X.shape[1] != 3:
        warnings.warn("Input array must be of shape Nx2 or Nx3!")
    X = np.vstack([X.T, np.ones(X.shape[0])])
    t_X = T.dot(X).T
    t_X = np.asarray([row / row[-1] for row in t_X])
    return t_X[:, 0:-1]
Applies the 2D or 3D transformation matrix T to 2D or 3D data points in X. :param X: Nx2 or Nx3 numpy ndarray :param T: 3x3 (2D) or 4x4 (3D) numpy ndarray :return: Transformed points
625941b64e696a04525c925a
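A small check of the homogeneous-coordinate logic above, assuming `numpy` is imported as `np` and the function is in scope; the matrix here is a hypothetical 2D translation by (1, 2):

    import numpy as np

    T = np.array([[1.0, 0.0, 1.0],
                  [0.0, 1.0, 2.0],
                  [0.0, 0.0, 1.0]])   # translate x by 1, y by 2
    X = np.array([[0.0, 0.0],
                  [3.0, 4.0]])
    print(apply_transformation(X, T))  # [[1. 2.] [4. 6.]]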
def invokeFunction(func, args):
    if len(args) != len(func.params):
        raise LJTypeErr('incorrect no. of arguments ... (%s)' % lj_repr(args)[1:-1])
    if func.crEnv is None:
        raise Exception()
    assert func.crEnv is not None
    newEnv = func.crEnv.makeChild(func.params, args)
    newEnv.setDepth(env.depth + 1)
    treeClone = cloneLi(func.tree)
    try:
        run(treeClone, newEnv, maxLoopTime, writer)
    except LJReturn as r:
        inter = r.args[0]
        return inter
    raise LJTypeErr('non-returning function')
Helps invoke non-native functions.
625941b630c21e258bdfa2a2
def get_for_qs(get, qs):
    kwargs = {}
    for k, v in get.iteritems():
        if not (k == 'page' or k == 'pop' or k == 'q'):
            if not k == 'o':
                if k == "ot":
                    qs = query_ordena(qs, get["o"], get["ot"])
                else:
                    kwargs[str(k)] = v
                    qs = qs.filter(**kwargs)
    return qs
Checks the GET attributes and returns the corresponding queryset.
625941b630c21e258bdfa2a3
def test_array_split(self):
    items = range(90)
    expected_lengths = (50, 40)
    batches = util.array_split(items, 50)
    for idx, batch in enumerate(batches):
        assert len(batch) == expected_lengths[idx]
Test ``util.array_split()``
625941b676d4e153a657e936
def find(self, colname):
    for index in range(len(self.columns)):
        if self.columns[index].colname == colname:
            return index
    return -1
Finds the column number for a name. The method looks through all columns of the instance for a matching column name. In case the column name exists, the column index is returned. If the column name does not exist, -1 is returned. @param colname: the name of the column @type colname: string @return: the index of the column, or -1 @rtype: integer
625941b6d6c5a10208143e4c
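A minimal sketch exercising the lookup above; the `Column` and `Table` stand-ins are hypothetical, providing only the `columns`/`colname` attributes the method reads:

    class Column:
        def __init__(self, colname):
            self.colname = colname

    class Table:
        def __init__(self, names):
            self.columns = [Column(n) for n in names]

        find = find  # borrow the method defined above

    t = Table(['ra', 'dec', 'mag'])
    print(t.find('dec'))   # 1
    print(t.find('flux'))  # -1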
def is_square(n):
    if n < 0:
        return False
    elif n == 0:
        return True
    elif (n ** 0.5).is_integer():
        return True
    else:
        return False
A square of squares You like building blocks. You especially like building blocks that are squares. And what you even like more, is to arrange them into a square of square building blocks! However, sometimes, you can't arrange them into a square. Instead, you end up with an ordinary rectangle! Those blasted things! If you just had a way to know, whether you're currently working in vain… Wait! That's it! You just have to check if your number of building blocks is a perfect square. Task Given an integral number, determine if it's a square number: In mathematics, a square number or perfect square is an integer that is the square of an integer; in other words, it is the product of some integer with itself. The tests will always use some integral number, so don't worry about that in dynamic typed languages. Examples -1 => false 0 => true 3 => false 4 => true 25 => true 26 => false
625941b62eb69b55b151c6b0
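The examples from the docstring, run through the function above:

    for n in (-1, 0, 3, 4, 25, 26):
        print(n, is_square(n))  # False, True, False, True, True, False

Note that `(n ** 0.5).is_integer()` relies on floating point and can misclassify very large integers; `math.isqrt(n) ** 2 == n` is an exact alternative on Python 3.8+.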
def flush():
    logging.info('flushing...')
    send_command(b'#XS')
    send_command(b'#E0')
remove pending data in socket stream
625941b645492302aab5e0c5
def handle(verb, arg):
    for listener in listeners:
        handler = getattr(listener, verb, None)
        if handler:
            log_call(listener, verb)
            listener = listener()
            listener.action = action
            listener.request = request
            arg = handler(listener, arg)
    return arg
Processing of the data @arg by subscribed listeners that have a handler for @verb.
625941b663d6d428bbe442f5
def all(self):
    return [dict(product) for product in db['products'].all()]
Get all products
625941b6fff4ab517eb2f23f
def workflows_stepCompleted(
    self, *, workflow_step_execute_id: str, outputs: dict = None, **kwargs
) -> Union[Future, SlackResponse]:
    kwargs.update({"workflow_step_execute_id": workflow_step_execute_id})
    if outputs:
        kwargs.update({"outputs": outputs})
    return self.api_call("workflows.stepCompleted", json=kwargs)
Indicate a successful outcome of a workflow step's execution. Args: workflow_step_execute_id (str): A unique identifier of the workflow step to be updated. e.g. 'add_task' outputs (dict): A key-value object of outputs from your step. e.g. { 'task_name': 'Task Name' }
625941b68c0ade5d55d3e7c5
def registered_module_names():
    apm_reg_names = VitalObject.VITAL_LIB.vital_apm_registered_module_names
    sl_free = VitalObject.VITAL_LIB.vital_common_free_string_list
    apm_reg_names.argtypes = [ctypes.POINTER(ctypes.c_uint),
                              ctypes.POINTER(ctypes.POINTER(ctypes.c_char_p))]
    sl_free.argtypes = [ctypes.c_uint, ctypes.POINTER(ctypes.c_char_p)]
    length = ctypes.c_uint(0)
    keys = ctypes.POINTER(ctypes.c_char_p)()
    apm_reg_names(ctypes.byref(length), ctypes.byref(keys))
    r = []
    for i in range(length.value):
        r.append(keys[i])
    sl_free(length, keys)
    return r
Get a list of registered module name strings A module's name is defined as the filename minus the standard platform module library suffix. For example, on Windows, if a module library was named ``vital_foo.dll``, the module's name would be "vital_foo". Similarly on a unix system, ``vital_bar.so`` would have the name "vital_bar". :return: List of registered module names :rtype: list of str
625941b64f88993c3716be7a
def tune_auth_backend(self, backend_type, mount_point=None, default_lease_ttl=None,
                      max_lease_ttl=None, description=None,
                      audit_non_hmac_request_keys=None, audit_non_hmac_response_keys=None,
                      listing_visibility=None, passthrough_request_headers=None):
    if not mount_point:
        mount_point = backend_type
    optional_parameters = [
        'default_lease_ttl',
        'max_lease_ttl',
        'description',
        'audit_non_hmac_request_keys',
        'audit_non_hmac_response_keys',
        'listing_visibility',
        'passthrough_request_headers',
    ]
    params = {}
    for optional_parameter in optional_parameters:
        if locals().get(optional_parameter) is not None:
            params[optional_parameter] = locals().get(optional_parameter)
    return self._post('/v1/sys/auth/{0}/tune'.format(mount_point), json=params)
POST /sys/auth/<mount point>/tune

:param backend_type: str, Name of the auth backend to modify (e.g., token, approle, etc.)
:param mount_point: str, The path the associated auth backend is mounted under.
:param description: str, Specifies the description of the mount. This overrides the current stored value, if any.
:param default_lease_ttl: int,
:param max_lease_ttl: int,
:param audit_non_hmac_request_keys: list, Specifies the comma-separated list of keys that will not be HMAC'd by audit devices in the request data object.
:param audit_non_hmac_response_keys: list, Specifies the comma-separated list of keys that will not be HMAC'd by audit devices in the response data object.
:param listing_visibility: str, Specifies whether to show this mount in the UI-specific listing endpoint. Valid values are "unauth" or "".
:param passthrough_request_headers: list, Comma-separated list of headers to whitelist and pass from the request to the backend.
:return: dict, The JSON response from Vault
625941b650812a4eaa59c12c
def reformat_lig_pdb(ligands):
    for lig_pdb in ligands:
        with open(lig_pdb, 'r') as f:
            write_lines = []
            for line in f:
                if 'HETATM' in line and 'LG1 X' not in line:
                    line_list = re.split(r'(\s+)', line)
                    line_list[6] = 'LG1 X'
                    line_list[8] = '1'
                    write_line = ''.join(line_list)
                    write_lines.append(write_line)
                else:
                    write_lines.append(line)
        with open(lig_pdb, 'w') as f:
            f.writelines(write_lines)
reformat conformer file so that output is Rosetta compatible
625941b6be383301e01b5293
def contour_psf(self, id=None, **kwargs):
    self._contour(id, self._psfcontour, **kwargs)
Contour the PSF applied to the model of an image data set. If the data set has no PSF applied to it, the model is displayed.

Parameters
----------
id : int or str, optional
    The data set that provides the model. If not given then the default identifier is used, as returned by `get_default_id`.
replot : bool, optional
    Set to ``True`` to use the values calculated by the last call to `contour_psf`. The default is ``False``.
overcontour : bool, optional
    If ``True`` then add the data to an existing plot, otherwise create a new contour plot. The default is ``False``.

See Also
--------
get_psf_contour : Return the data used by contour_psf.
get_default_id : Return the default data set identifier.
contour : Create one or more plot types.
contour_kernel : Contour the kernel applied to the model of an image data set.
sherpa.astro.ui.set_coord : Set the coordinate system to use for image analysis.
set_psf : Add a PSF model to a data set.
625941b6b57a9660fec33685
def setCursor(self, row=0, col=0):
    print("\x1b[%d;%dH" % (row, col), end="")
Sets cursor position. Parameters: - `row`: The row to set the cursor position (row 1 is the top) - `col`: The column to set the cursor position (column 0 is the leftmost column)
625941b6a934411ee37514a1
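A tiny demonstration of the ANSI escape above; the `Terminal` holder is hypothetical, since the snippet only shows the method:

    class Terminal:
        setCursor = setCursor  # borrow the method defined above

    term = Terminal()
    term.setCursor(5, 10)
    print("X")  # on an ANSI terminal, prints X at row 5, column 10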
def produce_features(self, chip):
    img = self.get_image(chip)
    img = img.resize(self.image_size, Image.BICUBIC)
    img_x, img_y = img.size
    channels = img.split()
    hist_features = np.full(shape=3 * self.histogram_bins_per_channel, fill_value=-1)
    if len(channels) < 3:
        print("Non-RGB image! Vector will be padded with -1!")
    if len(channels) > 3:
        print("Non-RGB image! Channels beyond the first three will be ignored!")
        channels = channels[:3]  # fixed: was `channel[:3]`, a NameError
    for i, channel in enumerate(channels):
        channel_array = np.array(channel)
        values, _ = np.histogram(channel_array.flat, bins=self.histogram_bins_per_channel)
        start = i * self.histogram_bins_per_channel
        end = (i + 1) * self.histogram_bins_per_channel
        hist_features[start:end] = values
    img = color.rgb2gray(np.array(img))
    features = hog(
        img,
        orientations=self.orientations,
        pixels_per_cell=(img_x / self.cells[0], img_y / self.cells[1]),
        cells_per_block=self.cells,
    )
    return np.concatenate((features, hist_features))
Takes a chip object and returns a feature vector of size self.feat_size.
625941b6b5575c28eb68de03
def save_nodenet(nodenet_uid):
    nodenet = nodenets[nodenet_uid]
    nodenet.save(os.path.join(RESOURCE_PATH, NODENET_DIRECTORY, nodenet_uid + '.json'))
    nodenet_data[nodenet_uid] = Bunch(**nodenet.data)
    return True
Stores the nodenet on the server (but keeps it open).
625941b66fece00bbac2d541
def getRepositoriesURI(response):
    git_uris = []
    for page in response.keys():
        for value in response[page]['values']:
            git_uris.append(value['links']['clone'][0]['href'])
    return sorted(git_uris)
Get repositories URIs :param response: dictionary :return: list
625941b6d99f1b3c44c6739e
def archives(request, year, month):
    posts = Post.objects.filter(create_time__year=year,
                                create_time__month=month)
    return posts_paginator(posts, request)
Archive (by year and month).
625941b610dbd63aa1bd29b6
@api_view(['GET', 'POST'])
def comment_post_form(request):
    if request.method == 'GET':
        pass
    elif request.method == 'POST':
        user = User.objects.get(username=request.user)
        print(request.data)
        param = {
            'user': user.pk,
            'post': int(request.data['post']),
            'text': request.data['text'],
        }
        serializer = CommentSerializer(data=param)
        if serializer.is_valid():
            serializer.save()
            return JsonResponse(serializer.data)
        return JsonResponse(serializer.errors, status=400)
Handles comment form submission via POST. In this view the received data is updated and passed to the serializer. Inputs: data from POST => post, text, and user.id obtained from request.user. Outputs: serializer.data (not important, because on success the JS shows an alert with a notification).
625941b66fb2d068a760eea8
def check_repo(repodir, keys, fmri_dic, expected):
    sr = self.get_repo(repodir)
    for key in keys:
        f = fmri_dic[key]
        with open(sr.manifest(f), "rb") as manf:
            actual = "".join(sorted(l for l in manf)).strip()
        self.assertEqualDiff(expected[f], actual)
    fmris = [fmri_dic[key] for key in keys]
    pubs = set([fmri.PkgFmri(entry).get_publisher() for entry in fmris])
    known_pubs = set([p.prefix for p in sr.get_publishers()])
    self.assert_(pubs == known_pubs,
                 "Repository at %s didn't contain the "
                 "expected set of publishers" % repodir)  # fixed: format argument was missing
    for pub in sr.get_publishers():
        cat = sr.get_catalog(pub=pub.prefix)  # fixed: was `p.prefix`, a leftover comprehension variable
        for f in cat.fmris():
            if f.get_fmri() not in fmris:
                self.assert_(False, "%s not in repository" % f)
Check that packages corresponding to the list of keys 'keys' to items in 'fmri_dic' are present in the repository, and match the contents from the dictionary 'expected'. We also check that the repository has no packages other than those specified by 'keys', and no more publishers than are present in those packages.
625941b61f5feb6acb0c495c
def update_key(self, key):
    url = urljoin(self.url_key, '%d/' % int(key.id))
    r = requests.put(url, data=key.serialize(), headers=self.headers)
    r.raise_for_status()
    return models.JSONModel.deserialize(json_obj=r.json())
update a key
625941b6cb5e8a47e48b78b6
def log_images(self, images, num_images, epoch, n_batch, num_batches, normalize=True):
    if type(images) == np.ndarray:
        images = torch.from_numpy(images)
    images = images.transpose(1, 3)
    step = Logger._step(epoch, n_batch, num_batches)
    img_name = '{}/images{}'.format(self.comment, '')
    horizontal_grid = vutils.make_grid(images, normalize=normalize, scale_each=True)
    nrows = int(np.sqrt(num_images))
    grid = vutils.make_grid(images, nrow=nrows, normalize=True, scale_each=True)
    self.writer.add_image(img_name, horizontal_grid, step)
    self.save_torch_images(horizontal_grid, grid, epoch, n_batch)
input images are expected in format (NHWC)
625941b6b7558d58953c4d22
def download_publication(self, publication):
    r = get(self.url, self.timeout)
    html = BeautifulSoup(r.text, 'html.parser')
    download_url = self.get_download_url(html)
    if download_url is None:
        raise CouldntFindDownloadUrl(self.url)
    filename = publication.filename()
    print(f"Downloading '{filename}'")
    data = get(download_url, self.timeout, stream=True)
    save_file(filename, data)
Downloads a publication from 'self.url'.
625941b697e22403b379cd9f
def sample_graph_client_with_per_request_retry_options():
    client = GraphClient(credential=browser_credential)
    result = client.get(
        '/me/messages',
        scopes=['mail.read'],
        retry_on_status_codes=[429, 502, 503, 504]
    )
    pprint(result.json())
Sending a request using the graph client with retry options for that specific request. This will override the default config for the retry handler
625941b6d58c6744b4257a67
def __call__(self, sid):
    return ServiceContext(self._version, sid=sid)
Constructs a ServiceContext :param sid: The sid :returns: twilio.rest.preview.sync.service.ServiceContext :rtype: twilio.rest.preview.sync.service.ServiceContext
625941b6f548e778e58cd382
def copy(
    self,
    file_id,
    title=None,
    copy_permissions=False,
    folder_id=None,
    copy_comments=True,
):
    url = "{}/{}/copy".format(DRIVE_FILES_API_V2_URL, file_id)
    payload = {
        "title": title,
        "mimeType": "application/vnd.google-apps.spreadsheet",
    }
    if folder_id is not None:
        payload["parents"] = [{"id": folder_id}]
    params = {"supportsAllDrives": True}
    r = self.request("post", url, json=payload, params=params)
    spreadsheet_id = r.json()["id"]
    new_spreadsheet = self.open_by_key(spreadsheet_id)
    if copy_permissions is True:
        original = self.open_by_key(file_id)
        permissions = original.list_permissions()
        for p in permissions:
            if p.get("deleted"):
                continue
            try:
                new_spreadsheet.share(
                    value=p["emailAddress"],
                    perm_type=p["type"],
                    role=p["role"],
                    notify=False,
                )
            except Exception:
                pass
    if copy_comments is True:
        source_url = DRIVE_FILES_API_V3_COMMENTS_URL % (file_id)
        page_token = ""
        comments = []
        params = {
            "fields": "comments/content,comments/anchor,nextPageToken",
            "includeDeleted": False,
            "pageSize": 100,
        }
        while page_token is not None:
            params["pageToken"] = page_token
            res = self.request("get", source_url, params=params).json()
            comments.extend(res["comments"])
            page_token = res.get("nextPageToken", None)
        destination_url = DRIVE_FILES_API_V3_COMMENTS_URL % (new_spreadsheet.id)
        params = {"fields": "id"}
        for comment in comments:
            self.request("post", destination_url, json=comment, params=params)
    return new_spreadsheet
Copies a spreadsheet.

:param str file_id: A key of a spreadsheet to copy.
:param str title: (optional) A title for the new spreadsheet.
:param bool copy_permissions: (optional) If True, copy permissions from the original spreadsheet to the new spreadsheet.
:param str folder_id: Id of the folder where we want to save the spreadsheet.
:param bool copy_comments: (optional) If True, copy the comments from the original spreadsheet to the new spreadsheet.
:returns: a :class:`~gspread.models.Spreadsheet` instance.

.. versionadded:: 3.1.0

.. note::
   If you're using custom credentials without the Drive scope, you need to add
   ``https://www.googleapis.com/auth/drive`` to your OAuth scope in order to use this method.

   Example::

       scope = [
           'https://www.googleapis.com/auth/spreadsheets',
           'https://www.googleapis.com/auth/drive'
       ]

   Otherwise, you will get an ``Insufficient Permission`` error when you try to copy a spreadsheet.
625941b6de87d2750b85fb94
def find_blade_root_dir(working_dir):
    blade_root_dir = working_dir
    if blade_root_dir.endswith('/'):
        blade_root_dir = blade_root_dir[:-1]
    while blade_root_dir and blade_root_dir != "/":
        if os.path.isfile(os.path.join(blade_root_dir, "BLADE_ROOT")):
            break
        blade_root_dir = os.path.dirname(blade_root_dir)
    if not blade_root_dir or blade_root_dir == "/":
        error_exit("Can't find the file 'BLADE_ROOT' in this or any upper directory.\n"
                   "Blade needs this file as a placeholder to locate the root source directory "
                   "(aka the directory where your #includes start from).\n"
                   "You should create it manually the first time.")
    return blade_root_dir
find_blade_root_dir finds the directory that holds the BLADE_ROOT file. The blade_root_dir is the closest upper-level directory of the current working directory that contains a file named BLADE_ROOT.
625941b632920d7e50b27fd2
def _get(self, obj, type=None):
    def start():
        json = RequestApis.Timer.start(obj.connection, obj.json)
        obj.updateJson(json)
    return start
Return a method to start the Timer
625941b624f1403a92600970
def stats(update, context):
    total_data = (
        f"Total Confirmed Cases : {total_cases:,}\n"
        f"Total Active Cases : {total_active:,}\n"
        f"Total Recovered : {total_recovered:,}\n"
        f"Total Deaths : {total_deaths:,}"
    )
    update.message.reply_text(total_data)
COVID CASES UPDATE
625941b6b545ff76a8913c26
def __init__(self, a):
    self.a = float(a)
    self.f_0 = (jnjnp_zeros(2)[0][1] * c / (2 * pi * self.a))
    self.f_max = (self.LARGEST_ZERO * c / (2 * pi * self.a))
Return a Circular waveguide with radius a in meters.
625941b6925a0f43d2549c7a
def set_lines(self, lines):
    self.lines = lines
Set the lines
625941b644b2445a33931ea7
def _add_example(self, example):
    if len(example.fields) != 1:
        raise InvalidSpec(
            'Example for union must specify exactly one tag.',
            example.lineno, example.path)
    example_field = list(example.fields.values())[0]
    tag = example_field.name
    for field in self.all_fields:
        if tag == field.name:
            break
    else:
        raise InvalidSpec(
            "Unknown tag '%s' in example." % tag,
            example.lineno, example.path
        )
    try:
        field.data_type.check_example(example_field)
    except InvalidSpec as e:
        e.msg = "Bad example for field '{}': {}".format(field.name, e.msg)
        raise e
    self._raw_examples[example.label] = example
Adds a "raw example" for this type. This does basic sanity checking to ensure that the example is valid (required fields specified, no unknown fields, correct types, ...). The example is not available via :meth:`get_examples` until :meth:`_compute_examples` is called. Args: example (stone.stone.parser.StoneExample): An example of this type.
625941b6711fe17d82542182
def get_length(buf):
    (v, l, p) = _unpack_preamble(buf)
    return l
Returns the length of the Radiotap header. Use this to determine where the start of the next header in the packet is.
625941b6a4f1c619b28afe49
def do_or_form(vals, env):
    num_vals = len(vals)
    if num_vals == 0:
        return False
    for i, val in enumerate(vals):
        if i == num_vals - 1:
            return val
        evaluated = scheme_eval(val, env)
        if scheme_true(evaluated):
            return quote(evaluated)
Evaluate short-circuited or with parameters VALS in environment ENV.
625941b6be7bc26dc91cd40d
def add_dataset(doc, uri, index, sources_policy=None, update=None, **kwargs):
    from datacube.index.hl import Doc2Dataset
    from datacube.utils import changes
    resolver = Doc2Dataset(index, **kwargs)
    dataset, err = resolver(sanitize_inf(doc), uri)
    buff = io.StringIO()
    if err is None:
        with redirect_stderr(buff):
            if update and index.datasets.get(dataset.id):
                index.datasets.update(dataset, {tuple(): changes.allow_any})
            else:
                index.datasets.add(dataset, sources_policy=sources_policy)
        val = buff.getvalue()
        if val.count('is already in the database'):
            def warning_without_trace(message, *args, **kwargs):
                return f'{message}'
            warnings.formatwarning = warning_without_trace
            warnings.warn(val)
    else:
        raise ValueError(err)
    return dataset
Add a dataset document to the index database. Args: doc: The dataset document. uri: Some URI to point to the document (this doesn't have to actually point anywhere). index: An instance of a datacube index. sources_policy (optional): The source policy to be checked. update: Update datasets if they already exist. Returns: The dataset to be indexed and any errors encountered.
625941b663f4b57ef0000f29
def __setitem__(name, other):
    pass
Set object ``other`` into this folder under the name ``name``. ``name`` must be a Unicode object or a bytestring object. If ``name`` is a bytestring object, it must be decodable using the system default encoding or the UTF-8 encoding. ``name`` cannot be the empty string. When ``other`` is seated into this folder, it will also be decorated with a ``__parent__`` attribute (a reference to the folder into which it is being seated) and ``__name__`` attribute (the name passed in to this function). If a value already exists in the folder under the name ``name``, raise :exc:`KeyError`. When this method is called, emit an ``IObjectWillBeAdded`` event before the object obtains a ``__name__`` or ``__parent__`` value. Emit an ``IObjectAdded`` event after the object obtains a ``__name__`` and ``__parent__`` value.
625941b6460517430c393f96
def bread():
    flour_menu = ["0: all purpose", "1: bread", "2: whole wheat",
                  "3: oat", "4: rye", "5: spelt", "6: barley"]
    print("\n".join(flour_menu), "\npick up to three by number")
    flour_list = []
    for i in range(3):
        flour = input("select a flour type or finish with flours\n> ")
        if flour.isnumeric() and int(flour) in range(7):
            flour_list.append(flour_menu[int(flour)])
        else:
            break
    bread_recipe = []
    total_flour = 0
    for item in flour_list:
        test = input("how much " + item + "? (format: \"x g\" or \"y %\")\n> ")
        amount = g_or_p(test)
        if amount < 1:
            bread_recipe.append(Entry((amount * 433.5), 'g', item))
            total_flour += (amount * 433.5)
        else:
            bread_recipe.append(Entry(amount, 'g', item))
            total_flour += amount
    water = input("what percent hydration?"
                  "(just a number, if unsure, \"80\")\n> ")
    bread_recipe.append(Entry(((float(water) / 100) * total_flour), 'g', "water"))
    salt = input("how much salt? (just a number in grams. if unsure, \"9\"\n> ")
    bread_recipe.append(Entry(float(salt), 'g', "salt"))
    bread_recipe.append(Entry(92, 'g', "starter"))
    return bread_recipe
Builds a bread recipe. Steps: prompt for number of loaves (a loaf is about 880g); list flours in a numbered list, picked by number, with grams or percentage of each based on a total; hydration percentage or grams (starting with 346g/loaf, maybe adjusted for flour types); default 9g salt per loaf; default 92g starter per loaf; generate a file for the finished recipe. This is not quite finished, but it mostly works. Current problem: there is a "close" for the recipe file, but bread never opens it. The solution is probably a flag.
625941b67047854f462a1214
def bb_norm_to_corner_pixels(bb_vertices, img_shape):
    y_pix, x_pix = img_shape[0], img_shape[1]
    width_pix = int((bb_vertices[1]['x'] - bb_vertices[0]['x']) * x_pix)
    height_pix = int((bb_vertices[2]['y'] - bb_vertices[1]['y']) * y_pix)
    upper_left_x = int(bb_vertices[0]['x'] * x_pix)
    upper_left_y = int(bb_vertices[0]['y'] * y_pix)
    upper_right_x = upper_left_x + width_pix
    upper_right_y = upper_left_y
    lower_left_x = upper_left_x
    lower_left_y = upper_left_y + height_pix
    lower_right_x = lower_left_x + width_pix
    lower_right_y = lower_left_y
    return [
        {'x': upper_left_x, 'y': upper_left_y},
        {'x': upper_right_x, 'y': upper_right_y},
        {'x': lower_right_x, 'y': lower_right_y},
        {'x': lower_left_x, 'y': lower_left_y}
    ]
Takes in a list of {'x': x_val, 'y': y_val} dicts and transforms the values to fit pixel values of an img.
625941b6e8904600ed9f1d2f
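A worked example of the conversion above, with hypothetical normalized vertices (clockwise from the upper-left) on a 100x200 image:

    verts = [{'x': 0.1, 'y': 0.2}, {'x': 0.5, 'y': 0.2},
             {'x': 0.5, 'y': 0.6}, {'x': 0.1, 'y': 0.6}]
    print(bb_norm_to_corner_pixels(verts, (100, 200)))
    # [{'x': 20, 'y': 20}, {'x': 100, 'y': 20},
    #  {'x': 100, 'y': 60}, {'x': 20, 'y': 60}]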
def average_performance(self):
    return (self.sensitivity() + self.positive_predictive_value()) / 2.
(sensitivity()+positive_predictive_value())/2
625941b6099cdd3c635f0a63
def encode_multipart_formdata(fields, files):
    BOUNDARY = 'xYzZY'
    CRLF = '\r\n'
    L = []
    for (key, value) in fields:
        L.append('--' + BOUNDARY)
        L.append('Content-Disposition: form-data; name="%s"' % key)
        L.append('')
        L.append(value)
    for (key, filename, value) in files:
        L.append('--' + BOUNDARY)
        L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
        L.append('Content-Type: %s' % get_content_type(filename))
        L.append('')
        L.append(value)
    L.append('--' + BOUNDARY + '--')
    L.append('')
    body = CRLF.join(L)
    content_type = 'multipart/form-data'
    return content_type, body
fields is a sequence of (name, value) elements for regular form fields. files is a sequence of (name, filename, value) elements for data to be uploaded as files Return (content_type, body) ready for httplib.HTTP instance
625941b631939e2706e4cc78
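A usage sketch for the encoder above; `get_content_type` is a helper the snippet assumes exists, so a plausible stand-in is defined here:

    import mimetypes

    def get_content_type(filename):
        # stand-in for the helper the snippet relies on
        return mimetypes.guess_type(filename)[0] or 'application/octet-stream'

    fields = [('user', 'alice'), ('mode', 'fast')]
    files = [('upload', 'notes.txt', 'hello world')]
    content_type, body = encode_multipart_formdata(fields, files)
    print(content_type)          # multipart/form-data
    print(body.splitlines()[0])  # --xYzZY

Note that a real HTTP client also needs the boundary in the Content-Type header (e.g. multipart/form-data; boundary=xYzZY); the snippet returns only the bare media type.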
def addResultsTransform(self, id, transformData):
    kparams = KalturaParams()
    kparams.addIntIfDefined("id", id)
    kparams.addStringIfDefined("transformData", transformData)
    self.client.queueServiceActionCall("contentdistribution_genericdistributionprovideraction",
                                       "addResultsTransform",
                                       "KalturaGenericDistributionProviderAction", kparams)
    if self.client.isMultiRequest():
        return self.client.getMultiRequestResult()
    resultNode = self.client.doQueue()
    return KalturaObjectFactory.create(resultNode, 'KalturaGenericDistributionProviderAction')
Add results transform file to generic distribution provider action
625941b65e10d32532c5ed36
def clone(self):
    newChamfer = Chamfer(self._obj1, self._obj2, self.distance1, self.distance2,
                         self.pointClick1, self.pointClick2)
    return newChamfer
Clone the Chamfer. I do not know why someone would want to clone a chamfer, but this is the functionality :-)
625941b6a8370b77170526a8
def DualDemandLevel(Sizes, Demand, Areas, RainData, drop=0.1):
    SizeA, SizeB = Sizes
    AreaA, AreaB = Areas
    containerA = [SizeA]
    containerB = [SizeB]
    HarvestedA, HarvestedB = AreaA * RainData, AreaB * RainData
    if type(Demand) == int or type(Demand) == float:
        Static = True
    else:
        Static = False
    if Static:
        lastSizeA, lastSizeB = SizeA - Demand, SizeB
        Index = [val for val in zip(HarvestedA, HarvestedB)]
    else:
        lastSizeA, lastSizeB = SizeA - Demand[0], SizeB
        Index = [val for val in zip(HarvestedA, HarvestedB, Demand)]
    for inx in Index:
        if Static:
            val, sal = inx
            dal = Demand
        else:
            val, sal, dal = inx
        containerA.append(lastSizeA)
        lastSizeA = min([lastSizeA - dal + val, SizeA])
        if lastSizeA < SizeA * (1 - drop):
            demandB = SizeA - lastSizeA
            if demandB > containerB[-1]:
                lastSizeB = containerB[-1]
                containerB.append(lastSizeB + sal)
            else:
                lastSizeB = min([lastSizeB - demandB + sal, SizeB])
                lastSizeA = SizeA
                containerB.append(lastSizeB)
        else:
            lastSizeB = min([containerB[-1] + sal, SizeB])
            containerB.append(lastSizeB)
    return containerA, containerB
Discrete model of the evolution of two water sources: a fish tank used for fertirrigation and a reserve tank used to replenish the fish tank.

Parameters
----------
Sizes : list
    Sizes of the fish tank and the reserve tank in liters.
Demand : float, array-like
    Daily crop irrigation demand in liters. If float, a fixed demand; else an array that contains the daily crop demand.
Areas : list
    Contains the sizes of the rain water harvesting systems that feed the fish tank and the reserve tank.
RainData : array-like
    Contains the daily rainfall in the region.
drop : float (0, 1)
    Max level drop at the fish tank before being replenished by the reserve tank.

Returns
-------
containerA : list
    Fish tank level evolution.
containerB : list
    Reserve tank level evolution.
625941b6fbf16365ca6f5fc2
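A minimal usage sketch for the DualDemandLevel record above; the tank sizes, areas, demand, and rain series below are illustrative assumptions, not values from the source:

import numpy as np

rain = np.random.default_rng(0).uniform(0, 4, size=365)   # assumed daily rainfall series
levels_fish, levels_reserve = DualDemandLevel(
    Sizes=[1000, 5000],    # fish tank and reserve tank capacities, liters
    Demand=30,             # fixed daily irrigation demand, liters
    Areas=[20, 60],        # harvesting areas feeding each tank
    RainData=rain,
    drop=0.1,
)
print(levels_fish[-1], levels_reserve[-1])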
def __dummyProcess(self, item): <NEW_LINE> <INDENT> return (1,1)
Pretends to work, but does nothing -- for debugging
625941b63c8af77a43ae35a5
def collect_results(name: str) -> dict: <NEW_LINE> <INDENT> full_response = {} <NEW_LINE> target_name = dns.name.from_text(name) <NEW_LINE> response = lookup(target_name, dns.rdatatype.CNAME, ip) <NEW_LINE> cnames = [] <NEW_LINE> for answers in response.answer: <NEW_LINE> <INDENT> for answer in answers: <NEW_LINE> <INDENT> cnames.append({"name": answer, "alias": name}) <NEW_LINE> <DEDENT> <DEDENT> response = lookup(target_name, dns.rdatatype.A, ip) <NEW_LINE> arecords = [] <NEW_LINE> for answers in response.answer: <NEW_LINE> <INDENT> a_name = answers.name <NEW_LINE> for answer in answers: <NEW_LINE> <INDENT> if answer.rdtype == 1: <NEW_LINE> <INDENT> arecords.append({"name": a_name, "address": str(answer)}) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> response = lookup(target_name, dns.rdatatype.AAAA, ip) <NEW_LINE> aaaarecords = [] <NEW_LINE> for answers in response.answer: <NEW_LINE> <INDENT> aaaa_name = answers.name <NEW_LINE> for answer in answers: <NEW_LINE> <INDENT> if answer.rdtype == 28: <NEW_LINE> <INDENT> aaaarecords.append({"name": aaaa_name, "address": str(answer)}) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> response = lookup(target_name, dns.rdatatype.MX, ip) <NEW_LINE> mxrecords = [] <NEW_LINE> for answers in response.answer: <NEW_LINE> <INDENT> mx_name = answers.name <NEW_LINE> for answer in answers: <NEW_LINE> <INDENT> if answer.rdtype == 15: <NEW_LINE> <INDENT> mxrecords.append({"name": mx_name, "preference": answer.preference, "exchange": str(answer.exchange)}) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> if (not arecords and not aaaarecords and not mxrecords and cnames): <NEW_LINE> <INDENT> cname = str(cnames) <NEW_LINE> cnamesplit = cname.split("rdata: ") <NEW_LINE> cnamesplit1 = cnamesplit[1].split(".>,") <NEW_LINE> target_name = str(cnamesplit1[0]) <NEW_LINE> response = lookup(target_name, dns.rdatatype.A, ip) <NEW_LINE> arecords = [] <NEW_LINE> for answers in response.answer: <NEW_LINE> <INDENT> a_name = answers.name <NEW_LINE> for answer in answers: <NEW_LINE> <INDENT> if answer.rdtype == 1: <NEW_LINE> <INDENT> arecords.append({"name": a_name, "address": str(answer)}) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> response = lookup(target_name, dns.rdatatype.AAAA, ip) <NEW_LINE> aaaarecords = [] <NEW_LINE> for answers in response.answer: <NEW_LINE> <INDENT> aaaa_name = answers.name <NEW_LINE> for answer in answers: <NEW_LINE> <INDENT> if answer.rdtype == 28: <NEW_LINE> <INDENT> aaaarecords.append({"name": aaaa_name, "address": str(answer)}) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> response = lookup(target_name, dns.rdatatype.MX, ip) <NEW_LINE> mxrecords = [] <NEW_LINE> for answers in response.answer: <NEW_LINE> <INDENT> mx_name = answers.name <NEW_LINE> for answer in answers: <NEW_LINE> <INDENT> if answer.rdtype == 15: <NEW_LINE> <INDENT> mxrecords.append({"name": mx_name, "preference": answer.preference, "exchange": str(answer.exchange)}) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> full_response["CNAME"] = cnames <NEW_LINE> full_response["A"] = arecords <NEW_LINE> full_response["AAAA"] = aaaarecords <NEW_LINE> full_response["MX"] = mxrecords <NEW_LINE> return full_response
This function parses final answers into the proper data structure that print_results requires. The main work is done within the `lookup` function.
625941b63317a56b86939a71
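A sketch of how collect_results might be called; it assumes the module-level `lookup` helper and resolver `ip` that the record references but does not show:

full = collect_results("example.com")
for rtype in ("CNAME", "A", "AAAA", "MX"):
    print(rtype, full[rtype])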
def dummy_decorator(name, fn): <NEW_LINE> <INDENT> def wrapped_func(*args, **kwarg): <NEW_LINE> <INDENT> return fn(*args, **kwarg) <NEW_LINE> <DEDENT> return wrapped_func
Dummy decorator which is used from utils.monkey_patch(). :param name: name of the function :param fn: function object to wrap :returns: fn -- decorated function
625941b6d8ef3951e3243344
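A quick usage sketch for dummy_decorator above (the greet function is a made-up example):

def greet(name):
    return "hello " + name

wrapped = dummy_decorator("greet", greet)   # the name argument is carried but unused
assert wrapped("world") == "hello world"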
def get_season_in_year(self, isText=False): <NEW_LINE> <INDENT> if isText: <NEW_LINE> <INDENT> return self.SEASON_ORDER[int(self.data['db']['time']['CURRENT_DAY'] % self.days_in_a_year / self.days_in_a_season)] <NEW_LINE> <DEDENT> return int(self.data['db']['time']['CURRENT_DAY'] % self.days_in_a_year / self.days_in_a_season) + 1
Which season of the year: spring/summer/autumn/winter.
625941b61d351010ab855924
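The season lookup above boils down to integer division of the day-of-year by the season length; a standalone sketch of that arithmetic (the 360/90-day calendar is an assumption for illustration):

SEASON_ORDER = ["spring", "summer", "autumn", "winter"]
DAYS_IN_A_YEAR, DAYS_IN_A_SEASON = 360, 90

def season(current_day, as_text=False):
    idx = int(current_day % DAYS_IN_A_YEAR / DAYS_IN_A_SEASON)
    return SEASON_ORDER[idx] if as_text else idx + 1

assert season(0) == 1
assert season(95, as_text=True) == "summer"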
def short_uuid4(uid): <NEW_LINE> <INDENT> return str(uid)[:8]
When seeing a glimpse of the UID is enough.
625941b666656f66f7cbbfb1
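Usage is a one-liner; since the canonical UUID string starts with an 8-hex-digit group, the slice returns exactly that first group:

import uuid

uid = uuid.uuid4()
print(short_uuid4(uid))   # e.g. '1f2e3d4c' -- the first block of str(uid)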
def list_languages(self): <NEW_LINE> <INDENT> url = self._build_url('languages', base_url=self._api) <NEW_LINE> json = self._json(self._get(url), 200) <NEW_LINE> return [(k, v) for k, v in json.items()]
List the programming languages used in the repository. :returns: list of tuples
625941b6ac7a0e7691ed3ee1
def tensor_array_gather(handle, indices, flow_in, dtype, element_shape=None, name=None): <NEW_LINE> <INDENT> _ctx = _context._context or _context.context() <NEW_LINE> tld = _ctx._thread_local_data <NEW_LINE> if tld.is_eager: <NEW_LINE> <INDENT> raise RuntimeError("tensor_array_gather op does not support eager execution. Arg 'handle' is a ref.") <NEW_LINE> <DEDENT> dtype = _execute.make_type(dtype, "dtype") <NEW_LINE> if element_shape is None: <NEW_LINE> <INDENT> element_shape = None <NEW_LINE> <DEDENT> element_shape = _execute.make_shape(element_shape, "element_shape") <NEW_LINE> _, _, _op, _outputs = _op_def_library._apply_op_helper( "TensorArrayGather", handle=handle, indices=indices, flow_in=flow_in, dtype=dtype, element_shape=element_shape, name=name) <NEW_LINE> _result = _outputs[:] <NEW_LINE> if _execute.must_record_gradient(): <NEW_LINE> <INDENT> _attrs = ("dtype", _op._get_attr_type("dtype"), "element_shape", _op.get_attr("element_shape")) <NEW_LINE> _inputs_flat = _op.inputs <NEW_LINE> _execute.record_gradient( "TensorArrayGather", _inputs_flat, _attrs, _result) <NEW_LINE> <DEDENT> _result, = _result <NEW_LINE> return _result
Gather specific elements from the TensorArray into output `value`. Args: handle: A `Tensor` of type mutable `string`. indices: A `Tensor` of type `int32`. flow_in: A `Tensor` of type `float32`. dtype: A `tf.DType`. element_shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`. name: A name for the operation (optional). Returns: A `Tensor` of type `dtype`.
625941b61d351010ab855925
def pause(self): <NEW_LINE> <INDENT> self.__lifecycle.mark_test_finished() <NEW_LINE> self._is_idle_evt.wait() <NEW_LINE> self._await_primaries() <NEW_LINE> self._do_wait_for_mongos_retarget()
Pause the thread.
625941b6d58c6744b4257a68
def create(self, validated_data): <NEW_LINE> <INDENT> return Property_2.objects.create(**validated_data)
Create and return a new `Property_2` instance, given the validated data.
625941b6fff4ab517eb2f240
def connect_to(self, *neighbor_ids): <NEW_LINE> <INDENT> for neigh_id in neighbor_ids: <NEW_LINE> <INDENT> neigh = SimpleGraphNode.node_from_id.get(neigh_id) <NEW_LINE> if neigh is not None: <NEW_LINE> <INDENT> self._adjacent.add(neigh_id) <NEW_LINE> neigh._adjacent.add(self._node_id)
Add an undirected (two-way) edge from this node to other(s). Parameters ---------- *neighbor_ids : int or sequence of int ID(s) of node(s) to make adjacent to this one, possibly empty.
625941b6498bea3a759b98b9
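A minimal sketch of the class shape connect_to relies on; the registry dict and attribute names follow the record, while the constructor is an assumption:

class SimpleGraphNode:
    node_from_id = {}                      # id -> node registry the method looks up

    def __init__(self, node_id):
        self._node_id = node_id
        self._adjacent = set()
        SimpleGraphNode.node_from_id[node_id] = self

    def connect_to(self, *neighbor_ids):
        for neigh_id in neighbor_ids:
            neigh = SimpleGraphNode.node_from_id.get(neigh_id)
            if neigh is not None:          # unknown ids are silently skipped
                self._adjacent.add(neigh_id)
                neigh._adjacent.add(self._node_id)

a, b = SimpleGraphNode(1), SimpleGraphNode(2)
a.connect_to(2)
assert 1 in b._adjacent and 2 in a._adjacent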
def findMinArrowShots(self, points): <NEW_LINE> <INDENT> from functools import cmp_to_key <NEW_LINE> if not points: return 0 <NEW_LINE> def cmpPoints(a, b): <NEW_LINE> <INDENT> diff = a[1] - b[1] <NEW_LINE> return diff if diff != 0 else a[0] - b[0] <NEW_LINE> <DEDENT> points = sorted(points, key=cmp_to_key(cmpPoints)) <NEW_LINE> count = 1 <NEW_LINE> top = points[0] <NEW_LINE> length = len(points) <NEW_LINE> for i in range(1, length): <NEW_LINE> <INDENT> below = points[i] <NEW_LINE> if below[0] > top[1]: <NEW_LINE> <INDENT> count += 1 <NEW_LINE> top = points[i] <NEW_LINE> <DEDENT> <DEDENT> return count
:type points: List[List[int]] :rtype: int
625941b62eb69b55b151c6b1
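A quick check of findMinArrowShots with the classic balloon examples; Solution is an assumed container class, since the record only shows the method:

class Solution:
    pass

Solution.findMinArrowShots = findMinArrowShots   # attach the method defined above
solver = Solution()
assert solver.findMinArrowShots([[10, 16], [2, 8], [1, 6], [7, 12]]) == 2   # two arrows burst all four
assert solver.findMinArrowShots([[1, 2], [3, 4], [5, 6], [7, 8]]) == 4      # no overlaps -> one arrow each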
def dnac_get_module_count(dnac_session, dnac_host, dnac_headers, device_id): <NEW_LINE> <INDENT> tmp_url = 'https://%s/dna/intent/api/v1/network-device/module/MISSION' % dnac_host <NEW_LINE> tmp_params = {'deviceId': device_id} <NEW_LINE> r = dnac_session.get(tmp_url, verify=False, headers=dnac_headers, params=tmp_params) <NEW_LINE> r.raise_for_status() <NEW_LINE> return r.json()['response']
DNAC Module Count of a Network Device
625941b6b830903b967e971f
def test_intersection_with_stability_selection_one_threshold(): <NEW_LINE> <INDENT> coefs = np.array([ [[2, 1, -1, 0, 4], [4, 0, 2, -1, 5], [1, 2, 3, 4, 5]], [[2, 0, 0, 0, 0], [3, 1, 1, 0, 3], [6, 7, 8, 9, 10]], [[2, 0, 0, 0, 0], [2, -1, 3, 0, 2], [2, 4, 6, 8, 9]]]) <NEW_LINE> true_intersection = np.array([ [True, False, False, False, False], [True, True, True, False, True], [True, True, True, True, True]]) <NEW_LINE> selection_thresholds = np.array([2]) <NEW_LINE> estimated_intersection = intersection( coefs=coefs, selection_thresholds=selection_thresholds) <NEW_LINE> assert_array_equal( np.sort(true_intersection, axis=0), np.sort(estimated_intersection, axis=0))
Tests whether intersection correctly performs a soft intersection.
625941b685dfad0860c3ac60
def make_car(make, model, **options): <NEW_LINE> <INDENT> car = {} <NEW_LINE> car['make'], car['model'] = make, model <NEW_LINE> for key, value in options.items(): <NEW_LINE> <INDENT> car[key] = value <NEW_LINE> <DEDENT> return car
Creates a dictionary describing a car.
625941b6e1aae11d1e749abb
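A usage sketch for make_car; the values are illustrative:

car = make_car('subaru', 'outback', color='blue', tow_package=True)
print(car)   # {'make': 'subaru', 'model': 'outback', 'color': 'blue', 'tow_package': True}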
def __getitem__(self, label): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return self.bands[label] <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> message = "The label '{}' was not found in the defined bands.".format(label) <NEW_LINE> raise ValueError(message) from None
Define indexing as returning the definition of a requested band label.
625941b6377c676e91271fb2
def create_zone(zone, private=False, vpc_id=None, vpc_region=None, region=None, key=None, keyid=None, profile=None): <NEW_LINE> <INDENT> if region is None: <NEW_LINE> <INDENT> region = 'universal' <NEW_LINE> <DEDENT> conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) <NEW_LINE> _zone = conn.get_zone(zone, private_zone=private, vpc_id=vpc_id, vpc_region=vpc_region) <NEW_LINE> if _zone: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> conn.create_zone(zone) <NEW_LINE> return True
Create a Route53 hosted zone. .. versionadded:: 2015.8.0 CLI Example:: salt myminion boto_route53.create_zone example.org
625941b68e7ae83300e4add3
def show_frame(self, frame=None): <NEW_LINE> <INDENT> if frame is not None: <NEW_LINE> <INDENT> cv2.imshow(self.video_name, frame) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> cv2.imshow(self.video_name, self.frame)
Display the frame in the Capture's window using cv2.imshow. If no frame is provided, the previous frame is used.
625941b623849d37ff7b2e99
def IsVertical(self): <NEW_LINE> <INDENT> fld = self.GetParent().GetGrandParent() <NEW_LINE> if isinstance(fld, FoldPanelBar): <NEW_LINE> <INDENT> return self.GetParent().GetGrandParent().IsVertical() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise TypeError("ERROR: Wrong Parent " + repr(fld))
Returns whether the CaptionBar has default orientation or not. Default is vertical.
625941b67d43ff24873a2aab
def write_async(self, fname: str, compression: bool = False): <NEW_LINE> <INDENT> worker = Thread( target=self.write, args=(fname,), kwargs={"compression": compression} ) <NEW_LINE> worker.daemon = True <NEW_LINE> worker.start()
!EXPERIMENTAL Write data to disk asynchronously; this only works if the file is not manipulated until the operation is finished.
625941b650485f2cf553cba0
def _insert_and_compare(self, importer, model, samples): <NEW_LINE> <INDENT> importer(model, ['data'], [[sample] for sample in samples]) <NEW_LINE> for (a, b) in zip(model.objects.all(), samples): <NEW_LINE> <INDENT> self.assertJSONEqual("[{}]".format(json.dumps(a.data)), [b])
Inserts sample data using the given importer and asserts that the 'data' attributes of the objects are equal to the samples as JSON
625941b6004d5f362079a13f
def test_chickadee_force_single(self): <NEW_LINE> <INDENT> expected_results = self.expected_result <NEW_LINE> class MockResolver: <NEW_LINE> <INDENT> def __init__(self, *args, **kwargs): <NEW_LINE> <INDENT> self.data = None <NEW_LINE> <DEDENT> def single(self): <NEW_LINE> <INDENT> return [x for x in expected_results if x['query'] == self.data] <NEW_LINE> <DEDENT> <DEDENT> chickadee = Chickadee() <NEW_LINE> chickadee.ignore_bogon = False <NEW_LINE> chickadee.force_single = True <NEW_LINE> chickadee.fields = self.fields <NEW_LINE> with patch("libchickadee.chickadee.ipapi.Resolver", MockResolver): <NEW_LINE> <INDENT> data = chickadee.run(','.join(self.test_data_ips)) <NEW_LINE> <DEDENT> self.assertCountEqual(data, self.expected_result)
Force Single Query Method Test
625941b650812a4eaa59c12d
def startsWith(self, prefix): <NEW_LINE> <INDENT> top = self.root <NEW_LINE> for c in prefix: <NEW_LINE> <INDENT> i = self.char2num(c) <NEW_LINE> if not top.child[i]: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> top = top.child[i] <NEW_LINE> <DEDENT> return True
Returns if there is any word in the trie that starts with the given prefix. :type prefix: str :rtype: bool
625941b6e8904600ed9f1d30
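A minimal sketch of the Trie pieces startsWith relies on; char2num, the 26-slot child array, and insert are assumptions consistent with a lowercase a-z trie:

class TrieNode:
    def __init__(self):
        self.child = [None] * 26
        self.is_word = False

class Trie:
    def __init__(self):
        self.root = TrieNode()

    def char2num(self, c):
        return ord(c) - ord('a')

    def insert(self, word):
        node = self.root
        for c in word:
            i = self.char2num(c)
            if node.child[i] is None:
                node.child[i] = TrieNode()
            node = node.child[i]
        node.is_word = True

    def startsWith(self, prefix):
        top = self.root
        for c in prefix:
            i = self.char2num(c)
            if not top.child[i]:
                return False
            top = top.child[i]
        return True

t = Trie()
t.insert("apple")
assert t.startsWith("app") and not t.startsWith("b")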
def test_repeat_decorator_error(self): <NEW_LINE> <INDENT> @pool_manager.re_request() <NEW_LINE> def function(): <NEW_LINE> <INDENT> raise pool_manager.DBManagerError <NEW_LINE> <DEDENT> nose.tools.assert_raises(pool_manager.DBManagerError, function)
Test if the decorator raises an error.
625941b60c0af96317bb7ff1
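For reference, assert_raises expects the callable itself, not its result; the same check in plain unittest style (a standalone sketch, not part of the record):

import unittest

class RaisesExample(unittest.TestCase):
    def test_raises(self):
        def boom():
            raise ValueError
        self.assertRaises(ValueError, boom)     # pass the callable, do not call it
        with self.assertRaises(ValueError):     # or use the context-manager form
            boom()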