code | docstring | _id
---|---|---|
def set_search_area(self, search_area): <NEW_LINE> <INDENT> min_col, max_col, min_row, max_row = search_area <NEW_LINE> ml.log_event('search area set..\n min_col: \'{}\'\n max_col: \'{}\'\n min_row: \'{}\'\n max_row: \'{}\''.format( min_col, max_col, min_row, max_row)) <NEW_LINE> self.min_col = min_col <NEW_LINE> self.max_col = max_col <NEW_LINE> self.min_row = min_row <NEW_LINE> self.max_row = max_row
|
set_search_area does the following..
1. allows the user to set a custom search area for all spreadsheets; smaller is faster
:param search_area: a tuple containing..
..min_col: far left,
..max_col: far right,
..min_row: top,
..max_row: bottom
:return:
|
625941b66fece00bbac2d54e
|
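A minimal usage sketch for the entry above (the `SheetSearcher` stand-in class is hypothetical; the original presumably uses openpyxl-style 1-based column/row indices):

```python
class SheetSearcher:
    """Hypothetical stand-in for the class owning set_search_area()."""
    def set_search_area(self, search_area):
        self.min_col, self.max_col, self.min_row, self.max_row = search_area

searcher = SheetSearcher()
searcher.set_search_area((1, 10, 1, 500))  # columns A-J, rows 1-500
assert (searcher.min_col, searcher.max_row) == (1, 500)
```
|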
def forwards(self, orm): <NEW_LINE> <INDENT> spot_test = orm.GamificationBadgeTemplate.objects.create(name="Spot Test", image="img/OP_Badge_Event_01_SpotTest.png") <NEW_LINE> five_spot_test = orm.GamificationBadgeTemplate.objects.create(name="5 Spot Tests", image="img/OP_Badge_Event_02_5SpotTests.png") <NEW_LINE> exam = orm.GamificationBadgeTemplate.objects.create(name="Exam", image="img/OP_Badge_Event_03_Exam.png") <NEW_LINE> spot_test_champ = orm.GamificationBadgeTemplate.objects.create(name="Spot Test Champ", image="img/OP_Badge_Event_04_SpotTestChamp.png") <NEW_LINE> exam_champ = orm.GamificationBadgeTemplate.objects.create(name="Exam Champ", image="img/OP_Badge_Event_05_ExamChamp.png") <NEW_LINE> orm.GamificationScenario.objects.create(name="Spot Test", badge=spot_test, event="SPOT_TEST") <NEW_LINE> orm.GamificationScenario.objects.create(name="5 Spot Tests", badge=five_spot_test, event="5_SPOT_TEST") <NEW_LINE> orm.GamificationScenario.objects.create(name="Exam", badge=exam, event="EXAM") <NEW_LINE> orm.GamificationScenario.objects.create(name="Spot Test Champ", badge=spot_test_champ, event="SPOT_TEST_CHAMP") <NEW_LINE> orm.GamificationScenario.objects.create(name="Exam Champ", badge=exam_champ, event="EXAM_CHAMP")
|
Write your forwards methods here.
|
625941b650812a4eaa59c139
|
@process <NEW_LINE> def server(host, port): <NEW_LINE> <INDENT> sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) <NEW_LINE> sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) <NEW_LINE> sock.bind((host, port)) <NEW_LINE> sock.listen(5) <NEW_LINE> while True: <NEW_LINE> <INDENT> conn_sock, conn_addr = sock.accept() <NEW_LINE> request = conn_sock.recv(4096).strip() <NEW_LINE> if request.startswith('GET'): <NEW_LINE> <INDENT> handler_ok(request, conn_sock).start() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> handler_not_found(request, conn_sock).start()
|
Simple CSP based web server.
|
625941b64c3428357757c13e
|
def load_batch_of_images(path, dirs, labels, image_shape, loader, num_per_class, batch_size=None, random=False, indices=None, normalize=0): <NEW_LINE> <INDENT> dirs = np.array(dirs) <NEW_LINE> if batch_size == None: <NEW_LINE> <INDENT> batch_dirs = dirs <NEW_LINE> <DEDENT> elif random == True: <NEW_LINE> <INDENT> assert batch_size != None <NEW_LINE> batch_dirs = np.random.choice(dirs, batch_size) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> assert (batch_size != None) & (indices is not None) <NEW_LINE> batch_dirs = dirs[indices] <NEW_LINE> <DEDENT> samples = np.zeros((num_per_class * len(batch_dirs), *image_shape)) <NEW_LINE> batch_labels = np.ones(num_per_class * len(batch_dirs)).astype(int) <NEW_LINE> for i, dir_name in enumerate(batch_dirs): <NEW_LINE> <INDENT> dir_path = os.path.join(path, dir_name) <NEW_LINE> filenames = os.listdir(dir_path) <NEW_LINE> filenames = np.random.choice(filenames, num_per_class) <NEW_LINE> batch = np.zeros((num_per_class, *image_shape)) <NEW_LINE> for j, filename in enumerate(filenames): <NEW_LINE> <INDENT> batch[j,:,:,:] = loader(os.path.join(dir_path, filename), image_shape) <NEW_LINE> <DEDENT> samples[i*num_per_class: i*num_per_class + num_per_class, :, :, :] = batch <NEW_LINE> batch_labels[i*num_per_class: i*num_per_class + num_per_class] = batch_labels[i*num_per_class: i*num_per_class + num_per_class] * labels[i] <NEW_LINE> <DEDENT> if normalize == 1: <NEW_LINE> <INDENT> samples = (samples - 127.5) / 127.5 <NEW_LINE> <DEDENT> elif normalize == 2: <NEW_LINE> <INDENT> samples = samples / 255 <NEW_LINE> <DEDENT> return samples, batch_labels
|
Loads a random batch of images
Keyword Arguments:
-----------
- path: string
Path to the image source directory on disk.
Source directory should be divided into directories, one directory per class.
- dirs: [string]
List of directories to be considered in the path. Every directory should contain images of a single class.
- labels: [int]
Class labels. Should correspond to classes.
- image_shape: tuple (H,W,C)
H - image height
W - image width
C - number of channels
- loader: Function
Should take path to image file and image_shape as parameters.
Should return numpy array
- num_per_class: int
Number of images that should be randomly chosen from each class.
- batch_size: int
Number of classes to use in a single batch. Total number of samples will be batch_size * num_per_class.
If batch_size == None, samples will be taken from every one of dirs.
- random: bool
If classes for a batch should be chosen randomly.
- indices: [int]
Indices of the directories to use for the batch, if the batch is not random.
- normalize: int
If images should be normalized. 0 - do not normalize, 1 - normalize between -1 and 1, 2 - normalize between 0 and 1.
Returns:
--------
- samples: ndarray (N, H, W, C)
Numpy array of randomly chosen images resized according to model's input shape.
N - number of samples
H - height
W - width
C - number of channels
- batch_labels: [int]
Sample labels.
|
625941b6ec188e330fd5a5bb
|
def addDigits(self, num): <NEW_LINE> <INDENT> while len(str(num))!=1: <NEW_LINE> <INDENT> num = sum([int(x) for x in str(num)]) <NEW_LINE> <DEDENT> return num
|
:type num: int
:rtype: int
|
625941b6d18da76e235322e5
|
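This is the classic digital-root computation; a plain-function transcription of the loop above, plus the constant-time congruence identity it matches:

```python
def add_digits(num):
    """Digital root by repeated digit summing (same logic as above)."""
    while len(str(num)) != 1:
        num = sum(int(x) for x in str(num))
    return num

assert add_digits(38) == 2                  # 3 + 8 = 11, then 1 + 1 = 2
assert add_digits(38) == 1 + (38 - 1) % 9   # O(1) digital-root formula (num > 0)
```
|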
def tree_csv(self, file_name=None, leaves_only=False): <NEW_LINE> <INDENT> if self.boosting: <NEW_LINE> <INDENT> raise AttributeError("This method is not available for boosting" " models.") <NEW_LINE> <DEDENT> headers_names = [] <NEW_LINE> if self.regression: <NEW_LINE> <INDENT> headers_names.append( self.fields[self.tree.objective_id]['name']) <NEW_LINE> headers_names.append("error") <NEW_LINE> for index in range(0, self._max_bins): <NEW_LINE> <INDENT> headers_names.append("bin%s_value" % index) <NEW_LINE> headers_names.append("bin%s_instances" % index) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> headers_names.append( self.fields[self.tree.objective_id]['name']) <NEW_LINE> headers_names.append("confidence") <NEW_LINE> headers_names.append("impurity") <NEW_LINE> for category, _ in self.tree.distribution: <NEW_LINE> <INDENT> headers_names.append(category) <NEW_LINE> <DEDENT> <DEDENT> nodes_generator = self.get_nodes_info(headers_names, leaves_only=leaves_only) <NEW_LINE> if file_name is not None: <NEW_LINE> <INDENT> with UnicodeWriter(file_name) as writer: <NEW_LINE> <INDENT> writer.writerow([header.encode("utf-8") for header in headers_names]) <NEW_LINE> for row in nodes_generator: <NEW_LINE> <INDENT> writer.writerow([item if not isinstance(item, basestring) else item.encode("utf-8") for item in row]) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> rows = [] <NEW_LINE> rows.append(headers_names) <NEW_LINE> for row in nodes_generator: <NEW_LINE> <INDENT> rows.append(row) <NEW_LINE> <DEDENT> return rows
|
Outputs the node structure to a CSV file or array
|
625941b6cc0a2c11143dccac
|
def getOthers(self): <NEW_LINE> <INDENT> return self.others
|
Objective: To return the others value of the object.
Input Parameters:
self: (implicit) node object
Output: others of the object
|
625941b6adb09d7d5db6c5a7
|
def scale(self, **kwargs): <NEW_LINE> <INDENT> return type(self)(affinity.scale(self.shape, **kwargs), name=self.name, fid=self.fid)
|
Returns a scaled geometry, scaled by factors 'xfact' and 'yfact'
along each dimension. The 'origin' keyword can be 'center' for the
object bounding box center (default), 'centroid' for the geometry’s
centroid, or a coordinate tuple (x0, y0) for a fixed point.
Negative scale factors will mirror or reflect coordinates.
Kwargs:
xfact=1.0
yfact=1.0
origin='center'
|
625941b6fff4ab517eb2f24d
|
def delete_application_snapshot(ApplicationName=None, SnapshotName=None, SnapshotCreationTimestamp=None): <NEW_LINE> <INDENT> pass
|
Deletes a snapshot of application state.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_application_snapshot(
ApplicationName='string',
SnapshotName='string',
SnapshotCreationTimestamp=datetime(2015, 1, 1)
)
:type ApplicationName: string
:param ApplicationName: [REQUIRED]
The name of an existing application.
:type SnapshotName: string
:param SnapshotName: [REQUIRED]
The identifier for the snapshot to delete.
:type SnapshotCreationTimestamp: datetime
:param SnapshotCreationTimestamp: [REQUIRED]
The creation timestamp of the application snapshot to delete. You can retrieve this value using or .
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
KinesisAnalyticsV2.Client.exceptions.ResourceInUseException
KinesisAnalyticsV2.Client.exceptions.InvalidArgumentException
KinesisAnalyticsV2.Client.exceptions.UnsupportedOperationException
KinesisAnalyticsV2.Client.exceptions.InvalidRequestException
KinesisAnalyticsV2.Client.exceptions.ResourceNotFoundException
:return: {}
:returns:
(dict) --
|
625941b67c178a314d6ef26c
|
def pc_nproduced_avg(self): <NEW_LINE> <INDENT> return _ccsds_swig.mpsk_ambiguity_resolver_f_sptr_pc_nproduced_avg(self)
|
pc_nproduced_avg(mpsk_ambiguity_resolver_f_sptr self) -> float
|
625941b6a17c0f6771cbde68
|
def read_config(filenames=None): <NEW_LINE> <INDENT> default_configs = ['/etc/obs/services/git-buildpackage', '~/.obs/git-buildpackage'] <NEW_LINE> defaults = {'repo-cache-dir': '/var/cache/obs/git-buildpackage-repos/', 'gbp-tmp-dir': '/tmp/obs-service-gbp/', 'gbp-user': None, 'gbp-group': None, 'repo-cache-refs-hack': 'no'} <NEW_LINE> configs = default_configs if filenames is None else filenames <NEW_LINE> configs = [os.path.expanduser(fname) for fname in configs] <NEW_LINE> LOGGER.debug('Trying %s config files: %s', len(configs), configs) <NEW_LINE> parser = SafeConfigParser(defaults=defaults) <NEW_LINE> read = parser.read(configs) <NEW_LINE> LOGGER.debug('Read %s config files: %s', len(read), read) <NEW_LINE> if not parser.has_section('general'): <NEW_LINE> <INDENT> parser.add_section('general') <NEW_LINE> <DEDENT> for key in defaults.keys(): <NEW_LINE> <INDENT> envvar ='OBS_GIT_BUILDPACKAGE_%s' % key.replace('-', '_').upper() <NEW_LINE> if envvar in os.environ: <NEW_LINE> <INDENT> parser.set('general', key, os.environ[envvar]) <NEW_LINE> <DEDENT> <DEDENT> return dict(parser.items('general'))
|
Read configuration file(s)
|
625941b6b57a9660fec33693
|
def test_spell_activity_date_started_field(self): <NEW_LINE> <INDENT> field = self.spell_activity_record.find('field[@name=\'date_started\']') <NEW_LINE> regex_match = re.match(self.eval_regex, field.attrib['eval']) <NEW_LINE> self.assertEqual(len(regex_match.groups()), 1, 'Incorrect date_started eval on activity')
|
Make sure the date_started field for the spell activity is correct
|
625941b6eab8aa0e5d26d972
|
def existeAresta(self, v1, v2): <NEW_LINE> <INDENT> for v in self.adj[v1]: <NEW_LINE> <INDENT> if v.vertice == v2: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> <DEDENT> for v22 in self.adj[v2]: <NEW_LINE> <INDENT> if v22.vertice == v1: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> <DEDENT> return False
|
Returns whether an edge exists between v1 -> v2
|
625941b60a366e3fb873e62b
|
def calc_global_palette(self): <NEW_LINE> <INDENT> seencolors = {self.backdrop: 0} <NEW_LINE> self.global_to_local = {} <NEW_LINE> for paletteid, colors in self.palettes.items(): <NEW_LINE> <INDENT> self.global_to_local[paletteid] = g2l = bytearray(256) <NEW_LINE> for color, index in colors: <NEW_LINE> <INDENT> seencolors.setdefault(color, len(seencolors)) <NEW_LINE> g2l[seencolors[color]] = index <NEW_LINE> <DEDENT> <DEDENT> palette = [bytes(self.backdrop)] * 256 <NEW_LINE> for color, index in seencolors.items(): <NEW_LINE> <INDENT> palette[index] = bytes(color) <NEW_LINE> <DEDENT> self.global_palette = b"".join(palette)
|
Calculate a 256-entry palette containing all subpalettes.
Convert self.palette to these:
- self.global_palette, a 768-byte bytes containing the backdrop and
all subpalettes defined in the cel list
- self.global_to_local, a dict from self.palettes keys to 256-byte
bytes representing mappings from indices in self.global_palette
to indices in the subpalette, intended as LUTs for im.point()
|
625941b6be7bc26dc91cd41a
|
def test_get(self): <NEW_LINE> <INDENT> self.assertEqual(200, self.resp.status_code)
|
get /inscricao/ must return status code 200
|
625941b6b5575c28eb68de11
|
def moderator_statistics(contributions): <NEW_LINE> <INDENT> moderators = {} <NEW_LINE> for contribution in contributions: <NEW_LINE> <INDENT> if contribution["status"] == "unreviewed": <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> moderator = contribution["moderator"] <NEW_LINE> if moderator == "BANNED": <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> moderators.setdefault( moderator, { "moderator": moderator, "category": [], "average_score": [], "average_without_0": [] } ) <NEW_LINE> moderators[moderator]["average_score"].append(contribution["score"]) <NEW_LINE> moderators[moderator]["category"].append(contribution["category"]) <NEW_LINE> if contribution["score"] > 0: <NEW_LINE> <INDENT> moderators[moderator]["average_without_0"].append( contribution["score"]) <NEW_LINE> <DEDENT> <DEDENT> moderator_list = [] <NEW_LINE> for moderator, value in moderators.items(): <NEW_LINE> <INDENT> value["category"] = Counter(value["category"]).most_common() <NEW_LINE> value["average_score"] = average(value["average_score"]) <NEW_LINE> value["average_without_0"] = average(value["average_without_0"]) <NEW_LINE> moderator_list.append(value) <NEW_LINE> <DEDENT> return {"moderators": moderator_list}
|
Returns a dictionary containing statistics about all moderators.
|
625941b650485f2cf553cbad
|
def blitme(self) -> object: <NEW_LINE> <INDENT> self.screen.blit(self.image, self.rect)
|
Draw the ship at its current location.
|
625941b6f9cc0f698b14041a
|
def w_grad(weigth, center, sig, xs, ys): <NEW_LINE> <INDENT> pass
|
Params:
weigth, center - neuron params
sig - sigma, the neighborhood radius
xs - array of x values
ys - array of target values
|
625941b6046cf37aa974cb5f
|
def startsWith(self, prefix): <NEW_LINE> <INDENT> root = self.root <NEW_LINE> for c in prefix: <NEW_LINE> <INDENT> ind = ord(c) - ord('a') <NEW_LINE> if not root or not root.children[ind]: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> root = root.children[ind] <NEW_LINE> <DEDENT> return True
|
Returns if there is any word in the trie that starts with the given prefix.
:type prefix: str
:rtype: bool
|
625941b64527f215b584c26f
|
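A self-contained trie sketch consistent with the method above (it assumes lowercase a-z keys and a 26-slot children array per node; the insert method is an assumption, since only startsWith is shown in the entry):

```python
class TrieNode:
    def __init__(self):
        self.children = [None] * 26  # one slot per lowercase letter
        self.is_word = False

class Trie:
    def __init__(self):
        self.root = TrieNode()

    def insert(self, word):
        node = self.root
        for c in word:
            ind = ord(c) - ord('a')
            if node.children[ind] is None:
                node.children[ind] = TrieNode()
            node = node.children[ind]
        node.is_word = True

    def startsWith(self, prefix):  # same walk as the entry above
        node = self.root
        for c in prefix:
            ind = ord(c) - ord('a')
            if not node or not node.children[ind]:
                return False
            node = node.children[ind]
        return True

t = Trie()
t.insert("apple")
assert t.startsWith("app") and not t.startsWith("b")
```
|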
@register.filter(name='cut') <NEW_LINE> def cut(value,arg): <NEW_LINE> <INDENT> return value.replace(arg, '')
|
This cuts all occurrences of 'arg' out of the string.
|
625941b64e4d5625662d41f2
|
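A quick check of the filter logic as a direct call (template-side usage such as `{{ value|cut:" " }}` assumes the tag library containing the filter is loaded):

```python
def cut(value, arg):
    # Same body as the filter above, callable directly for testing.
    return value.replace(arg, '')

assert cut("hello world", " ") == "helloworld"
```
|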
def nsca_unknown(host_name, service_name, text_output, remote_host, **kwargs): <NEW_LINE> <INDENT> return send_nsca( status=STATE_UNKNOWN, host_name=host_name, service_name=service_name, text_output=text_output, remote_host=remote_host, **kwargs )
|
Wrapper for the send_nsca() function to easily send an UNKNOWN
Arguments:
host_name: Host name to report as
service_name: Service to report as
text_output: Freeform text, should be under 1kb
remote_host: Host name to send to
All other arguments are passed to the NscaSender constructor
|
625941b6f548e778e58cd390
|
def getSelectedObject(self): <NEW_LINE> <INDENT> sel = DCC.getSelectedObjects() <NEW_LINE> if not sel: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> newObj = sel[0] <NEW_LINE> if not newObj: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> self.loadObject(newObj)
|
Load the first selected DCC object into the UI
|
625941b691af0d3eaac9b828
|
def test(self, model, test_set: pd.DataFrame, test_label: pd.Series, args): <NEW_LINE> <INDENT> score = model.score(test_set, test_label) <NEW_LINE> self.logger.info('Test set score: %.2f' % score)
|
Test the model with test set
:param test_set: pd.DataFrame, the vectorized test dataset
:param test_label: pd.Series, the test set label
|
625941b632920d7e50b27fe0
|
def join_iostat_to_sard(self, list_sar_d, list_iostat, disk_map): <NEW_LINE> <INDENT> list_join = [] <NEW_LINE> for sar_line in list_sar_d: <NEW_LINE> <INDENT> dev = sar_line[constant.DICT_KEYS_DEV] <NEW_LINE> device_dict = Common.get_dict_by_name(constant.DICT_KEYS_DEVNAME, dev, disk_map) <NEW_LINE> if not device_dict or constant.DICT_KEYS_NAME not in device_dict: <NEW_LINE> <INDENT> sar_line[constant.DICT_KEYS_RRQM] = "0.00" <NEW_LINE> sar_line[constant.DICT_KEYS_WRQM] = "0.00" <NEW_LINE> list_join.append(sar_line) <NEW_LINE> continue <NEW_LINE> <DEDENT> device_name = device_dict[constant.DICT_KEYS_NAME] <NEW_LINE> iostat = Common.get_dict_by_name(constant.DICT_KEYS_DEVICE, device_name, list_iostat) <NEW_LINE> if not iostat or constant.DICT_KEYS_RRQM not in iostat or constant.DICT_KEYS_WRQM not in iostat: <NEW_LINE> <INDENT> sar_line[constant.DICT_KEYS_RRQM] = "0.00" <NEW_LINE> sar_line[constant.DICT_KEYS_WRQM] = "0.00" <NEW_LINE> list_join.append(sar_line) <NEW_LINE> continue <NEW_LINE> <DEDENT> sar_line[constant.DICT_KEYS_RRQM] = iostat[constant.DICT_KEYS_RRQM] <NEW_LINE> sar_line[constant.DICT_KEYS_WRQM] = iostat[constant.DICT_KEYS_WRQM] <NEW_LINE> list_join.append(sar_line) <NEW_LINE> <DEDENT> return list_join
|
Join list_iostat onto list_sar_d.
disk_map: translates DEV to Device
:param list_sar_d:
:param list_iostat:
:param disk_map:
:return:
|
625941b64527f215b584c270
|
def map_user_movies(data): <NEW_LINE> <INDENT> dict_user_movies = dict() <NEW_LINE> for i in range( data.shape[0]): <NEW_LINE> <INDENT> user = data.loc[i, 'user'] <NEW_LINE> movie = data.loc[i, 'movie'] <NEW_LINE> if (user in dict_user_movies): <NEW_LINE> <INDENT> dict_user_movies[user].add(movie) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> dict_user_movies[user] = set() <NEW_LINE> dict_user_movies[user].add(movie) <NEW_LINE> <DEDENT> <DEDENT> return dict_user_movies
|
Getting all watched movies by the user
ARGS:
All data which is users and movies, also the rating
RETURN:
Dict: A dictionary with all users and their watched movies (key = userId and values = set of watched movies)
|
625941b6f8510a7c17cf9519
|
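A plain transcription of the function above, with the add-or-create branch folded into setdefault, plus a tiny check (the sample frame is made up):

```python
import pandas as pd

def map_user_movies(data):
    """Same behaviour as above: map each user to the set of movies watched."""
    dict_user_movies = {}
    for i in range(data.shape[0]):
        dict_user_movies.setdefault(data.loc[i, 'user'], set()).add(
            data.loc[i, 'movie'])
    return dict_user_movies

data = pd.DataFrame({'user': [1, 1, 2], 'movie': ['A', 'B', 'A']})
assert map_user_movies(data) == {1: {'A', 'B'}, 2: {'A'}}
```
|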
def add_event_listener(self, event_type, callback): <NEW_LINE> <INDENT> logger.debug("adding event listener %s", event_type) <NEW_LINE> if event_type not in self.__listeners: <NEW_LINE> <INDENT> self.__listeners[event_type] = [] <NEW_LINE> <DEDENT> self.__listeners[event_type].append(callback)
|
Adds an event listener for the given type and
callback function.
|
625941b667a9b606de4a7cd1
|
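A minimal listener-registry sketch matching the method above (only add_event_listener is shown in the entry, so the host class and its dispatch() method here are assumptions for demonstration):

```python
class Dispatcher:
    """Hypothetical host class for the listener registry above."""
    def __init__(self):
        self._listeners = {}

    def add_event_listener(self, event_type, callback):
        # Same register-by-type pattern as the entry above.
        self._listeners.setdefault(event_type, []).append(callback)

    def dispatch(self, event_type, *args):
        for callback in self._listeners.get(event_type, []):
            callback(*args)

hits = []
d = Dispatcher()
d.add_event_listener('ping', hits.append)
d.dispatch('ping', 'pong')
assert hits == ['pong']
```
|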
def close(entries, date, conversion_currency, account_conversions): <NEW_LINE> <INDENT> if date is not None: <NEW_LINE> <INDENT> entries = truncate(entries, date) <NEW_LINE> <DEDENT> index = len(entries) <NEW_LINE> entries = conversions(entries, account_conversions, conversion_currency, date) <NEW_LINE> return entries, index
|
Truncate entries that occur after a particular date and ensure balance.
This method essentially removes entries after a date. It truncates the
future. To do so, it will
1. Remove all entries which occur after 'date', if given.
2. Insert conversion transactions at the end of the list of entries to
ensure that the total balance of all postings sums up to empty.
The result is a list of entries with a total balance of zero, with possibly
non-zero balances for the income/expense accounts. To produce a final
balance sheet, use transfer() to move the net income to the equity accounts.
Args:
entries: A list of directive tuples.
date: A datetime.date instance, one day beyond the end of the period. This
date can be optionally left to None in order to close at the end of the
list of entries.
conversion_currency: A string, the transfer currency to use for zero prices
on the conversion entry.
account_conversions: A string, the name of the equity account to
book currency conversions against.
Returns:
A new list of entries is returned, and the index that points to one beyond
the last original transaction that was provided. Further entries may have
been inserted to normalize conversions and ensure the total balance sums
to zero.
|
625941b63c8af77a43ae35b2
|
def __sub__(self,fraction2): <NEW_LINE> <INDENT> return Fraction(self.num * fraction2.den - self.den * fraction2.num, self.den * fraction2.den).reduire()
|
Allows redefining subtraction.
|
625941b6d486a94d0b98df63
|
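A minimal sketch of the class this operator belongs to (the num/den fields and the reduire() reduce-to-lowest-terms method are assumed from the method body):

```python
from math import gcd

class Fraction:
    """Minimal sketch of the surrounding class (assumed)."""
    def __init__(self, num, den):
        self.num, self.den = num, den

    def reduire(self):
        g = gcd(self.num, self.den)
        return Fraction(self.num // g, self.den // g)

    def __sub__(self, fraction2):  # same cross-multiplication as above
        return Fraction(self.num * fraction2.den - self.den * fraction2.num,
                        self.den * fraction2.den).reduire()

f = Fraction(1, 2) - Fraction(1, 3)
assert (f.num, f.den) == (1, 6)
```
|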
def parse_test(self): <NEW_LINE> <INDENT> self.assertEqual( [ { 'commit': '9093ace390c4e44910774ebddef403689435f046', 'author': 'Some Author <some.author@example.com>', 'date': datetime(2015, 1, 1, 12, 34, 56, tzinfo=timezone(timedelta(0, 3600))), 'count': 4, }, ], self.parser.parse(self.complete_commit, ignore=["ignored/*"]) )
|
The correct git commit dicts should be retrieved
|
625941b6004d5f362079a14c
|
def ping(): <NEW_LINE> <INDENT> warnings.warn(DeprecationWarning( "The ping task has been deprecated and will be removed in Celery " "v2.3. Please use inspect.ping instead.")) <NEW_LINE> return PingTask.apply_async().get()
|
Deprecated and scheduled for removal in Celery 2.3.
Please use :meth:`celery.task.control.ping` instead.
|
625941b616aa5153ce36228d
|
def reset(self): <NEW_LINE> <INDENT> pass
|
Reset :py:class:`pyctrl.block.Block`.
Does nothing here but allows another :py:class:`pyctrl.block.Block` to reset itself.
|
625941b624f1403a9260097f
|
def __iter__(self): <NEW_LINE> <INDENT> self._fetch() <NEW_LINE> return iter(self._cache)
|
Return iterator over the current object.
|
625941b6d164cc6175782b62
|
def get(self): <NEW_LINE> <INDENT> self.checkpoison() <NEW_LINE> item = self._store <NEW_LINE> self._store = None <NEW_LINE> return item
|
Get a Python object from a process-safe store.
|
625941b6090684286d50eaf4
|
def close(self): <NEW_LINE> <INDENT> if self.closed: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> io.RawIOBase.close(self) <NEW_LINE> self._sock = None
|
Close the SocketIO object. This doesn't close the underlying
socket, except if all references to it have disappeared.
|
625941b6ab23a570cc24ff95
|
def set_AccessToken(self, value): <NEW_LINE> <INDENT> super(ListSearchResultsInputSet, self)._set_input('AccessToken', value)
|
Set the value of the AccessToken input for this Choreo. ((optional, string) A valid access token retrieved during the OAuth process. This is required for OAuth authentication unless you provide the ClientID, ClientSecret, and RefreshToken to generate a new access token.)
|
625941b630dc7b766590177f
|
def serialize(self): <NEW_LINE> <INDENT> return { 'id': self.id, 'user_type_id': self.user_type_id, 'name': self.name, 'email': self.email, 'created': self.created.strftime("%Y-%m-%d %H:%M:%S"), 'update': self.update.strftime("%Y-%m-%d %H:%M:%S"), 'active': self.active, }
|
Serialize object for json
:param self:
|
625941b6462c4b4f79d1d4e5
|
def betterEvaluationFunction(currentGameState): <NEW_LINE> <INDENT> score = currentGameState.getScore() <NEW_LINE> newPos = currentGameState.getPacmanPosition() <NEW_LINE> newFood = currentGameState.getFood() <NEW_LINE> newGhostStates = currentGameState.getGhostStates() <NEW_LINE> newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates] <NEW_LINE> if currentGameState.isWin(): <NEW_LINE> <INDENT> return float("inf") <NEW_LINE> <DEDENT> if currentGameState.isLose(): <NEW_LINE> <INDENT> return -float("inf") <NEW_LINE> <DEDENT> closestGhost = min([util.manhattanDistance(newPos,ghostState.getPosition()) for ghostState in newGhostStates]) <NEW_LINE> closestFood = min([util.manhattanDistance(newPos,foodPos) for foodPos in newFood.asList()]) <NEW_LINE> score += closestFood - 2*closestGhost <NEW_LINE> return score
|
Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
evaluation function (question 5).
DESCRIPTION: <write something here so we know what you did>
|
625941b656ac1b37e6263ff5
|
def convert_column_to_ndarray(col: ColumnObject) -> pa.Array: <NEW_LINE> <INDENT> if col.offset != 0: <NEW_LINE> <INDENT> raise NotImplementedError("column.offset > 0 not handled yet") <NEW_LINE> <DEDENT> if col.describe_null[0] not in (0, 1, 3, 4): <NEW_LINE> <INDENT> raise NotImplementedError("Null values represented as" "sentinel values not handled yet") <NEW_LINE> <DEDENT> _buffer, _dtype = col.get_buffers()["data"] <NEW_LINE> x = buffer_to_ndarray(_buffer, _dtype) <NEW_LINE> if col.describe_null[0] in (3, 4) and col.null_count > 0: <NEW_LINE> <INDENT> mask_buffer, mask_dtype = col._get_validity_buffer() <NEW_LINE> mask = buffer_to_ndarray(mask_buffer, mask_dtype) <NEW_LINE> x = pa.array(x, mask=mask) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> x = pa.array(x) <NEW_LINE> <DEDENT> return x, _buffer
|
Convert an int, uint, float or bool column to an arrow array
|
625941b6460517430c393fa4
|
def negabs_off_diagonal_elements(self): <NEW_LINE> <INDENT> for i in range(self.nbasis): <NEW_LINE> <INDENT> for j in range(i+1,self.nbasis): <NEW_LINE> <INDENT> self.hamil[i][j] = -abs(self.hamil[i][j]) <NEW_LINE> self.hamil[j][i] = -abs(self.hamil[j][i])
|
Set off-diagonal elements of the Hamiltonian matrix to be negative.
This converts the Hamiltonian into the lesser sign-problem matrix discussed by
Spencer, Blunt and Foulkes.
|
625941b6e5267d203edcdab6
|
def swap(self, fpos, spos): <NEW_LINE> <INDENT> self.heap[fpos], self.heap[spos] = self.heap[spos], self.heap[fpos]
|
Swap the two nodes at positions fpos and spos in the heap.
|
625941b6be7bc26dc91cd41b
|
def is_wild(self): <NEW_LINE> <INDENT> return self.denomination in ('wild', 'wild4')
|
Return True if wild or wild4.
|
625941b65fcc89381b1e14d9
|
def testCaptureDepthImageThenBurstImage100Times(self): <NEW_LINE> <INDENT> for i in range(100): <NEW_LINE> <INDENT> sm.switchCaptureMode('depth') <NEW_LINE> tb.captureAndCheckPicCount('single',2) <NEW_LINE> sm.switchCaptureMode('burstfast') <NEW_LINE> tb.captureAndCheckPicCount('single',2) <NEW_LINE> time.sleep(2)
|
test capture depth image and then capture burst image 100 times.
back camera
|
625941b615fb5d323cde091e
|
def install_proxy(proxy_handler): <NEW_LINE> <INDENT> proxy_support = urllib2.ProxyHandler(proxy_handler) <NEW_LINE> opener = urllib2.build_opener(proxy_support) <NEW_LINE> urllib2.install_opener(opener)
|
install global proxy.
:param proxy_handler:
:samp:`{"http":"http://my.proxy.com:1234", "https":"https://my.proxy.com:1234"}`
:return:
|
625941b68a43f66fc4b53e7f
|
def getSFPFGUrls(self): <NEW_LINE> <INDENT> pc = getToolByName(self.context, "portal_catalog") <NEW_LINE> return [{'url':b.getURL(), 'title':b.Title,} for b in pc(portal_type="SalesforcePFGAdapter", path='/'.join(self.context.getPhysicalPath()))]
|
Return a list of the full urls and titles of the SalesforcePFGAdapters in the form (we support multiple because
they can be chained, but the wizard would only create at most one).
|
625941b6b5575c28eb68de12
|
def feed(self, name_in): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> name = name_in.replace("-", "_") <NEW_LINE> try: <NEW_LINE> <INDENT> metadata_dir = pkg_resources.get_distribution(name).egg_info <NEW_LINE> <DEDENT> except (pkg_resources.DistributionNotFound,) as e: <NEW_LINE> <INDENT> msg = "%s:%s" % (name_in, e) <NEW_LINE> logger.warning(msg) <NEW_LINE> self.mgr.s_pip_wo_packageinfo.add(name_in) <NEW_LINE> self.mgr._debug[name_in] = msg <NEW_LINE> return <NEW_LINE> <DEDENT> fnp = os.path.join(metadata_dir, "top_level.txt") <NEW_LINE> try: <NEW_LINE> <INDENT> with open(fnp) as fi: <NEW_LINE> <INDENT> packagenames = fi.readlines() <NEW_LINE> <DEDENT> <DEDENT> except (IOError,) as e: <NEW_LINE> <INDENT> msg = "%s:IOError reading top_level.txt" % (name_in) <NEW_LINE> logger.warning(msg) <NEW_LINE> self.mgr.s_pip_wo_packageinfo.add(name_in) <NEW_LINE> self.mgr._debug[name_in] = msg <NEW_LINE> return <NEW_LINE> <DEDENT> packagenames = [pn.strip() for pn in packagenames if pn.strip()] <NEW_LINE> len_packagenames = len(packagenames) <NEW_LINE> if len_packagenames == 1: <NEW_LINE> <INDENT> self.mgr.di_pip_imp[name_in] = packagenames[0] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if name in packagenames: <NEW_LINE> <INDENT> self.mgr.di_pip_imp[name_in] = name <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> msg = "%s:too many packagenames in %s" % (name_in, packagenames) <NEW_LINE> logger.warning(msg) <NEW_LINE> self.mgr.s_pip_wo_packageinfo.add(name_in) <NEW_LINE> self.mgr._debug[name_in] = msg <NEW_LINE> return <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> except (Exception,) as e: <NEW_LINE> <INDENT> if cpdb(): <NEW_LINE> <INDENT> pdb.set_trace() <NEW_LINE> <DEDENT> raise
|
credit for pkg_resources code belongs to
https://stackoverflow.com/a/54853084
|
625941b663d6d428bbe44304
|
def get_metric(self, measurand: str): <NEW_LINE> <INDENT> return self._metrics[measurand].value
|
Return last known value for given measurand.
|
625941b68e7ae83300e4ade0
|
def update(self,users): <NEW_LINE> <INDENT> dict.update(self,{}) <NEW_LINE> for entry in users: <NEW_LINE> <INDENT> self[entry.login] = entry.passwd
|
Update the user cache from database results. This is actually called
from ClientSessionManager.
|
625941b6d18da76e235322e6
|
@contextmanager <NEW_LINE> def get_cursor(): <NEW_LINE> <INDENT> db = connect() <NEW_LINE> cursor = db.cursor() <NEW_LINE> yield cursor <NEW_LINE> db.commit() <NEW_LINE> cursor.close() <NEW_LINE> db.close()
|
Helper function that takes care of the db connection and cursor
|
625941b6ac7a0e7691ed3eef
|
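The same context-manager pattern, demonstrated with sqlite3 (the original connect() helper is not shown, so this backend is an assumption). Note that without a try/finally, an exception in the with-body would skip the commit and close calls:

```python
from contextlib import contextmanager
import sqlite3

@contextmanager
def get_cursor():
    """Connection/cursor lifecycle handled around the yielded cursor."""
    db = sqlite3.connect(':memory:')
    cursor = db.cursor()
    yield cursor
    db.commit()
    cursor.close()
    db.close()

with get_cursor() as cur:
    cur.execute('SELECT 1')
    assert cur.fetchone() == (1,)
```
|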
def change_pt_crs(x, y, in_epsg, out_epsg): <NEW_LINE> <INDENT> in_crs = pyproj.Proj("+init=EPSG:" + str(in_epsg)) <NEW_LINE> out_crs = pyproj.Proj("+init=EPSG:" + str(out_epsg)) <NEW_LINE> return pyproj.transform(in_crs, out_crs, x, y)
|
Purpose:
To return the coordinates of given points in a different coordinate
system.
Description of arguments:
x (int or float, single or list): The horizontal position of the
input point.
y (int or float, single or list): The vertical position of the
input point.
Note: In case of x and y in list form, the output is also in a
list form.
in_epsg (string or int): The EPSG code of the input coordinate system
out_epsg (string or int): The EPSG code of the output coordinate system
|
625941b6099cdd3c635f0a72
|
def getBestLearner(self, meanErrors, paramDict, X, y, idx=None): <NEW_LINE> <INDENT> if idx == None: <NEW_LINE> <INDENT> return super(DecisionTreeLearner, self).getBestLearner(meanErrors, paramDict, X, y, idx) <NEW_LINE> <DEDENT> bestInds = numpy.unravel_index(numpy.argmin(meanErrors), meanErrors.shape) <NEW_LINE> currentInd = 0 <NEW_LINE> learner = self.copy() <NEW_LINE> for key, val in paramDict.items(): <NEW_LINE> <INDENT> method = getattr(learner, key) <NEW_LINE> method(val[bestInds[currentInd]]) <NEW_LINE> currentInd += 1 <NEW_LINE> <DEDENT> treeSizes = [] <NEW_LINE> for trainInds, testInds in idx: <NEW_LINE> <INDENT> validX = X[trainInds, :] <NEW_LINE> validY = y[trainInds] <NEW_LINE> learner.learnModel(validX, validY) <NEW_LINE> treeSizes.append(learner.tree.getNumVertices()) <NEW_LINE> <DEDENT> bestGamma = int(numpy.round(numpy.array(treeSizes).mean())) <NEW_LINE> learner.setGamma(bestGamma) <NEW_LINE> learner.learnModel(X, y) <NEW_LINE> return learner
|
Given a grid of errors, paramDict and examples, labels, find the
best learner and train it. In this case we set gamma to the real
size of the tree as learnt using CV. If idx == None then we simply
use the gamma corresponding to the lowest error.
|
625941b6e1aae11d1e749ac9
|
def execute(self, parents=None): <NEW_LINE> <INDENT> results = OrderedDict() <NEW_LINE> for row in self.data_query(parents=parents): <NEW_LINE> <INDENT> data = row._asdict() <NEW_LINE> id = data.get('id') <NEW_LINE> if id not in results: <NEW_LINE> <INDENT> results[id] = self.base_object(data) <NEW_LINE> <DEDENT> value = data.get('value') <NEW_LINE> attr = qualified[data.get('attribute')] <NEW_LINE> if attr.data_type not in ['type', 'entity']: <NEW_LINE> <INDENT> conv = attr.converter(attr) <NEW_LINE> value = conv.deserialize_safe(value) <NEW_LINE> <DEDENT> node = self.get_node(attr.name) <NEW_LINE> if attr.many if node is None else node.many: <NEW_LINE> <INDENT> if attr.name not in results[id]: <NEW_LINE> <INDENT> results[id][attr.name] = [] <NEW_LINE> <DEDENT> results[id][attr.name].append(value) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> results[id][attr.name] = value <NEW_LINE> <DEDENT> <DEDENT> return results
|
Run the data query and construct entities from its results.
|
625941b67d43ff24873a2ab9
|
def dailyClose(self, type): <NEW_LINE> <INDENT> raise NotImplementedError
|
Z (daily) or X (partial) close.
@param type Z (daily), X (partial)
|
625941b63eb6a72ae02ec2ef
|
def factorise(n): <NEW_LINE> <INDENT> if n < 2: <NEW_LINE> <INDENT> return [] <NEW_LINE> <DEDENT> factors = [] <NEW_LINE> p = 2 <NEW_LINE> while True: <NEW_LINE> <INDENT> if n == 1: <NEW_LINE> <INDENT> return factors <NEW_LINE> <DEDENT> r = n % p <NEW_LINE> if r == 0: <NEW_LINE> <INDENT> factors.append(p) <NEW_LINE> n /= p <NEW_LINE> <DEDENT> elif p * p >= n: <NEW_LINE> <INDENT> factors.append(n) <NEW_LINE> return factors <NEW_LINE> <DEDENT> elif p > 2: <NEW_LINE> <INDENT> p += 2 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> p += 1
|
Factorise an input integer n.
|
625941b6379a373c97cfa960
|
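A plain transcription of the function above with one portability tweak (floor division, since on Python 3 the original's `n /= p` would produce float factors), plus quick checks:

```python
def factorise(n):
    if n < 2:
        return []
    factors = []
    p = 2
    while True:
        if n == 1:
            return factors
        if n % p == 0:
            factors.append(p)
            n //= p            # floor division keeps factors as ints on Python 3
        elif p * p >= n:
            factors.append(n)  # remaining n must be prime
            return factors
        else:
            p += 2 if p > 2 else 1  # after 2, test odd candidates only

assert factorise(360) == [2, 2, 2, 3, 3, 5]
assert factorise(97) == [97]
assert factorise(1) == []
```
|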
def isCell(): <NEW_LINE> <INDENT> if not isPPC(): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> with open('/proc/cpuinfo', 'r') as f: <NEW_LINE> <INDENT> for line in f: <NEW_LINE> <INDENT> if 'Cell' in line: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return False
|
:return: True if the hardware is the Cell platform, False otherwise.
:rtype: boolean
|
625941b68e71fb1e9831d5c3
|
def get(self): <NEW_LINE> <INDENT> if self._expired_exception: <NEW_LINE> <INDENT> raise self._expired_exception <NEW_LINE> <DEDENT> if self._not_ready_exception: <NEW_LINE> <INDENT> raise self._not_ready_exception <NEW_LINE> <DEDENT> return self.results
|
Return the results now.
:return: the membership request results
:rtype: :class:`~groupy.api.memberships.MembershipResult.Results`
:raises groupy.exceptions.ResultsNotReady: if the results are not ready
:raises groupy.exceptions.ResultsExpired: if the results have expired
|
625941b66e29344779a6242b
|
def init_tworld(self): <NEW_LINE> <INDENT> self.twopts = opts <NEW_LINE> self.twlog = logging.getLogger("tornado.general") <NEW_LINE> self.mongodb = None <NEW_LINE> self.twlocalize = twcommon.localize.Localization() <NEW_LINE> self.twsessionmgr = tweblib.session.SessionMgr(self) <NEW_LINE> self.twservermgr = tweblib.servers.ServerMgr(self) <NEW_LINE> self.twconntable = tweblib.connections.ConnectionTable(self) <NEW_LINE> tornado.ioloop.IOLoop.instance().add_callback(self.init_timers)
|
Perform app-specific initialization.
|
625941b6004d5f362079a14d
|
def set_image(self, image): <NEW_LINE> <INDENT> self.image = image <NEW_LINE> return image
|
Set wrapper's content.
Returns:
object: the wrapper's content.
|
625941b62c8b7c6e89b355d9
|
def add(self, ob: Array[float, OB], act: int, val: int, rew: float) -> None: <NEW_LINE> <INDENT> self.obs.append(ob) <NEW_LINE> self.acts.append(act) <NEW_LINE> self.vals.append(val) <NEW_LINE> self.rews.append(rew)
|
Add an observation, action, value estimate, and reward to storage.
|
625941b6e8904600ed9f1d3e
|
def normalize(self, x, train=True): <NEW_LINE> <INDENT> if train: <NEW_LINE> <INDENT> mean, variance = tf.nn.moments(x, [0,1,2]) <NEW_LINE> assign_mean = self.mean.assign(mean) <NEW_LINE> assign_variance = self.variance.assign(variance) <NEW_LINE> with tf.control_dependencies([assign_mean, assign_variance]): <NEW_LINE> <INDENT> return tf.nn.batch_norm_with_global_normalization( x, mean, variance, self.beta, self.gamma, self.epsilon, self.scale_after_norm) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> mean = self.ewma_trainer.average(self.mean) <NEW_LINE> variance = self.ewma_trainer.average(self.variance) <NEW_LINE> local_beta = tf.identity(self.beta) <NEW_LINE> local_gamma = tf.identity(self.gamma) <NEW_LINE> return tf.nn.batch_norm_with_global_normalization( x, mean, variance, local_beta, local_gamma, self.epsilon, self.scale_after_norm)
|
Returns a batch-normalized version of x.
|
625941b6091ae35668666d7b
|
def __init__(self, timeoutSecs=5): <NEW_LINE> <INDENT> self.timeoutSecs = timeoutSecs
|
Initialize.
timeoutSecs: Seconds to wait before timing out.
|
625941b6097d151d1a222c72
|
def on_touch_up(self, touch) -> bool: <NEW_LINE> <INDENT> bRet:bool = False <NEW_LINE> if oDownWidget is not None: <NEW_LINE> <INDENT> if not oDownWidget.bProcessUp: <NEW_LINE> <INDENT> oDownWidget.bProcessUp = True <NEW_LINE> return oDownWidget.on_touch_up(touch) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> if hasattr(self,"oOrcaWidget"): <NEW_LINE> <INDENT> if self.oOrcaWidget is not None: <NEW_LINE> <INDENT> Logger.error ("on_touch_up: Up without Down on Widget:"+self.oOrcaWidget.uName) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> Logger.error ("on_touch_up: Up without Down") <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> if self.bProcessUp: <NEW_LINE> <INDENT> self.bIsDown = False <NEW_LINE> self.Delete_LongPressClock(touch) <NEW_LINE> self.Delete_RepeatClock(touch) <NEW_LINE> if self.bForceUp: <NEW_LINE> <INDENT> self.bProcessed = False <NEW_LINE> touch.is_double_tap = False <NEW_LINE> <DEDENT> if self.uTapType == u'' or self.uTapType == u'down' or self.uTapType == u'long_up' or self.uTapType == u'repeat_down': <NEW_LINE> <INDENT> self.uTapType ="up" <NEW_LINE> <DEDENT> if not self.bProcessed and not self.bWaitForDouble: <NEW_LINE> <INDENT> if self.fDoublePress(touch): <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> <DEDENT> if 'line' in touch.ud: <NEW_LINE> <INDENT> self.aActions=GetTouchActions(touch,self.dGestures) <NEW_LINE> if self.aActions: <NEW_LINE> <INDENT> self.dispatch('on_gesture') <NEW_LINE> <DEDENT> <DEDENT> return bRet <NEW_LINE> <DEDENT> return bRet
|
Called by the framework when the touch ends.
|
625941b650812a4eaa59c13b
|
def get_content_type(self, headers, args): <NEW_LINE> <INDENT> content_type = 'application/json' <NEW_LINE> format_ = args.get('f') <NEW_LINE> if headers and 'Accept' in headers: <NEW_LINE> <INDENT> if 'text/html' in headers['Accept']: <NEW_LINE> <INDENT> content_type = 'text/html' <NEW_LINE> <DEDENT> elif 'application/xml' in headers['Accept']: <NEW_LINE> <INDENT> content_type = 'application/xml' <NEW_LINE> <DEDENT> <DEDENT> if format_ is not None: <NEW_LINE> <INDENT> if format_ == 'json': <NEW_LINE> <INDENT> content_type = 'application/json' <NEW_LINE> <DEDENT> elif format_ == 'xml': <NEW_LINE> <INDENT> content_type = 'application/xml' <NEW_LINE> <DEDENT> elif format_ == 'html': <NEW_LINE> <INDENT> content_type = 'text/html' <NEW_LINE> <DEDENT> <DEDENT> return content_type
|
Decipher content type requested
:param headers: `dict` of HTTP request headers
:param args: `dict` of query arguments
:returns: `str` of response content type
|
625941b694891a1f4081b8bd
|
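A plain-function transcription plus two checks showing the precedence rule the method implements: an explicit `f` query argument overrides Accept-header negotiation.

```python
def get_content_type(headers, args):
    """Transcription of the method above (self dropped)."""
    content_type = 'application/json'
    format_ = args.get('f')
    if headers and 'Accept' in headers:
        if 'text/html' in headers['Accept']:
            content_type = 'text/html'
        elif 'application/xml' in headers['Accept']:
            content_type = 'application/xml'
    # The explicit ?f= query argument wins over content negotiation.
    if format_ == 'json':
        content_type = 'application/json'
    elif format_ == 'xml':
        content_type = 'application/xml'
    elif format_ == 'html':
        content_type = 'text/html'
    return content_type

assert get_content_type({'Accept': 'text/html'}, {}) == 'text/html'
assert get_content_type({'Accept': 'text/html'}, {'f': 'xml'}) == 'application/xml'
```
|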
def get_best(self): <NEW_LINE> <INDENT> pass
|
Abstract method for getting the best instance of this strategy.
|
625941b60383005118ecf3fa
|
def PullFeeds(self, req): <NEW_LINE> <INDENT> self.send_PullFeeds(req) <NEW_LINE> return self.recv_PullFeeds()
|
Parameters:
- req
|
625941b6f548e778e58cd391
|
def get_schema_fields(self, collection): <NEW_LINE> <INDENT> res, con_info = self.solr.transport.send_request(endpoint='schema/fields',collection=collection) <NEW_LINE> return res
|
Returns Schema Fields from a Solr Collection
|
625941b6d10714528d5ffaf5
|
def p_program_b(p): <NEW_LINE> <INDENT> pass
|
program_b : let prog1
| class prog2
|
625941b68c0ade5d55d3e7d5
|
def __str__(self): <NEW_LINE> <INDENT> s = self.entete_str() <NEW_LINE> totalPrix = 0.0 <NEW_LINE> totalTVA = 0.0 <NEW_LINE> for art in self.articles() : <NEW_LINE> <INDENT> s += self.article_str(art) <NEW_LINE> totalPrix = totalPrix + art.prix() <NEW_LINE> totalTVA = totalTVA + art.tva() <NEW_LINE> <DEDENT> s += self.totaux_str(totalPrix, totalTVA) <NEW_LINE> return s
|
Returns the string representation of an invoice, to be printed with the print() method.
|
625941b630c21e258bdfa2b3
|
def FetchFonts(self, currentobject): <NEW_LINE> <INDENT> pdf_fonts = {} <NEW_LINE> fonts = currentobject["/Resources"].getObject()['/Font'] <NEW_LINE> for key in fonts: <NEW_LINE> <INDENT> pdf_fonts[key] = fonts[key]['/BaseFont'][1:] <NEW_LINE> <DEDENT> return pdf_fonts
|
Return the standard fonts in current page or form.
|
625941b64c3428357757c141
|
def setDisplayWidth(self, val): <NEW_LINE> <INDENT> self.displayWidth = val
|
Sets self.displayWidth to the argument value.
|
625941b6a219f33f3462878b
|
def create_user(self, phone_number, password=None, **extra_fields): <NEW_LINE> <INDENT> if not phone_number: <NEW_LINE> <INDENT> raise ValueError('This object requires an phone number') <NEW_LINE> <DEDENT> user = self.model(phone_number=phone_number, **extra_fields) <NEW_LINE> user.set_password(password) <NEW_LINE> user.save(using=self._db) <NEW_LINE> return user
|
Creates and saves a new user
|
625941b67c178a314d6ef26e
|
def get(self, key, fallback=None): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return self[key] <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> return fallback
|
Return the key's value, or fallback in case of KeyError.
|
625941b6d8ef3951e3243353
|
def permute(self, nums): <NEW_LINE> <INDENT> def recurse(nums, i, sol): <NEW_LINE> <INDENT> if i == len(nums)-1: <NEW_LINE> <INDENT> sol.append(nums) <NEW_LINE> <DEDENT> for j in range(i, len(nums)): <NEW_LINE> <INDENT> nums = list(nums) <NEW_LINE> nums[i], nums[j] = nums[j], nums[i] <NEW_LINE> recurse(nums, i+1, sol) <NEW_LINE> <DEDENT> <DEDENT> sol = [] <NEW_LINE> recurse(nums, 0, sol) <NEW_LINE> return sol
|
:type nums: List[int]
:rtype: List[List[int]]
|
625941b623e79379d52ee37e
|
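A transcription of the permutation method above with a quick check; copying nums on each swap is what keeps earlier recursion frames from seeing later swaps:

```python
def permute(nums):
    """Transcription of the method above (self dropped)."""
    def recurse(nums, i, sol):
        if i == len(nums) - 1:
            sol.append(nums)
        for j in range(i, len(nums)):
            nums = list(nums)  # copy so earlier frames keep their order
            nums[i], nums[j] = nums[j], nums[i]
            recurse(nums, i + 1, sol)
    sol = []
    recurse(nums, 0, sol)
    return sol

assert sorted(permute([1, 2, 3])) == [[1, 2, 3], [1, 3, 2], [2, 1, 3],
                                      [2, 3, 1], [3, 1, 2], [3, 2, 1]]
```
|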
def resolve_names(self): <NEW_LINE> <INDENT> replace_names = { "id": "id", "name": "name", "restaurant": "restaurant", "can_scrape": "can_scrape", "image_link": "image_link", } <NEW_LINE> retval = dict() <NEW_LINE> return APIHelper.resolve_names(self, replace_names, retval)
|
Creates a dictionary representation of this object.
This method converts an object to a dictionary that represents the
format that the model should be in when passed into an API Request.
Because of this, the generated dictionary may have different
property names to that of the model itself.
Returns:
dict: The dictionary representing the object.
|
625941b60a366e3fb873e62c
|
def procs_per_node(system = system()): <NEW_LINE> <INDENT> if has_gpu(system): <NEW_LINE> <INDENT> return gpus_per_node(system) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return 2
|
Default number of processes per node.
|
625941b699fddb7c1c9de1a9
|
@bp.route('/', methods=['GET']) <NEW_LINE> def home(): <NEW_LINE> <INDENT> if request.method == 'GET': <NEW_LINE> <INDENT> player_name = request.args.get("player-name") <NEW_LINE> if player_name: <NEW_LINE> <INDENT> player_id = get_player_id(player_name)['player_id'] <NEW_LINE> return redirect(url_for('baseball.player', player_id=player_id)) <NEW_LINE> <DEDENT> <DEDENT> return render_template('baseball/home.html')
|
Returns HTML template for baseball homepage. Checks to see if a player is searched for.
|
625941b62c8b7c6e89b355da
|
def _add_scalars(self, epoch, train_log, valid_log): <NEW_LINE> <INDENT> for key in train_log: <NEW_LINE> <INDENT> self.writer.add_scalars(key, {'train': train_log[key], 'valid': valid_log[key]}, epoch)
|
Plot the training curves.
Args:
epoch (int): The number of trained epochs.
train_log (dict): The training log information.
valid_log (dict): The validation log information.
|
625941b682261d6c526ab2b9
|
def start(self, group, params): <NEW_LINE> <INDENT> raise NotImplementedError
|
Override this method.
Begin writing an output representation. GROUP is the name of the
configuration file group which is causing this output to be produced.
PARAMS is a dictionary of any named subexpressions of regular expressions
defined in the configuration file, plus the key 'author' contains the
author of the action being reported.
|
625941b6eab8aa0e5d26d974
|
def set_min_thresholded_stack(self, resized_stack, plot_results): <NEW_LINE> <INDENT> thresholded_stack = [] <NEW_LINE> for i in range(0, len(resized_stack)): <NEW_LINE> <INDENT> print("Applying minimum threshold to image %s of %s" % (i, len(resized_stack)-1)) <NEW_LINE> image = resized_stack[i] <NEW_LINE> thresh = threshold_minimum(image) <NEW_LINE> binary = image > thresh <NEW_LINE> thresholded_stack.append(binary) <NEW_LINE> if plot_results == True: <NEW_LINE> <INDENT> fig, axes = plt.subplots(ncols=2, figsize=(8, 3)) <NEW_LINE> ax = axes.ravel() <NEW_LINE> ax[0].imshow(image, cmap=plt.cm.gray) <NEW_LINE> ax[0].set_title('Original image') <NEW_LINE> ax[1].imshow(binary, cmap=plt.cm.gray) <NEW_LINE> ax[1].set_title('Minimum Threshold') <NEW_LINE> for a in ax: <NEW_LINE> <INDENT> a.axis('off') <NEW_LINE> <DEDENT> plt.show() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> <DEDENT> return thresholded_stack
|
Return a minimum thresholded version of resized_stack. Note that the returned
image is a matrix of booleans, not integers. The stack can be converted to
integers using skimage.io.img_as_uint. plot_results is a boolean for whether to
plot the results of thresholding for quality control. I typically set this to False.
Cell area in the returned stack is True, everywhere else is False.
|
625941b6fb3f5b602dac34a4
|
def sql_and(a: Any, b: Any) -> bool: <NEW_LINE> <INDENT> return a and b
|
SQL-AND Operator.
|
625941b61b99ca400220a8c6
|
def decrypt_aes(c1_message: bytes, priv_key_AES: bytes = None): <NEW_LINE> <INDENT> if priv_key_AES is None: <NEW_LINE> <INDENT> priv_key_AES = aes_crypto._key_open_AESPriv() <NEW_LINE> <DEDENT> decoded = aes_crypto.decrypt_aes(c1_message, priv_key_AES) <NEW_LINE> return decoded.decode("utf-8")
|
Decrypt an AES-encrypted message; if no private AES key is provided, one is loaded first.
|
625941b65fdd1c0f98dc0048
|
def __init__(self, accel, lights=None, volume_region=None): <NEW_LINE> <INDENT> self.aggregate = accel <NEW_LINE> if lights: <NEW_LINE> <INDENT> self.lights = lights <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.lights = [] <NEW_LINE> <DEDENT> self.volume_region = volume_region <NEW_LINE> self.bound = self.aggregate.world_bound() <NEW_LINE> if self.volume_region: <NEW_LINE> <INDENT> self.bound = union(self.bound, self.volume_region.world_bound())
|
Default constructor for Scene.
|
625941b6167d2b6e312189b4
|
def __init__(self, description: str = None, item_list: list({str})=None, item_dict: dict({str: str})=None, auto_display: bool = False): <NEW_LINE> <INDENT> if item_list: <NEW_LINE> <INDENT> self._item_list = item_list <NEW_LINE> self._item_dict = None <NEW_LINE> self.value = item_list[0] <NEW_LINE> <DEDENT> elif item_dict: <NEW_LINE> <INDENT> self._item_list = list(item_dict.keys()) <NEW_LINE> self._item_dict = item_dict <NEW_LINE> self.value = list(self._item_dict.values())[0] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError( "One of item_list or item_dict must be supplied.") <NEW_LINE> <DEDENT> self._wgt_select = widgets.Select(options=self._item_list, description=description, layout=Layout( width='50%', height='100px'), style={'description_width': 'initial'}) <NEW_LINE> if auto_display: <NEW_LINE> <INDENT> self.display()
|
Initialize and display list picker.
:param description=None: List label
:param item_list=None: Item List
:param item_dict=None: Item dictionary { display_string: value }
|
625941b65166f23b2e1a4f6f
|
def formats(): <NEW_LINE> <INDENT> from atooms import trajectory <NEW_LINE> txt = 'available trajectory formats:\n' <NEW_LINE> fmts = trajectory.Trajectory.formats <NEW_LINE> maxlen = max([len(name) for name in fmts]) <NEW_LINE> for name in sorted(fmts): <NEW_LINE> <INDENT> class_name = fmts[name] <NEW_LINE> if class_name.__doc__: <NEW_LINE> <INDENT> docline = class_name.__doc__.split('\n')[0].rstrip('.') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> docline = '...no description...' <NEW_LINE> <DEDENT> fmt = ' %-' + str(maxlen) + 's : %s\n' <NEW_LINE> txt += fmt % (name, docline) <NEW_LINE> <DEDENT> return txt
|
Return a string with the available trajectory formats.
|
625941b645492302aab5e0d6
|
@pytest.yield_fixture(scope="function") <NEW_LINE> def new_program_rest(): <NEW_LINE> <INDENT> service = ProgramsService() <NEW_LINE> yield service.create_programs(1)[0]
|
Creates Program via REST API
|
625941b6b57a9660fec33696
|
def return_answer(self, value1, value2): <NEW_LINE> <INDENT> return self.strategy.calculate(value1, value2)
|
:param value1: Integer or Float
:param value2: Integer or Float
:return: Calls the calculate() method of the object defined in self.strategy
|
625941b61f5feb6acb0c496b
|
def __getitem__(self, idx): <NEW_LINE> <INDENT> return self.cvc1s[idx], self.cvc2s[idx], self.gts[idx]
|
return: (tensor) data sent to the network
obj[idx] == obj.__getitem__(idx)
|
625941b691af0d3eaac9b82a
|
def why_not_bookable(self, time, machine, user): <NEW_LINE> <INDENT> if hasattr(self, 'bookable_cache'): <NEW_LINE> <INDENT> why = self.bookable_cache[machine.number] <NEW_LINE> if isinstance(why, int): <NEW_LINE> <INDENT> return why <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> why = why[user.username] <NEW_LINE> if isinstance(why, int): <NEW_LINE> <INDENT> return why <NEW_LINE> <DEDENT> return why[time] <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> <DEDENT> if not machine.isAvailable: <NEW_LINE> <INDENT> return 21 <NEW_LINE> <DEDENT> if not user.groups.filter(name='enduser').exists(): <NEW_LINE> <INDENT> return 31 <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> washuser = WashUser.objects.get(pk=user) <NEW_LINE> if not washuser.isActivated: <NEW_LINE> <INDENT> return 31 <NEW_LINE> <DEDENT> if washuser.remaining_ration < 1: <NEW_LINE> <INDENT> return 32 <NEW_LINE> <DEDENT> <DEDENT> except WashUser.DoesNotExist: <NEW_LINE> <INDENT> return 31 <NEW_LINE> <DEDENT> if self.appointment_exists(time, machine): <NEW_LINE> <INDENT> return 41 <NEW_LINE> <DEDENT> if time not in self.scheduled_appointment_times(): <NEW_LINE> <INDENT> return 11
|
Reason of why an appointment for the machine at this time can
not be booked by the user. Return None if bookable.
|
625941b632920d7e50b27fe2
|
def get_mimutes_to_start(self): <NEW_LINE> <INDENT> if self.event is not None: <NEW_LINE> <INDENT> return floor((self.event.get('dtstart').dt - datetime.now(_TIME_ZONE)).total_seconds() / 60) <NEW_LINE> <DEDENT> return None
|
Return the number of minutes until the meeting starts, or None if there is no meeting.
|
625941b6d53ae8145f87a08d
|
def closed(self, event): <NEW_LINE> <INDENT> raise NotImplementedError
|
Abstract method to handle a `ServerClosedEvent`.
:Parameters:
- `event`: An instance of :class:`ServerClosedEvent`.
|
625941b64527f215b584c272
|
def __init__( self, types, dtype ): <NEW_LINE> <INDENT> super( Uniform, self ).__init__() <NEW_LINE> self._types = _generate_enum_map( types ) <NEW_LINE> self._dtype = dtype <NEW_LINE> self._program = None <NEW_LINE> self._name = None <NEW_LINE> self._type = None <NEW_LINE> self._func = None <NEW_LINE> self._num_values = None <NEW_LINE> self._location = None
|
Creates a new Uniform object.
This should only be called by inherited Uniform classes.
Types is a dictionary with the following format:
key: GL enumeration type as a string, Eg. 'GL_FLOAT_VEC4'.
value: (uniform setter function, number of values per variable)
The function is used when setting the uniform value.
The number of values per variable is used to determine the number of
variables passed to a uniform.
i.e. number of variables = number of values / values per variable
|
625941b667a9b606de4a7cd3
|
def get_signature(self): <NEW_LINE> <INDENT> return self.oauth_params['oauth_signature']
|
Returns the signature string.
|
625941b6f8510a7c17cf951b
|
def read_data(data_dir): <NEW_LINE> <INDENT> files = glob.glob('data/{}/*.txt'.format(data_dir)) <NEW_LINE> df_dict = {} <NEW_LINE> for file in files: <NEW_LINE> <INDENT> key = file.split('/')[-1][:-4].lower() <NEW_LINE> df_dict[key] = pd.read_csv(file) <NEW_LINE> rows = len(df_dict[key].index) <NEW_LINE> df_dict[key].rename( columns=lambda x: '{}.'.format(key) + x.lower(), inplace = True) <NEW_LINE> print('Read {} records from {}'.format( rows, file)) <NEW_LINE> <DEDENT> return df_dict
|
Returns all .txt files in the specified directory as a dict of DataFrames.
|
625941b63346ee7daa2b2b7f
|
def my_gaussian_elimination(A, partial_pivoting=True): <NEW_LINE> <INDENT> import numpy as np <NEW_LINE> A_type = A.dtype <NEW_LINE> if A_type != 'complex_': <NEW_LINE> <INDENT> A_type = 'float64' <NEW_LINE> A = A.astype(A_type) <NEW_LINE> <DEDENT> m = np.shape(A)[0] <NEW_LINE> n = np.shape(A)[1] <NEW_LINE> assert (n == m), '\n\n\tMATRIX MUST BE SQUARE TO PERFORM GAUSS ' + 'ELIMINATION! \n'+ '\n\t\t\tNo. of ROWS: \t\t m = '+str(m)+'\n' + '\t\t\tNo. of COLUMNS: \t n = '+str(n)+'\n' <NEW_LINE> L = np.eye(m, dtype=A_type) <NEW_LINE> U = np.copy(A) <NEW_LINE> if partial_pivoting: <NEW_LINE> <INDENT> P = np.eye(m, dtype=A_type) <NEW_LINE> <DEDENT> for k in range(m-1): <NEW_LINE> <INDENT> if partial_pivoting: <NEW_LINE> <INDENT> possible_pivots = [abs(U[row_index][k]) for row_index in range(k,m)] <NEW_LINE> i = k + possible_pivots.index(max(possible_pivots)) <NEW_LINE> row_k = np.copy(U[k][k:m]) <NEW_LINE> U[k][k:m] = U[i][k:m] <NEW_LINE> U[i][k:m] = row_k <NEW_LINE> row_k = np.copy(L[k][:k]) <NEW_LINE> L[k][:k] = L[i][:k] <NEW_LINE> L[i][:k] = row_k <NEW_LINE> row_k = np.copy(P[k]) <NEW_LINE> P[k] = P[i] <NEW_LINE> P[i] = row_k <NEW_LINE> <DEDENT> for j in range(k+1,m): <NEW_LINE> <INDENT> L[j][k] = U[j][k]/U[k][k] <NEW_LINE> U[j][k:m] = U[j][k:m] - L[j][k]*U[k][k:m] <NEW_LINE> <DEDENT> <DEDENT> if partial_pivoting: <NEW_LINE> <INDENT> return P, L, U <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return L, U
|
This function performs Gaussian elimination with partial pivoting on the
given m x m matrix. It returns the lower- and upper-triangular
decomposition (L and U) along with the corresponding permutation matrix, P.
If partial pivoting is turned off, then only L and U are returned.
|
625941b6566aa707497f4391
|
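A quick numerical sanity check (assumes the function above has been transcribed into scope; with partial pivoting the factors should satisfy PA = LU):

```python
import numpy as np

A = np.array([[2.0, 1.0, 1.0],
              [4.0, 3.0, 3.0],
              [8.0, 7.0, 9.0]])
P, L, U = my_gaussian_elimination(A)
assert np.allclose(P @ A, L @ U)  # PA = LU with partial pivoting
assert np.allclose(np.tril(L), L) and np.allclose(np.triu(U), U)
```
|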
def make_params(self, **params): <NEW_LINE> <INDENT> p = lmfit.Parameters() <NEW_LINE> for k in self.param_names: <NEW_LINE> <INDENT> p.add(k) <NEW_LINE> <DEDENT> for param,val in params.items(): <NEW_LINE> <INDENT> if param not in p: <NEW_LINE> <INDENT> p.add(param) <NEW_LINE> <DEDENT> if isinstance(val, str): <NEW_LINE> <INDENT> p[param].expr = val <NEW_LINE> <DEDENT> elif np.isscalar(val): <NEW_LINE> <INDENT> p[param].value = val <NEW_LINE> <DEDENT> elif isinstance(val, tuple): <NEW_LINE> <INDENT> if len(val) == 2: <NEW_LINE> <INDENT> assert val[1] == 'fixed' <NEW_LINE> p[param].value = val[0] <NEW_LINE> p[param].vary = False <NEW_LINE> <DEDENT> elif len(val) == 3: <NEW_LINE> <INDENT> p[param].value = val[0] <NEW_LINE> p[param].min = val[1] if val[1] is not None else -float('inf') <NEW_LINE> p[param].max = val[2] if val[2] is not None else float('inf') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise TypeError("Tuple parameter specifications must be (val, 'fixed')" " or (val, min, max).") <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> raise TypeError("Invalid parameter specification: %r" % val) <NEW_LINE> <DEDENT> <DEDENT> global_ns = np.__dict__ <NEW_LINE> for param,val in params.items(): <NEW_LINE> <INDENT> if isinstance(val, str): <NEW_LINE> <INDENT> p[param].value = eval(val, global_ns, p.valuesdict()) <NEW_LINE> <DEDENT> <DEDENT> return p
|
Make parameters used for fitting with this model.
Keyword arguments are used to generate parameters for the fit. Each
parameter may be specified by the following formats:
param=value :
The initial value of the parameter
param=(value, 'fixed') :
Fixed value for the parameter
param=(value, min, max) :
Initial value and min, max values, which may be float or None
param='expression' :
Expression used to compute parameter value. See:
http://lmfit.github.io/lmfit-py/constraints.html#constraints-chapter
|
625941b63c8af77a43ae35b5
|
def get_predicate_sort(self, predicate_uuid): <NEW_LINE> <INDENT> pred = self.get_predicate_item(predicate_uuid) <NEW_LINE> if pred is not False: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> sort = float(pred.sort) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> sort = 0 <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> sort = 0 <NEW_LINE> <DEDENT> return sort
|
gets a predicate's sort value
|
625941b61f037a2d8b946015
|
def addCell(self, dest, source, **kwargs): <NEW_LINE> <INDENT> return self.doCellTimes(source, self.inc(), dest=dest, **kwargs)
|
Add contents of source to dest.
:param dest: destination to be added to
:param source: cell to be added to dest
:param kwargs: destructive
:return: brainfuck commands
|
625941b624f1403a92600981
|
def index(request): <NEW_LINE> <INDENT> return render(request, 'maps/index.html')
|
Placeholder; probably where the main map page will go.
|
625941b624f1403a92600980
|
def main(): <NEW_LINE> <INDENT> os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'celery_exm.settings') <NEW_LINE> try: <NEW_LINE> <INDENT> from django.core.management import execute_from_command_line <NEW_LINE> <DEDENT> except ImportError as exc: <NEW_LINE> <INDENT> raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc <NEW_LINE> <DEDENT> execute_from_command_line(sys.argv)
|
Run administrative tasks.
|
625941b6090684286d50eaf6
|
def test_302_neutron_dhcp_agent_config(self): <NEW_LINE> <INDENT> u.log.debug('Checking neutron gateway dhcp agent config file data...') <NEW_LINE> unit = self.neutron_gateway_sentry <NEW_LINE> conf = '/etc/neutron/dhcp_agent.ini' <NEW_LINE> expected = { 'state_path': '/var/lib/neutron', 'interface_driver': 'neutron.agent.linux.interface.' 'OVSInterfaceDriver', 'dhcp_driver': 'neutron.agent.linux.dhcp.Dnsmasq', 'root_helper': 'sudo /usr/bin/neutron-rootwrap ' '/etc/neutron/rootwrap.conf', 'ovs_use_veth': 'True' } <NEW_LINE> section = 'DEFAULT' <NEW_LINE> ret = u.validate_config_data(unit, conf, section, expected) <NEW_LINE> if ret: <NEW_LINE> <INDENT> message = "dhcp agent config error: {}".format(ret) <NEW_LINE> amulet.raise_status(amulet.FAIL, msg=message)
|
Verify the data in the dhcp agent config file.
|
625941b6462c4b4f79d1d4e7
|
def getCoordinateSystems(self): <NEW_LINE> <INDENT> return self._CoordinateSystems
|
All coordinate systems used to describe position points of this location.
|
625941b699fddb7c1c9de1aa
|