Unnamed: 0 | code | docstring
---|---|---|
16,300 |
def stats(self) -> pd.DataFrame:
cumul = []
for f in self:
info = {
"flight_id": f.flight_id,
"callsign": f.callsign,
"origin": f.origin,
"destination": f.destination,
"duration": f.stop - f.start,
}
cumul.append(info)
return (
pd.DataFrame.from_records(cumul)
.set_index("flight_id")
.sort_values("duration", ascending=False)
)
|
Statistics about flights contained in the structure.
Useful for a meaningful representation.
|
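The pattern above (collect dicts, `from_records`, index, sort) can be tried standalone; the flight values below are invented toy data, not from any real `Flight` object:

```python
import pandas as pd

# Toy stand-ins for Flight objects; the values are made up for illustration.
cumul = [
    {"flight_id": "AFR1234_001", "callsign": "AFR1234", "origin": "LFPG",
     "destination": "EGLL", "duration": pd.Timedelta(minutes=75)},
    {"flight_id": "DLH400_002", "callsign": "DLH400", "origin": "EDDF",
     "destination": "KJFK", "duration": pd.Timedelta(hours=8)},
]

stats = (
    pd.DataFrame.from_records(cumul)
    .set_index("flight_id")
    .sort_values("duration", ascending=False)
)
print(stats)
```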
16,301 |
def get_suitable_slot_for_reference(self, reference):
if not IReferenceSample.providedBy(reference):
return -1
occupied = self.get_slot_positions(type='all') or [0]
wst = self.getWorksheetTemplate()
if not wst:
slot_to = max(occupied) + 1
return slot_to
slot_type = reference.getBlank() and 'b' or 'c'
layout = wst.getLayout()
for pos in layout:
if pos['type'] != slot_type:
continue
slot_to = int(pos['pos'])
if slot_to in occupied:
continue
return slot_to
occupied.append(len(layout))
slot_to = max(occupied) + 1
return slot_to
|
Returns the suitable position for reference analyses, taking into
account if there is a WorksheetTemplate assigned to this worksheet.
By default, returns a new slot at the end of the worksheet unless there
is a slot defined for a reference of the same type (blank or control)
in the worksheet template's layout that hasn't been used yet.
:param reference: ReferenceSample the analyses will be created from
:return: suitable slot position for reference analyses
|
16,302 |
def deliver_tx(self, raw_transaction):
self.abort_if_abci_chain_is_not_synced()
logger.debug('deliver_tx: %s', raw_transaction)
transaction = self.bigchaindb.is_valid_transaction(
decode_transaction(raw_transaction), self.block_transactions)
if not transaction:
logger.debug('deliver_tx: INVALID')
return ResponseDeliverTx(code=CodeTypeError)
else:
logger.debug('storing tx')
self.block_txn_ids.append(transaction.id)
self.block_transactions.append(transaction)
return ResponseDeliverTx(code=CodeTypeOk)
|
Validate the transaction before mutating the state.
Args:
raw_transaction: a raw string (in bytes) transaction.
|
16,303 |
def computational_form(data):
if isinstance(data.iloc[0], DataFrame):
dslice = Panel.from_dict(dict([(i,data.iloc[i])
for i in xrange(len(data))]))
elif isinstance(data.iloc[0], Series):
dslice = DataFrame(data.tolist())
dslice.index = data.index
else:
dslice = data
return dslice
|
Input Series of numbers, Series, or DataFrames repackaged
for calculation.
Parameters
----------
data : pandas.Series
Series of numbers, Series, DataFrames
Returns
-------
pandas.Series, DataFrame, or Panel
repacked data, aligned by indices, ready for calculation
|
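A minimal sketch of the Series-of-Series branch above, using only pandas (the `Panel` branch is omitted since `Panel` is gone from modern pandas; the sample values are made up):

```python
import pandas as pd

# A Series whose elements are themselves Series, e.g. one profile per sample time.
data = pd.Series({
    "t0": pd.Series([1.0, 2.0, 3.0]),
    "t1": pd.Series([4.0, 5.0, 6.0]),
})

# The Series branch of computational_form: repack rows into one DataFrame,
# keeping the outer index so the result is aligned for calculation.
dslice = pd.DataFrame(data.tolist())
dslice.index = data.index
print(dslice * 2)
```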
16,304 |
def _convolve3_old(data, h, dev=None):
if dev is None:
dev = get_device()
if dev is None:
raise ValueError("no OpenCLDevice found...")
dtype = data.dtype.type
dtypes_options = {np.float32: "",
np.uint16: "-D SHORTTYPE"}
if not dtype in dtypes_options:
raise TypeError("data type %s not supported yet, please convert to:" % dtype, list(dtypes_options.keys()))
prog = OCLProgram(abspath("kernels/convolve3.cl"),
build_options=dtypes_options[dtype])
hbuf = OCLArray.from_array(h.astype(np.float32))
img = OCLImage.from_array(data)
res = OCLArray.empty(data.shape, dtype=np.float32)
Ns = [np.int32(n) for n in data.shape + h.shape]
prog.run_kernel("convolve3d", img.shape, None,
img, hbuf.data, res.data,
*Ns)
return res.get()
|
convolves 3d data with kernel h on the GPU Device dev
boundary conditions are clamping to edge.
h is converted to float32
if dev == None the default one is used
|
16,305 |
def options(self):
config = self._config
o = {}
o.update(self._default_smtp_options)
o.update(self._default_message_options)
o.update(self._default_backend_options)
o.update(get_namespace(config, 'EMAIL_', valid_keys=o.keys()))
o['port'] = int(o['port'])
o['timeout'] = float(o['timeout'])
return o
|
Reads all EMAIL_ options and set default values.
|
16,306 |
def update(ctx, no_restart, no_rebuild):
instance = ctx.obj['instance']
log()
run_process(, [, , , ])
run_process(, [, , , ])
if not no_rebuild:
log()
install_frontend(instance, forcerebuild=True, install=False, development=True)
if not no_restart:
log()
if instance != :
instance = + instance
run_process(, [, , , instance])
log()
|
Update a HFOS node
|
16,307 |
def token_is_valid(self,):
elapsed_time = time.time() - self.token_time
logger.debug("ELAPSED TIME : {0}".format(elapsed_time))
if elapsed_time > 3540:
logger.debug("TOKEN HAS EXPIRED")
return False
logger.debug("TOKEN IS STILL VALID")
return True
|
Check the validity of the token (nominal lifetime: 3600 s).
|
16,308 |
def _check_len(self, pkt):
if len(pkt) % 2:
last_chr = pkt[-1]
if last_chr <= b'\x80':
return pkt[:-1] + b'\x00' + last_chr
else:
return pkt[:-1] + b'\xff' + chb(orb(last_chr) - 1)
else:
return pkt
|
Check for odd packet length and pad according to Cisco spec.
This padding is only used for checksum computation. The original
packet should not be altered.
|
16,309 |
def bsrchi(value, ndim, array):
value = ctypes.c_int(value)
ndim = ctypes.c_int(ndim)
array = stypes.toIntVector(array)
return libspice.bsrchi_c(value, ndim, array)
|
Do a binary search for a key value within an integer array,
assumed to be in increasing order. Return the index of the
matching array entry, or -1 if the key value is not found.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/bsrchi_c.html
:param value: Value to find in array.
:type value: int
:param ndim: Dimension of array.
:type ndim: int
:param array: Array to be searched.
:type array: Array of ints
:return: index
:rtype: int
|
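The CSPICE call itself needs SpiceyPy; the documented behaviour is an ordinary binary search, which a few lines of standard-library Python reproduce (the helper name `bsrchi_py` is made up):

```python
from bisect import bisect_left

def bsrchi_py(value, array):
    """Binary search an increasing integer array; return the index or -1."""
    i = bisect_left(array, value)
    if i < len(array) and array[i] == value:
        return i
    return -1

print(bsrchi_py(7, [1, 3, 5, 7, 9]))   # 3
print(bsrchi_py(4, [1, 3, 5, 7, 9]))   # -1
```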
16,310 |
def load_config(self, filepath=None):
def load_settings(filepath):
instruments_loaded = {}
probes_loaded = {}
scripts_loaded = {}
if filepath and os.path.isfile(filepath):
in_data = load_b26_file(filepath)
instruments = in_data['instruments'] if 'instruments' in in_data else {}
scripts = in_data['scripts'] if 'scripts' in in_data else {}
probes = in_data['probes'] if 'probes' in in_data else {}
try:
instruments_loaded, failed = Instrument.load_and_append(instruments)
if len(failed) > 0:
print((, failed))
scripts_loaded, failed, instruments_loaded = Script.load_and_append(
script_dict=scripts,
instruments=instruments_loaded,
log_function=self.log,
data_path=self.gui_settings[])
if len(failed) > 0:
print((, failed))
probes_loaded, failed, instruments_loadeds = Probe.load_and_append(
probe_dict=probes,
probes=probes_loaded,
instruments=instruments_loaded)
self.log()
except ImportError:
self.log()
self.log()
return instruments_loaded, scripts_loaded, probes_loaded
config = None
try:
config = load_b26_file(filepath)
config_settings = config[]
if config_settings[] != filepath:
print((
.format(
config_settings[], filepath)))
config_settings[] = filepath
except Exception as e:
if filepath:
self.log()
config_settings = self._DEFAULT_CONFIG
for x in self._DEFAULT_CONFIG.keys():
if x in config_settings:
if not os.path.exists(config_settings[x]):
try:
os.makedirs(config_settings[x])
except Exception:
config_settings[x] = self._DEFAULT_CONFIG[x]
os.makedirs(config_settings[x])
print((.format(config_settings[x])))
else:
config_settings[x] = self._DEFAULT_CONFIG[x]
os.makedirs(config_settings[x])
print((.format(x, config_settings[x])))
if filepath is not None and os.path.exists(os.path.dirname(filepath)):
config_settings[] = filepath
self.gui_settings = config_settings
if(config):
self.gui_settings_hidden = config[]
else:
self.gui_settings_hidden[] =
self.instruments, self.scripts, self.probes = load_settings(filepath)
self.refresh_tree(self.tree_gui_settings, self.gui_settings)
self.refresh_tree(self.tree_scripts, self.scripts)
self.refresh_tree(self.tree_settings, self.instruments)
self._hide_parameters(filepath)
|
Checks if the file is a valid config file and loads the instruments, scripts and probes defined in it.
Args:
filepath: path to the config file
|
16,311 |
def exclude(self, target, operation, role, value):
target = {"result": self.data["proxies"]["result"],
"instance": self.data["proxies"]["instance"],
"plugin": self.data["proxies"]["plugin"]}[target]
if operation == "add":
target.add_exclusion(role, value)
elif operation == "remove":
target.remove_exclusion(role, value)
else:
raise TypeError("operation must be either `add` or `remove`")
|
Exclude a `role` of `value` at `target`
Arguments:
target (str): Destination proxy model
operation (str): "add" or "remove" exclusion
role (str): Role to exclude
value (str): Value of `role` to exclude
|
16,312 |
def reshape(self, newshape, order='C'):
oldshape = self.shape
ar = np.asarray(self).reshape(newshape, order=order)
if (newshape is -1 and len(oldshape) is 1 or
(isinstance(newshape, numbers.Integral) and
newshape == oldshape[0]) or
(isinstance(newshape, Sequence) and
(newshape[0] == oldshape[0] or
(newshape[0] is -1 and np.array(oldshape[1:]).prod() ==
np.array(newshape[1:]).prod())))):
newlabels = [None] * ar.ndim
i = 1
while i < ar.ndim and i < self.ndim and ar.shape[i] == oldshape[i]:
newlabels[i] = self.labels[i]
i += 1
return Timeseries(ar, self.tspan, newlabels)
else:
return ar
|
If axis 0 is unaffected by the reshape, then returns a Timeseries,
otherwise returns an ndarray. Preserves labels of axis j only if all
axes<=j are unaffected by the reshape.
See ``numpy.ndarray.reshape()`` for more information
|
16,313 |
def update_variables(X, Z, U, prox_f, step_f, prox_g, step_g, L):
if not hasattr(prox_g, '__iter__'):
if prox_g is not None:
dX = step_f/step_g * L.T.dot(L.dot(X) - Z + U)
X[:] = prox_f(X - dX, step_f)
LX, R, S = do_the_mm(X, step_f, Z, U, prox_g, step_g, L)
else:
S = -X.copy()
X[:] = prox_f(X, step_f)
LX = X
Z[:] = X[:]
R = np.zeros(X.shape, dtype=X.dtype)
S += X
else:
M = len(prox_g)
dX = np.sum([step_f/step_g[i] * L[i].T.dot(L[i].dot(X) - Z[i] + U[i]) for i in range(M)], axis=0)
X[:] = prox_f(X - dX, step_f)
LX = [None] * M
R = [None] * M
S = [None] * M
for i in range(M):
LX[i], R[i], S[i] = do_the_mm(X, step_f, Z[i], U[i], prox_g[i], step_g[i], L[i])
return LX, R, S
|
Update the primal and dual variables
Note: X, Z, U are updated inline
Returns: LX, R, S
|
16,314 |
def getclientloansurl(idclient, *args, **kwargs):
getparams = []
if kwargs:
try:
if kwargs["fullDetails"] == True:
getparams.append("fullDetails=true")
else:
getparams.append("fullDetails=false")
except Exception as ex:
pass
try:
getparams.append("accountState=%s" % kwargs["accountState"])
except Exception as ex:
pass
clientidparam = "/" + idclient
url = getmambuurl(*args,**kwargs) + "clients" + clientidparam + "/loans" + ( "" if len(getparams) == 0 else "?" + "&".join(getparams) )
return url
|
Request Client loans URL.
How to use it? By default MambuLoan uses getloansurl as the urlfunc.
Override that behaviour by sending getclientloansurl (this function)
as the urlfunc to the constructor of MambuLoans (note the final 's')
and voila! you get the Loans just for a certain client.
If idclient is set, you'll get a response adequate for a
MambuLoans object.
If not set, you'll get a Jar Jar Binks object, or something quite
strange and useless as JarJar: a MambuError most likely, since I
haven't needed it for anything but loans of one and just
one client.
See mambuloan module and pydoc for further information.
Currently implemented filter parameters:
* accountState
See Mambu official developer documentation for further details, and
info on parameters that may be implemented here in the future.
|
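To see just the query-string assembly without a Mambu connection, the path-building part can be sketched on its own (`build_client_loans_path` is a made-up helper, and the base URL from `getmambuurl()` is left out):

```python
def build_client_loans_path(idclient, **kwargs):
    # Mirrors the parameter handling above: fullDetails and accountState only.
    getparams = []
    if "fullDetails" in kwargs:
        getparams.append("fullDetails=%s" % ("true" if kwargs["fullDetails"] else "false"))
    if "accountState" in kwargs:
        getparams.append("accountState=%s" % kwargs["accountState"])
    return "clients/" + idclient + "/loans" + ("" if not getparams else "?" + "&".join(getparams))

print(build_client_loans_path("ABC123", fullDetails=True, accountState="ACTIVE"))
# clients/ABC123/loans?fullDetails=true&accountState=ACTIVE
```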
16,315 |
def input(msg="", default="", title="Lackey Input", hidden=False):
root = tk.Tk()
input_text = tk.StringVar()
input_text.set(default)
PopupInput(root, msg, title, hidden, input_text)
root.focus_force()
root.mainloop()
return str(input_text.get())
|
Creates an input dialog with the specified message and default text.
If `hidden`, creates a password dialog instead. Returns the entered value.
|
16,316 |
def upload(self, timeout=None):
if self._vehicle._wpts_dirty:
self._vehicle._master.waypoint_clear_all_send()
start_time = time.time()
if self._vehicle._wploader.count() > 0:
self._vehicle._wp_uploaded = [False] * self._vehicle._wploader.count()
self._vehicle._master.waypoint_count_send(self._vehicle._wploader.count())
while False in self._vehicle._wp_uploaded:
if timeout and time.time() - start_time > timeout:
raise TimeoutError
time.sleep(0.1)
self._vehicle._wp_uploaded = None
self._vehicle._wpts_dirty = False
|
Call ``upload()`` after :py:func:`adding <CommandSequence.add>` or :py:func:`clearing <CommandSequence.clear>` mission commands.
After the return from ``upload()`` any writes are guaranteed to have completed (or thrown an
exception) and future reads will see their effects.
:param int timeout: The timeout for uploading the mission. No timeout if not provided or set to None.
|
16,317 |
def _add_rule(self, state, rule):
if rule.strip() == "-":
parsed_rule = None
else:
parsed_rule = rule.split()
if (len(parsed_rule) != 3 or
parsed_rule[1] not in [, , ] or
len(parsed_rule[2]) > 1):
raise SyntaxError( + rule)
if parsed_rule[0] == "":
parsed_rule[0] = self.alphabet[len(self.states[state])]
if parsed_rule[2] == "":
parsed_rule[2] = state
self.states[state].append(parsed_rule)
|
Parse rule and add it to machine (for internal use).
|
16,318 |
def clear(self):
self._closed = False
self._ready.clear()
self._connection.clear()
self.http.recreate()
|
Clears the internal state of the bot.
After this, the bot can be considered "re-opened", i.e. :meth:`.is_closed`
and :meth:`.is_ready` both return ``False`` along with the bot's internal
cache cleared.
|
16,319 |
def check(self, metainfo, datapath, progress=None):
if datapath:
self.datapath = datapath
def check_piece(filename, piece):
"Callback for new piece"
if piece != metainfo["info"]["pieces"][check_piece.piece_index:check_piece.piece_index+20]:
self.LOG.warn("Piece
check_piece.piece_index += 20
check_piece.piece_index = 0
datameta, _ = self._make_info(int(metainfo["info"]["piece length"]), progress,
[datapath] if "length" in metainfo["info"] else
(os.path.join(*([datapath] + i["path"])) for i in metainfo["info"]["files"]),
piece_callback=check_piece
)
return datameta["pieces"] == metainfo["info"]["pieces"]
|
Check piece hashes of a metafile against the given datapath.
|
16,320 |
def connect(self):
"Initiate the connection to a proxying hub"
log.info("connecting")
self._peer = connection.Peer(
None, self._dispatcher, self._addrs.popleft(),
backend.Socket(), reconnect=False)
self._peer.start()
|
Initiate the connection to a proxying hub
|
16,321 |
def fill_translation_cache(instance):
if hasattr(instance, '_translation_cache'):
return
instance._translation_cache = {}
if not instance.pk:
return
for language_code in get_language_code_list():
field_alias = get_translated_field_alias('id', language_code)
if getattr(instance, field_alias, None) is not None:
field_names = [f.attname for f in instance._meta.translation_model._meta.fields]
field_data = {}
for fname in field_names:
field_data[fname] = getattr(instance,
get_translated_field_alias(fname, language_code))
translation = instance._meta.translation_model(**field_data)
instance._translation_cache[language_code] = translation
if len(instance._translation_cache.keys()) == 0:
for translation in instance.translations.all():
instance._translation_cache[translation.language_code] = translation
|
Fill the translation cache using information received in the
instance objects as extra fields.
You can not do this in post_init because the extra fields are
assigned by QuerySet.iterator after model initialization.
|
16,322 |
def append_hdus(hdulist, srcmap_file, source_names, hpx_order):
sys.stdout.write(" Extracting %i sources from %s" % (len(source_names), srcmap_file))
try:
hdulist_in = fits.open(srcmap_file)
except IOError:
try:
hdulist_in = fits.open('%s.gz' % srcmap_file)
except IOError:
sys.stdout.write(" Missing file %s\n" % srcmap_file)
return
for source_name in source_names:
sys.stdout.write('.')
sys.stdout.flush()
if hpx_order is None:
hdulist.append(hdulist_in[source_name])
else:
try:
hpxmap = HpxMap.create_from_hdulist(hdulist_in, hdu=source_name)
except IndexError:
print(" Index error on source %s in file %s" % (source_name, srcmap_file))
continue
except KeyError:
print(" Key error on source %s in file %s" % (source_name, srcmap_file))
continue
hpxmap_out = hpxmap.ud_grade(hpx_order, preserve_counts=True)
hdulist.append(hpxmap_out.create_image_hdu(name=source_name))
sys.stdout.write("\n")
hdulist.flush()
hdulist_in.close()
|
Append HEALPix maps to a list
Parameters
----------
hdulist : list
The list being appended to
srcmap_file : str
Path to the file containing the HDUs
source_names : list of str
Names of the sources to extract from srcmap_file
hpx_order : int
Maximum order for maps
|
16,323 |
def _download_helper(url):
try:
request = urllib.request.urlopen(url)
try:
size = int(dict(request.info())['content-length'].strip())
except KeyError:
try:
size = int(dict(request.info())['Content-Length'].strip())
except KeyError:
size = 0
doc = b""
doc_size = 0
while True:
buf = request.read(1024)
if buf:
doc += buf
doc_size += len(buf)
if size != 0:
done = int(50 * doc_size / size)
sys.stdout.write("\r[%s%s]" %
('='*done, ' '*(50-done)))
sys.stdout.write(" "+str(int(float(done)/52*100))+"%")
sys.stdout.flush()
else:
break
contenttype = None
contenttype_req = None
try:
contenttype_req = dict(request.info())['content-type']
except KeyError:
try:
contenttype_req = dict(request.info())['Content-Type']
except KeyError:
return None
if 'pdf' in contenttype_req:
contenttype = 'pdf'
elif 'djvu' in contenttype_req:
contenttype = 'djvu'
if request.getcode() != 200 or contenttype is None:
return None
return (doc, contenttype)
except (urllib.error.URLError, socket.error, ValueError):
return None
|
Handle the download of an URL, using the proxy currently set in \
:mod:`socks`.
:param url: The URL to download.
:returns: A tuple of the raw content of the downloaded data and its \
associated content-type. Returns None if it was \
unable to download the document.
|
16,324 |
def predict_proba(self, dataframe):
ret = numpy.ones((dataframe.shape[0], 2))
ret[:, 0] = (1 - self.mean)
ret[:, 1] = self.mean
return ret
|
Predict probabilities using the model
:param dataframe: Dataframe against which to make predictions
|
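The same constant-probability baseline can be checked standalone; `mean = 0.3` and the toy dataframe below are invented for the demo:

```python
import numpy
import pandas as pd

# Every row gets the same [1 - mean, mean] class probabilities.
mean = 0.3
dataframe = pd.DataFrame({"x": [10, 20, 30]})

ret = numpy.ones((dataframe.shape[0], 2))
ret[:, 0] = 1 - mean
ret[:, 1] = mean
print(ret)
# [[0.7 0.3]
#  [0.7 0.3]
#  [0.7 0.3]]
```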
16,325 |
def load_data(flist, drop_duplicates=False):
if (len(flist['train'])==0) or (len(flist['target'])==0) or (len(flist['test'])==0):
raise Exception('train, target, and test must each have at least one file')
X_train = pd.DataFrame()
test = pd.DataFrame()
print 'Reading train data'
for i in flist['train']:
X_train = pd.concat([X_train, paratext.load_csv_to_pandas(PATH+i, allow_quoted_newlines=True)],axis=1)
print 'train dataset is created'
print 'Reading target data'
y_train = paratext.load_csv_to_pandas(PATH+flist['target'][0], allow_quoted_newlines=True)['target']
print 'Reading test data'
for i in flist['test']:
test = pd.concat([test, paratext.load_csv_to_pandas(PATH+i, allow_quoted_newlines=True)],axis=1)
assert( (False in X_train.columns == test.columns) == False)
print 'train shape :{}'.format(X_train.shape)
if drop_duplicates == True:
unique_col = X_train.T.drop_duplicates().T.columns
X_train = X_train[unique_col]
test = test[unique_col]
assert( all(X_train.columns == test.columns))
print 'train shape after dropping duplicates :{}'.format(X_train.shape)
return X_train, y_train, test
|
Usage: set train, target, and test key and feature files.
FEATURE_LIST_stage2 = {
'train':(
TEMP_PATH + 'v1_stage1_all_fold.csv',
TEMP_PATH + 'v2_stage1_all_fold.csv',
TEMP_PATH + 'v3_stage1_all_fold.csv',
),#target is not in 'train'
'target':(
INPUT_PATH + 'target.csv',
),#target is in 'target'
'test':(
TEMP_PATH + 'v1_stage1_test.csv',
TEMP_PATH + 'v2_stage1_test.csv',
TEMP_PATH + 'v3_stage1_test.csv',
),
}
|
16,326 |
def set_cache_buster(self, path, hash):
oz.aws_cdn.set_cache_buster(self.redis(), path, hash)
|
Sets the cache buster value for a given file path
|
16,327 |
def register(self, request, **cleaned_data):
if Site._meta.installed:
site = Site.objects.get_current()
else:
site = RequestSite(request)
create_user = RegistrationProfile.objects.create_inactive_user
new_user = create_user(
cleaned_data['username'],
cleaned_data['email'],
cleaned_data['password1'],
site,
send_email=False
)
new_user.first_name = cleaned_data['first_name']
new_user.last_name = cleaned_data['last_name']
new_user.save()
user_info = UserInfo(
user=new_user,
company=cleaned_data['company'],
function=cleaned_data['function'],
address=cleaned_data['address'],
postal_code=cleaned_data['postal_code'],
city=cleaned_data['city'],
country=cleaned_data['country'],
phone=cleaned_data['phone'],
)
user_info.save()
send_activation_email(new_user, site, user_info)
send_activation_pending_email(new_user, site, user_info)
signals.user_registered.send(sender=self.__class__, user=new_user, request=request)
return new_user
|
Given a username, email address and password, register a new
user account, which will initially be inactive.
Along with the new ``User`` object, a new
``registration.models.RegistrationProfile`` will be created,
tied to that ``User``, containing the activation key which
will be used for this account.
Two emails will be sent. First one to the admin; this email should
contain an activation link and a resume of the new user infos.
Second one, to the user, for inform him that his request is pending.
After the ``User`` and ``RegistrationProfile`` are created and
the activation email is sent, the signal
``registration.signals.user_registered`` will be sent, with
the new ``User`` as the keyword argument ``user`` and the
class of this backend as the sender.
|
16,328 |
def compute_hardwired_weights(rho,N_E,N_I,periodic, onlyI=False):
weight_sizes = np.asarray([[N_I,N_E], [N_I,N_E], [N_E,N_I], [N_E,N_I], [N_I,N_I]])
gamma_param = np.asarray([N_I/N_E, N_I/N_E, N_E/N_I, N_E/N_I, N_I/N_I])
eta_param = np.asarray([1.5*21, 1.5*21, 8, 8, 24])
epsilon_param = np.asarray([0, 0, 0, 0, 1])
sigma_param = np.asarray([2, 2, 5, 5, 3])
Delta_param = np.asarray([-2, 2, 8, -8, 3])
mu_param = np.asarray([0, 0, -1, 1, 0])
delta_param = np.asarray([0, 0, 3, 3, 3])
for k in [4,3,2,1,0]:
N_1 = weight_sizes[k][0]
N_2 = weight_sizes[k][1]
A_1 = create_envelope(periodic,N_1)[0]
A_2 = create_envelope(periodic,N_2)[0]
G = np.zeros((N_1, N_2))
for i in range(N_1):
for j in range(N_2):
x = i - gamma_param[k]*j
c_left = min(N_1 - np.abs(np.mod(x - Delta_param[k], N_1)), np.abs(np.mod(x - Delta_param[k], N_1)))
c_right = min(N_1 - np.abs(np.mod(x + Delta_param[k], N_1)), np.abs(np.mod(x + Delta_param[k], N_1)))
c_0 = min(N_1 - np.abs(np.mod(x, N_1)), np.abs(np.mod(x, N_1)))
G[i, j] = eta_param[k]/rho*A_1[i]*A_2[j]*((c_0-delta_param[k]*rho) >= 0)*(((-mu_param[k]*x) >= 0)*((mu_param[k]*(x+mu_param[k]*N_1/2)) >= 0) +
((mu_param[k]*(x-mu_param[k]*N_1/2)) >= 0))*(np.exp(-c_left**2/(2*(sigma_param[k]*rho)**2)) +
epsilon_param[k]*np.exp(-c_right**2/(2*(sigma_param[k]*rho)**2)))
if k==0:
G_I_EL = G
elif k==1:
G_I_ER = G
elif k==2:
G_EL_I = G
elif k==3:
G_ER_I = G
else:
G_I_I = G
if onlyI:
return G_I_I, G_I_I, G_I_I, G_I_I, G_I_I
return G_I_EL, G_I_ER, G_EL_I, G_ER_I, G_I_I
|
%This function returns the synaptic weight matrices
%(G_I_EL,G_I_ER,G_EL_I,G_ER_I,G_I_I) and the suppressive envelope
%(A_env), based on:
%
% - the scale of the synaptic profiles (rho)
% - the size of the exctitatory and inhibitory pops (N_E, N_I)
% - the boundary conditions of the network (periodic=1 for periodic b.c.s; periodic = 0 for aperiodic b.c.s)
%The parameters below are arranged according to the following order of
%synaptic weights: EL->I, ER->I, I->EL, I->ER, I->I
% (see Supplementary Methods of PNAS paper for description of params below)
It was ported directly from code provided by Ila Fiete.
|
16,329 |
def compare_names(first, second):
first = name_to_vector(first)
second = name_to_vector(second)
zipped = zip(first, second)
if not zipped:
return 0
similarity_factor = 0
for fitem, _ in zipped:
if fitem in second:
similarity_factor += 1
return (float(similarity_factor) / len(zipped)) * 100
|
Compare two names in complicated, but more error prone way.
Algorithm is using vector comparison.
Example:
>>> compare_names("Franta Putšálek", "ing. Franta Putšálek")
100.0
>>> compare_names("F. Putšálek", "ing. Franta Putšálek")
50.0
Args:
first (str): First name as string.
second (str): Second name as string.
Returns:
float: Percentage of the similarity.
|
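`name_to_vector()` is not shown in this entry; the stand-in below (lower-case, keep alphabetic tokens, sort) is an assumption that reproduces the documented examples, with the Python 2 `zip` usage adapted to a list:

```python
def name_to_vector(name):
    # Assumed tokenizer: lower-case, strip punctuation, sorted unique tokens.
    tokens = "".join(c if c.isalpha() else " " for c in name.lower()).split()
    return sorted(set(tokens))

def compare_names_py3(first, second):
    first, second = name_to_vector(first), name_to_vector(second)
    zipped = list(zip(first, second))
    if not zipped:
        return 0
    hits = sum(1 for fitem, _ in zipped if fitem in second)
    return float(hits) / len(zipped) * 100

print(compare_names_py3("Franta Putšálek", "ing. Franta Putšálek"))   # 100.0
print(compare_names_py3("F. Putšálek", "ing. Franta Putšálek"))       # 50.0
```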
16,330 |
def transform(self, X=None, y=None):
rotation = random.gauss(self.rotation_range[0], self.rotation_range[1])
self.params = rotation
tx = Rotate2D(rotation,
reference=self.reference,
lazy=self.lazy)
return tx.transform(X,y)
|
Transform an image using an Affine transform with
rotation parameters randomly generated from the user-specified
range. Return the transform if X=None.
Arguments
---------
X : ANTsImage
Image to transform
y : ANTsImage (optional)
Another image to transform
Returns
-------
ANTsImage if y is None, else a tuple of ANTsImage types
Examples
--------
>>> import ants
>>> img = ants.image_read(ants.get_data('r16'))
>>> tx = ants.contrib.RandomRotate2D(rotation_range=(-10,10))
>>> img2 = tx.transform(img)
|
16,331 |
def setup(app):
app.add_domain(EverettDomain)
app.add_directive('autocomponent', AutoComponentDirective)
return {
'version': __version__,
'parallel_read_safe': True,
'parallel_write_safe': True
}
|
Register domain and directive in Sphinx.
|
16,332 |
def fcoe_fcoe_map_fcoe_map_fabric_map_fcoe_map_fabric_map_name(self, **kwargs):
config = ET.Element("config")
fcoe = ET.SubElement(config, "fcoe", xmlns="urn:brocade.com:mgmt:brocade-fcoe")
fcoe_map = ET.SubElement(fcoe, "fcoe-map")
fcoe_map_name_key = ET.SubElement(fcoe_map, "fcoe-map-name")
fcoe_map_name_key.text = kwargs.pop('fcoe_map_name')
fcoe_map_fabric_map = ET.SubElement(fcoe_map, "fcoe-map-fabric-map")
fcoe_map_fabric_map_name = ET.SubElement(fcoe_map_fabric_map, "fcoe-map-fabric-map-name")
fcoe_map_fabric_map_name.text = kwargs.pop('fcoe_map_fabric_map_name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
16,333 |
def host_resolution_order(ifo, env='NDSSERVER', epoch='now',
lookback=14*86400):
hosts = []
if env and os.getenv(env):
hosts = parse_nds_env(env)
if to_gps('now') - to_gps(epoch) > lookback:
ifolist = [None, ifo]
else:
ifolist = [ifo, None]
for difo in ifolist:
try:
host, port = DEFAULT_HOSTS[difo]
except KeyError:
continue
else:
if (host, port) not in hosts:
hosts.append((host, port))
return list(hosts)
|
Generate a logical ordering of NDS (host, port) tuples for this IFO
Parameters
----------
ifo : `str`
prefix for IFO of interest
env : `str`, optional
environment variable name to use for server order,
default ``'NDSSERVER'``. The contents of this variable should
be a comma-separated list of `host:port` strings, e.g.
``'nds1.server.com:80,nds2.server.com:80'``
epoch : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS epoch of data requested
lookback : `float`
duration of spinning-disk cache. This value triggers defaulting to
the CIT NDS2 server over those at the LIGO sites
Returns
-------
hro : `list` of `2-tuples <tuple>`
ordered `list` of ``(host, port)`` tuples
|
16,334 |
def to_string(xml, **kwargs):
if isinstance(xml, OneLogin_Saml2_XML._text_class):
return xml
if isinstance(xml, OneLogin_Saml2_XML._element_class):
OneLogin_Saml2_XML.cleanup_namespaces(xml)
return OneLogin_Saml2_XML._unparse_etree(xml, **kwargs)
raise ValueError("unsupported type %r" % type(xml))
|
Serialize an element to an encoded string representation of its XML tree.
:param xml: The root node
:type xml: str|bytes|xml.dom.minidom.Document|etree.Element
:returns: string representation of xml
:rtype: string
|
16,335 |
def interval_timer(interval, func, *args, **kwargs):
stopped = Event()
def loop():
while not stopped.wait(interval):
func(*args, **kwargs)
Thread(name='interval_timer', target=loop).start()
return stopped.set
|
Interval timer function.
Taken from: http://stackoverflow.com/questions/22498038/improvement-on-interval-python/22498708
|
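A quick usage sketch of the timer above; `daemon=True` and the concrete thread name are additions here so the demo exits cleanly, they are not part of the original snippet:

```python
import time
from threading import Event, Thread

def interval_timer(interval, func, *args, **kwargs):
    stopped = Event()
    def loop():
        while not stopped.wait(interval):
            func(*args, **kwargs)
    Thread(name="interval_timer", target=loop, daemon=True).start()
    return stopped.set

cancel = interval_timer(0.5, print, "tick")   # prints "tick" every 0.5 s
time.sleep(2)
cancel()                                      # the returned stopped.set stops the loop
```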
16,336 |
def _pack_with_tf_ops(dataset, keys, length):
empty_example = {}
for k in keys:
empty_example[k] = tf.zeros([0], dtype=tf.int32)
empty_example[k + "_position"] = tf.zeros([0], dtype=tf.int32)
keys_etc = empty_example.keys()
def write_packed_example(partial, outputs):
new_partial = empty_example.copy()
new_outputs = {}
for k in keys_etc:
new_outputs[k] = outputs[k].write(
outputs[k].size(),
tf.pad(partial[k], [[0, length - tf.size(partial[k])]]))
return new_partial, new_outputs
def map_fn(x):
partial = empty_example.copy()
i = tf.zeros([], dtype=tf.int32)
dynamic_batch_size = tf.shape(x[keys[0]])[0]
outputs = {}
for k in keys:
outputs[k] = tf.TensorArray(
tf.int32, size=0, dynamic_size=True, element_shape=[length])
outputs[k + "_position"] = tf.TensorArray(
tf.int32, size=0, dynamic_size=True, element_shape=[length])
def cond_fn(i, partial, outputs):
del partial, outputs
return i < dynamic_batch_size
def body_fn(i, partial, outputs):
can_append = True
one_example = {}
for k in keys:
val = tf.cast(x[k][i], tf.int32)
val = val[:tf.reduce_sum(tf.cast(tf.not_equal(val, 0), tf.int32))]
one_example[k] = val
for k in keys:
can_append = tf.logical_and(
can_append,
tf.less_equal(
tf.size(partial[k]) + tf.size(one_example[k]), length))
def false_fn():
return write_packed_example(partial, outputs)
def true_fn():
return partial, outputs
partial, outputs = tf.cond(can_append, true_fn, false_fn)
new_partial = {}
for k in keys:
new_seq = one_example[k][:length]
new_seq_len = tf.size(new_seq)
new_partial[k] = tf.concat([partial[k], new_seq], 0)
new_partial[k + "_position"] = tf.concat(
[partial[k + "_position"],
tf.range(new_seq_len, dtype=tf.int32)], 0)
partial = new_partial
return i+1, partial, outputs
i, partial, outputs = tf.while_loop(
cond_fn, body_fn, (i, partial, outputs),
back_prop=False,
shape_invariants=(
tf.TensorShape([]),
{k: tf.TensorShape([None]) for k in keys_etc},
{k: tf.TensorShape(None) for k in keys_etc},
))
partial, outputs = write_packed_example(partial, outputs)
packed = {k: outputs[k].stack() for k in keys_etc}
for k in keys:
packed[k + "_segmentation"] = (
tf.cumsum(
tf.cast(tf.equal(packed[k + "_position"], 0), tf.int32), axis=1) *
tf.cast(tf.not_equal(packed[k], 0), tf.int32))
return packed
dataset = dataset.map(map_fn,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset.flat_map(tf.data.Dataset.from_tensor_slices)
|
Helper-function for packing a dataset which has already been batched.
See pack_dataset()
Uses tf.while_loop. Slow.
Args:
dataset: a dataset containing padded batches of examples.
keys: a list of strings
length: an integer
Returns:
a dataset.
|
16,337 |
def unpackcFunc(self):
self.cFunc = []
for solution_t in self.solution:
self.cFunc.append(solution_t.cFunc)
self.addToTimeVary('cFunc')
|
"Unpacks" the consumption functions into their own field for easier access.
After the model has been solved, the consumption functions reside in the
attribute cFunc of each element of ConsumerType.solution. This method
creates a (time varying) attribute cFunc that contains a list of consumption
functions.
Parameters
----------
none
Returns
-------
none
|
16,338 |
def __geo_point(lat, lon, elev):
logger_noaa_lpd.info("enter geo_point")
coordinates = []
geo_dict = OrderedDict()
geometry_dict = OrderedDict()
for index, point in enumerate(lat):
coordinates.append(lat[index])
coordinates.append(lon[index])
if elev:
coordinates = coordinates + elev
geometry_dict['type'] = 'Point'
geometry_dict['coordinates'] = coordinates
geo_dict['type'] = 'Feature'
geo_dict['geometry'] = geometry_dict
return geo_dict
|
GeoJSON standard:
Create a geoJson Point-type dictionary
:param list lat:
:param list lon:
:return dict:
|
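A self-contained sketch of the structure this builds; the key names follow the GeoJSON convention assumed in the restored literals above, and the lat/lon/elev values are invented (note the function appends latitude before longitude, which is kept here as-is):

```python
from collections import OrderedDict

# Toy inputs: one coordinate pair plus an elevation value.
lat, lon, elev = [45.0], [7.5], [250]

coordinates = []
for index, _ in enumerate(lat):
    coordinates.append(lat[index])
    coordinates.append(lon[index])
if elev:
    coordinates = coordinates + elev

geometry = OrderedDict([("type", "Point"), ("coordinates", coordinates)])
feature = OrderedDict([("type", "Feature"), ("geometry", geometry)])
print(feature)
# OrderedDict([('type', 'Feature'), ('geometry', OrderedDict([('type', 'Point'),
#              ('coordinates', [45.0, 7.5, 250])]))])
```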
16,339 |
def handle_signal(self, sig, frame):
if sig in [signal.SIGINT]:
log.warning("Ctrl-C pressed, shutting down...")
if sig in [signal.SIGTERM]:
log.warning("SIGTERM received, shutting down...")
self.cleanup()
sys.exit(-sig)
|
Handles signals, surprisingly.
|
16,340 |
def write_to_file(self, file_path=, date=str(datetime.date.today()),
organization=, members=0, teams=0):
self.checkDir(file_path)
with open(file_path, ) as output:
output.write(
+
+
+ + date +
+ organization + + str(members) + + str(teams) +
+ str(len(self.unique_contributors)) + )
for repo in self.all_repos:
output.write( + repo.name + + str(repo.contributors)
+ + str(repo.forks) +
+ str(repo.stargazers) + + str(repo.pull_requests) +
+ str(repo.open_issues) + + str(repo.readme) +
+ str(repo.license) + + .join(sorted(repo.languages))
+ + str(repo.pull_requests_open) +
+ str(repo.pull_requests_closed) + + str(repo.commits)
+ + str(repo.closed_issues) + + str(repo.issues)
+ )
output.write( + str(self.total_repos) +
+ str(self.total_contributors) +
+ str(self.total_forks) + + str(self.total_stars) +
+ str(self.total_pull_reqs) + + str(self.total_open_issues)
+ + str(self.total_readmes) + + str(self.total_licenses)
+ + str(self.total_pull_reqs_open) +
+ str(self.total_pull_reqs_closed) +
+ str(self.total_commits) + + str(self.total_closed_issues)
+ + str(self.total_issues))
output.close()
self.write_totals(file_path="../github_stats_output/total.csv", date=date,
organization=organization, members=members, teams=teams)
self.write_languages(file_path=,
date=date)
|
Writes the current organization information to file (csv).
|
16,341 |
def always_fails(
self,
work_dict):
label = "always_fails"
log.info(("task - {} - start "
"work_dict={}")
.format(label,
work_dict))
raise Exception(
work_dict.get(
"test_failure",
"simulating a failure"))
log.info(("task - {} - done")
.format(label))
return True
|
always_fails
:param work_dict: dictionary for key/values
|
16,342 |
def clone(local_root, new_root, remote, branch, rel_dest, exclude):
log = logging.getLogger(__name__)
output = run_command(local_root, ['git', 'remote', '-v'])
remotes = dict()
for match in RE_ALL_REMOTES.findall(output):
remotes.setdefault(match[0], [None, None])
if match[2] == 'fetch':
remotes[match[0]][0] = match[1]
else:
remotes[match[0]][1] = match[1]
if not remotes:
raise GitError('Git repo has no remotes.', output)
if remote not in remotes:
raise GitError('Git repo missing remote "{}".'.format(remote), output)
try:
run_command(new_root, ['git', 'clone', remotes[remote][0], '--depth=1', '--branch', branch, '.'])
except CalledProcessError as exc:
raise GitError('Failed to clone from remote repo URL.', exc.output)
|
Clone "local_root" origin into a new directory and check out a specific branch. Optionally run "git rm".
:raise CalledProcessError: Unhandled git command failure.
:raise GitError: Handled git failures.
:param str local_root: Local path to git root directory.
:param str new_root: Local path empty directory in which branch will be cloned into.
:param str remote: The git remote to clone from.
:param str branch: Checkout this branch.
:param str rel_dest: Run "git rm" on this directory if exclude is truthy.
:param iter exclude: List of strings representing relative file paths to exclude from "git rm".
|
16,343 |
def default():
return dict(
max_name_length=64,
max_prop_count=32,
max_str_length=100,
max_byte_length=100,
max_array_length=100,
max_file_length=200,
minimal_property=False,
minimal_parameter=False,
files=[],
object_template={},
parameter_template={},
max_property=False,
max_parameter=False,
)
|
return default options, available options:
- max_name_length: maximum length of name for additionalProperties
- max_prop_count: maximum count of properties (count of fixed properties + additional properties)
- max_str_length: maximum length of string type
- max_byte_length: maximum length of byte type
- max_array_length: maximum length of array
- max_file_length: maximum length of file, in byte
- minimal_property: only generate 'required' properties
- minimal_parameter: only generate 'required' parameter
- files: registered file object: refer to pyswagger.primitives.File for details
- object_template: dict of default values assigned for properties when 'name' matched
- parameter_template: dict of default values assigned for parameters when 'name' matched
- max_property: all properties are generated, ignore 'required'
- max_parameter: all parameters are generated, ignore 'required'
:return: options
:rtype: dict
|
16,344 |
def install(path, capture_error=False):
cmd = '%s -m pip install -U . ' % _process.python_executable()
if has_requirements(path):
cmd += '-r requirements.txt'
logger.info('Installing module with the following command:\n%s', cmd)
_process.check_error(shlex.split(cmd), _errors.InstallModuleError, cwd=path, capture_error=capture_error)
|
Install a Python module in the executing Python environment.
Args:
path (str): Real path location of the Python module.
capture_error (bool): Default false. If True, the running process captures the
stderr, and appends it to the returned Exception message in case of errors.
|
16,345 |
def subnet_distance(self):
return [(Element.from_href(entry.get('subnet')), entry.get('distance'))
for entry in self.data.get('subnet_distance', [])]
|
Specific subnet administrative distances
:return: list of tuple (subnet, distance)
|
16,346 |
def is_complete(self, zmax=118):
for z in range(1, zmax):
if not self[z]: return False
return True
|
True if table is complete i.e. all elements with Z < zmax have at least one pseudopotential
|
16,347 |
def schema_delete_field(cls, key):
root = '/'.join([API_ROOT, 'schemas', cls.__name__])
payload = {
'className': cls.__name__,
'fields': {
key: {
'__op': 'Delete'
}
}
}
cls.PUT(root, **payload)
|
Deletes a field.
|
16,348 |
def _authenticate_x509(credentials, sock_info):
query = SON([('authenticate', 1),
('mechanism', 'MONGODB-X509')])
if credentials.username is not None:
query['user'] = credentials.username
elif sock_info.max_wire_version < 5:
raise ConfigurationError(
"A username is required for MONGODB-X509 authentication "
"when connected to MongoDB versions older than 3.4.")
sock_info.command('$external', query)
|
Authenticate using MONGODB-X509.
|
16,349 |
def write(text, path):
with open(path, "wb") as f:
f.write(text.encode("utf-8"))
|
Write text to file with utf-8 encoding.
Usage::
>>> from angora.dataIO import textfile
or
>>> from angora.dataIO import *
>>> textfile.write("hello world!", "test.txt")
|
16,350 |
def generate_token(self):
response = self._make_request()
self.auth = response
self.token = response['token']
|
Make request in API to generate a token.
|
16,351 |
def from_hex_key(cls, key, network=BitcoinMainNet):
if len(key) == 130 or len(key) == 66:
try:
key = unhexlify(key)
except TypeError:
pass
key = ensure_bytes(key)
compressed = False
id_byte = key[0]
if not isinstance(id_byte, six.integer_types):
id_byte = ord(id_byte)
if id_byte == 4:
if len(key) != 65:
raise KeyParseError("Invalid key length")
public_pair = PublicPair(
long_or_int(hexlify(key[1:33]), 16),
long_or_int(hexlify(key[33:]), 16))
elif id_byte in [2, 3]:
compressed = True
if len(key) != 33:
raise KeyParseError("Invalid key length")
y_odd = bool(id_byte & 0x01)
x = long_or_int(hexlify(key[1:]), 16)
curve = SECP256k1.curve
p = curve.p()
alpha = (pow(x, 3, p) + curve.a() * x + curve.b()) % p
beta = square_root_mod_prime(alpha, p)
y_even = not y_odd
if y_even == bool(beta & 1):
public_pair = PublicPair(x, p - beta)
else:
public_pair = PublicPair(x, beta)
else:
raise KeyParseError("The given key is not in a known format.")
return cls.from_public_pair(public_pair, network=network,
compressed=compressed)
|
Load the PublicKey from a compressed or uncompressed hex key.
This format is defined in PublicKey.get_key()
|
16,352 |
def get_directory(request):
def get_url(url):
return reverse(url, request=request) if url else url
def is_active_url(path, url):
return path.startswith(url) if url and path else False
path = request.path
directory_list = []
def sort_key(r):
return r[0]
for group_name, endpoints in sorted(
six.iteritems(directory),
key=sort_key
):
endpoints_list = []
for endpoint_name, endpoint in sorted(
six.iteritems(endpoints),
key=sort_key
):
if endpoint_name[:1] == '_':
continue
endpoint_url = get_url(endpoint.get('_url', None))
active = is_active_url(path, endpoint_url)
endpoints_list.append(
(endpoint_name, endpoint_url, [], active)
)
url = get_url(endpoints.get('_url', None))
active = is_active_url(path, url)
directory_list.append(
(group_name, url, endpoints_list, active)
)
return directory_list
|
Get API directory as a nested list of lists.
|
16,353 |
def compute_dkl(fsamps, prior_fsamps, **kwargs):
parallel = kwargs.pop('parallel', False)
cache = kwargs.pop('cache', '')
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
if kwargs:
raise TypeError('Unexpected **kwargs: %r' % kwargs)
if cache:
cache = Cache(cache + '_dkl')
try:
return cache.check(fsamps, prior_fsamps)
except CacheException as e:
print(e)
zip_fsamps = list(zip(fsamps, prior_fsamps))
dkls = parallel_apply(DKL, zip_fsamps, parallel=parallel,
tqdm_kwargs=tqdm_kwargs)
dkls = numpy.array(dkls)
if cache:
cache.save(fsamps, prior_fsamps, dkls)
return dkls
|
Compute the Kullback Leibler divergence for function samples for posterior
and prior pre-calculated at a range of x values.
Parameters
----------
fsamps: 2D numpy.array
Posterior function samples, as computed by
:func:`fgivenx.compute_samples`
prior_fsamps: 2D numpy.array
Prior function samples, as computed by :func:`fgivenx.compute_samples`
parallel, tqdm_kwargs: optional
see docstring for :func:`fgivenx.parallel.parallel_apply`.
cache: str, optional
File root for saving previous calculations for re-use.
Returns
-------
1D numpy.array:
Kullback-Leibler divergences at each value of x. `shape=(len(fsamps))`
|
16,354 |
def compute_score(markers, bonus, penalty):
nmarkers = len(markers)
s = [bonus] * nmarkers
f = [-1] * nmarkers
for i in xrange(1, nmarkers):
for j in xrange(i):
mi, mj = markers[i], markers[j]
t = bonus if mi.mlg == mj.mlg else penalty + bonus
if s[i] < s[j] + t:
s[i] = s[j] + t
f[i] = j
highest_score = max(s)
si = s.index(highest_score)
onchain = set()
while True:
if si < 0:
break
onchain.add(si)
si = f[si]
return [x for i, x in enumerate(markers) if i in onchain]
|
Compute chain score using dynamic programming. If a marker is in the same
linkage group as a previous one, we add bonus; otherwise, we penalize the
chain switching.
|
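The scoring recursion can be exercised on its own; the `Marker` tuple, the marker list and the bonus/penalty values below are toy inputs, and only the forward scoring pass is shown, not the trace-back:

```python
from collections import namedtuple

Marker = namedtuple("Marker", "name mlg")

# Toy markers: a run on linkage group "A" interrupted by one "B" marker.
markers = [Marker("m1", "A"), Marker("m2", "A"), Marker("m3", "B"), Marker("m4", "A")]
bonus, penalty = 1, -3

n = len(markers)
s = [bonus] * n          # best chain score ending at marker i
f = [-1] * n             # back-pointer to the previous marker on that chain
for i in range(1, n):
    for j in range(i):
        t = bonus if markers[i].mlg == markers[j].mlg else penalty + bonus
        if s[i] < s[j] + t:
            s[i], f[i] = s[j] + t, j

print(s)   # [1, 2, 1, 3]: chaining through the "A" markers wins
```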
16,355 |
def forProperty(instance,propertyName,useGetter=False):
assert isinstance(propertyName,str)
if propertyName.startswith("get") or propertyName.startswith("set"):
getterName = "get" + propertyName[3:]
setterName = "set" + propertyName[3:]
if len(propertyName[3:]) > 1:
signalName = propertyName[3].lower() + propertyName[4:] + "Changed"
else:
signalName = propertyName.lower() + "Changed"
assert hasattr(instance,getterName)
assert hasattr(instance,setterName)
assert hasattr(instance,signalName)
getter = getattr(instance,getterName)
setter = getattr(instance,setterName)
signal = getattr(instance,signalName)
elif hasattr(instance, propertyName) and callable(getattr(instance,propertyName)):
getterName = propertyName
setterName = "set" + propertyName.capitalize()
signalName = propertyName + "Changed"
assert hasattr(instance,setterName)
assert hasattr(instance,signalName)
getter = getattr(instance,getterName)
setter = getattr(instance,setterName)
signal = getattr(instance,signalName)
elif hasattr(instance, propertyName):
signalName = propertyName + "Changed"
assert hasattr(instance,signalName)
getter = lambda: getattr(instance,propertyName)
setter = lambda value: setattr(instance,propertyName,value)
signal = getattr(instance,signalName)
else:
if len(propertyName) > 1:
getterName = "get" + propertyName[0].upper() + propertyName[1:]
setterName = "set" + propertyName[0].upper() + propertyName[1:]
signalName = propertyName + "Changed"
else:
getterName = "get" + propertyName.upper()
setterName = "set" + propertyName.upper()
signalName = propertyName.lower() + "Changed"
assert hasattr(instance,getterName)
assert hasattr(instance,setterName)
assert hasattr(instance,signalName)
getter = getattr(instance,getterName)
setter = getattr(instance,setterName)
signal = getattr(instance,signalName)
return BindingEndpoint(instance, setter, signal, getter = getter if useGetter else None)
|
2-way binds to an instance property.
Parameters:
- instance -- the object instance
- propertyName -- the name of the property to bind to
- useGetter: when True, calls the getter method to obtain the value. When False, the signal argument is used as input for the target setter. (default False)
Notes:
2-way binds to an instance property according to one of the following naming conventions:
@property, propertyName.setter and pyqtSignal
- getter: propertyName
- setter: propertyName
- changedSignal: propertyNameChanged
getter, setter and pyqtSignal (this is used when binding to standard QWidgets like QSpinBox)
- getter: propertyName()
- setter: setPropertyName()
- changedSignal: propertyNameChanged
|
16,356 |
def set_inheritance(obj_name, enabled, obj_type='file', clear=False):
if obj_type not in ['file', 'registry', 'registry32']:
raise SaltInvocationError(
'obj_type called with incorrect parameter: {0}'.format(obj_name))
if clear:
obj_dacl = dacl(obj_type=obj_type)
else:
obj_dacl = dacl(obj_name, obj_type)
return obj_dacl.save(obj_name, not enabled)
|
Enable or disable an object's inheritance.
Args:
obj_name (str):
The name of the object
enabled (bool):
True to enable inheritance, False to disable
obj_type (Optional[str]):
The type of object. Only three objects allow inheritance. Valid
objects are:
- file (default): This is a file or directory
- registry
- registry32 (for WOW64)
clear (Optional[bool]):
True to clear existing ACEs, False to keep existing ACEs.
Default is False
Returns:
bool: True if successful, otherwise an Error
Usage:
.. code-block:: python
salt.utils.win_dacl.set_inheritance('C:\\Temp', False)
|
16,357 |
def _import_public_names(module):
"Import public names from module into this module, like import *"
self = sys.modules[__name__]
for name in module.__all__:
if hasattr(self, name):
continue
setattr(self, name, getattr(module, name))
|
Import public names from module into this module, like import *
|
16,358 |
def update(self, campaign_id, title, is_smooth, online_status, nick=None):
request = TOPRequest()
request['campaign_id'] = campaign_id
request['title'] = title
request['is_smooth'] = is_smooth
request['online_status'] = online_status
if nick!=None: request['nick'] = nick
self.create(self.execute(request), fields=[,,,,], models={:Campaign})
return self.result
|
xxxxx.xxxxx.campaign.update
===================================
Update a promotion campaign: you can set the campaign name and whether budget consumption is smoothed; smoothing only takes effect after a daily budget limit has been set.
|
16,359 |
def _warning(code):
if isinstance(code, str):
return code
message = ''
if isinstance(code, tuple):
if isinstance(code[0], str):
message = code[1]
code = code[0]
return CFG_BIBRECORD_WARNING_MSGS.get(code, '') + message
|
Return a warning message of code 'code'.
If code = (cd, str) it returns the warning message of code 'cd' and appends
str at the end
|
16,360 |
def expect(
self, re_strings='', timeout=None, output_callback=None, default_match_prefix='.*\n',
strip_ansi=True
):
output_callback = output_callback if output_callback else self.output_callback
timeout = timeout if timeout else self.timeout
self.channel.settimeout(timeout)
self.current_output = ''
self.current_send_string = ''
if len(re_strings) != 0 and len(found_pattern) != 0:
self.current_output_clean = (
re.sub(
found_pattern[0][1] + '.*$', '', self.current_output_clean
)
)
self.last_match = found_pattern[0][1]
return found_pattern[0][0]
else:
return -1
|
This function takes in a regular expression (or regular expressions)
that represent the last line of output from the server. The function
waits for one or more of the terms to be matched. The regexes are
matched using expression \n<regex>$ so you'll need to provide an
easygoing regex such as '.*server.*' if you wish to have a fuzzy
match.
:param re_strings: Either a regex string or list of regex strings
that we should expect; if this is not specified,
then EOF is expected (i.e. the shell is completely
closed after the exit command is issued)
:param timeout: Timeout in seconds. If this timeout is exceeded,
then an exception is raised.
:param output_callback: A function used to print ssh output. Printed to stdout
by default. A user-defined logger may be passed like
output_callback=lambda m: mylog.debug(m)
:param default_match_prefix: A prefix to all match regexes, defaults to '.*\n',
can set to '' on cases prompt is the first line,
or the command has no output.
:param strip_ansi: If True, will strip ansi control chars befores regex matching
default to True.
:return: An EOF returns -1, a regex metch returns 0 and a match in a
list of regexes returns the index of the matched string in
the list.
:raises: A socket.timeout exception is raised on timeout.
|
16,361 |
def launch_minecraft(port, installdir="MalmoPlatform", replaceable=False):
launch_script = './launchClient.sh'
if os.name == 'nt':
launch_script = 'launchClient.bat'
cwd = os.getcwd()
os.chdir(installdir)
os.chdir("Minecraft")
try:
cmd = [launch_script, '-port', str(port), '-env']
if replaceable:
cmd.append('-replaceable')
subprocess.check_call(cmd)
finally:
os.chdir(cwd)
|
Launch Minecraft listening for malmoenv connections.
Args:
port: the TCP port to listen on.
installdir: the install dir name. Defaults to MalmoPlatform.
Must be same as given (or defaulted) in download call if used.
replaceable: whether or not to automatically restart Minecraft (default is false).
|
16,362 |
def alive(opts):
dev = conn()
thisproxy['conn'].connected = ping()
if not dev.connected:
__salt__['event.fire_master']({}, 'junos/proxy/{0}/stop'.format(
opts['proxy']['host']))
return dev.connected
|
Validate and return the connection status with the remote device.
.. versionadded:: 2018.3.0
|
16,363 |
def select_catalogue(self, selector, distance, selector_type='circle',
distance_metric='epicentral', point_depth=None,
upper_eq_depth=None, lower_eq_depth=None):
if selector.catalogue.get_number_events() < 1:
raise ValueError('No events found in catalogue!')
if 'square' in selector_type:
self.select_catalogue_within_cell(selector,
distance,
upper_depth=upper_eq_depth,
lower_depth=lower_eq_depth)
elif 'circle' in selector_type:
self.select_catalogue_within_distance(selector, distance,
distance_metric, point_depth)
else:
raise ValueError('Unrecognised selector type: %s' % selector_type)
|
Selects the catalogue associated to the point source.
Effectively a wrapper to the two functions select catalogue within
a distance of the point and select catalogue within cell centred on
point
:param selector:
Populated instance of :class:
`openquake.hmtk.seismicity.selector.CatalogueSelector`
:param float distance:
Distance from point (km) for selection
:param str selector_type:
Chooses whether to select within {'circle'} or within a {'square'}.
:param str distance_metric:
'epicentral' or 'hypocentral' (only for 'circle' selector type)
:param float point_depth:
Assumed hypocentral depth of the point (only applied to 'circle'
distance type)
:param float upper_depth:
Upper seismogenic depth (km) (only for 'square')
:param float lower_depth:
Lower seismogenic depth (km) (only for 'square')
|
16,364 |
def writePlistToString(rootObject):
plistData, error = (
NSPropertyListSerialization.
dataFromPropertyList_format_errorDescription_(
rootObject, NSPropertyListXMLFormat_v1_0, None))
if plistData is None:
if error:
error = error.encode('ascii', 'ignore')
else:
error = "Unknown error"
raise NSPropertyListSerializationException(error)
else:
return str(plistData)
|
Return 'rootObject' as a plist-formatted string.
|
16,365 |
def template(page=None, layout=None, **kwargs):
pkey = "_template_extends__"
def decorator(f):
if inspect.isclass(f):
layout_ = layout or page
extends = kwargs.pop("extends", None)
if extends and hasattr(extends, pkey):
items = getattr(extends, pkey).items()
if "layout" in items:
layout_ = items.pop("layout")
for k, v in items:
kwargs.setdefault(k, v)
if not layout_:
layout_ = "layout.html"
kwargs.setdefault("brand_name", "")
kwargs["layout"] = layout_
setattr(f, pkey, kwargs)
setattr(f, "base_layout", kwargs.get("layout"))
f.g(TEMPLATE_CONTEXT=kwargs)
return f
else:
@functools.wraps(f)
def wrap(*args2, **kwargs2):
response = f(*args2, **kwargs2)
if isinstance(response, dict) or response is None:
response = response or {}
if page:
response.setdefault("template_", page)
if layout:
response.setdefault("layout_", layout)
for k, v in kwargs.items():
response.setdefault(k, v)
return response
return wrap
return decorator
|
Decorator to change the view template and layout.
It works on both View class and view methods
on class
only $layout is applied, everything else will be passed to the kwargs
Using as first argument, it will be the layout.
:first arg or $layout: The layout to use for that view
:param layout: The layout to use for that view
:param kwargs:
get pass to the TEMPLATE_CONTEXT
** on method that return a dict
page or layout are optional
:param page: The html page
:param layout: The layout to use for that view
:param kwargs:
get pass to the view as k/V
** on other methods that return other type, it doesn't apply
:return:
|
16,366 |
def _new_name(method, old_name):
def _method(*args, **kwargs):
warnings.warn(
"{} is deprecated, use {} instead".format(
old_name, method.__name__
),
DeprecationWarning,
)
return method(*args, **kwargs)
deprecated_msg = ' (deprecated, use {} instead)'.format(
method.__name__
)
if getattr(_method, "__doc__"):
_method.__doc__ += deprecated_msg
return _method
|
Return a method with a deprecation warning.
|
16,367 |
def on_loss_begin(self, last_output:Tuple[Tensor,Tensor,Tensor], **kwargs):
"Save the extra outputs for later and only returns the true output."
self.raw_out,self.out = last_output[1],last_output[2]
return {'last_output': last_output[0]}
|
Save the extra outputs for later and only returns the true output.
|
16,368 |
def serialize(self, keep_readonly=False):
serializer = Serializer(self._infer_class_models())
return serializer._serialize(self, keep_readonly=keep_readonly)
|
Return the JSON that would be sent to azure from this model.
This is an alias to `as_dict(full_restapi_key_transformer, keep_readonly=False)`.
:param bool keep_readonly: If you want to serialize the readonly attributes
:returns: A dict JSON compatible object
:rtype: dict
|
16,369 |
def is_standard(action):
boolean_actions = (
_StoreConstAction, _StoreFalseAction,
_StoreTrueAction
)
return (not action.choices
and not isinstance(action, _CountAction)
and not isinstance(action, _HelpAction)
and type(action) not in boolean_actions)
|
actions which are general "store" instructions.
e.g. anything which has an argument style like:
$ script.py -f myfilename.txt
|
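A quick self-contained check of which argparse actions count as "standard" under this rule; the parser and flags here are invented for the demo:

```python
import argparse
from argparse import (_CountAction, _HelpAction, _StoreConstAction,
                      _StoreFalseAction, _StoreTrueAction)

parser = argparse.ArgumentParser()
file_arg = parser.add_argument("-f", "--filename")                      # plain "store"
flag_arg = parser.add_argument("-v", "--verbose", action="store_true")  # boolean flag

boolean_actions = (_StoreConstAction, _StoreFalseAction, _StoreTrueAction)

def is_standard(action):
    return (not action.choices
            and not isinstance(action, (_CountAction, _HelpAction))
            and type(action) not in boolean_actions)

print(is_standard(file_arg))   # True: takes an argument like -f myfilename.txt
print(is_standard(flag_arg))   # False: store_true flags carry no argument
```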
16,370 |
def update(self):
is_on = self._device.get_power_state()
if is_on:
self._state = STATE_ON
volume = self._device.get_current_volume()
if volume is not None:
self._volume_level = float(volume) / self._max_volume
input_ = self._device.get_current_input()
if input_ is not None:
self._current_input = input_.meta_name
inputs = self._device.get_inputs()
if inputs is not None:
self._available_inputs = [input_.name for input_ in inputs]
else:
if is_on is None:
self._state = None
else:
self._state = STATE_OFF
self._volume_level = None
self._current_input = None
self._available_inputs = None
|
Retrieve latest state of the device.
|
16,371 |
def name(self):
names = self.names.split()
if len(names) == 1:
return names[0]
elif len(names) == 2:
return ' '.join(names)
else:
return ' '.join([names[0], '...', names[-1]])
|
Compact representation for the names
|
16,372 |
def get_annotated_list_qs(cls, qs):
result, info = [], {}
start_depth, prev_depth = (None, None)
for node in qs:
depth = node.get_depth()
if start_depth is None:
start_depth = depth
open = (depth and (prev_depth is None or depth > prev_depth))
if prev_depth is not None and depth < prev_depth:
info['close'] = list(range(0, prev_depth - depth))
info = {'open': open, 'close': [], 'level': depth - start_depth}
result.append((node, info,))
prev_depth = depth
if start_depth and start_depth > 0:
info['close'] = list(range(0, prev_depth - start_depth + 1))
return result
|
Gets an annotated list from a queryset.
|
16,373 |
def create_track_token(request):
from tracked_model.models import RequestInfo
request_pk = RequestInfo.create_or_get_from_request(request).pk
user_pk = None
if request.user.is_authenticated():
user_pk = request.user.pk
return TrackToken(request_pk=request_pk, user_pk=user_pk)
|
Returns ``TrackToken``.
``TrackToken`` contains the request and the user making changes.
It can be passed to ``TrackedModel.save`` instead of ``request``.
It is intended to be used when passing ``request`` is not possible
e.g. when ``TrackedModel.save`` will be called from celery task.
|
16,374 |
def simDeath(self):
how_many_die = int(round(self.AgentCount*(1.0-self.LivPrb[0])))
base_bool = np.zeros(self.AgentCount,dtype=bool)
base_bool[0:how_many_die] = True
who_dies = self.RNG.permutation(base_bool)
if self.T_age is not None:
who_dies[self.t_age >= self.T_age] = True
who_lives = np.logical_not(who_dies)
wealth_living = np.sum(self.aLvlNow[who_lives])
wealth_dead = np.sum(self.aLvlNow[who_dies])
Ractuarial = 1.0 + wealth_dead/wealth_living
self.aNrmNow[who_lives] = self.aNrmNow[who_lives]*Ractuarial
self.aLvlNow[who_lives] = self.aLvlNow[who_lives]*Ractuarial
return who_dies
|
Randomly determine which consumers die, and distribute their wealth among the survivors.
This method only works if there is only one period in the cycle.
Parameters
----------
None
Returns
-------
who_dies : np.array(bool)
Boolean array of size AgentCount indicating which agents die.
|
16,375 |
def getSubtotal(self):
if self.supplyorder_lineitems:
return sum(
[(Decimal(obj['Quantity']) * Decimal(obj['Price'])) for obj in self.supplyorder_lineitems])
return 0
|
Compute Subtotal
|
16,376 |
def create_table(
    self,
    impala_name,
    kudu_name,
    primary_keys=None,
    obj=None,
    schema=None,
    database=None,
    external=False,
    force=False,
):
    self._check_connected()

    # error message texts below are reconstructions; the original string literals were stripped
    if not external and (primary_keys is None or len(primary_keys) == 0):
        raise ValueError(
            'Primary keys must be specified for a managed (non-external) Kudu table'
        )

    if obj is not None:
        if external:
            raise ValueError(
                'Cannot create an external table from an expression or DataFrame'
            )
        if isinstance(obj, pd.DataFrame):
            from ibis.impala.pandas_interop import write_temp_dataframe

            writer, to_insert = write_temp_dataframe(
                self.impala_client, obj
            )
        else:
            to_insert = obj
        ast = self.impala_client._build_ast(to_insert)
        select = ast.queries[0]

        stmt = CTASKudu(
            impala_name,
            kudu_name,
            self.client.master_addrs,
            select,
            primary_keys,
            database=database,
        )
    else:
        if external:
            ktable = self.client.table(kudu_name)
            kschema = ktable.schema
            schema = schema_kudu_to_ibis(kschema)
            primary_keys = kschema.primary_keys()
        elif schema is None:
            raise ValueError(
                'Must pass a schema or obj when creating a non-external table'
            )

        stmt = CreateTableKudu(
            impala_name,
            kudu_name,
            self.client.master_addrs,
            schema,
            primary_keys,
            external=external,
            database=database,
            can_exist=False,
        )
    self.impala_client._execute(stmt)
|
Create a Kudu-backed table in the connected Impala cluster. For
non-external tables, this will create a Kudu table with a compatible
storage schema.
This function is patterned after the ImpalaClient.create_table function
designed for physical filesystems (like HDFS).
Parameters
----------
impala_name : string
Name of the created Impala table
kudu_name : string
Name of the backing Kudu table. Will be created if external=False
primary_keys : list of column names
List of columns to use as the table's primary key
obj : TableExpr or pandas.DataFrame, optional
If passed, creates table from select statement results
schema : ibis.Schema, optional
Mutually exclusive with expr, creates an empty table with a
particular schema
database : string, default None (optional)
external : boolean, default False
If False, a new Kudu table will be created. Otherwise, the Kudu table
must already exist.
|
16,377 |
def deserialize(cls, assoc_s):
    pairs = kvform.kvToSeq(assoc_s, strict=True)
    keys = []
    values = []
    for k, v in pairs:
        keys.append(k)
        values.append(v)

    if keys != cls.assoc_keys:
        raise ValueError('Unexpected key values: %r', keys)

    version, handle, secret, issued, lifetime, assoc_type = values
    if version != '2':
        raise ValueError('Unknown version: %r' % version)
    issued = int(issued)
    lifetime = int(lifetime)
    secret = oidutil.fromBase64(secret)
    return cls(handle, secret, issued, lifetime, assoc_type)
|
Parse an association as stored by serialize().
inverse of serialize
@param assoc_s: Association as serialized by serialize()
@type assoc_s: str
@return: instance of this class
|
16,378 |
def render(self, name, value, attrs=None, renderer=None):
    if self.has_template_widget_rendering:
        return super(ClearableFileInputWithImagePreview, self).render(
            name, value, attrs=attrs, renderer=renderer
        )
    else:
        context = self.get_context(name, value, attrs)
        return render_to_string(self.template_name, context)
|
Render the widget as an HTML string.
Overridden here to support Django < 1.11.
|
16,379 |
def _validate(self, writing=False):
    if (((len(self.fragment_offset) != len(self.fragment_length)) or
         (len(self.fragment_length) != len(self.data_reference)))):
        msg = ("The lengths of the fragment offsets ({len_offsets}), "
               "fragment lengths ({len_fragments}), and "
               "data reference items ({len_drefs}) must be the same.")
        msg = msg.format(len_offsets=len(self.fragment_offset),
                         len_fragments=len(self.fragment_length),
                         len_drefs=len(self.data_reference))
        self._dispatch_validation_error(msg, writing=writing)
    if any([x <= 0 for x in self.fragment_offset]):
        msg = "Fragment offsets must all be positive."
        self._dispatch_validation_error(msg, writing=writing)
    if any([x <= 0 for x in self.fragment_length]):
        msg = "Fragment lengths must all be positive."
        self._dispatch_validation_error(msg, writing=writing)
|
Validate internal correctness.
|
16,380 |
def _use_framework(module):
    import txaio
    for method_name in __all__:
        if method_name in ['use_twisted', 'use_asyncio']:
            continue
        setattr(txaio, method_name,
                getattr(module, method_name))
|
Internal helper, to set this module's methods to a specified
framework's helper methods.
|
16,381 |
def _outer_values_update(self, full_values):
    super(BayesianGPLVMMiniBatch, self)._outer_values_update(full_values)
    if self.has_uncertain_inputs():
        meangrad_tmp, vargrad_tmp = self.kern.gradients_qX_expectations(
            variational_posterior=self.X,
            Z=self.Z, dL_dpsi0=full_values['dL_dpsi0'],
            dL_dpsi1=full_values['dL_dpsi1'],
            dL_dpsi2=full_values['dL_dpsi2'],
            psi0=self.psi0, psi1=self.psi1, psi2=self.psi2)
        self.X.mean.gradient = meangrad_tmp
        self.X.variance.gradient = vargrad_tmp
    else:
        self.X.gradient = self.kern.gradients_X(full_values['dL_dKnm'], self.X, self.Z)
        self.X.gradient += self.kern.gradients_X_diag(full_values['dL_dKdiag'], self.X)
|
Here you put the values, which were collected before in the right places.
E.g. set the gradients of parameters, etc.
|
16,382 |
def process_event(self, event_name: str, data: dict) -> None:
    if event_name == "after_epoch":
        self.epochs_done = data["epochs_done"]
        self.batches_seen = data["batches_seen"]
        self.train_examples_seen = data["train_examples_seen"]
    return
|
Process event after epoch
Args:
event_name: whether event is sent after epoch or batch.
Set of values: ``"after_epoch", "after_batch"``
data: event data (dictionary)
Returns:
None
|
16,383 |
def _nodeSatisfiesValue(cntxt: Context, n: Node, vsv: ShExJ.valueSetValue) -> bool:
    vsv = map_object_literal(vsv)
    if isinstance_(vsv, ShExJ.objectValue):
        return objectValueMatches(n, vsv)
    if isinstance(vsv, ShExJ.Language):
        if vsv.languageTag is not None and isinstance(n, Literal) and n.language is not None:
            return n.language == vsv.languageTag
        else:
            return False
    if isinstance(vsv, ShExJ.IriStem):
        return nodeInIriStem(cntxt, n, vsv.stem)
    if isinstance(vsv, ShExJ.IriStemRange):
        exclusions = vsv.exclusions if vsv.exclusions is not None else []
        return nodeInIriStem(cntxt, n, vsv.stem) and not any(
            (uriref_matches_iriref(n, excl) if isinstance(excl, ShExJ.IRIREF) else
             uriref_startswith_iriref(n, excl.stem)) for excl in exclusions)
    if isinstance(vsv, ShExJ.LiteralStem):
        return nodeInLiteralStem(cntxt, n, vsv.stem)
    if isinstance(vsv, ShExJ.LiteralStemRange):
        exclusions = vsv.exclusions if vsv.exclusions is not None else []
        return nodeInLiteralStem(cntxt, n, vsv.stem) and not any(str(n) == excl for excl in exclusions)
    if isinstance(vsv, ShExJ.LanguageStem):
        return nodeInLanguageStem(cntxt, n, vsv.stem)
    if isinstance(vsv, ShExJ.LanguageStemRange):
        exclusions = vsv.exclusions if vsv.exclusions is not None else []
        return nodeInLanguageStem(cntxt, n, vsv.stem) and not any(str(n) == str(excl) for excl in exclusions)
    return False
|
A term matches a valueSetValue if:
* vsv is an objectValue and n = vsv.
* vsv is a Language with langTag lt and n is a language-tagged string with a language tag l and l = lt.
* vsv is a IriStem, LiteralStem or LanguageStem with stem st and nodeIn(n, st).
* vsv is a IriStemRange, LiteralStemRange or LanguageStemRange with stem st and exclusions excls and
nodeIn(n, st) and there is no x in excls such that nodeIn(n, excl).
* vsv is a Wildcard with exclusions excls and there is no x in excls such that nodeIn(n, excl).
Note that ObjectLiteral is *not* typed in ShExJ.jsg, so we identify it by a lack of a 'type' variable
.. note:: Mismatch with spec
This won't work correctly if the stem value is passed in to nodeIn, as there will be no way to know whether
we're matching an IRI or other type
... note:: Language issue
The stem range spec shouldn't have the first element in the exclusions
|
16,384 |
def labels(self, hs_dims=None, prune=False):
    if self.ca_as_0th:
        labels = self._cube.labels(include_transforms_for_dims=hs_dims)[1:]
    else:
        labels = self._cube.labels(include_transforms_for_dims=hs_dims)[-2:]

    if not prune:
        return labels

    def prune_dimension_labels(labels, prune_indices):
        labels = [label for label, prune in zip(labels, prune_indices) if not prune]
        return labels

    labels = [
        prune_dimension_labels(dim_labels, dim_prune_inds)
        for dim_labels, dim_prune_inds in zip(labels, self._prune_indices(hs_dims))
    ]
    return labels
|
Get labels for the cube slice, and perform pruning by slice.
|
16,385 |
def nextProperty(self, propuri):
    if propuri == self.properties[-1].uri:
        return self.properties[0]
    flag = False
    for x in self.properties:
        if flag == True:
            return x
        if x.uri == propuri:
            flag = True
    return None
|
Returns the next property in the list of properties. If it's the last one, returns the first one.
|
16,386 |
def sync_firmware(self):
    serial_no = self.serial_number

    if self.firmware_newer():
        try:
            self.update_firmware()
        except errors.JLinkException as e:
            pass

        # nesting and message text reconstructed; the dump lost indentation and the literal
        if self.firmware_outdated():
            raise errors.JLinkException('Failed to sync firmware version.')

        return self.open(serial_no=serial_no)

    return None
|
Syncs the emulator's firmware version and the DLL's firmware.
This method is useful for ensuring that the firmware running on the
J-Link matches the firmware supported by the DLL.
Args:
self (JLink): the ``JLink`` instance
Returns:
``None``
|
16,387 |
def wrap_text_in_a_box(body='', title='', style='double_star', **args):
    # string literals and style-dictionary keys below were reconstructed from the
    # parameter documentation; the dump stripped them along with indentation
    def _wrap_row(row, max_col, break_long):
        spaces = _RE_BEGINNING_SPACES.match(row).group()
        row = row[len(spaces):]
        spaces = spaces.expandtabs()
        return textwrap.wrap(row, initial_indent=spaces,
                             subsequent_indent=spaces, width=max_col,
                             break_long_words=break_long)

    def _clean_newlines(text):
        text = _RE_LONELY_NEWLINES.sub(' \n', text)
        return _RE_NEWLINES_CLEANER.sub(lambda x: x.group()[:-1], text)

    body = unicode(body, 'utf-8')
    title = unicode(title, 'utf-8')

    astyle = dict(CFG_WRAP_TEXT_IN_A_BOX_STYLES['__DEFAULT'])
    if style in CFG_WRAP_TEXT_IN_A_BOX_STYLES:
        astyle.update(CFG_WRAP_TEXT_IN_A_BOX_STYLES[style])
    astyle.update(args)

    horiz_sep = astyle['horiz_sep']
    border = astyle['border']
    tab_str = astyle['tab_str'] * astyle['tab_num']
    max_col = max(astyle['max_col'] -
                  len(border[3]) - len(border[4]) - len(tab_str), 1)
    min_col = astyle['min_col']
    prefix = astyle['prefix']
    suffix = astyle['suffix']
    force_horiz = astyle['force_horiz']
    break_long = astyle['break_long']

    body = _clean_newlines(body)
    tmp_rows = [_wrap_row(row, max_col, break_long)
                for row in body.split('\n')]
    body_rows = []
    for rows in tmp_rows:
        if rows:
            body_rows += rows
        else:
            body_rows.append('')
    if not ''.join(body_rows).strip():
        body_rows = []

    title = _clean_newlines(title)
    tmp_rows = [_wrap_row(row, max_col, break_long)
                for row in title.split('\n')]
    title_rows = []
    for rows in tmp_rows:
        if rows:
            title_rows += rows
        else:
            title_rows.append('')
    if not ''.join(title_rows).strip():
        title_rows = []

    max_col = max([len(row) for row in body_rows + title_rows] + [min_col])

    mid_top_border_len = max_col + \
        len(border[3]) + len(border[4]) - len(border[0]) - len(border[2])
    mid_bottom_border_len = max_col + \
        len(border[3]) + len(border[4]) - len(border[5]) - len(border[7])
    top_border = border[0] + \
        (border[1] * mid_top_border_len)[:mid_top_border_len] + border[2]
    bottom_border = border[5] + \
        (border[6] * mid_bottom_border_len)[:mid_bottom_border_len] + \
        border[7]
    if isinstance(horiz_sep, tuple) and len(horiz_sep) == 3:
        horiz_line = horiz_sep[0] + \
            (horiz_sep[1] * (max_col + 2))[:(max_col + 2)] + horiz_sep[2]
    else:
        horiz_line = border[3] + (horiz_sep * max_col)[:max_col] + border[4]

    title_rows = [tab_str + border[3] + row +
                  ' ' * (max_col - len(row)) +
                  border[4] for row in title_rows]
    body_rows = [tab_str + border[3] + row +
                 ' ' * (max_col - len(row)) + border[4] for row in body_rows]

    ret = []
    if top_border:
        ret += [tab_str + top_border]
    ret += title_rows
    if title_rows or force_horiz:
        ret += [tab_str + horiz_line]
    ret += body_rows
    if bottom_border:
        ret += [tab_str + bottom_border]
    return (prefix + '\n'.join(ret) + suffix).encode('utf-8')
|
r"""Return a nicely formatted text box.
e.g.
******************
** title **
**--------------**
** body **
******************
Indentation and newline are respected.
:param body: the main text
:param title: an optional title
:param style: the name of one of the styles in CFG_WRAP_TEXT_IN_A_BOX_STYLES. By default
the double_star style is used.
You can further tune the desired style by setting various optional
parameters:
:param horiz_sep: a string that is repeated in order to produce a
separator row between the title and the body (if needed)
or a tuple of three characters in the form (l, c, r)
:param max_col: the maximum number of columns used by the box
(including indentation)
:param min_col: the symmetrical minimum number of columns
:param tab_str: a string to represent indentation
:param tab_num: the number of levels of indentation
:param border: a tuple of 8 element in the form
(tl, t, tr, l, r, bl, b, br) of strings that represent the
different corners and sides of the box
:param prefix: a prefix string added before the box
:param suffix: a suffix string added after the box
:param break_long: whether to break long words in order to respect
max_col
:param force_horiz: True in order to print the horizontal line even when
there is no title
e.g.:
print wrap_text_in_a_box(title='prova',
body=' 123 prova.\n Vediamo come si indenta',
horiz_sep='-', style='no_border', max_col=20, tab_num=1)
prova
----------------
123 prova.
Vediamo come
si indenta
|
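A usage sketch for the reconstructed function above (Python 2, matching the entry's unicode/print conventions); the message text is illustrative.

msg = wrap_text_in_a_box(title='WARNING',
                         body='The index is stale.\nPlease rerun the updater.',
                         max_col=40)
print msg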
16,388 |
def x_forwarded_for(self):
    ip = self._request.META.get('REMOTE_ADDR')
    current_xff = self.headers.get('X-Forwarded-For')
    return '%s, %s' % (current_xff, ip) if current_xff else ip
|
X-Forwarded-For header value.
This is the amended header so that it contains the previous IP address
in the forwarding chain.
|
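A standalone sketch of the same amendment rule, separated from the proxy request object (the function name is illustrative):

def amend_x_forwarded_for(existing_xff, client_ip):
    # Append the immediate client IP to any X-Forwarded-For value already present.
    return '%s, %s' % (existing_xff, client_ip) if existing_xff else client_ip

assert amend_x_forwarded_for(None, '10.0.0.1') == '10.0.0.1'
assert amend_x_forwarded_for('10.0.0.1', '192.168.0.7') == '10.0.0.1, 192.168.0.7'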
16,389 |
def get_client_class(self, client_class_name):
    # 'ClientClass' resource segment and 'GET' verb reconstructed; the original literals were stripped
    request_url = self._build_url(['ClientClass', client_class_name])
    return self._do_request('GET', request_url)
|
Returns the details of a specific client class from the CPNR server.
|
16,390 |
def get_template(template_dict, parameter_overrides=None):
    template_dict = template_dict or {}
    if template_dict:
        template_dict = SamTranslatorWrapper(template_dict).run_plugins()
    template_dict = SamBaseProvider._resolve_parameters(template_dict, parameter_overrides)
    ResourceMetadataNormalizer.normalize(template_dict)
    return template_dict
|
Given a SAM template dictionary, return a cleaned copy of the template where SAM plugins have been run
and parameter values have been substituted.
Parameters
----------
template_dict : dict
unprocessed SAM template dictionary
parameter_overrides: dict
Optional dictionary of values for template parameters
Returns
-------
dict
Processed SAM template
|
16,391 |
def _get_username(self, username=None, use_config=True, config_filename=None):
    if not username and use_config:
        if self._config is None:
            self._read_config(config_filename)
        username = self._config.get("credentials", "username", fallback=None)

    if not username:
        username = input("Please enter your username: ").strip()
        while not username:
            username = input("No username specified. Please enter your username: ").strip()
        if "credentials" not in self._config:
            self._config.add_section("credentials")
        self._config.set("credentials", "username", username)
        self._save_config()
    return username
|
Determine the username
If a username is given, this name is used. Otherwise the configuration
file will be consulted if `use_config` is set to True. The user is asked
for the username if the username is not available. Then the username is
stored in the configuration file.
:param username: Username (used directly if given)
:type username: ``str``
:param use_config: Whether to read username from configuration file
:type use_config: ``bool``
:param config_filename: Path to the configuration file
:type config_filename: ``str``
|
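A self-contained sketch of the same read-or-prompt-then-persist pattern using configparser directly; the file path and section name are illustrative, not the library's actual defaults.

import configparser

def get_username(config_path='settings.ini'):
    config = configparser.ConfigParser()
    config.read(config_path)
    username = config.get('credentials', 'username', fallback=None)
    if not username:
        username = input('Please enter your username: ').strip()
        if 'credentials' not in config:
            config.add_section('credentials')
        config.set('credentials', 'username', username)
        with open(config_path, 'w') as fh:
            config.write(fh)
    return username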
16,392 |
def IsOutOfLineMethodDefinition(clean_lines, linenum):
    # Scan back a few lines for the start of the current function.
    for i in xrange(linenum, max(-1, linenum - 10), -1):
        if Match(r'^([^()]*\w+)\(', clean_lines.elided[i]):
            return Match(r'^[^()]*\w+::\w+\(', clean_lines.elided[i]) is not None
    return False
|
Check if current line contains an out-of-line method definition.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line contains an out-of-line method definition.
|
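A quick check (outside cpplint's CleansedLines machinery) of the two regexes assumed in the reconstruction above: only qualified, out-of-line definitions match the second pattern.

import re

def looks_out_of_line(line):
    return re.match(r'^[^()]*\w+::\w+\(', line) is not None

assert looks_out_of_line('void Foo::Bar(int x) {')
assert not looks_out_of_line('void Baz(int x) {')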
16,393 |
def insert(self, var, value, index=None):
    current = self.__get(var)
    if not isinstance(current, list):
        raise KeyError("%s: is not a list" % var)
    if index is None:
        current.append(value)
    else:
        current.insert(index, value)
    if self.auto_save:
        self.save()
|
Insert at the index.
If the index is not provided appends to the end of the list.
|
16,394 |
def datafind_connection(server=None):
    if server:
        datafind_server = server
    else:
        # Get the server name from the environment
        if "LIGO_DATAFIND_SERVER" in os.environ:
            datafind_server = os.environ["LIGO_DATAFIND_SERVER"]
        else:
            err = "Trying to obtain the ligo datafind server url from "
            err += "the environment, ${LIGO_DATAFIND_SERVER}, but that "
            err += "variable is not populated."
            raise ValueError(err)

    # Only set up credentials when not talking to a plain port-80 server
    if not datafind_server.endswith("80"):
        cert_file, key_file = glue.datafind.find_credential()
    else:
        cert_file, key_file = None, None

    # Split an optional port off the server string
    dfs_fields = datafind_server.split(':', 1)
    server = dfs_fields[0]
    port = int(dfs_fields[1]) if len(dfs_fields) == 2 else None

    if cert_file and key_file:
        connection = glue.datafind.GWDataFindHTTPSConnection(
            host=server, port=port, cert_file=cert_file, key_file=key_file)
    else:
        connection = glue.datafind.GWDataFindHTTPConnection(
            host=server, port=port)
    return connection
|
Return a connection to the datafind server
Parameters
-----------
server : {SERVER:PORT, string}, optional
A string representation of the server and port.
The port may be omitted.
Returns
--------
connection
The open connection to the datafind server.
|
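A hedged usage sketch: the returned connection exposes glue.datafind query methods such as find_frame_urls; the server, observatory, frame type, and GPS times below are illustrative.

connection = datafind_connection('datafind.example.org:80')
urls = connection.find_frame_urls('H', 'H1_HOFT_C00',
                                  1126259446, 1126259478, urltype='file')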
16,395 |
def get_source(self, environment, template):
    # string literals below were reconstructed from the docstring and the
    # surrounding logic; the dump stripped them along with indentation
    _template = template
    if template.split('/', 1)[0] in ('..', '.'):
        is_relative = True
    else:
        is_relative = False
    # resolve relative './' and '../' imports against the importing file
    if is_relative:
        if not environment or 'tpldir' not in environment.globals:
            log.warning(
                'Relative path "%s" cannot be resolved without an environment',
                template
            )
            raise TemplateNotFound
        base_path = environment.globals['tpldir']
        _template = os.path.normpath('/'.join((base_path, _template)))
        if _template.split('/', 1)[0] == '..':
            log.warning(
                'Relative path "%s" is outside of the search path', template
            )
            raise TemplateNotFound(template)

    self.check_cache(_template)

    if environment and template:
        tpldir = os.path.dirname(_template).replace('\\', '/')
        tplfile = _template
        if is_relative:
            tpldir = environment.globals.get('tpldir', tpldir)
            tplfile = template
        tpldata = {
            'tplfile': tplfile,
            'tpldir': '.' if tpldir == '' else tpldir,
            'tpldot': tpldir.replace('/', '.'),
        }
        environment.globals.update(tpldata)

    for spath in self.searchpath:
        filepath = os.path.join(spath, _template)
        try:
            with salt.utils.files.fopen(filepath, 'rb') as ifile:
                contents = ifile.read().decode(self.encoding)
                mtime = os.path.getmtime(filepath)

                def uptodate():
                    try:
                        return os.path.getmtime(filepath) == mtime
                    except OSError:
                        return False
                return contents, filepath, uptodate
        except IOError:
            continue
    raise TemplateNotFound(template)
|
Salt-specific loader to find imported jinja files.
Jinja imports will be interpreted as originating from the top
of each of the directories in the searchpath when the template
name does not begin with './' or '../'. When a template name
begins with './' or '../' then the import will be relative to
the importing file.
|
16,396 |
def add_section(self, section):
    self._sections = self._ensure_append(section, self._sections)
|
A block section of code to be used as substitutions
:param section: A block section of code to be used as substitutions
:type section: Section
|
16,397 |
def teetsv(table, source=None, encoding=None, errors='strict', write_header=True,
           **csvargs):
    csvargs.setdefault('dialect', 'excel-tab')
    return teecsv(table, source=source, encoding=encoding, errors=errors,
                  write_header=write_header, **csvargs)
|
Convenience function, as :func:`petl.io.csv.teecsv` but with different
default dialect (tab delimited).
|
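A small petl sketch: teetsv writes a tab-delimited copy as rows stream through while leaving the table available for further steps (file names are illustrative).

import petl as etl

table = [['foo', 'bar'], ['a', 1], ['b', 2]]
snapshot = etl.teetsv(table, 'snapshot.tsv')   # lazily tees rows to snapshot.tsv
etl.totsv(snapshot, 'final.tsv')               # iterating triggers both writes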
16,398 |
def fundrefxml2json(self, node):
doi = FundRefDOIResolver.strip_doi_host(self.get_attrib(node,
))
oaf_id = FundRefDOIResolver().resolve_by_doi(
"http://dx.doi.org/" + doi)
name = node.find(,
namespaces=self.namespaces).text
acronyms = []
for n in node.findall(,
namespaces=self.namespaces):
usagenode = n.find(, namespaces=self.namespaces)
if usagenode is not None:
if self.get_attrib(usagenode, ) == \
(
):
label = n.find(,
namespaces=self.namespaces)
if label is not None:
acronyms.append(label.text)
parent_node = node.find(, namespaces=self.namespaces)
if parent_node is None:
parent = {}
else:
parent = {
"$ref": self.get_attrib(parent_node, ),
}
country_elem = node.find(, namespaces=self.namespaces)
country_url = self.get_attrib(country_elem, )
country_code = self.cc_resolver.cc_from_url(country_url)
type_ = node.find(,
namespaces=self.namespaces).text
subtype = node.find(,
namespaces=self.namespaces).text
country_elem = node.find(, namespaces=self.namespaces)
modified_elem = node.find(, namespaces=self.namespaces)
created_elem = node.find(, namespaces=self.namespaces)
json_dict = {
: self.schema_formatter.schema_url,
: doi,
: {
: oaf_id,
},
: name,
: acronyms,
: parent,
: country_code,
: type_,
: subtype.lower(),
: (created_elem.text if created_elem is not None
else None),
: (modified_elem.text if modified_elem is not None
else None),
}
return json_dict
|
Convert a FundRef 'skos:Concept' node into JSON.
|
16,399 |
def _summarize_o_mutation_type(model):
    from nautilus.api.util import summarize_mutation_io
    object_type_name = get_model_string(model)
    return summarize_mutation_io(
        name=object_type_name,
        type=_summarize_object_type(model),
        required=False
    )
|
This function creates the actual mutation io summary corresponding to the model
|