code | docstring | _id
---|---|---|
def compare_to_cache(self, vm_cache): <NEW_LINE> <INDENT> if self.dimensions != vm_cache.dimensions: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> changed_pixels = [] <NEW_LINE> for new_pixel in self.pixels: <NEW_LINE> <INDENT> old_pixel = vm_cache.pixels[new_pixel.idx] <NEW_LINE> if new_pixel.rgb() != old_pixel.rgb(): <NEW_LINE> <INDENT> changed_pixels.append(new_pixel) <NEW_LINE> <DEDENT> <DEDENT> return changed_pixels
|
Compares vmatrix to cached vmatrix and returns list of changed pixels
|
625941b782261d6c526ab2d9
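A minimal usage sketch of the comparison above; this Pixel class and its idx/rgb() members are hypothetical stand-ins for whatever the real vmatrix pixels provide.

class Pixel:
    def __init__(self, idx, r, g, b):
        self.idx = idx
        self._rgb = (r, g, b)
    def rgb(self):
        return self._rgb

# two frames with equal dimensions; only pixel 1 differs
old = [Pixel(0, 0, 0, 0), Pixel(1, 0, 0, 0)]
new = [Pixel(0, 0, 0, 0), Pixel(1, 255, 0, 0)]
changed = [p for p in new if p.rgb() != old[p.idx].rgb()]
assert [p.idx for p in changed] == [1]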
|
def plot_icmaps(self, outtype='png', **kwargs): <NEW_LINE> <INDENT> all_icc_plot_f = os.path.join(self.ica_dir, 'all_components_zscore_{}.{}'.format(self.zscore, outtype)) <NEW_LINE> iccs_plot_f = os.path.join(self.ica_dir, 'ic_components_zscore_{}.{}'.format(self.zscore, outtype)) <NEW_LINE> icc_multi_slice = os.path.join(self.ica_dir, 'ic_map_{}_zscore_{}.{}') <NEW_LINE> fig1 = plot_ica_components(self._icc_imgs, **kwargs) <NEW_LINE> fig1.savefig(iccs_plot_f, facecolor=fig1.get_facecolor(), edgecolor='none') <NEW_LINE> fig2 = plot_all_components(self._icc_imgs, **kwargs) <NEW_LINE> fig2.savefig(all_icc_plot_f, facecolor=fig2.get_facecolor(), edgecolor='none') <NEW_LINE> sliced_ic_plots = [] <NEW_LINE> for i, img in enumerate(iter_img(self._icc_imgs)): <NEW_LINE> <INDENT> fig3 = plot_multi_slices( img, cut_dir="z", n_cuts=24, n_cols=4, title="IC {}\n(z-score {})".format(i+1, self.zscore), title_fontsize=32, plot_func=None, **kwargs ) <NEW_LINE> out_f = icc_multi_slice.format(i+1, self.zscore, outtype) <NEW_LINE> fig3.savefig(out_f, facecolor=fig3.get_facecolor(), edgecolor='none') <NEW_LINE> sliced_ic_plots.append(out_f) <NEW_LINE> <DEDENT> return all_icc_plot_f, iccs_plot_f, sliced_ic_plots
|
Plot the thresholded IC spatial maps and store the outputs in the ICA results folder.
Parameters
----------
outtype: str
Extension (without the '.') of the output files; it determines the image format of the plots.
Returns
-------
all_icc_plot_f: str
iccs_plot_f: str
sliced_ic_plots: list of str
|
625941b78e7ae83300e4ae01
|
def _export(self): <NEW_LINE> <INDENT> file_name = questionary.text( 'Type file name for export ending with .csv or .ris', validate=lambda val: val.endswith((".csv", ".ris")), ).ask() <NEW_LINE> if file_name is None: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> as_data = self.as_data <NEW_LINE> proba = self.shared.get('pred_proba', None) <NEW_LINE> if proba is None: <NEW_LINE> <INDENT> proba = np.flip(np.arange(len(as_data))) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> proba = np.array(proba) <NEW_LINE> <DEDENT> labels = self.y <NEW_LINE> pool_idx = np.where(labels == LABEL_NA)[0] <NEW_LINE> one_idx = np.where(labels == 1)[0] <NEW_LINE> zero_idx = np.where(labels == 0)[0] <NEW_LINE> proba_order = np.argsort(-proba[pool_idx]) <NEW_LINE> ranking = np.concatenate( (one_idx, pool_idx[proba_order], zero_idx), axis=None) <NEW_LINE> self.as_data.to_file(fp=file_name, labels=labels, ranking=ranking)
|
Export the results to a csv file.
Order of records is: [included, not reviewed (by proba), excluded]
|
625941b7a8370b77170526d6
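The ranking construction above can be checked in isolation. A small sketch, assuming LABEL_NA marks unreviewed records (its concrete value in ASReview is an assumption here):

import numpy as np

LABEL_NA = -1  # assumed 'not reviewed' marker
labels = np.array([1, LABEL_NA, 0, LABEL_NA, 1])
proba = np.array([0.9, 0.2, 0.1, 0.8, 0.5])
pool_idx = np.where(labels == LABEL_NA)[0]
one_idx = np.where(labels == 1)[0]
zero_idx = np.where(labels == 0)[0]
proba_order = np.argsort(-proba[pool_idx])  # pool sorted by proba, descending
ranking = np.concatenate((one_idx, pool_idx[proba_order], zero_idx), axis=None)
assert ranking.tolist() == [0, 4, 3, 1, 2]  # included, pool by proba, excluded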
|
def fill_bev(bevDict, stimDict, circleTableBoxes): <NEW_LINE> <INDENT> for i in range(1, len(bevDict) + 1): <NEW_LINE> <INDENT> for j in range(1, len(stimDict) + 1): <NEW_LINE> <INDENT> if circleTableBoxes[i, j].get() == ' ' * len(circleTableBoxes[i, j].get()): <NEW_LINE> <INDENT> circleTableBoxes[i, j].delete(0, END) <NEW_LINE> circleTableBoxes[i, j].insert(0, circleTableBoxes[i, 0].cget("text"))
|
(dict, dict, dict) -> (none)
Fills the circle table's entry boxes with each row's
behaviour if that box is empty.
|
625941b730c21e258bdfa2d3
|
def assoc(cls, ops, kwargs): <NEW_LINE> <INDENT> expanded = [(o,) if not isinstance(o, cls) else o.operands for o in ops] <NEW_LINE> return sum(expanded, ()), kwargs
|
Associatively expand out nested arguments of the flat class.
E.g.::
>>> class Plus(Operation):
... simplifications = [assoc, ]
>>> Plus.create(1,Plus(2,3))
Plus(1, 2, 3)
|
625941b7d53ae8145f87a0ac
|
def _compare_cell_output(test_result, reference): <NEW_LINE> <INDENT> skip_compare = ['traceback', 'latex', 'prompt_number'] <NEW_LINE> if _skip_display_data_tests: <NEW_LINE> <INDENT> skip_compare.append('png') <NEW_LINE> <DEDENT> if test_result['output_type'] == 'display_data': <NEW_LINE> <INDENT> skip_compare.append('text') <NEW_LINE> skip_compare.append('metadata') <NEW_LINE> <DEDENT> for key in reference: <NEW_LINE> <INDENT> if key not in test_result: <NEW_LINE> <INDENT> raise Exception(str(reference) + '!!!!!' + str(test_result)) <NEW_LINE> return False <NEW_LINE> <DEDENT> elif key not in skip_compare: <NEW_LINE> <INDENT> if key == 'text': <NEW_LINE> <INDENT> if test_result[key].strip() != reference[key].strip(): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> <DEDENT> elif key == 'png': <NEW_LINE> <INDENT> reference_img = reference[key] <NEW_LINE> test_img = test_result[key] <NEW_LINE> if not _compare_images(reference_img, test_img): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> if test_result[key] != reference[key]: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> return True
|
Compare a cell test output to a reference output.
Parameters
----------
test_result : IPython.nbformat.current.NotebookNode
The cell test result that must be compared to the reference.
reference : IPython.nbformat.current.NotebookNode
The reference cell output to compare to.
Returns
-------
comparison_result : bool
The indicator of equality between the test output and the reference.
|
625941b794891a1f4081b8de
|
def get_is_init(self): <NEW_LINE> <INDENT> return self.__is_init
|
Get the is_init flag
:return: (bool) : is_init flag
|
625941b7ec188e330fd5a5dc
|
def sky2image( self, longitude, latitude, distort=True, find=True, xtol=DEFTOL, ): <NEW_LINE> <INDENT> if find and self.distort["name"] != "none": <NEW_LINE> <INDENT> x, y = self._findxy(longitude, latitude, xtol=xtol) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> u, v = self.sph2image(longitude, latitude) <NEW_LINE> p = self.projection.upper() <NEW_LINE> if p in ["-TAN", "-TPV"]: <NEW_LINE> <INDENT> if distort and self.distort["name"] != "none": <NEW_LINE> <INDENT> u, v = self.Distort(u, v, inverse=True) <NEW_LINE> <DEDENT> xdiff, ydiff = self.ApplyCDMatrix(u, v, inverse=True) <NEW_LINE> <DEDENT> elif p == "-TAN-SIP": <NEW_LINE> <INDENT> u, v = self.ApplyCDMatrix(u, v, inverse=True) <NEW_LINE> if distort and self.distort["name"] != "none": <NEW_LINE> <INDENT> xdiff, ydiff = self.Distort(u, v, inverse=True) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> xdiff, ydiff = u, v <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError("projection '%s' not supported" % p) <NEW_LINE> <DEDENT> x = xdiff + self.crpix[0] <NEW_LINE> y = ydiff + self.crpix[1] <NEW_LINE> <DEDENT> return x, y
|
Usage:
x,y=sky2image(longitude, latitude, distort=True, find=True)
Purpose:
Convert between sky (lon,lat) and image coordinates (x,y)
Inputs:
longitude,latitude: Probably ra,dec. Can be arrays.
Optional Inputs:
distort: Use the distortion model if present. Default is True
find: When the distortion model is present, simply find the
roots of the polynomial rather than using an inverse
polynomial. This is more accurate but slower. Default True.
xtol: tolerance to use when root finding with find=True Default is
1e-8.
Outputs:
x,y: x and y coords in the image. Will have the same shape as
lon,lat
Example:
from esutil import wcsutil
import pyfits
hdr=pyfits.getheader(fname)
wcs = wcsutil.WCS(hdr)
x,y = wcs.sky2image(ra,dec)
|
625941b75510c4643540f22f
|
def testBasics(self): <NEW_LINE> <INDENT> bbox = lsst.geom.Box2I(lsst.geom.Point2I(-31, 22), lsst.geom.Extent2I(100, 85)) <NEW_LINE> imMin = -5 <NEW_LINE> imMax = 2500 <NEW_LINE> tableLen = 2000 <NEW_LINE> tableSigma = 55 <NEW_LINE> for indOffset in (0, -50, 234): <NEW_LINE> <INDENT> for imageClass in (afwImage.ImageF, afwImage.ImageD): <NEW_LINE> <INDENT> inImage = makeRampImage(bbox=bbox, start=imMin, stop=imMax, imageClass=imageClass) <NEW_LINE> table = np.random.normal(scale=tableSigma, size=tableLen) <NEW_LINE> table = np.array(table, dtype=inImage.getArray().dtype) <NEW_LINE> refImage = imageClass(inImage, True) <NEW_LINE> refNumBad = referenceApply(image=refImage, table=table, indOffset=indOffset) <NEW_LINE> measImage = imageClass(inImage, True) <NEW_LINE> measNumBad = applyLookupTable(measImage, table, indOffset) <NEW_LINE> self.assertEqual(refNumBad, measNumBad) <NEW_LINE> self.assertImagesAlmostEqual(refImage, measImage)
|
!Test basic functionality of applyLookupTable
|
625941b70a366e3fb873e64d
|
def _presentcountdown(self): <NEW_LINE> <INDENT> if self._countdown_timer == 0: <NEW_LINE> <INDENT> self._mssg = GLabel(x=GAME_WIDTH/2, y=GAME_HEIGHT/2, text=str(3), font_size=32) <NEW_LINE> <DEDENT> if self._countdown_timer == 63: <NEW_LINE> <INDENT> self._mssg = GLabel(x=GAME_WIDTH/2, y=GAME_HEIGHT/2, text=str(2), font_size=32) <NEW_LINE> <DEDENT> if self._countdown_timer == 126: <NEW_LINE> <INDENT> self._mssg = GLabel(x=GAME_WIDTH/2, y=GAME_HEIGHT/2, text=str(1), font_size=32) <NEW_LINE> <DEDENT> self._countdown_timer += 1 <NEW_LINE> if self._countdown_timer == 188: <NEW_LINE> <INDENT> self._mssg = None <NEW_LINE> self._state = STATE_ACTIVE <NEW_LINE> self._game.serveBall()
|
Presents the countdown text on screen for the first 3 seconds.
Once the countdown finishes, it starts the game by serving
the ball.
|
625941b726238365f5f0ec9f
|
def persist_decorator(klass): <NEW_LINE> <INDENT> for method in ["__setitem__", "__delitem__", "update", "setdefault"]: <NEW_LINE> <INDENT> setattr(klass, method, klass._addsave(getattr(klass, method))) <NEW_LINE> <DEDENT> return klass
|
Add a save behavior to methods that update dict data
|
625941b7f548e778e58cd3b2
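A sketch of the class contract the decorator expects: a dict subclass providing the _addsave wrapper it looks up (the save() hook below is an assumption about what _addsave calls).

@persist_decorator
class PersistentDict(dict):
    def save(self):
        print("saved:", dict(self))  # stand-in for writing to disk

    @staticmethod
    def _addsave(method):
        def wrapper(self, *args, **kwargs):
            result = method(self, *args, **kwargs)
            self.save()
            return result
        return wrapper

d = PersistentDict()
d["k"] = 1     # triggers save()
d.update(x=2)  # triggers save()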
|
def removeDuplicates(self, nums): <NEW_LINE> <INDENT> prev0 = 0 <NEW_LINE> prev1 = 1 <NEW_LINE> for i in range(2, len(nums)): <NEW_LINE> <INDENT> if nums[i] != nums[prev0] or nums[i] != nums[prev1]: <NEW_LINE> <INDENT> prev0 += 1 <NEW_LINE> prev1 += 1 <NEW_LINE> nums[prev1] = nums[i] <NEW_LINE> <DEDENT> <DEDENT> return min(len(nums), prev1 + 1)
|
:type nums: List[int]
:rtype: int
|
625941b74e4d5625662d4213
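A quick check of the two-pointer logic above (this is the 'at most two duplicates' variant; the enclosing LeetCode Solution class is assumed):

nums = [1, 1, 1, 2, 2, 3]
k = Solution().removeDuplicates(nums)
assert k == 5 and nums[:k] == [1, 1, 2, 2, 3]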
|
def sorted_insert(s, element): <NEW_LINE> <INDENT> if s.is_empty() or element > s.peek(): <NEW_LINE> <INDENT> s.push(element) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> top = s.pop() <NEW_LINE> sorted_insert(s, element) <NEW_LINE> s.push(top)
|
Sorted Insert
:param s:
:param element:
:return:
|
625941b7091ae35668666d9c
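A minimal stack exposing is_empty/peek/push/pop is enough to exercise the recursive insert; this Stack class is a sketch, not the one the original code assumes.

class Stack:
    def __init__(self):
        self._items = []
    def is_empty(self):
        return not self._items
    def peek(self):
        return self._items[-1]
    def push(self, item):
        self._items.append(item)
    def pop(self):
        return self._items.pop()

s = Stack()
for x in [3, 1, 2]:
    sorted_insert(s, x)
assert s._items == [1, 2, 3]  # smallest at the bottom, largest on top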
|
def completeLink(link, hostUrl, domain): <NEW_LINE> <INDENT> completedLink = link <NEW_LINE> if completedLink == '': <NEW_LINE> <INDENT> return hostUrl <NEW_LINE> <DEDENT> if completedLink.find('[[site]]') != -1: <NEW_LINE> <INDENT> completedLink = completedLink.replace('[[site]]',domain) <NEW_LINE> <DEDENT> elif (completedLink.find('http') == -1): <NEW_LINE> <INDENT> if (completedLink[0] == '?'): <NEW_LINE> <INDENT> completedLink = hostUrl + completedLink <NEW_LINE> <DEDENT> elif (completedLink[0] != '/'): <NEW_LINE> <INDENT> completedLink = hostUrl + '/' + completedLink <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> completedLink = getRoot(hostUrl) + completedLink <NEW_LINE> <DEDENT> <DEDENT> return completedLink
|
Complete the link url
|
625941b74c3428357757c161
|
def bitxor(a, b): <NEW_LINE> <INDENT> return "".join([str(int(x) ^ int(y)) for (x, y) in zip(a, b)])
|
Xor two bit strings (trims the longer input)
|
625941b74f88993c3716beaa
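For example (zip silently trims the longer input):

assert bitxor("1010", "0110") == "1100"
assert bitxor("1010", "01") == "11"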
|
def writeSettingsROM(self): <NEW_LINE> <INDENT> command = bytearray([0xFE, 0xAA, 0xEE]) <NEW_LINE> self.source.write(command) <NEW_LINE> if self.timeout > 0: <NEW_LINE> <INDENT> time.sleep(self.timeout) <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> response = self.source.read() <NEW_LINE> if response[:3] != command: <NEW_LINE> <INDENT> raise ValueError(self._messages[2]) <NEW_LINE> <DEDENT> <DEDENT> except Exception: <NEW_LINE> <INDENT> raise ValueError(self._messages[2])
|
Command to write the settings to ROM
Request: FE AA EE
Response: FE AA EE
|
625941b76aa9bd52df036bd8
|
def p_lines(p): <NEW_LINE> <INDENT> p[0] = [p[1]] + p[3]
|
lines : line END lines
|
625941b7851cf427c661a351
|
def set_present_agent(cell, agent): <NEW_LINE> <INDENT> __set_property(cell, PRESENT_AGENT_IDX, agent)
|
Set the agent that is currently present on the cell.
To tell that there is no agent, set agent to None.
|
625941b724f1403a926009a0
|
def sort_parsing_statistic_by_key(): <NEW_LINE> <INDENT> sorted_parsing_statistic = collections.OrderedDict(sorted(parsing_statistic.items())) <NEW_LINE> return sorted_parsing_statistic
|
Sorts parsing_statistic by key.
:return: sorted_parsing_statistic dictionary
|
625941b74a966d76dd550e42
|
def sortColors(self, nums: 'List[int]') -> None: <NEW_LINE> <INDENT> r = len(nums)-1 <NEW_LINE> l = 0 <NEW_LINE> for i in range(len(nums)): <NEW_LINE> <INDENT> if nums[i] == 0: <NEW_LINE> <INDENT> while l < i and nums[l] == 0: <NEW_LINE> <INDENT> l+=1 <NEW_LINE> <DEDENT> nums[i], nums[l] = nums[l], nums[i] <NEW_LINE> if nums[i] == 2: <NEW_LINE> <INDENT> while r > i and nums[r] == 2: <NEW_LINE> <INDENT> r -= 1 <NEW_LINE> <DEDENT> nums[i], nums[r] = nums[r], nums[i] <NEW_LINE> <DEDENT> <DEDENT> elif nums[i] == 2: <NEW_LINE> <INDENT> while r > i and nums[r] == 2: <NEW_LINE> <INDENT> r -= 1 <NEW_LINE> <DEDENT> nums[i], nums[r] = nums[r], nums[i] <NEW_LINE> if nums[i] == 0: <NEW_LINE> <INDENT> while l < i and nums[l] == 0: <NEW_LINE> <INDENT> l+=1 <NEW_LINE> <DEDENT> nums[i], nums[l] = nums[l], nums[i]
|
Do not return anything, modify nums in-place instead.
|
625941b73346ee7daa2b2b9f
|
def show_image(imgarr): <NEW_LINE> <INDENT> print('Image type:', type(imgarr)) <NEW_LINE> print('Image shape:', imgarr.shape, ' dtype:', imgarr.dtype) <NEW_LINE> if imgarr.ndim != 2: <NEW_LINE> <INDENT> raise ValueError('Expected ndim=2') <NEW_LINE> <DEDENT> print('Image data:') <NEW_LINE> for row in imgarr: <NEW_LINE> <INDENT> for v in row: <NEW_LINE> <INDENT> print('%6.2f ' % v, end='') <NEW_LINE> <DEDENT> print() <NEW_LINE> <DEDENT> plt.imshow(imgarr, cmap='gray') <NEW_LINE> plt.show()
|
Given a numpy 2D array for a sample image, show and describe it.
|
625941b7f8510a7c17cf953b
|
def test_column_not_exists(self): <NEW_LINE> <INDENT> df = pandas.DataFrame() <NEW_LINE> data_filtered = FilterRows(df) <NEW_LINE> with self.assertRaisesRegex(ValueError, "Column filepath not in DataFrame columns: \[\]") as context: <NEW_LINE> <INDENT> data_filtered.filter_(["filepath"], "-")
|
Test empty dataframe looking for the corresponding ValueError exception
|
625941b7f7d966606f6a9e3f
|
def test_accepts_random_cities(self): <NEW_LINE> <INDENT> v = validators.CityValidator() <NEW_LINE> for city in [fake.city() for _ in range(100)]: <NEW_LINE> <INDENT> if len(city) > 20: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> result = v.validate(city) <NEW_LINE> self.assertIsNone(result)
|
Generates 100 random cities and validates them.
|
625941b7d8ef3951e3243374
|
def embed_network(prop,embedfiles,pembed_m,pembed_f,vmatrix,vfluid): <NEW_LINE> <INDENT> embedvals = [np.loadtxt(f) for f in embedfiles] <NEW_LINE> nf = len(prop[prop==vfluid]) <NEW_LINE> nm = len(prop[prop==vmatrix]) <NEW_LINE> pf = float(nf)*pembed_f <NEW_LINE> pm = float(nm)*pembed_m <NEW_LINE> ii = 0 <NEW_LINE> for p,val in [[pf,vfluid],[pm,vmatrix]]: <NEW_LINE> <INDENT> count = 0 <NEW_LINE> n = 0 <NEW_LINE> while count < p: <NEW_LINE> <INDENT> if n >= len(embedvals[ii]): <NEW_LINE> <INDENT> n = 0 <NEW_LINE> <DEDENT> k,j,i = [np.random.randint(1,int(nn)-1) for nn in np.shape(prop)] <NEW_LINE> if np.isfinite(prop[k,j,i]) and prop[k,j,i] == val: <NEW_LINE> <INDENT> prop[k,j,i] = embedvals[ii][n] <NEW_LINE> n += 1 <NEW_LINE> count += 1 <NEW_LINE> <DEDENT> <DEDENT> ii += 1
|
embed a network with resistivity values from smaller networks
prop = array containing property values, 2 unique values vmatrix and vfluid
embedfiles = list containing full path to text file containing x, y and z
property values for the embedment
pembed_m = numpy array or list containing x,y,z probability of embedment
for matrix (closed) cells in prop
pembed_f = numpy array or list containing x,y,z probability of embedment
for open cells in prop
vmatrix,vfluid = property values for matrix and fluid in prop
|
625941b7d18da76e23532308
|
def get_max_input(language=''): <NEW_LINE> <INDENT> if language == '': <NEW_LINE> <INDENT> query_results = max_input(Total, Lang2) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> query_results = max_input(Total, language) <NEW_LINE> <DEDENT> if not query_results.v(): <NEW_LINE> <INDENT> return 'No hay aporte.' <NEW_LINE> <DEDENT> query_results = query_results.data[0][0] <NEW_LINE> max_input_string = str('Idioma: ' + query_results[1] + ' a: ' + query_results[2] + ' con ' + str(query_results[0]*100) + '%.') <NEW_LINE> return max_input_string
|
Main function to obtain the largest contribution percentage among those
occurring across all languages. If a language is passed as a parameter,
the maximum percentage is computed over the contributions that each
language made to that parameter language only.
:param language: string representing the language; language=''
computes the maximum over all existing contributions
:return: string describing the maximum contribution
|
625941b7bf627c535bc1300d
|
def _chi2_for_color_constraint(self, satellite_flux): <NEW_LINE> <INDENT> if not self._MM: <NEW_LINE> <INDENT> raise NotImplementedError('not yet coded in pixel_lensing') <NEW_LINE> <DEDENT> before_ref = self.event.data_ref <NEW_LINE> if len(self._color_constraint) == 4: <NEW_LINE> <INDENT> (ref_dataset, ref_zero_point) = self._color_constraint[:2] <NEW_LINE> (color, sigma_color) = self._color_constraint[2:] <NEW_LINE> <DEDENT> elif len(self._color_constraint) == 8: <NEW_LINE> <INDENT> (ref_dataset, ref_dataset_1) = self._color_constraint[:2] <NEW_LINE> (ref_dataset_2, ref_zero_point) = self._color_constraint[2:4] <NEW_LINE> (ref_zero_point_1, ref_zero_point_2) = self._color_constraint[4:6] <NEW_LINE> (polynomial, sigma_color) = self._color_constraint[6:] <NEW_LINE> flux_1 = self._get_source_flux(ref_dataset_1) <NEW_LINE> flux_2 = self._get_source_flux(ref_dataset_2) <NEW_LINE> mag_1 = ref_zero_point_1 - 2.5 * np.log10(flux_1) <NEW_LINE> mag_2 = ref_zero_point_2 - 2.5 * np.log10(flux_2) <NEW_LINE> c = mag_1 - mag_2 <NEW_LINE> x = 1. <NEW_LINE> color = 0. <NEW_LINE> for p in polynomial: <NEW_LINE> <INDENT> color += x * p <NEW_LINE> x *= c <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError('wrong size of internal variable') <NEW_LINE> <DEDENT> flux_ref = self._get_source_flux(ref_dataset) <NEW_LINE> self.event.get_ref_fluxes(before_ref) <NEW_LINE> mag_ref = ref_zero_point - 2.5 * np.log10(flux_ref) <NEW_LINE> mag_sat = K2_MAG_ZEROPOINT - 2.5 * np.log10(satellite_flux) <NEW_LINE> color_value = mag_sat - mag_ref <NEW_LINE> out = ((color_value - color) / sigma_color)**2 <NEW_LINE> return out
|
Calculate chi2 for the color constraint
|
625941b721bff66bcd68478c
|
def test___init__(self): <NEW_LINE> <INDENT> mgr = manager.SohoHookManager() <NEW_LINE> assert mgr._hooks == {}
|
Test object initialization.
|
625941b7aad79263cf390872
|
def to_matrix(self, mutable=False): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> from sympy import MutableMatrix, ImmutableMatrix <NEW_LINE> <DEDENT> except ImportError: <NEW_LINE> <INDENT> warn("Sympy is not installed.") <NEW_LINE> raise <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if mutable: <NEW_LINE> <INDENT> return MutableMatrix(list(map(list, self))) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return ImmutableMatrix(list(map(list, self)))
|
Consume and extract the entire result as a
`sympy.Matrix <http://docs.sympy.org/latest/tutorial/matrices.html>`_.
.. note::
This method requires `sympy` to be installed.
:param mutable:
:returns: `Matrix <http://docs.sympy.org/latest/tutorial/matrices.html>`_ object.
|
625941b766673b3332b91ece
|
def get_float_runtime_setting(self, setting_id, default_value=None): <NEW_LINE> <INDENT> return self.RUNTIME_SETTINGS_CACHE.get_float_setting(setting_id, default_value)
|
Get a runtime setting as a float value
:param setting_id: The name of the setting
:type setting_id: str|unicode
:param default_value: An optional default value to provide if the setting is not stored
:type default_value: float
:return: The value of the setting.
If the setting is not stored, the optional default_value if provided or 0.0
:rtype: float
|
625941b707d97122c41786c2
|
def show_tips (self, show): <NEW_LINE> <INDENT> if show != self._showtips: <NEW_LINE> <INDENT> self._showtips = show <NEW_LINE> self.dirty = True
|
S.show_tips (...) -> None
Shows or hides the info message display of the StatusBar.
|
625941b724f1403a926009a1
|
def from_config_dict(config, hass=None): <NEW_LINE> <INDENT> if hass is None: <NEW_LINE> <INDENT> hass = homeassistant.HomeAssistant() <NEW_LINE> <DEDENT> logger = logging.getLogger(__name__) <NEW_LINE> config = defaultdict(dict, config) <NEW_LINE> components = {} <NEW_LINE> to_validate = [] <NEW_LINE> validated = [] <NEW_LINE> to_load = [key for key in config.keys() if key != homeassistant.DOMAIN] <NEW_LINE> loader.prepare(hass) <NEW_LINE> while to_load: <NEW_LINE> <INDENT> domain = to_load.pop() <NEW_LINE> component = loader.get_component(domain) <NEW_LINE> if component is not None: <NEW_LINE> <INDENT> components[domain] = component <NEW_LINE> if component.DOMAIN == group.DOMAIN: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> elif not component.DEPENDENCIES: <NEW_LINE> <INDENT> validated.append(domain) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> to_validate.append(domain) <NEW_LINE> for dependency in component.DEPENDENCIES: <NEW_LINE> <INDENT> if dependency not in chain(components.keys(), to_load): <NEW_LINE> <INDENT> to_load.append(dependency) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> <DEDENT> group_added = False <NEW_LINE> while to_validate: <NEW_LINE> <INDENT> newly_validated = [] <NEW_LINE> for domain in to_validate: <NEW_LINE> <INDENT> if all(domain in validated for domain in components[domain].DEPENDENCIES): <NEW_LINE> <INDENT> newly_validated.append(domain) <NEW_LINE> <DEDENT> <DEDENT> if newly_validated: <NEW_LINE> <INDENT> validated.extend(newly_validated) <NEW_LINE> for domain in newly_validated: <NEW_LINE> <INDENT> to_validate.remove(domain) <NEW_LINE> <DEDENT> newly_validated.clear() <NEW_LINE> <DEDENT> elif not group_added: <NEW_LINE> <INDENT> group_added = True <NEW_LINE> validated.append(group.DOMAIN) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> for domain in to_validate: <NEW_LINE> <INDENT> missing_deps = [dep for dep in components[domain].DEPENDENCIES if dep not in validated] <NEW_LINE> logger.error( "Could not validate all dependencies for %s: %s", domain, ", ".join(missing_deps)) <NEW_LINE> <DEDENT> break <NEW_LINE> <DEDENT> <DEDENT> if not group_added: <NEW_LINE> <INDENT> validated.append(group.DOMAIN) <NEW_LINE> if group.DOMAIN not in components: <NEW_LINE> <INDENT> components[group.DOMAIN] = loader.get_component(group.DOMAIN) <NEW_LINE> <DEDENT> <DEDENT> if core_components.setup(hass, config): <NEW_LINE> <INDENT> logger.info("Home Assistant core initialized") <NEW_LINE> for domain in validated: <NEW_LINE> <INDENT> component = components[domain] <NEW_LINE> try: <NEW_LINE> <INDENT> if component.setup(hass, config): <NEW_LINE> <INDENT> logger.info("component %s initialized", domain) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> logger.error("component %s failed to initialize", domain) <NEW_LINE> <DEDENT> <DEDENT> except Exception: <NEW_LINE> <INDENT> logger.exception("Error during setup of component %s", domain) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> logger.error(("Home Assistant core failed to initialize. " "Further initialization aborted.")) <NEW_LINE> <DEDENT> return hass
|
Tries to configure Home Assistant from a config dict.
Dynamically loads required components and their dependencies.
|
625941b7a934411ee37514d2
|
def maxProduct(self, A): <NEW_LINE> <INDENT> min_positive = 0 <NEW_LINE> max_negetive = 0 <NEW_LINE> max_product = A[0] <NEW_LINE> now_product = 1 <NEW_LINE> for a in A: <NEW_LINE> <INDENT> if a == 0: <NEW_LINE> <INDENT> now_product = 1 <NEW_LINE> max_negetive = 0 <NEW_LINE> min_positive = 0 <NEW_LINE> max_product = max(max_product, 0) <NEW_LINE> continue <NEW_LINE> <DEDENT> now_product *= a <NEW_LINE> max_product = max(now_product, max_product) <NEW_LINE> if min_positive > 0: <NEW_LINE> <INDENT> max_product = max(max_product, now_product // min_positive) <NEW_LINE> <DEDENT> if max_negetive < 0: <NEW_LINE> <INDENT> max_product = max(max_product, now_product // max_negetive) <NEW_LINE> <DEDENT> if now_product > 0 and (min_positive == 0 or now_product < min_positive): <NEW_LINE> <INDENT> min_positive = now_product <NEW_LINE> <DEDENT> if now_product < 0 and (max_negetive == 0 or now_product > max_negetive): <NEW_LINE> <INDENT> max_negetive = now_product <NEW_LINE> <DEDENT> <DEDENT> return max_product
|
Find the contiguous subarray within an array (containing at least one number) which has the largest product.
For example, given the array [2,3,-2,4],
the contiguous subarray [2,3] has the largest product = 6.
|
625941b7566aa707497f43b1
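The docstring's example plus a zero-crossing case, assuming the usual LeetCode Solution wrapper around the method:

s = Solution()
assert s.maxProduct([2, 3, -2, 4]) == 6
assert s.maxProduct([-2, 0, -1]) == 0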
|
def forward(self, x, split_pos=None): <NEW_LINE> <INDENT> embedding_out = self.embedding_layer.forward(x, input_opt='jagged') <NEW_LINE> ends_for_left = None <NEW_LINE> if split_pos is not None: <NEW_LINE> <INDENT> ends_for_left = [x + 1 for x in split_pos] <NEW_LINE> <DEDENT> left_out = self.left_layer.forward( embedding_out, starts=None, ends=split_pos, reverse=False, output_opt='last' ) <NEW_LINE> right_out = self.right_layer.forward( embedding_out, starts=split_pos, ends=None, reverse=True, output_opt='last' ) <NEW_LINE> recurrent_out = add_two_array(left_out, right_out) <NEW_LINE> self.forward_out = self.softmax_layer.forward(recurrent_out) <NEW_LINE> self.split_pos = split_pos <NEW_LINE> return self.forward_out
|
Compute forward pass
x: numpy.ndarray, 2d array
The input data. The indices of words
split_pos: 1d array like
Start positions in x. TRNN will compute from split_pos. The
left_layer will compute from 0 to split_pos (not included)
and the right_layer will compute from split_pos to the end.
If split_pos is None, it defaults to half of each row of x.
|
625941b7cdde0d52a9e52e66
|
def getProxyIP(user='my', passwd='xigua'): <NEW_LINE> <INDENT> import requests <NEW_LINE> option_proxy = {"http": 'xigua:Abc123456!@121.43.19.34:8899'} <NEW_LINE> url = 'http://proxycenter.xiguaji.com:8999/Home/GetProxyIp?type=5&isWaited=true' <NEW_LINE> response = None <NEW_LINE> for _ in range(5): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> response = requests.request("get", url, timeout=5) <NEW_LINE> break <NEW_LINE> <DEDENT> except Exception as ex: <NEW_LINE> <INDENT> print(ex) <NEW_LINE> <DEDENT> <DEDENT> if response is None: <NEW_LINE> <INDENT> return option_proxy <NEW_LINE> <DEDENT> return {"http": '{}:{}@{}'.format(user, passwd, response.text)}
|
return proxy:
proxy = {"http":'xigua:Abc123456!@121.43.19.34:8899'}
|
625941b767a9b606de4a7cf4
|
def test_3_2_6_4_pending(): <NEW_LINE> <INDENT> p1 = Promise() <NEW_LINE> p2 = p1.then(5) <NEW_LINE> p1.fulfill(10) <NEW_LINE> assert_equals(10, p1.value) <NEW_LINE> assert p2.isFulfilled <NEW_LINE> assert_equals(10, p2.value)
|
Handles the case where the arguments to then
are not functions or promises.
|
625941b785dfad0860c3ac90
|
def test_phony_force_stale(tmpdir): <NEW_LINE> <INDENT> os.chdir(tmpdir) <NEW_LINE> d = dagger.dagger() <NEW_LINE> d.add('1', ['2', '3']) <NEW_LINE> d.add('3', ['4', '5']) <NEW_LINE> d.add('6', ['3', '7']) <NEW_LINE> d.phony('3') <NEW_LINE> all = '1 2 3 4 5 6 7'.split() <NEW_LINE> touch(all, 0) <NEW_LINE> delete('3') <NEW_LINE> d.stale('3') <NEW_LINE> d.run() <NEW_LINE> truth = {'1': 1, '2': 0, '3': 1, '4': 0, '5': 0, '6': 1, '7': 0} <NEW_LINE> states1 = stale_dict(d, all) <NEW_LINE> assert states1 == truth <NEW_LINE> d = dagger.dagger() <NEW_LINE> d.add('1', ['2', '3']) <NEW_LINE> d.add('3', ['4', '5']) <NEW_LINE> d.add('6', ['3', '7']) <NEW_LINE> d.phony('3') <NEW_LINE> all = '1 2 3 4 5 6 7'.split() <NEW_LINE> touch(all, 0) <NEW_LINE> delete('3') <NEW_LINE> d.stale('4') <NEW_LINE> d.run() <NEW_LINE> truth = {'1': 1, '2': 0, '3': 1, '4': 1, '5': 0, '6': 1, '7': 0} <NEW_LINE> states1 = stale_dict(d, all) <NEW_LINE> assert states1 == truth
|
Make a missing file phony and force it to stale.
|
625941b707f4c71912b112be
|
def set_compile_option(self, name, value, allow_empty=False): <NEW_LINE> <INDENT> for source_file in self.get_source_files(allow_empty=allow_empty): <NEW_LINE> <INDENT> source_file.set_compile_option(name, value)
|
Set compile option for all files within the library
:param name: |compile_option|
:param value: The value of the compile option
:param allow_empty: To disable an error when no source files were found
:example:
.. code-block:: python
lib.set_compile_option("ghdl.flags", ["--no-vital-checks"])
.. note::
Only affects files added *before* the option is set.
|
625941b791f36d47f21ac32d
|
def appendrowindf(NEWREPO_xl, row): <NEW_LINE> <INDENT> global DF_REPO <NEW_LINE> global DF_COUNT <NEW_LINE> DF_REPO = DF_REPO.append(pd.Series(row), ignore_index = True) <NEW_LINE> DF_COUNT = DF_COUNT + 1 <NEW_LINE> if DF_COUNT == 1000: <NEW_LINE> <INDENT> df = pd.read_excel(NEWREPO_xl, header=0) <NEW_LINE> df = df.append(DF_REPO, ignore_index = True) <NEW_LINE> df.to_excel(NEWREPO_xl, index = False) <NEW_LINE> DF_COUNT = 0 <NEW_LINE> DF_REPO = pd.DataFrame()
|
Appends a row to the global DF_REPO buffer; every 1000 rows the buffer is flushed into the Excel file and reset.
|
625941b78e71fb1e9831d5e4
|
def stochastic_round(value) : <NEW_LINE> <INDENT> return int(round(value + np.random.uniform(-0.5, 0.5)))
|
This rounds a float stochastically to an integer.
|
625941b7d10714528d5ffb16
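Stochastic rounding is unbiased: the mean of repeated roundings converges to the input value. A quick empirical check:

import numpy as np

np.random.seed(0)
samples = [stochastic_round(2.3) for _ in range(10000)]
assert set(samples) <= {2, 3}
assert abs(np.mean(samples) - 2.3) < 0.05  # ~30% of samples round up to 3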
|
def colocation_cm(colocation, name=None, op=None): <NEW_LINE> <INDENT> if colocation: <NEW_LINE> <INDENT> return tf.colocate_with(tf.no_op(name) if op is None else op) <NEW_LINE> <DEDENT> return contextlib.suppress()
|
Gets a context manager to colocate ops.
|
625941b70c0af96317bb8020
|
def _get_action_href(exp_thing, action_name, server): <NEW_LINE> <INDENT> action = exp_thing.thing.actions[action_name] <NEW_LINE> action_forms = server.build_forms("localhost", action) <NEW_LINE> return next(item.href for item in action_forms if InteractionVerbs.INVOKE_ACTION in item.op)
|
Helper function to retrieve the Action invocation href.
|
625941b7187af65679ca4f54
|
@task <NEW_LINE> def clean(c): <NEW_LINE> <INDENT> print("Cleaning up...") <NEW_LINE> remove_dir(DOC_DIR) <NEW_LINE> doctest_report = TOP_DIR / "doctest.html" <NEW_LINE> if doctest_report.is_file(): <NEW_LINE> <INDENT> doctest_report.unlink()
|
Clean up our output.
|
625941b7fff4ab517eb2f270
|
def getDelay(self): <NEW_LINE> <INDENT> self.lock.acquire() <NEW_LINE> nexttime=(self.window*self.target-(self.window-1)*self.rate)/1000.+self.lasttime <NEW_LINE> now=time.time() <NEW_LINE> self.lock.release() <NEW_LINE> if (nexttime < now or self.rate == -1): <NEW_LINE> <INDENT> return 0 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return (nexttime-now)
|
Get the delay needed not to overrun target rate.
|
625941b70a50d4780f666cc6
|
def test_update_shopping_cart(self): <NEW_LINE> <INDENT> food_cost = self.browser.find_element_by_id('food-cost') <NEW_LINE> old_food_cost = int(food_cost.text) <NEW_LINE> items = self.get_list_of_items() <NEW_LINE> index = randint(1, len(items) - 1) <NEW_LINE> list_item = self.get_item_dict(items[index]) <NEW_LINE> item_price = self.expected_contents[index]['price'] <NEW_LINE> old_cost = self.expected_contents[index]['cost'] <NEW_LINE> increase_by = randint(5, 10) <NEW_LINE> directions = [ { 'action': 'increase', 'range': range(1, increase_by + 1) }, { 'action': 'decrease', 'range': range(increase_by - 1, - 1, -1) } ] <NEW_LINE> for direction in directions: <NEW_LINE> <INDENT> for i in direction['range']: <NEW_LINE> <INDENT> list_item[direction['action']].click() <NEW_LINE> sleep(0.1) <NEW_LINE> new_cost = int(list_item['cost'].text) <NEW_LINE> new_food_cost = int(food_cost.text) <NEW_LINE> self.assertTrue(new_food_cost - old_food_cost == new_cost - old_cost == item_price * i)
|
Amount of items in cart can be changed.
|
625941b7e5267d203edcdad8
|
def run_sample(): <NEW_LINE> <INDENT> property_id = "YOUR-GA4-PROPERTY-ID" <NEW_LINE> google_ads_customer_id = "YOUR-GOOGLE-ADS-CUSTOMER-ID" <NEW_LINE> create_google_ads_link(property_id, google_ads_customer_id)
|
Runs the sample.
|
625941b726068e7796caeb0f
|
def test_topology_deployment_locations_name_get(self): <NEW_LINE> <INDENT> response = self.client.open( '/api/v1.0/resource-manager/topology/deployment-locations/{name}'.format(name='name_example'), method='GET') <NEW_LINE> self.assert200(response, 'Response body is : ' + response.data.decode('utf-8'))
|
Test case for topology_deployment_locations_name_get
get details of a deployment location
|
625941b745492302aab5e0f7
|
def _convert_matmul(builder, node, graph, err): <NEW_LINE> <INDENT> weight_name = node.inputs[1] <NEW_LINE> W = None <NEW_LINE> weight_as_layer_parameter = False <NEW_LINE> if weight_name in node.input_tensors: <NEW_LINE> <INDENT> W = node.input_tensors[weight_name] <NEW_LINE> <DEDENT> if W is not None: <NEW_LINE> <INDENT> if len(W.shape) != 2: <NEW_LINE> <INDENT> builder.add_load_constant_nd( node.name + "_const_weight_input", weight_name, constant_value=W, shape=W.shape, ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> weight_as_layer_parameter = True <NEW_LINE> <DEDENT> <DEDENT> if weight_as_layer_parameter: <NEW_LINE> <INDENT> builder.add_batched_mat_mul( name=node.name, input_names=[node.inputs[0]], output_name=node.outputs[0], weight_matrix_rows=W.shape[0], weight_matrix_columns=W.shape[1], W=W, ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> builder.add_batched_mat_mul( name=node.name, input_names=[node.inputs[0], weight_name], output_name=node.outputs[0], )
|
convert to CoreML BatchedMatMul Layer:
https://github.com/apple/coremltools/blob/655b3be5cc0d42c3c4fa49f0f0e4a93a26b3e492/mlmodel/format/NeuralNetwork.proto#L3473
|
625941b726068e7796caeb10
|
def test_api_challenges_post_admin(): <NEW_LINE> <INDENT> app = create_ctfd() <NEW_LINE> with app.app_context(): <NEW_LINE> <INDENT> with login_as_user(app, "admin") as client: <NEW_LINE> <INDENT> r = client.post( "/api/v1/challenges", json={ "name": "chal", "category": "cate", "description": "desc", "value": "100", "state": "hidden", "type": "standard", }, ) <NEW_LINE> assert r.status_code == 200 <NEW_LINE> <DEDENT> <DEDENT> destroy_ctfd(app)
|
Can a user post /api/v1/challenges if admin
|
625941b7fbf16365ca6f5ff3
|
def insertion_sort(array): <NEW_LINE> <INDENT> if len(array) < 1: <NEW_LINE> <INDENT> print("Error : Array is empty") <NEW_LINE> return <NEW_LINE> <DEDENT> sorted_array = [] <NEW_LINE> sorted_array.append(array[0]) <NEW_LINE> for i in range(1, len(array)): <NEW_LINE> <INDENT> sorted_array.append(array[i]) <NEW_LINE> for j in reversed(range(0, len(sorted_array)-1)): <NEW_LINE> <INDENT> if sorted_array[j] > sorted_array [j+1]: <NEW_LINE> <INDENT> sorted_array[j], sorted_array[j+1] = sorted_array[j+1], sorted_array[j] <NEW_LINE> <DEDENT> <DEDENT> print("sorted list is : " + str(sorted_array)) <NEW_LINE> <DEDENT> return sorted_array
|
Sorts an array using the insertion sorting algorithm
|
625941b7460517430c393fc6
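For example:

assert insertion_sort([5, 2, 4, 1]) == [1, 2, 4, 5]
assert insertion_sort([]) is None  # empty input only prints an error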
|
@qgl2decl <NEW_LINE> def FlipFlop(qubit: qreg, dragParamSweep, maxNumFFs=10): <NEW_LINE> <INDENT> for dragParam in dragParamSweep: <NEW_LINE> <INDENT> init(qubit) <NEW_LINE> Id(qubit) <NEW_LINE> MEAS(qubit) <NEW_LINE> flipflop_seqs(dragParam, maxNumFFs, qubit) <NEW_LINE> <DEDENT> init(qubit) <NEW_LINE> X(qubit) <NEW_LINE> MEAS(qubit)
|
Flip-flop sequence (X90-X90m)**n to determine off-resonance or DRAG parameter optimization.
Parameters
----------
qubit : logical channel to implement sequence (LogicalChannel)
dragParamSweep : drag parameter values to sweep over (iterable)
maxNumFFs : maximum number of flip-flop pairs to do
|
625941b77047854f462a1244
|
def nmt_train(args): <NEW_LINE> <INDENT> import yaml <NEW_LINE> from nmt.trainer import Trainer <NEW_LINE> with open(args.config_path) as f: <NEW_LINE> <INDENT> hyp_args = yaml.safe_load(f) <NEW_LINE> <DEDENT> model = Trainer(hyp_args) <NEW_LINE> model.train()
|
:param args:
:return:
|
625941b77c178a314d6ef28f
|
def get_master_key(password_hash_hash,nt_response): <NEW_LINE> <INDENT> sha_hash=sha.new() <NEW_LINE> sha_hash.update(password_hash_hash) <NEW_LINE> sha_hash.update(nt_response) <NEW_LINE> sha_hash.update(Magic1) <NEW_LINE> return sha_hash.digest()[:16]
|
GetMasterKey(
IN 16-octet PasswordHashHash,
IN 24-octet NTResponse,
OUT 16-octet MasterKey )
{
20-octet Digest
ZeroMemory(Digest, sizeof(Digest));
/*
* SHSInit(), SHSUpdate() and SHSFinal()
* are an implementation of the Secure Hash Standard [7].
*/
SHSInit(Context);
SHSUpdate(Context, PasswordHashHash, 16);
SHSUpdate(Context, NTResponse, 24);
SHSUpdate(Context, Magic1, 27);
SHSFinal(Context, Digest);
MoveMemory(MasterKey, Digest, 16);
}
|
625941b71f5feb6acb0c498c
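The sha module used above is Python 2 only; a sketch of the same derivation on Python 3 with hashlib, where Magic1 is the 27-octet constant defined in RFC 3079:

import hashlib

Magic1 = b"This is the MPPE Master Key"  # 27 octets, per RFC 3079

def get_master_key_py3(password_hash_hash, nt_response):
    # SHA-1 over PasswordHashHash || NTResponse || Magic1, truncated to 16 octets
    return hashlib.sha1(password_hash_hash + nt_response + Magic1).digest()[:16]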
|
def predict(self): <NEW_LINE> <INDENT> f = open(self.results, "w") <NEW_LINE> self.preds = self.classifier.predict(self.X_test) <NEW_LINE> f.write("{}\n".format(classification_report(self.y_test, self.preds))) <NEW_LINE> f.close()
|
Check the result of the classifier on a testing set of images.
Save the results of the classifier in the text file.
|
625941b70c0af96317bb8021
|
def _until_demo(): <NEW_LINE> <INDENT> print("get until what?") <NEW_LINE> char = pmlr._read_keypress() <NEW_LINE> pmlr._writer(char + "\n") <NEW_LINE> y = pmlr.until(char) <NEW_LINE> print("\n" + y)
|
demonstrate the until function
|
625941b796565a6dacc8f50c
|
def test_basic(self): <NEW_LINE> <INDENT> for A, B, c_factor, symmetry, smooth in self.cases: <NEW_LINE> <INDENT> A = csr_matrix(A) <NEW_LINE> ml = smoothed_aggregation_solver(A, B, symmetry=symmetry, smooth=smooth, max_coarse=10) <NEW_LINE> np.random.seed(0) <NEW_LINE> x = sp.rand(A.shape[0]) + 1.0j * sp.rand(A.shape[0]) <NEW_LINE> b = A * sp.rand(A.shape[0]) <NEW_LINE> residuals = [] <NEW_LINE> x_sol = ml.solve(b, x0=x, maxiter=20, tol=1e-10, residuals=residuals) <NEW_LINE> del x_sol <NEW_LINE> avg_convergence_ratio = (residuals[-1] / residuals[0]) ** (1.0 / len(residuals)) <NEW_LINE> assert(avg_convergence_ratio < c_factor)
|
check that method converges at a reasonable rate
|
625941b715baa723493c3da9
|
def run(): <NEW_LINE> <INDENT> limit = int(utils.get_input("Find all prime numbers up to and including: ")) <NEW_LINE> primes = sieve_of_eratosthenes(limit) <NEW_LINE> end = {True: '\n', False: ', '} <NEW_LINE> for i, prime in enumerate(primes): <NEW_LINE> <INDENT> print(str(prime).rjust(4), end=end[(i+1)%13==0 or (i+1)==len(primes)])
|
Execute the challenges.020e module.
|
625941b7e8904600ed9f1d60
|
def test_vmf_to_velocity_005(self): <NEW_LINE> <INDENT> assert DynamicConverter.vmf_to_velocity(-1) == 62
|
Tests the VMF to Velocity conversion for a mp dynamic.
|
625941b78da39b475bd64dae
|
def encode_varint(value): <NEW_LINE> <INDENT> if value < pow(2, 8) - 3: <NEW_LINE> <INDENT> size = 1 <NEW_LINE> varint = int2bytes(value, size) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if value < pow(2, 16): <NEW_LINE> <INDENT> size = 2 <NEW_LINE> prefix = 253 <NEW_LINE> <DEDENT> elif value < pow(2, 32): <NEW_LINE> <INDENT> size = 4 <NEW_LINE> prefix = 254 <NEW_LINE> <DEDENT> elif value < pow(2, 64): <NEW_LINE> <INDENT> size = 8 <NEW_LINE> prefix = 255 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise Exception("Wrong input data size") <NEW_LINE> <DEDENT> varint = format(prefix, 'x') + change_endianness(int2bytes(value, size)) <NEW_LINE> <DEDENT> return varint
|
Encodes a given integer value to a varint. It only uses the four varint representation cases used by bitcoin:
1-byte, 2-byte, 4-byte or 8-byte integers.
:param value: The integer value that will be encoded into varint.
:type value: int
:return: The varint representation of the given integer value.
:rtype: str
|
625941b7b7558d58953c4d53
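The helpers int2bytes and change_endianness are defined elsewhere in the same codebase; with plausible hex-string stand-ins (an assumption about their contracts), the encoding cases look like this:

def int2bytes(value, size):
    # hypothetical stand-in: big-endian hex string of `size` bytes
    return value.to_bytes(size, "big").hex()

def change_endianness(hex_str):
    # hypothetical stand-in: reverse the byte order of a hex string
    return bytes.fromhex(hex_str)[::-1].hex()

assert encode_varint(250) == "fa"              # single byte, value < 253
assert encode_varint(515) == "fd0302"          # 2-byte payload, prefix 0xfd
assert encode_varint(2 ** 20) == "fe00001000"  # 4-byte payload, prefix 0xfe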
|
def _get_all_groups(self): <NEW_LINE> <INDENT> group_dict = {"groups": {}} <NEW_LINE> groups_directory = self.root_directory + 'groups/' <NEW_LINE> list_of_groups = os.listdir(groups_directory) <NEW_LINE> for filename in list_of_groups: <NEW_LINE> <INDENT> if filename.lower().endswith(('.yml', '.yaml')): <NEW_LINE> <INDENT> with open(groups_directory + filename, 'r') as stream: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> group_dict = yaml.safe_load(stream) <NEW_LINE> self._merge_dicts(self.inventory['groups'], group_dict) <NEW_LINE> <DEDENT> except yaml.YAMLError as exc: <NEW_LINE> <INDENT> print(exc) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> pass
|
Walk the groups/ dir, saving all found groups
Then, load all variables, hosts and child groups from each group
|
625941b77b180e01f3dc463e
|
def matchPreviousLiteral(expr): <NEW_LINE> <INDENT> rep = Forward() <NEW_LINE> def copyTokenToRepeater(s, l, t): <NEW_LINE> <INDENT> if t: <NEW_LINE> <INDENT> if len(t) == 1: <NEW_LINE> <INDENT> rep << t[0] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> tflat = _flatten(t.asList()) <NEW_LINE> rep << And(Literal(tt) for tt in tflat) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> rep << Empty() <NEW_LINE> <DEDENT> <DEDENT> expr.addParseAction(copyTokenToRepeater, callDuringTry=True) <NEW_LINE> rep.setName('(prev) ' + _ustr(expr)) <NEW_LINE> return rep
|
Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousLiteral(first)
matchExpr = first + ":" + second
will match C{"1:1"}, but not C{"1:2"}. Because this matches a
previous literal, will also match the leading C{"1:1"} in C{"1:10"}.
If this is not desired, use C{matchPreviousExpr}.
Do *not* use with packrat parsing enabled.
|
625941b79c8ee82313fbb5ac
|
def wait_for_js(self, message): <NEW_LINE> <INDENT> line = self.wait_for(category='js', message='[*] {}'.format(message)) <NEW_LINE> line.expected = True <NEW_LINE> return line
|
Wait for the given javascript console message.
Return:
The LogLine.
|
625941b776d4e153a657e967
|
def plot_relationship(df: pd.DataFrame, feature1: str, feature2: str, fit_line=True) -> None: <NEW_LINE> <INDENT> if fit_line: <NEW_LINE> <INDENT> plt.plot(np.unique(df[feature1]), np.poly1d(np.polyfit(df[feature1], df[feature2], 1))(np.unique(df[feature1])), color='red') <NEW_LINE> <DEDENT> plt.scatter(df[feature1], df[feature2], s=50, color='blue') <NEW_LINE> plt.grid() <NEW_LINE> plt.title('Pairwise Feature Relationship', fontsize=15) <NEW_LINE> plt.xlabel(feature1, fontsize=20) <NEW_LINE> plt.ylabel(feature2, fontsize=20) <NEW_LINE> axes = plt.gca() <NEW_LINE> x_pad = (df[feature1].max() - df[feature1].min()) * 0.05 <NEW_LINE> y_pad = (df[feature2].max() - df[feature2].min()) * 0.05 <NEW_LINE> axes.set_xlim([df[feature1].min() - x_pad, df[feature1].max() + x_pad]) <NEW_LINE> axes.set_ylim([df[feature2].min() - y_pad, df[feature2].max() + y_pad]) <NEW_LINE> fig = plt.gcf() <NEW_LINE> fig.set_size_inches(8, 8) <NEW_LINE> plt.show()
|
Plots the relationship of 2 features to each other
Parameters
----------
df : pd.DataFrame
pandas DataFrame where features are contained
feature1 : str
feature 1's column name
feature2 : str
feature 2's column name
fit_line : bool, optional
Whether to also plot a fitted line, by default True
|
625941b7ac7a0e7691ed3f11
|
def GetPaneByWidget(self, window): <NEW_LINE> <INDENT> for p in self._panes: <NEW_LINE> <INDENT> if p.window == window: <NEW_LINE> <INDENT> return p <NEW_LINE> <DEDENT> <DEDENT> return NonePaneInfo
|
This version of L{GetPane} looks up a pane based on a
'pane window'.
:param `window`: a `wx.Window` derived window.
:see: L{GetPane}
|
625941b74428ac0f6e5ba629
|
def volume_set(volume_level): <NEW_LINE> <INDENT> logging.info('Set volume to %s', volume_level) <NEW_LINE> return run_command("amixer -M set PCM " + str(volume_level) + "%")
|
This function sets the volume to a specific level
:param volume_level: volume level
:type volume_level: int
|
625941b74e696a04525c928c
|
def push_back(self, *args, **kwargs): <NEW_LINE> <INDENT> return _uhd_swig.device_addr_vector_t_push_back(self, *args, **kwargs)
|
push_back(device_addr_vector_t self, device_addr_t x)
|
625941b721a7993f00bc7b21
|
def serializable_fields(self): <NEW_LINE> <INDENT> return self
|
In certain cases the user model isn't serializable so you may want to
only send the id.
|
625941b72eb69b55b151c6e2
|
def getHLdata(self, survey, year, quarter, translate_sps=False, limit_download = True): <NEW_LINE> <INDENT> if self.datras_client is None: <NEW_LINE> <INDENT> print ('DATRAS client not set') <NEW_LINE> return <NEW_LINE> <DEDENT> if not isinstance(survey, list): <NEW_LINE> <INDENT> survey = [survey] <NEW_LINE> <DEDENT> if not isinstance(year, list): <NEW_LINE> <INDENT> year = [year] <NEW_LINE> <DEDENT> if not isinstance(quarter, list): <NEW_LINE> <INDENT> quarter = [quarter] <NEW_LINE> <DEDENT> datasets = [ params for params in product(survey, year, quarter)] <NEW_LINE> datasets_number = len(datasets) <NEW_LINE> if limit_download and (datasets_number > self.download_limits): <NEW_LINE> <INDENT> print("Data download is limited to %i datasets." % self.download_limits) <NEW_LINE> print("Your are trying to download %i datasets. Exiting" % datasets_number) <NEW_LINE> return <NEW_LINE> <DEDENT> downloaded_data = DatrasHLData() <NEW_LINE> downloaded = 0 <NEW_LINE> for dataset in datasets: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> data = self.datras_client.service.getHLdata(dataset[0], dataset[1], dataset[2]) <NEW_LINE> if data is not None: <NEW_LINE> <INDENT> downloaded_data = downloaded_data.append(serialize_object(data)) <NEW_LINE> downloaded += 1 <NEW_LINE> <DEDENT> <DEDENT> except: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> <DEDENT> print("%i out of %i Datasets downloaded" %(downloaded, datasets_number)) <NEW_LINE> if translate_sps: <NEW_LINE> <INDENT> if self.worms_client is None: <NEW_LINE> <INDENT> print("WORMS web service not available. Cannot resolve Aphia codes into species") <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> codes = downloaded_data.Valid_Aphia.unique() <NEW_LINE> print(codes) <NEW_LINE> names = pd.DataFrame() <NEW_LINE> for code in codes: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> name = self.worms_client.service.getAphiaNameByID(code) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> name = 'No data' <NEW_LINE> <DEDENT> print (code,name) <NEW_LINE> names = names.append({'Valid_Aphia':code,'Species_Name':name}, ignore_index = True) <NEW_LINE> <DEDENT> downloaded_data = pd.merge(downloaded_data, names, on='Valid_Aphia') <NEW_LINE> <DEDENT> <DEDENT> try: <NEW_LINE> <INDENT> string_fields = downloaded_data.select_dtypes(['object']) <NEW_LINE> downloaded_data[string_fields.columns] = string_fields.apply(lambda x: x.str.strip()) <NEW_LINE> downloaded_data.reset_index() <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> return downloaded_data
|
Returns a dataframe with length frequency distribution information for the
specified surveys, years and quarters.
Multiple datasets can be downloaded at once, though their total number is limited by the
parameter download_limits of the class constructor. However, this behavior can be
overridden with the limit_download parameter in the function call.
Parameters:
survey: one or many (as a list) survey names (SP-ARSA, IBTS...)
year: one or many (as a list) years as integers (2010, 2011...)
quarter: one or many (as a list) of quarters (1, 2...)
limit_download: Boolean indicating whether download limits should be observed or not.
WARNING: Downloading HL data is time consuming. Use carefully.
Examples:
lfd_data = pydatras.getHLdata(['SP-ARSA', 'SP-NORTH'], 2010, [1, 2])
lfd_data = pydatras.getHLdata(['SP-ARSA'], list(range(2010, 2019)), 1, limit_download = False)
|
625941b7097d151d1a222c94
|
def get_plugin_by_tkname(self, tkname): <NEW_LINE> <INDENT> tkname_map = {n: (a, b, c) for (a, b, c, n) in self.plugins.values()} <NEW_LINE> return tkname_map[tkname]
|
Get a plugin tuple in plugins dictionary by its tkname
Parameters
----------
tkname : `str`
A string name used by :py:mod:`tkinter` in the drop down menu.
Returns
-------
`tuple`
Size 3 tuple with Plugin, OptionsFrame, OptionsFileFrame
|
625941b77b25080760e39292
|
def delete_app(self, env_template_id, app_id): <NEW_LINE> <INDENT> return self._delete('/v1/templates/{id}/services/{app_id}'. format(id=env_template_id, app_id=app_id))
|
Deletes an application in an environment template.
:param env_template_id: The environment template ID.
:param app_id: the application ID to be deleted.
|
625941b771ff763f4b5494c6
|
def block_pad(pad): <NEW_LINE> <INDENT> def _callback(*_): <NEW_LINE> <INDENT> return Gst.PadProbeReturn.OK <NEW_LINE> <DEDENT> global block_probes <NEW_LINE> if pad not in block_probes: <NEW_LINE> <INDENT> block_probes[pad] = pad.add_probe(Gst.PadProbeType.BLOCK_DOWNSTREAM, _callback)
|
Block an element's pad. (If already blocked, do nothing.)
|
625941b730dc7b76659017a2
|
def setUpRealDatabase(self, table_names=[], basedir='basedir', want_pool=True, sqlite_memory=True): <NEW_LINE> <INDENT> self.__want_pool = want_pool <NEW_LINE> default_sqlite = 'sqlite://' <NEW_LINE> self.db_url = os.environ.get('BUILDBOT_TEST_DB_URL', default_sqlite) <NEW_LINE> if not sqlite_memory and self.db_url == default_sqlite: <NEW_LINE> <INDENT> self.db_url = "sqlite:///tmp.sqlite" <NEW_LINE> <DEDENT> if not os.path.exists(basedir): <NEW_LINE> <INDENT> os.makedirs(basedir) <NEW_LINE> <DEDENT> self.basedir = basedir <NEW_LINE> self.db_engine = enginestrategy.create_engine(self.db_url, basedir=basedir) <NEW_LINE> if not want_pool: <NEW_LINE> <INDENT> return defer.succeed(None) <NEW_LINE> <DEDENT> self.db_pool = pool.DBThreadPool(self.db_engine, reactor=reactor) <NEW_LINE> log.msg("cleaning database %s" % self.db_url) <NEW_LINE> d = self.db_pool.do(self.__thd_clean_database) <NEW_LINE> d.addCallback(lambda _: self.db_pool.do(self.__thd_create_tables, table_names)) <NEW_LINE> return d
|
Set up a database. Ordinarily sets up an engine and a pool and takes
care of cleaning out any existing tables in the database. If
C{want_pool} is false, then no pool will be created, and the database
will not be cleaned.
@param table_names: list of names of tables to instantiate
@param basedir: (optional) basedir for the engine
@param want_pool: (optional) false to not create C{self.db_pool}
@param sqlite_memory: (optional) False to avoid using an in-memory db
@returns: Deferred
|
625941b7283ffb24f3c55744
|
def expires(self, message): <NEW_LINE> <INDENT> for p in self.self_signatures(message): <NEW_LINE> <INDENT> packets = p.hashed_subpackets + p.unhashed_subpackets <NEW_LINE> for s in packets: <NEW_LINE> <INDENT> if isinstance(s, SignaturePacket.KeyExpirationTimePacket): <NEW_LINE> <INDENT> return self.timestamp + s.data <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return None
|
Find expiry time of this key based on the self signatures in a message
|
625941b763b5f9789fde6f1d
|
def create_payload(pipeline_name: str, region: str, status: str) -> Dict: <NEW_LINE> <INDENT> descriptions = { 'pending': 'Build started', 'success': 'Build succeeded', 'failure': 'Build failed!' } <NEW_LINE> return { 'state': status, 'target_url': build_code_pipeline_url(pipeline_name, region), 'description': descriptions[status], 'context': 'continuous-integration' if pipeline_name == 'ci-pipeline' else 'continuous-deployment' }
|
Creates JSON Payload for GitHub POST
:param pipeline_name: name of the pipeline
:param region: AWS Region
:param status: Github Status
|
625941b7a8370b77170526d9
|
def cross_Mod(qx): <NEW_LINE> <INDENT> kma = 'ma_%d' %qx.staVars[0] <NEW_LINE> xbar = qx.xbarWrk <NEW_LINE> dma, ma2n = xbar[kma][0], xbar['ma2n'][0] <NEW_LINE> dp, dp2n = xbar['dprice'][0], xbar['dp2n'][0] <NEW_LINE> kmod = -9 <NEW_LINE> if (dp>dma)and(dp2n<ma2n)and(dp>dp2n): <NEW_LINE> <INDENT> kmod = 1 <NEW_LINE> <DEDENT> elif (dp<dma)and(dp2n>ma2n)and(dp<dp2n): <NEW_LINE> <INDENT> kmod = -1 <NEW_LINE> <DEDENT> return kmod
|
Moving-average crossover strategy: detects whether price is trending above or below the moving average.
Args:
qx (zwQuantX): zwQuantX data bundle
ksma (str): column name of the moving-average data
Return:
1: above
0: equal
-1: below
625941b7a8370b77170526d8
|
def get_input_text_fields(self): <NEW_LINE> <INDENT> inputtext = [self.ids.inputkms, self.ids.inputprice, self.ids.inputavgconsumption] <NEW_LINE> return inputtext
|
Return a list with all writable input text fields available.
:return: list of writable input text fields available.
|
625941b73617ad0b5ed67d37
|
def __init__(self, obj, sources): <NEW_LINE> <INDENT> obj.addProperty("App::PropertyLinkList", "SourceObjects", "HQ_Ruled_Surface", "SourceObjects") <NEW_LINE> obj.addProperty("App::PropertyLinkSubList", "SourceShapes", "HQ_Ruled_Surface", "SourceShapes") <NEW_LINE> obj.addProperty("App::PropertyInteger", "Samples", "HQ_Ruled_Surface", "Number of orthogonal samples").Samples = 20 <NEW_LINE> obj.addProperty("App::PropertyFloatConstraint","SmoothingFactorStart", "HQ_Ruled_Surface", "Smoothing factor on curve start") <NEW_LINE> obj.addProperty("App::PropertyFloatConstraint","SmoothingFactorEnd", "HQ_Ruled_Surface", "Smoothing factor on curve end") <NEW_LINE> obj.addProperty("App::PropertyInteger", "Method", "HQ_Ruled_Surface", "Projection method (1,2,3,4)").Method = 3 <NEW_LINE> obj.addProperty("App::PropertyFloat", "Tol3D", "HQ_Ruled_Surface", "3D tolerance").Tol3D = 1e-5 <NEW_LINE> obj.addProperty("App::PropertyFloat", "Tol2D", "HQ_Ruled_Surface", "Parametric tolerance").Tol2D = 1e-8 <NEW_LINE> obj.SmoothingFactorStart = (0.2, 0.0, 0.5, 0.05) <NEW_LINE> obj.SmoothingFactorEnd = (0.2, 0.0, 0.5, 0.05) <NEW_LINE> objs = [] <NEW_LINE> shapes = [] <NEW_LINE> for s in sources: <NEW_LINE> <INDENT> if isinstance(s, (list, tuple)): <NEW_LINE> <INDENT> shapes.append(s) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> objs.append(s) <NEW_LINE> <DEDENT> <DEDENT> obj.SourceObjects = objs <NEW_LINE> obj.SourceShapes = shapes <NEW_LINE> obj.setEditorMode("Tol3D",2) <NEW_LINE> obj.setEditorMode("Tol2D",2) <NEW_LINE> obj.setEditorMode("Method",0) <NEW_LINE> obj.Proxy = self
|
Add the properties
|
625941b71b99ca400220a8e8
|
def cond_to_func(expr_or_func): <NEW_LINE> <INDENT> from .parse_expr import parse <NEW_LINE> if hasattr(expr_or_func, "__call__"): <NEW_LINE> <INDENT> return expr_or_func <NEW_LINE> <DEDENT> if isinstance(expr_or_func, str): <NEW_LINE> <INDENT> return parse(expr_or_func) <NEW_LINE> <DEDENT> raise Exception("Can't interpret as expression: %s" % expr_or_func)
|
Helper function to help automatically interpret string expressions
when you manually construct a query plan.
|
625941b7d53ae8145f87a0ae
|
def __set__(self, instance, names): <NEW_LINE> <INDENT> if isinstance(names, str): <NEW_LINE> <INDENT> names = [names] <NEW_LINE> <DEDENT> if names is None: <NEW_LINE> <INDENT> delattr(instance, self.name) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return super().__set__(instance, tuple(names))
|
Set value of ``instance`` attribute to ``names``.
Parameters
----------
instance : object
Instance that owns the attribute
names : None, str, list, tuple
Column name(s) to store, or None to clear
|
625941b7fb3f5b602dac34c6
|
def getPileupDatasetSizes(datasets, phedexUrl): <NEW_LINE> <INDENT> sizeByDset = {} <NEW_LINE> if not datasets: <NEW_LINE> <INDENT> return sizeByDset <NEW_LINE> <DEDENT> urls = ['%s/blockreplicas?dataset=%s' % (phedexUrl, dset) for dset in datasets] <NEW_LINE> logging.info("Executing %d requests against PhEDEx 'blockreplicas' API", len(urls)) <NEW_LINE> data = multi_getdata(urls, ckey(), cert()) <NEW_LINE> for row in data: <NEW_LINE> <INDENT> dataset = row['url'].split('=')[-1] <NEW_LINE> if row['data'] is None: <NEW_LINE> <INDENT> print("Failure in getPileupDatasetSizes for dataset %s. Error: %s %s" % (dataset, row.get('code'), row.get('error'))) <NEW_LINE> sizeByDset.setdefault(dataset, None) <NEW_LINE> continue <NEW_LINE> <DEDENT> rows = json.loads(row['data']) <NEW_LINE> sizeByDset.setdefault(dataset, 0) <NEW_LINE> try: <NEW_LINE> <INDENT> for item in rows['phedex']['block']: <NEW_LINE> <INDENT> sizeByDset[dataset] += item['bytes'] <NEW_LINE> <DEDENT> <DEDENT> except Exception as exc: <NEW_LINE> <INDENT> print("Failure in getPileupDatasetSizes for dataset %s. Error: %s" % (dataset, str(exc))) <NEW_LINE> sizeByDset[dataset] = None <NEW_LINE> <DEDENT> <DEDENT> return sizeByDset
|
Given a list of datasets, find all their blocks with replicas
available, i.e., blocks that have valid files to be processed,
and calculate the total dataset size
:param datasets: list of dataset names
:param phedexUrl: a string with the PhEDEx URL
:return: a dictionary of datasets and their respective sizes
NOTE: Value `None` is returned in case the data-service failed to serve a given request.
|
625941b750812a4eaa59c15e
|
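A hypothetical call, with a placeholder dataset name and assuming a reachable PhEDEx endpoint plus valid ckey()/cert() proxy credentials:

    sizes = getPileupDatasetSizes(
        ["/SomePU/Dataset-v1/PREMIX"],
        "https://cmsweb.cern.ch/phedex/datasvc/json/prod")
    # e.g. {"/SomePU/Dataset-v1/PREMIX": 123456789012}; None flags a failed request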
def morphology_op(filename, operation, n_iterations=1, verbose_inner_func=False): <NEW_LINE> <INDENT> filename = misc.add_extesnion_if_missing(filename, '.nii', verbose = verbose_inner_func) <NEW_LINE> filename = misc.add_extesnion_if_missing(filename, '.gz', verbose = verbose_inner_func) <NEW_LINE> nii = load(filename) <NEW_LINE> basename = nii.get_filename().split(os.extsep, 1)[0] <NEW_LINE> data = nii.get_data() <NEW_LINE> if operation == 'erode': <NEW_LINE> <INDENT> data = morphology.binary_erosion(data, iterations=n_iterations) <NEW_LINE> <DEDENT> elif operation == 'dilate': <NEW_LINE> <INDENT> data = morphology.binary_dilation(data, iterations=n_iterations) <NEW_LINE> <DEDENT> elif operation == 'close': <NEW_LINE> <INDENT> data = morphology.binary_dilation(data, iterations=n_iterations) <NEW_LINE> data = morphology.binary_erosion(data, iterations=n_iterations) <NEW_LINE> <DEDENT> elif operation == 'open': <NEW_LINE> <INDENT> data = morphology.binary_erosion(data, iterations=n_iterations) <NEW_LINE> data = morphology.binary_dilation(data, iterations=n_iterations) <NEW_LINE> <DEDENT> out = Nifti1Image(data, header=nii.header, affine=nii.affine) <NEW_LINE> out_name = basename + f'_{operation}.nii.gz' <NEW_LINE> save(out, out_name) <NEW_LINE> return out_name
|
Morphological operations on MRI data (NIfTI).
Takes a NIfTI filename and performs either:
erosion, dilation, erosion+dilation (i.e. 'opening')
or dilation+erosion (i.e. 'closing').
Mandatory arguments:
filename: path to the input NIfTI file (.nii or .nii.gz)
operation: one of the following strings: 'erode', 'dilate', 'close', 'open'
Optional arguments:
n_iterations: number of iterations, set to 1 as default.
verbose_inner_func: if True, the functions called by this function will
alert the user when making assumptions
Adapted from Faruk's GitHub gist:
https://gist.github.com/ofgulban/21f9b257de849c546f34863aa26f3dd3#file-dilate_erode_mri_data-py-L13
|
625941b75166f23b2e1a4f91
|
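A hypothetical call ('mask.nii.gz' is a placeholder for an existing binary mask):

    out_name = morphology_op('mask.nii.gz', 'close', n_iterations=2)
    # performs dilation then erosion and writes 'mask_close.nii.gz' next to the input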
def __init__(self, query_layer, memory, probability_fn, memory_sequence_length=None, memory_layer=None, check_inner_dims_defined=True, score_mask_value=None, name=None): <NEW_LINE> <INDENT> if (query_layer is not None and not isinstance(query_layer, layers_base.Layer)): <NEW_LINE> <INDENT> raise TypeError( "query_layer is not a Layer: %s" % type(query_layer).__name__) <NEW_LINE> <DEDENT> if (memory_layer is not None and not isinstance(memory_layer, layers_base.Layer)): <NEW_LINE> <INDENT> raise TypeError( "memory_layer is not a Layer: %s" % type(memory_layer).__name__) <NEW_LINE> <DEDENT> self._query_layer = query_layer <NEW_LINE> self._memory_layer = memory_layer <NEW_LINE> self.dtype = memory_layer.dtype <NEW_LINE> if not callable(probability_fn): <NEW_LINE> <INDENT> raise TypeError("probability_fn must be callable, saw type: %s" % type(probability_fn).__name__) <NEW_LINE> <DEDENT> if score_mask_value is None: <NEW_LINE> <INDENT> score_mask_value = dtypes.as_dtype( self._memory_layer.dtype).as_numpy_dtype(-np.inf) <NEW_LINE> <DEDENT> self._probability_fn = lambda score, prev: ( probability_fn( _maybe_mask_score(score, memory_sequence_length, score_mask_value), prev)) <NEW_LINE> with ops.name_scope( name, "BaseAttentionMechanismInit", nest.flatten(memory)): <NEW_LINE> <INDENT> self._values = _prepare_memory( memory, memory_sequence_length, check_inner_dims_defined=check_inner_dims_defined) <NEW_LINE> self._keys = ( self.memory_layer(self._values) if self.memory_layer else self._values) <NEW_LINE> self._batch_size = ( self._keys.shape[0].value or array_ops.shape(self._keys)[0]) <NEW_LINE> self._alignments_size = (self._keys.shape[1].value or array_ops.shape(self._keys)[1])
|
Construct base AttentionMechanism class.
Args:
query_layer: Callable. Instance of `tf.layers.Layer`. The layer's depth
must match the depth of `memory_layer`. If `query_layer` is not
provided, the shape of `query` must match that of `memory_layer`.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
probability_fn: A `callable`. Converts the score and previous alignments
to probabilities. Its signature should be:
`probabilities = probability_fn(score, previous_alignments)`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
memory_layer: Instance of `tf.layers.Layer` (may be None). The layer's
depth must match the depth of `query_layer`.
If `memory_layer` is not provided, the shape of `memory` must match
that of `query_layer`.
check_inner_dims_defined: Python boolean. If `True`, the `memory`
argument's shape is checked to ensure all but the two outermost
dimensions are fully defined.
score_mask_value: (optional): The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
name: Name to use when creating ops.
|
625941b7fb3f5b602dac34c7
|
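For reference, the probability_fn usually plugged in here is a plain softmax over the masked scores; the Bahdanau- and Luong-style mechanisms ignore previous_alignments, as this sketch does:

    import tensorflow as tf

    # score: [batch_size, alignments_size]; previous alignments unused here
    probability_fn = lambda score, _previous_alignments: tf.nn.softmax(score)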
def decrypt(self,data): <NEW_LINE> <INDENT> data1 = json.dumps(data).replace(' ', '').replace('\"', '\\\"') <NEW_LINE> return self.__data_processing(2,data1).split('======>')[1]
|
Decrypt the given data
|
625941b7b57a9660fec336b8
|
def create_question(self, question, answers, img): <NEW_LINE> <INDENT> self.frame.add_label('question', text=question, column=0, columnspan=4, row=0, sticky='W') <NEW_LINE> if img is not None: <NEW_LINE> <INDENT> self.frame.add_label('img',image=img, column=4, row=0, rowspan=6) <NEW_LINE> <DEDENT> last_row = 0 <NEW_LINE> for i in range(len(answers)): <NEW_LINE> <INDENT> self.frame.add_checkbox('question'+str(last_row), text=answers[i], column=1, columnspan=3, row=(1+i), sticky='W') <NEW_LINE> last_row += 1 <NEW_LINE> <DEDENT> return last_row
|
col: 0-3(4)
row: 0~
:param question: string of question
:param answers: list of answers
:param img: image optionally attached to the question
:return: row of last element
|
625941b7b830903b967e974f
|
def simulate_dice(num_dice, num_distinct, simulations=100_000, faces=6): <NEW_LINE> <INDENT> from random import randrange <NEW_LINE> trials = set() <NEW_LINE> for i in range(simulations): <NEW_LINE> <INDENT> rolls = sorted([randrange(1, faces + 1) for _ in range(num_dice)]) <NEW_LINE> if len(set(rolls)) == num_distinct: <NEW_LINE> <INDENT> trials.add(tuple(rolls)) <NEW_LINE> <DEDENT> <DEDENT> return len(trials)
|
Estimates by simulation rather than by formula, so it gives only a lower
bound that becomes more accurate with more rounds of simulation.
:return: number of ways to roll num_dice dice at once showing exactly
num_distinct distinct numbers, according to the simulation
|
625941b74e4d5625662d4215
|
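For example, two dice showing exactly two distinct faces have C(6,2) = 15 distinct sorted outcomes, which 100,000 simulated rolls should all find with near certainty:

    print(simulate_dice(num_dice=2, num_distinct=2))  # -> 15 (with high probability)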
def argsFetch(): <NEW_LINE> <INDENT> import argparse <NEW_LINE> parser = argparse.ArgumentParser(description='Script for crawling, downloading & parsing Jobcards') <NEW_LINE> parser.add_argument('-v', '--visible', help='Make the browser visible', required=False, action='store_const', const=1) <NEW_LINE> parser.add_argument('-b', '--browser', help='Specify the browser to test with', required=False) <NEW_LINE> parser.add_argument('-d', '--district', help='District for which you need to download', required=False) <NEW_LINE> parser.add_argument('-l', '--log-level', help='Log level defining verbosity', required=False) <NEW_LINE> parser.add_argument('-n', '--numberOfThreads', help='Number of threads, default 5', required=False) <NEW_LINE> parser.add_argument('-q', '--queueSize', help='Number of musters in queue, default 200', required=False) <NEW_LINE> parser.add_argument('-s', '--stateCode', help='State code for which the musters need to be downloaded', required=False) <NEW_LINE> parser.add_argument('-p', '--panchayatCode', help='Panchayat code for which the musters need to be downloaded', required=False) <NEW_LINE> parser.add_argument('-f', '--finyear', help='Financial year for which the musters need to be downloaded', required=False) <NEW_LINE> parser.add_argument('-cr', '--crawlRequirement', help='Tag of crawlRequirement that panchayats are tagged with; by default it runs for panchayats tagged with crawlRequirement of FULL', required=False) <NEW_LINE> parser.add_argument('-limit', '--limit', help='Limit on the number of items to download', required=False) <NEW_LINE> args = vars(parser.parse_args()) <NEW_LINE> return args
|
Parser for the argument list; returns the parsed args as a dict
|
625941b7dd821e528d63afe3
|
def test_login_required(self): <NEW_LINE> <INDENT> res = self.client.get(MEETUP_URL_LIST) <NEW_LINE> self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)
|
Test that login required for retrieving Meetups
|
625941b791af0d3eaac9b84c
|
def prepare_opt_after_ts( self, socket_calculator: str, rxn: Dict[str, str], pytemplate: str, balsam_exe_settings: Dict[str, int], calc_keywords: Dict[str, str], pseudopotentials: Dict[str, str], pseudo_dir: str) -> None: <NEW_LINE> <INDENT> rxn_name = IO().get_rxn_name(rxn) <NEW_LINE> ts_vib_dir = os.path.join(self.creation_dir, self.facetpath, rxn_name, 'TS_estimate_unique_vib') <NEW_LINE> vib_traj_files = Path(ts_vib_dir).glob('**/*traj') <NEW_LINE> for vib_traj in vib_traj_files: <NEW_LINE> <INDENT> vib_traj = str(vib_traj) <NEW_LINE> prefix = vib_traj.split('/')[-2] <NEW_LINE> after_ts_dir = os.path.join(self.creation_dir, self.facetpath, rxn_name, 'after_TS', prefix) <NEW_LINE> os.makedirs(after_ts_dir, exist_ok=True) <NEW_LINE> fname = os.path.join( prefix + '_' + self.facetpath + '_' + rxn_name + '_after_ts') <NEW_LINE> fname_forward = os.path.join(after_ts_dir, fname + '_f') <NEW_LINE> fname_reverse = os.path.join(after_ts_dir, fname + '_r') <NEW_LINE> AfterTS.get_forward_and_reverse( vib_traj, fname_forward, fname_reverse) <NEW_LINE> self.create_after_ts_py_files( socket_calculator, pytemplate, fname_forward, fname_reverse, balsam_exe_settings, calc_keywords, pseudopotentials, pseudo_dir)
|
Create files for after_TS calculations - to verify TS structures
and get corresponding reactant and product minima
Parameters
----------
rxn : Dict[str, str]
a dictionary with info about the particular reaction. This can be
viewed as a many-reaction .yaml file split into single-reaction
:literal:`*.yaml` files
pytemplate : python script
a template for after_TS calculations
balsam_exe_settings : Dict[str, int]
a dictionary with balsam execute parameters (cores, nodes, etc.),
e.g.
>>> balsam_exe_settings = {'num_nodes': 1,
'ranks_per_node': 48,
'threads_per_rank': 1}
calc_keywords : Dict[str, str]
a dictionary with parameters to run DFT package. Quantum Espresso
is used as default, e.g.
>>> calc_keywords = {'kpts': (3, 3, 1), 'occupations': 'smearing',
'smearing': 'marzari-vanderbilt',
'degauss': 0.01, 'ecutwfc': 40, 'nosym': True,
'conv_thr': 1e-11, 'mixing_mode': 'local-TF'}
pseudopotentials : Dict[str, str]
a dictionary with QE pseudopotentials for all species.
e.g.
>>> dict(Cu='Cu.pbe-spn-kjpaw_psl.1.0.0.UPF',
H='H.pbe-kjpaw_psl.1.0.0.UPF',
O='O.pbe-n-kjpaw_psl.1.0.0.UPF',
C='C.pbe-n-kjpaw_psl.1.0.0.UPF')
pseudo_dir : str
a path to the QE's pseudopotentials main directory
e.g.
``'/home/mgierad/espresso/pseudo'``
|
625941b7be383301e01b52c5
|
def get_reports(trpf_agents, trpf, choices, excess_traveller_counts): <NEW_LINE> <INDENT> reports = list(map(lambda agent, excess: agent.report_congestion(excess), trpf_agents, repeat(excess_traveller_counts))) <NEW_LINE> list(map(lambda report: trpf.recieve_report(*report), reports))
|
This function gets the reports from agents and gives them to trpf.
|
625941b797e22403b379cdd1
|
def get_session(driver=None, app=None): <NEW_LINE> <INDENT> return SqlConnector(driver=driver, app=app).session
|
Return a session for the current SQL connector.
Accept the same arguments as :class:`salic_api.connector.SqlConnector`
|
625941b750485f2cf553cbd1
|
def retrieve_log_flat(settings, tws=None): <NEW_LINE> <INDENT> settings['container'] = tws.container <NEW_LINE> settings['prefix'] = tws.prefix <NEW_LINE> settings['user'] = user = tws.get_current_user()['upn'] <NEW_LINE> settings['users_dir'] = os.path.join(tws.settings['user_dir'], user) <NEW_LINE> settings['gateone_dir'] = tws.settings['gateone_dir'] <NEW_LINE> io_loop = tornado.ioloop.IOLoop.instance() <NEW_LINE> global PROCS <NEW_LINE> if user not in PROCS: <NEW_LINE> <INDENT> PROCS[user] = {} <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> fd = PROCS[user]['queue']._reader.fileno() <NEW_LINE> if fd in io_loop._handlers: <NEW_LINE> <INDENT> io_loop.remove_handler(fd) <NEW_LINE> <DEDENT> if PROCS[user]['process']: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> PROCS[user]['process'].terminate() <NEW_LINE> <DEDENT> except OSError: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> PROCS[user]['queue'] = q = Queue() <NEW_LINE> PROCS[user]['process'] = Process( target=_retrieve_log_flat, args=(q, settings)) <NEW_LINE> def send_message(fd, event): <NEW_LINE> <INDENT> io_loop.remove_handler(fd) <NEW_LINE> message = q.get() <NEW_LINE> tws.write_message(message) <NEW_LINE> <DEDENT> io_loop.add_handler(q._reader.fileno(), send_message, io_loop.READ) <NEW_LINE> PROCS[user]['process'].start()
|
Calls _retrieve_log_flat() via a multiprocessing Process() so it doesn't
cause the IOLoop to block.
*settings* - A dict containing the *log_filename*, *colors*, and *theme* to
use when generating the HTML output.
*tws* - TerminalWebSocket instance.
Here are the details on *settings*:
*settings['log_filename']* - The name of the log to display.
*settings['colors']* - The CSS color scheme to use when generating output.
*settings['theme']* - The CSS theme to use when generating output.
*settings['where']* - Whether or not the result should go into a new window or an iframe.
|
625941b79f2886367277a6c9
|
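A stripped-down sketch of the pattern used above: run the blocking work in a multiprocessing.Process and wake the IOLoop only when the Queue's underlying pipe becomes readable. This assumes the same pre-5.x Tornado API (IOLoop.instance(), add_handler) as the original:

    import tornado.ioloop
    from multiprocessing import Process, Queue

    def worker(q):
        q.put("rendered log HTML")        # stands in for _retrieve_log_flat()

    q = Queue()
    io_loop = tornado.ioloop.IOLoop.instance()

    def on_ready(fd, events):
        io_loop.remove_handler(fd)        # one-shot handler, as in the original
        print(q.get())                    # deliver the result without blocking
        io_loop.stop()

    io_loop.add_handler(q._reader.fileno(), on_ready, io_loop.READ)
    Process(target=worker, args=(q,)).start()
    io_loop.start()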
def __initialize_profiles_model ( self ): <NEW_LINE> <INDENT> self.profilesmodel = gtk.ListStore ( gobject.TYPE_STRING ) <NEW_LINE> self.treeview.set_model ( self.profilesmodel ) <NEW_LINE> gtk_tree_view_add_text_column ( self.treeview, _('Profile Name'), 0, False )
|
Initializes the model and treeview
|
625941b701c39578d7e74c7c
|
def execute(ctxt, executable=None, file_=None, input_=None, output=None, args=None, dir_=None, filter_=None): <NEW_LINE> <INDENT> if args: <NEW_LINE> <INDENT> if isinstance(args, basestring): <NEW_LINE> <INDENT> args = shlex.split(args) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> args = [] <NEW_LINE> <DEDENT> if dir_: <NEW_LINE> <INDENT> def resolve(*args): <NEW_LINE> <INDENT> return ctxt.resolve(dir_, *args) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> resolve = ctxt.resolve <NEW_LINE> <DEDENT> if file_ and os.path.isfile(resolve(file_)): <NEW_LINE> <INDENT> file_ = resolve(file_) <NEW_LINE> <DEDENT> shell = False <NEW_LINE> if file_ and os.name == 'nt': <NEW_LINE> <INDENT> shell = True <NEW_LINE> <DEDENT> if executable is None: <NEW_LINE> <INDENT> executable = file_ <NEW_LINE> <DEDENT> elif file_: <NEW_LINE> <INDENT> args[:0] = [file_] <NEW_LINE> <DEDENT> if os.name == 'nt' and executable.upper() in ['COPY', 'DIR', 'ECHO', 'ERASE', 'DEL', 'MKDIR', 'MD', 'MOVE', 'RMDIR', 'RD', 'TYPE']: <NEW_LINE> <INDENT> shell = True <NEW_LINE> <DEDENT> if input_: <NEW_LINE> <INDENT> input_file = codecs.open(resolve(input_), 'r', 'utf-8') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> input_file = None <NEW_LINE> <DEDENT> if output: <NEW_LINE> <INDENT> output_file = codecs.open(resolve(output), 'w', 'utf-8') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> output_file = None <NEW_LINE> <DEDENT> if dir_ and os.path.isdir(ctxt.resolve(dir_)): <NEW_LINE> <INDENT> dir_ = ctxt.resolve(dir_) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> dir_ = ctxt.basedir <NEW_LINE> <DEDENT> if not filter_: <NEW_LINE> <INDENT> filter_=lambda s: s <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> cmdline = CommandLine(executable, args, input=input_file, cwd=dir_, shell=shell) <NEW_LINE> log_elem = xmlio.Fragment() <NEW_LINE> for out, err in cmdline.execute(): <NEW_LINE> <INDENT> if out is not None: <NEW_LINE> <INDENT> log.info(out) <NEW_LINE> info = filter_(out) <NEW_LINE> if info: <NEW_LINE> <INDENT> log_elem.append(xmlio.Element('message', level='info')[ info.replace(ctxt.basedir + os.sep, '') .replace(ctxt.basedir, '') ]) <NEW_LINE> <DEDENT> if output: <NEW_LINE> <INDENT> output_file.write(out + os.linesep) <NEW_LINE> <DEDENT> <DEDENT> if err is not None: <NEW_LINE> <INDENT> log.error(err) <NEW_LINE> log_elem.append(xmlio.Element('message', level='error')[ err.replace(ctxt.basedir + os.sep, '') .replace(ctxt.basedir, '') ]) <NEW_LINE> if output: <NEW_LINE> <INDENT> output_file.write(err + os.linesep) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> ctxt.log(log_elem) <NEW_LINE> <DEDENT> finally: <NEW_LINE> <INDENT> if input_: <NEW_LINE> <INDENT> input_file.close() <NEW_LINE> <DEDENT> if output: <NEW_LINE> <INDENT> output_file.close() <NEW_LINE> <DEDENT> <DEDENT> return cmdline.returncode
|
Generic external program execution.
This function is not itself bound to a recipe command, but rather used from
other commands.
:param ctxt: the build context
:type ctxt: `Context`
:param executable: name of the executable to run
:param file\_: name of the script file, relative to the project directory,
that should be run
:param input\_: name of the file containing the data that should be passed
to the shell script on its standard input stream
:param output: name of the file to which the output of the script should be
written
:param args: command-line arguments to pass to the script
:param filter\_: function to filter out messages from the executable stdout
|
625941b7c4546d3d9de72868
|
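A hypothetical recipe-command usage; ctxt stands for a build Context, and the file and output names are placeholders:

    returncode = execute(ctxt, file_='build.sh', args='--target all',
                         output='build.log')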
def topological_sort(graph): <NEW_LINE> <INDENT> queue = [] <NEW_LINE> edges = graph.values() <NEW_LINE> for node in graph.iterkeys(): <NEW_LINE> <INDENT> for edge in edges: <NEW_LINE> <INDENT> if node in edge: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> queue.append(node) <NEW_LINE> <DEDENT> <DEDENT> result = [] <NEW_LINE> while queue: <NEW_LINE> <INDENT> node = queue.pop(0) <NEW_LINE> result.append(node) <NEW_LINE> for target in graph[node].copy(): <NEW_LINE> <INDENT> graph[node].remove(target) <NEW_LINE> for edge in graph.itervalues(): <NEW_LINE> <INDENT> if target in edge: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> queue.append(target) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> result.reverse() <NEW_LINE> return result
|
Conducts a topological sort of a directed acyclic graph and returns
the sorted nodes as a ``list``.
:param dict graph: The graph to sort, which must be a ``dict`` mapping
each node to a ``set`` containing that node's edges (which are
other nodes present in the graph). This argument is modified during
the sort and should be discarded afterward.
|
625941b74c3428357757c163
|
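A worked example (note the function as written is Python 2 only, since it uses iterkeys/itervalues). With edges pointing from a node to its dependencies, the reversed result lists dependencies first:

    graph = {'a': set(['b', 'c']), 'b': set(['c']), 'c': set()}
    print(topological_sort(graph))  # -> ['c', 'b', 'a']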
def get_gilis_val(df, column_label, row_label): <NEW_LINE> <INDENT> return df[column_label][row_label]
|
This function returns a Gilis value
:param df: dataframe
:param column_label: amino code (string)
:param row_label: amino code (string)
:return: value (int)
|
625941b7236d856c2ad44617
|
def test_paralleldiagnostics_create_batches_same_class_without_exclusives(self): <NEW_LINE> <INDENT> self.generic_batch_test(["arptable.yaml", "arptablesrules.yaml", "dig.yaml"], [[0, 1, 2]])
|
Test ec2rlcore.paralleldiagnostics._create_batch with a set of 3 non-exclusive modules.
|
625941b7dd821e528d63afe4
|
def test99a_nonIntEnum(self): <NEW_LINE> <INDENT> colors = {'red': 1.0} <NEW_LINE> self.assertRaises(NotImplementedError, self._createCol, colors, 'red', base=tb.FloatAtom())
|
Describing an enumerated column of floats (not implemented).
|
625941b732920d7e50b28004
|
def createWidgets(self): <NEW_LINE> <INDENT> layout = QHBoxLayout() <NEW_LINE> self.logsItem = TestsView.TestsView(parent=self, local = self.local) <NEW_LINE> self.resumeView = ResumeView.TextualView(parent=self) <NEW_LINE> if QtHelper.str2bool( Settings.instance().readValue( key = 'TestRun/hide-resume-view' ) ): <NEW_LINE> <INDENT> self.hideResumeView() <NEW_LINE> <DEDENT> self.graphView = GraphView.FlowChartView(parent=self) <NEW_LINE> self.logsView = TextualView.TextualView2(parent=self) <NEW_LINE> self.hexLogsView = DetailedView.DetailedView(parent=self) <NEW_LINE> self.displayTab = QTabWidget() <NEW_LINE> hSplitter = QSplitter(self) <NEW_LINE> hSplitter.setOrientation(Qt.Vertical) <NEW_LINE> hSplitter.addWidget( self.resumeView ) <NEW_LINE> hSplitter.addWidget( self.logsView ) <NEW_LINE> hSplitter.addWidget( self.hexLogsView ) <NEW_LINE> self.displayTab.addTab(hSplitter, self.tr('Events') ) <NEW_LINE> self.displayTab.addTab(self.graphView, self.tr('Diagram') ) <NEW_LINE> defaultTab = Settings.instance().readValue( key = 'TestRun/default-tab-run' ) <NEW_LINE> self.displayTab.setCurrentIndex(int(defaultTab)) <NEW_LINE> self.currentEdit = QLineEdit() <NEW_LINE> self.currentEdit.setReadOnly(True) <NEW_LINE> self.currentEdit.setStyleSheet("QLineEdit { background-color : #F0F0F0; color: grey; }") <NEW_LINE> leftFrame = QFrame() <NEW_LINE> leftLayout = QVBoxLayout() <NEW_LINE> leftLayout.setContentsMargins(0, 0, 0, 0) <NEW_LINE> leftFrame.setLayout(leftLayout) <NEW_LINE> leftLayout.addWidget(self.currentEdit) <NEW_LINE> leftLayout.addWidget(self.displayTab) <NEW_LINE> v_splitter = QSplitter(self) <NEW_LINE> v_splitter.addWidget( self.logsItem ) <NEW_LINE> v_splitter.addWidget( leftFrame ) <NEW_LINE> v_splitter.setStretchFactor(1, 1) <NEW_LINE> layout.addWidget(v_splitter) <NEW_LINE> self.setLayout(layout)
|
QtWidgets creation
___________ _________________
| | | |
| | | |
| | | |
| TestsItem | | TextualLogView |
| | | |
| | | |
|___________| |_________________|
|
625941b792d797404e303fc3
|
def __init__(self, guake): <NEW_LINE> <INDENT> self.guake = guake <NEW_LINE> self.accel_group = None <NEW_LINE> self.globalhotkeys = {} <NEW_LINE> globalkeys = ['show-hide', 'show-focus'] <NEW_LINE> for key in globalkeys: <NEW_LINE> <INDENT> guake.settings.keybindingsGlobal.onChangedValue(key, self.reload_global) <NEW_LINE> guake.settings.keybindingsGlobal.triggerOnChangedValue( guake.settings.keybindingsGlobal, key, None ) <NEW_LINE> <DEDENT> keys = [ 'toggle-fullscreen', 'new-tab', 'new-tab-home', 'close-tab', 'rename-current-tab', 'previous-tab', 'next-tab', 'clipboard-copy', 'clipboard-paste', 'quit', 'zoom-in', 'zoom-out', 'increase-height', 'decrease-height', 'increase-transparency', 'decrease-transparency', 'toggle-transparency', "search-on-web", 'move-tab-left', 'move-tab-right', 'switch-tab1', 'switch-tab2', 'switch-tab3', 'switch-tab4', 'switch-tab5', 'switch-tab6', 'switch-tab7', 'switch-tab8', 'switch-tab9', 'switch-tab10', 'switch-tab-last', 'reset-terminal', 'split-tab-vertical', 'split-tab-horizontal', 'close-terminal', 'focus-terminal-up', 'focus-terminal-down', 'focus-terminal-right', 'focus-terminal-left', 'move-terminal-split-up', 'move-terminal-split-down', 'move-terminal-split-left', 'move-terminal-split-right', 'search-terminal' ] <NEW_LINE> for key in keys: <NEW_LINE> <INDENT> guake.settings.keybindingsLocal.onChangedValue(key, self.reload_accelerators) <NEW_LINE> <DEDENT> self.reload_accelerators()
|
Constructor of Keyboard, only receives the guake instance
to be used in internal methods.
|
625941b767a9b606de4a7cf5
|
def run_output(*args): <NEW_LINE> <INDENT> if len(args) > 0 and args[0] == "": <NEW_LINE> <INDENT> return 0, None, None <NEW_LINE> <DEDENT> return bash.run_output("iptables", "--wait", *args)
|
Returns the output returned from the given iptables command.
:return: Tuple[int,str,str]
|
625941b79c8ee82313fbb5ad
|
def do_chronbach(self, table_info, log=logging,**kwargs): <NEW_LINE> <INDENT> if 'scales' not in table_info: <NEW_LINE> <INDENT> log.debug('scales not found in table_info') <NEW_LINE> return table_info <NEW_LINE> <DEDENT> if 'tablename' not in table_info: <NEW_LINE> <INDENT> log.warning('tablename not found in table_info. That is weird.') <NEW_LINE> return table_info <NEW_LINE> <DEDENT> if 'type' not in table_info: <NEW_LINE> <INDENT> log.warning('type not found in table_info. this is weird') <NEW_LINE> return table_info <NEW_LINE> <DEDENT> if table_info['type'] != 'data': <NEW_LINE> <INDENT> log.info('%s is not a data table. skipping chronbach'%table_info['tablename']) <NEW_LINE> return table_info <NEW_LINE> <DEDENT> instr_abbr = None <NEW_LINE> if 'instrument_abbreviation' in table_info: <NEW_LINE> <INDENT> instr_abbr = table_info['instrument_abbreviation'] <NEW_LINE> <DEDENT> tname = table_info['tablename'] <NEW_LINE> scales = table_info['scales'] <NEW_LINE> scale_abbrs = [sc['abbreviation'] for sc in scales] <NEW_LINE> log.info('Computing alphas for table %s'%table_info['tablename']) <NEW_LINE> for i,scale in enumerate(scale_abbrs): <NEW_LINE> <INDENT> log.info('calculating chronbach for %s (%s)'%( table_info['tablename'],scale)) <NEW_LINE> scale_cols, col_types = self.dbm.get_scale_columns(tname, scale, instr_abbr = instr_abbr,log=log) <NEW_LINE> log.info('%s (%s) including cols : %s'%(tname, scale, scale_cols)) <NEW_LINE> scale_alpha = self.dbm.calculate_chronbach(tname, scale_cols, col_types, log=log, **kwargs) <NEW_LINE> log.info('alpha calculated to be %s'%str(scale_alpha)) <NEW_LINE> scales[i]['alpha'] = scale_alpha <NEW_LINE> <DEDENT> return table_info
|
This function computes Cronbach's alpha for each scale of a table.
Cronbach's alpha is a measure of internal consistency for a given
instrument.
Each instrument is usually composed of a few different scales,
although some instruments may have only one.
This function goes through table_info and tries to calculate the
alpha for each scale.
ARGS & RETURN
-----
TABLE_INFO expects a dictionary as produced by
self.table_to_bulkimportItem which contains all we know about this
table.
Recall the structure is
{
...
...
"scales":[
{ 'abbreviation':'abbrv',
'description':'desc' }
]
}
RETURNS the same dictionary with the calculated alphas
inserted under the key 'alpha'.
This is a safe function, i.e. it shouldn't throw any errors;
it will either fill in table_info or not. Check for 'alpha' within
scales to see what it did.
IMPLEMENTATION DETAILS
------
This function implements the following algorithm:
1. if 'scales' is not in table_info: do not proceed; else: continue
2. for each scale:
a. select all columns from the table that match wtp conventions
for the current scale abbreviation;
recall columns should be named:
[instr_abbr][scale_abbr][quest_num][resp_abbr]
b. calculate Cronbach's alpha over all the selected data, using the
subroutine described elsewhere.
|
625941b7f8510a7c17cf953d
|
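A minimal sketch of the statistic itself, separate from the class's SQL plumbing, using the standard formula alpha = k/(k-1) * (1 - sum of item variances / variance of summed scores):

    import numpy as np

    def cronbach_alpha(items):
        # items: (n_respondents, k_items) array of item scores
        items = np.asarray(items, dtype=float)
        k = items.shape[1]
        item_vars = items.var(axis=0, ddof=1).sum()   # sum of per-item variances
        total_var = items.sum(axis=1).var(ddof=1)     # variance of total scores
        return k / (k - 1) * (1 - item_vars / total_var)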