complexity: int64 (1 to 139)
fun_name: stringlengths (1 to 80)
code: stringlengths (101 to 62.2k)
commit_id: stringlengths (40 to 40)
ast_errors: stringlengths (0 to 3.11k)
ast_levels: int64 (6 to 36)
file_name: stringlengths (5 to 79)
n_ast_nodes: int64 (17 to 19.2k)
commit_message: stringlengths (3 to 15.3k)
d_id: int64 (12 to 121k)
n_ast_errors: int64 (0 to 9)
n_whitespaces: int64 (4 to 10.8k)
token_counts: int64 (5 to 3.06k)
vocab_size: int64 (4 to 1.11k)
id: int64 (20 to 338k)
n_words: int64 (4 to 4.82k)
repo: stringlengths (3 to 22)
n_identifiers: int64 (2 to 176)
path: stringlengths (7 to 134)
language: stringclasses (1 value)
nloc: int64 (1 to 413)
documentation: dict
url: stringlengths (31 to 59)
4
hard_nms
def hard_nms(box_scores, iou_threshold, top_k=-1, candidate_size=200):
    scores = box_scores[:, -1]
    boxes = box_scores[:, :-1]
    picked = []
    indexes = np.argsort(scores)
    indexes = indexes[-candidate_size:]
    while len(indexes) > 0:
        current = indexes[-1]
        picked.append(current)
        if 0 < top_k == len(picked) or len(indexes) == 1:
            break
        current_box = boxes[current, :]
        indexes = indexes[:-1]
        rest_boxes = boxes[indexes, :]
        iou = iou_of(
            rest_boxes,
            np.expand_dims(
                current_box,
                axis=0),
        )
        indexes = indexes[iou <= iou_threshold]
    return box_scores[picked, :]
ddaa2c2552e19635cd6cdf38619f1f176c358f89
13
picodet_postprocess.py
237
add SLANet
4,737
0
196
152
50
24,457
68
PaddleOCR
20
ppstructure/layout/picodet_postprocess.py
Python
20
{ "docstring": "\n Args:\n box_scores (N, 5): boxes in corner-form and probabilities.\n iou_threshold: intersection over union threshold.\n top_k: keep top_k results. If k <= 0, keep all the results.\n candidate_size: only consider the candidates with the highest scores.\n Returns:\n picked: a list of indexes of the kept boxes\n ", "language": "en", "n_whitespaces": 91, "n_words": 45, "vocab_size": 38 }
https://github.com/PaddlePaddle/PaddleOCR.git
4
get_module_by_name
def get_module_by_name(model, module_name):
    name_list = module_name.split(".")
    for name in name_list[:-1]:
        if hasattr(model, name):
            model = getattr(model, name)
        else:
            return None, None
    if hasattr(model, name_list[-1]):
        leaf_module = getattr(model, name_list[-1])
        return model, leaf_module
    else:
        return None, None
d68c786ff81bad19c04619d6a999ff34aaa724e7
12
pruning.py
131
[Compression] remove pruning v1 & refactor directory (#5228)
25,005
0
107
82
24
113,688
35
nni
9
nni/compression/pytorch/utils/pruning.py
Python
12
{ "docstring": "\n Get a module specified by its module name\n Parameters\n ----------\n model : pytorch model\n the pytorch model from which to get its module\n module_name : str\n the name of the required module\n Returns\n -------\n module, module\n the parent module of the required module, the required module\n ", "language": "en", "n_whitespaces": 95, "n_words": 46, "vocab_size": 25 }
https://github.com/microsoft/nni.git
2
check_keys_split
def check_keys_split(self, decoded) -> None:
    bad_keys = set(decoded.keys()).difference(set(self._split_keys))
    if bad_keys:
        bad_keys_joined = ", ".join(bad_keys)
        raise ValueError(f"JSON data had unexpected key(s): {bad_keys_joined}")
734db4f1fde2566a02b3c7ff661a479b0a71633c
12
_json.py
85
TYP: Return annotations for io/{formats,json} (#47516) * TYP: Return annotations for io/{formats,json} * flake8 * explicitly check whether width is None
40,008
0
64
47
20
167,425
21
pandas
11
pandas/io/json/_json.py
Python
8
{ "docstring": "\n Checks that dict has only the appropriate keys for orient='split'.\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
https://github.com/pandas-dev/pandas.git
7
connect
def connect(self, host='', port=0, timeout=-999, source_address=None):
    if host != '':
        self.host = host
    if port > 0:
        self.port = port
    if timeout != -999:
        self.timeout = timeout
    if self.timeout is not None and not self.timeout:
        raise ValueError('Non-blocking socket (timeout=0) is not supported')
    if source_address is not None:
        self.source_address = source_address
    sys.audit("ftplib.connect", self, self.host, self.port)
    self.sock = socket.create_connection((self.host, self.port), self.timeout,
                                         source_address=self.source_address)
    self.af = self.sock.family
    self.file = self.sock.makefile('r', encoding=self.encoding)
    self.welcome = self.getresp()
    return self.welcome
8198943edd73a363c266633e1aa5b2a9e9c9f526
10
ftplib.py
264
add python 3.10.4 for windows
54,803
0
255
167
49
217,460
72
XX-Net
19
python3.10.4/Lib/ftplib.py
Python
18
{ "docstring": "Connect to host. Arguments are:\n - host: hostname to connect to (string, default previous host)\n - port: port to connect to (integer, default previous port)\n - timeout: the timeout to set against the ftp socket(s)\n - source_address: a 2-tuple (host, port) for the socket to bind\n to as its source address before connecting.\n ", "language": "en", "n_whitespaces": 103, "n_words": 53, "vocab_size": 37 }
https://github.com/XX-net/XX-Net.git
15
model_is_indexable
def model_is_indexable(cls, model, allow_child_models=False):
    if getattr(model, "wagtail_reference_index_ignore", False):
        return False

    # Don't check any models that have a parental key, references from these will be collected from the parent
    if not allow_child_models and any(
        [isinstance(field, ParentalKey) for field in model._meta.get_fields()]
    ):
        return False

    for field in model._meta.get_fields():
        if field.is_relation and field.many_to_one:
            if getattr(field, "wagtail_reference_index_ignore", False):
                continue

            if getattr(
                field.related_model, "wagtail_reference_index_ignore", False
            ):
                continue

            if isinstance(field, (ParentalKey, GenericRel)):
                continue

            return True

        if hasattr(field, "extract_references"):
            return True

    if issubclass(model, ClusterableModel):
        for child_relation in get_all_child_relations(model):
            if cls.model_is_indexable(
                child_relation.related_model,
                allow_child_models=True,
            ):
                return True

    return False
c8689acb3724dc12fb09a0bfc14d7e4755a1ea0f
13
reference_index.py
244
Check field for .extract_references method instead of field type Co-authored-by: Matt Westcott <matthew@torchbox.com>
16,955
0
466
156
59
79,676
91
wagtail
20
wagtail/models/reference_index.py
Python
28
{ "docstring": "\n Returns True if the given model may have outbound references that we would be interested in recording in the index.\n\n\n Args:\n model (type): a Django model class\n allow_child_models (boolean): Child models are not indexable on their own. If you are looking at\n a child model from the perspective of indexing it through its parent,\n set this to True to disable checking for this. Default False.\n ", "language": "en", "n_whitespaces": 191, "n_words": 65, "vocab_size": 55 }
https://github.com/wagtail/wagtail.git
3
test_write_tfrecords
def test_write_tfrecords(ray_start_regular_shared, tmp_path):
    import tensorflow as tf

    # The dataset we will write to a .tfrecords file.
    ds = ray.data.from_items(
        [
            # Row one.
            {
                "int_item": 1,
                "int_list": [2, 2, 3],
                "float_item": 1.0,
                "float_list": [2.0, 3.0, 4.0],
                "bytes_item": b"abc",
                "bytes_list": [b"abc", b"1234"],
            },
            # Row two.
            {
                "int_item": 2,
                "int_list": [3, 3, 4],
                "float_item": 2.0,
                "float_list": [2.0, 2.0, 3.0],
                "bytes_item": b"def",
                "bytes_list": [b"def", b"1234"],
            },
        ]
    )

    # The corresponding tf.train.Example that we would expect to read
    # from this dataset.
    expected_records = [
        # Record one (corresponding to row one).
        tf.train.Example(
            features=tf.train.Features(
                feature={
                    "int_item": tf.train.Feature(
                        int64_list=tf.train.Int64List(value=[1])
                    ),
                    "int_list": tf.train.Feature(
                        int64_list=tf.train.Int64List(value=[2, 2, 3])
                    ),
                    "float_item": tf.train.Feature(
                        float_list=tf.train.FloatList(value=[1.0])
                    ),
                    "float_list": tf.train.Feature(
                        float_list=tf.train.FloatList(value=[2.0, 3.0, 4.0])
                    ),
                    "bytes_item": tf.train.Feature(
                        bytes_list=tf.train.BytesList(value=[b"abc"])
                    ),
                    "bytes_list": tf.train.Feature(
                        bytes_list=tf.train.BytesList(value=[b"abc", b"1234"])
                    ),
                }
            )
        ),
        # Record two (corresponding to row two).
        tf.train.Example(
            features=tf.train.Features(
                feature={
                    "int_item": tf.train.Feature(
                        int64_list=tf.train.Int64List(value=[2])
                    ),
                    "int_list": tf.train.Feature(
                        int64_list=tf.train.Int64List(value=[3, 3, 4])
                    ),
                    "float_item": tf.train.Feature(
                        float_list=tf.train.FloatList(value=[2.0])
                    ),
                    "float_list": tf.train.Feature(
                        float_list=tf.train.FloatList(value=[2.0, 2.0, 3.0])
                    ),
                    "bytes_item": tf.train.Feature(
                        bytes_list=tf.train.BytesList(value=[b"def"])
                    ),
                    "bytes_list": tf.train.Feature(
                        bytes_list=tf.train.BytesList(value=[b"def", b"1234"])
                    ),
                }
            )
        ),
    ]

    # Perform the test.
    # Write the dataset to a .tfrecords file.
    ds.write_tfrecords(tmp_path)

    # Read the Examples back out from the .tfrecords file.
    # This follows the offical TFRecords tutorial:
    # https://www.tensorflow.org/tutorials/load_data/tfrecord#reading_a_tfrecord_file_2
    filenames = sorted(os.listdir(tmp_path))
    filepaths = [os.path.join(tmp_path, filename) for filename in filenames]
    raw_dataset = tf.data.TFRecordDataset(filepaths)
    tfrecords = []
    for raw_record in raw_dataset:
        example = tf.train.Example()
        example.ParseFromString(raw_record.numpy())
        tfrecords.append(example)

    assert tfrecords == expected_records
9fab504fe776f96fecf85e12ea006264cbe92f4a
23
test_dataset_tfrecords.py
885
[Datasets] Add writer for TFRecords. (#29448) This PR enables users to write TFRecords from datasets. In particular, the master branch already includes an API for reading TFRecords from datasets. Users have requested the ability to write these datasets back to TFRecords.
30,665
0
1,453
590
127
135,586
231
ray
40
python/ray/data/tests/test_dataset_tfrecords.py
Python
82
{ "docstring": "Test that write_tfrecords writes TFRecords correctly.\n\n Test this by writing a Dataset to a TFRecord (function under test),\n reading it back out into a tf.train.Example,\n and checking that the result is analogous to the original Dataset.\n ", "language": "en", "n_whitespaces": 48, "n_words": 36, "vocab_size": 30 }
https://github.com/ray-project/ray.git
2
fix_script
def fix_script(path):
    # type: (str) -> bool
    # XXX RECORD hashes will need to be updated
    assert os.path.isfile(path)

    with open(path, 'rb') as script:
        firstline = script.readline()
        if not firstline.startswith(b'#!python'):
            return False
        exename = sys.executable.encode(sys.getfilesystemencoding())
        firstline = b'#!' + exename + os.linesep.encode("ascii")
        rest = script.read()
    with open(path, 'wb') as script:
        script.write(firstline)
        script.write(rest)
    return True
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
12
wheel.py
185
upd; format
12,353
0
134
104
41
60,940
53
transferlearning
18
.venv/lib/python3.8/site-packages/pip/_internal/operations/install/wheel.py
Python
13
{ "docstring": "Replace #!python with #!/path/to/python\n Return True if file was changed.\n ", "language": "en", "n_whitespaces": 16, "n_words": 10, "vocab_size": 10 }
https://github.com/jindongwang/transferlearning.git
1
decrypt_data
async def decrypt_data(self, session):
    return await decrypt_fernet(session, self.data)


@declarative_mixin
40309ccbc3b8c8474ae15293fbbecb28eded6ef5
@declarative_mixin
9
orm_models.py
35
Update Block API
10,992
1
22
18
9
54,156
9
prefect
6
src/prefect/orion/database/orm_models.py
Python
2
{ "docstring": "\n Retrieve decrypted data from the ORM model.\n\n Note: will only succeed if the caller has sufficient permission.\n ", "language": "en", "n_whitespaces": 39, "n_words": 17, "vocab_size": 16 }
https://github.com/PrefectHQ/prefect.git
2
assign
def assign(self, **kwargs) -> DataFrame:
    r
    data = self.copy(deep=None)

    for k, v in kwargs.items():
        data[k] = com.apply_if_callable(v, data)
    return data
36dcf519c67a8098572447f7d5a896740fc9c464
10
frame.py
75
ENH/TST: expand copy-on-write to assign() method (#50010)
40,716
0
58
48
18
171,745
20
pandas
12
pandas/core/frame.py
Python
66
{ "docstring": "\n Assign new columns to a DataFrame.\n\n Returns a new object with all original columns in addition to new ones.\n Existing columns that are re-assigned will be overwritten.\n\n Parameters\n ----------\n **kwargs : dict of {str: callable or Series}\n The column names are keywords. If the values are\n callable, they are computed on the DataFrame and\n assigned to the new columns. The callable must not\n change input DataFrame (though pandas doesn't check it).\n If the values are not callable, (e.g. a Series, scalar, or array),\n they are simply assigned.\n\n Returns\n -------\n DataFrame\n A new DataFrame with the new columns in addition to\n all the existing columns.\n\n Notes\n -----\n Assigning multiple columns within the same ``assign`` is possible.\n Later items in '\\*\\*kwargs' may refer to newly created or modified\n columns in 'df'; items are computed and assigned into 'df' in order.\n\n Examples\n --------\n >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]},\n ... index=['Portland', 'Berkeley'])\n >>> df\n temp_c\n Portland 17.0\n Berkeley 25.0\n\n Where the value is a callable, evaluated on `df`:\n\n >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)\n temp_c temp_f\n Portland 17.0 62.6\n Berkeley 25.0 77.0\n\n Alternatively, the same behavior can be achieved by directly\n referencing an existing Series or sequence:\n\n >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32)\n temp_c temp_f\n Portland 17.0 62.6\n Berkeley 25.0 77.0\n\n You can create multiple columns within the same assign where one\n of the columns depends on another one defined within the same assign:\n\n >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32,\n ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9)\n temp_c temp_f temp_k\n Portland 17.0 62.6 290.15\n Berkeley 25.0 77.0 298.15\n ", "language": "en", "n_whitespaces": 761, "n_words": 268, "vocab_size": 146 }
https://github.com/pandas-dev/pandas.git
2
in4_chksum
def in4_chksum(proto, u, p):
    # type: (int, IP, bytes) -> int
    if not isinstance(u, IP):
        warning("No IP underlayer to compute checksum. Leaving null.")
        return 0
    psdhdr = in4_pseudoheader(proto, u, len(p))
    return checksum(psdhdr + p)
20ac1d00389d0735e6d8cd1347f0a53f478144ba
10
inet.py
74
Support TCP-MD5 and TCP-AO (#3358) Support TCP-MD5 and TCP-AO
52,613
0
63
45
32
209,123
34
scapy
11
scapy/layers/inet.py
Python
6
{ "docstring": "IPv4 Pseudo Header checksum as defined in RFC793\n\n :param nh: value of upper layer protocol\n :param u: upper layer instance\n :param p: the payload of the upper layer provided as a string\n ", "language": "en", "n_whitespaces": 44, "n_words": 32, "vocab_size": 23 }
https://github.com/secdev/scapy.git
4
get_pe_matching_query
def get_pe_matching_query(amount_condition, account_from_to, transaction):
    # get matching payment entries query
    from_date = frappe.db.get_single_value("Bank Reconciliation Tool", "bank_statement_from_date")
    to_date = frappe.db.get_single_value("Bank Reconciliation Tool", "bank_statement_to_date")
    from_reference_date = frappe.db.get_single_value(
        "Bank Reconciliation Tool", "from_reference_date"
    )
    to_reference_date = frappe.db.get_single_value("Bank Reconciliation Tool", "to_reference_date")
    filtered_by_reference_date = frappe.db.get_single_value(
        "Bank Reconciliation Tool", "filtered_by_reference_date"
    )
    if transaction.deposit > 0:
        currency_field = "paid_to_account_currency as currency"
    else:
        currency_field = "paid_from_account_currency as currency"
    cond_filtered_from_ref_date = ""
    cond_filtered_to_ref_date = ""
    cond_filtered_from_posting_date = ""
    cond_filtered_to_posting_date = ""
    from_ref_date = ""
    to_ref_date = ""
    from_post_date = ""
    to_post_date = ""
    if (filtered_by_reference_date):
        cond_filtered_from_ref_date = " AND reference_date >="
        cond_filtered_to_ref_date = " AND reference_date <="
        from_ref_date = from_reference_date
        to_ref_date = to_reference_date
    elif (not filtered_by_reference_date):
        cond_filtered_from_posting_date = " AND posting_date >="
        cond_filtered_to_posting_date = " AND posting_date <="
        from_post_date = from_date
        to_post_date = to_date
    pe_data = f
    return pe_data
408c89df030998fe36df135570c9edd90a522996
10
bank_reconciliation_tool.py
336
Feat:Filter on Payment Entries and Journal Entries Applying filters on Payement entries and Journal Entries as per reference date and posting date
15,095
0
91
149
60
69,776
124
erpnext
24
erpnext/accounts/doctype/bank_reconciliation_tool/bank_reconciliation_tool.py
Python
61
{ "docstring": "\n\t\tSELECT\n\t\t\t(CASE WHEN reference_no=%(reference_no)s THEN 1 ELSE 0 END\n\t\t\t+ CASE WHEN (party_type = %(party_type)s AND party = %(party)s ) THEN 1 ELSE 0 END\n\t\t\t+ 1 ) AS rank,\n\t\t\t'Payment Entry' as doctype,\n\t\t\tname,\n\t\t\tpaid_amount,\n\t\t\treference_no,\n\t\t\treference_date,\n\t\t\tparty,\n\t\t\tparty_type,\n\t\t\tposting_date,\n\t\t\t{currency_field}\n\t\tFROM\n\t\t\t`tabPayment Entry`\n\t\tWHERE\n\t\t\tpaid_amount {amount_condition} %(amount)s\n\t\t\tAND docstatus = 1\n\t\t\tAND payment_type IN (%(payment_type)s, 'Internal Transfer')\n\t\t\tAND ifnull(clearance_date, '') = \"\"\n\t\t\tAND {account_from_to} = %(bank_account)s\n\t\t\tAND reference_no = '{transaction.reference_number}'\n\t\t\t{cond_filtered_from_ref_date} \"{from_ref_date}\"\n\t\t\t{cond_filtered_to_ref_date} \"{to_ref_date}\"\n\t\t\t{cond_filtered_from_posting_date} \"{from_post_date}\"\n\t\t\t{cond_filtered_to_posting_date} \"{to_post_date}\"\n\t\t", "language": "en", "n_whitespaces": 55, "n_words": 80, "vocab_size": 60 }
https://github.com/frappe/erpnext.git
2
_download_and_prepare
def _download_and_prepare(self, dl_manager):
    # TODO: Download external resources if needed
    bad_words_path = dl_manager.download_and_extract(BAD_WORDS_URL)
    self.bad_words = {w.strip() for w in open(bad_words_path, encoding="utf-8")}
21bfd0d3f5ff3fbfd691600e2c7071a167816cdf
12
new_metric_script.py
64
Run pyupgrade for Python 3.6+ (#3560) * Run pyupgrade for Python 3.6+ * Fix lint issues * Revert changes for the datasets code Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>
21,781
0
49
38
20
104,200
21
datasets
11
templates/new_metric_script.py
Python
3
{ "docstring": "Optional: download external resources useful to compute the scores", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/huggingface/datasets.git
1
_cache_bytecode
def _cache_bytecode(self, source_path, cache_path, data):
    # For backwards compatibility, we delegate to set_data()
    return self.set_data(cache_path, data)
8198943edd73a363c266633e1aa5b2a9e9c9f526
7
_bootstrap_external.py
33
add python 3.10.4 for windows
55,160
0
37
21
16
218,142
16
XX-Net
6
python3.10.4/Lib/importlib/_bootstrap_external.py
Python
2
{ "docstring": "Optional method which writes data (bytes) to a file path (a str).\n\n Implementing this method allows for the writing of bytecode files.\n\n The source path is needed in order to correctly transfer permissions\n ", "language": "en", "n_whitespaces": 54, "n_words": 33, "vocab_size": 30 }
https://github.com/XX-net/XX-Net.git
2
add_tip
def add_tip(self, tip=None, tip_shape=None, tip_length=None, at_start=False):
    if tip is None:
        tip = self.create_tip(tip_shape, tip_length, at_start)
    else:
        self.position_tip(tip, at_start)
    self.reset_endpoints_based_on_tip(tip, at_start)
    self.asign_tip_attr(tip, at_start)
    self.add(tip)
    return self
e040bcacd38378386749db18aeba575b93f4ebca
10
arc.py
111
Improved structure of the :mod:`.mobject` module (#2476) * group graphing and update its references * group text and update its references * group opengl and update its references * group three_d and update its references * group geometry and update (most) references * move some chaning.py + updater files into animation * refactor arc.py * refactor line.py * refactor polygram.py * refactor tips.py * black + isort * import new files in __init__.py * refactor places where geometry was used * black + isort again * remove unused imports * update reference.rst * add descriptions to files * fix circular imports * forgot ArrowTip * fix tests * fix doctests * satisfy mypy? * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix ALL merge conflicts * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * one VMobject import slipped through * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * re-add imports to `manim/opengl/__init__.py` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix reference manual * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ignore unknown directive type * fix arrow tip imports in docstrings Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Benjamin Hackl <devel@benjamin-hackl.at>
46,154
0
96
73
21
189,648
25
manim
11
manim/mobject/geometry/arc.py
Python
9
{ "docstring": "\n Adds a tip to the TipableVMobject instance, recognising\n that the endpoints might need to be switched if it's\n a 'starting tip' or not.\n ", "language": "en", "n_whitespaces": 52, "n_words": 23, "vocab_size": 20 }
https://github.com/ManimCommunity/manim.git
8
check_graph_consistency
def check_graph_consistency(tensor=None, method="add_loss", force_raise=False):
    if force_raise or (
        tf1.executing_eagerly_outside_functions()
        and hasattr(tensor, "graph")
        and tensor.graph.is_control_flow_graph
    ):
        if method == "activity_regularizer":
            bad_example =
fa6d9107a498f7c2403ff28c7b389a1a0c5cc083
bad_example = """
12
base_layer_utils.py
96
reduct too long lines
81,922
1
70
130
19
277,267
21
keras
14
keras/engine/base_layer_utils.py
Python
111
{ "docstring": "Checks that tensors passed to `add_*` method match the Keras graph.\n\n When one of the `add_*` method is called inside a V2 conditional branch, the\n underlying tensor gets created in a FuncGraph managed by control_flow_v2.\n We need to raise clear error messages in such cases.\n\n Args:\n tensor: Tensor to check, or `False` if it is known that an error\n should be raised.\n method: Caller method, one of {'add_metric', 'add_loss', 'add_update'}.\n force_raise: If an error should be raised regardless of `tensor`.\n\n Raises:\n RuntimeError: In case of an out-of-graph tensor.\n \n class TestModel(tf.keras.Model):\n", "language": "en", "n_whitespaces": 140, "n_words": 90, "vocab_size": 70 }
https://github.com/keras-team/keras.git
1
_make_dist
def _make_dist(self):
    dist = tfp.distributions.MultivariateNormalTriL(
        self.theta, scale_tril=tf.linalg.cholesky(self.covariance)
    )
    return dist
136c8d5e4d2fab6106f007f4ce5d5c321922ae17
12
bandit_tf_model.py
54
[RLlib] Tests for bandit convergence and solving cov matrix problem (#29666) Signed-off-by: Avnish <avnishnarayan@gmail.com> Signed-off-by: Kourosh Hakhamaneshi <kourosh@anyscale.com> Co-authored-by: Kourosh Hakhamaneshi <kourosh@anyscale.com>
30,601
0
49
33
9
135,341
10
ray
12
rllib/algorithms/bandit/bandit_tf_model.py
Python
5
{ "docstring": "Create a multivariate normal distribution with the current parameters", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/ray-project/ray.git
1
_kth_arnoldi_iteration
def _kth_arnoldi_iteration(k, A, M, V, H):
    eps = jnp.finfo(jnp.result_type(*tree_leaves(V))).eps

    v = tree_map(lambda x: x[..., k], V)  # Gets V[:, k]
    v = M(A(v))
    _, v_norm_0 = _safe_normalize(v)
    v, h = _iterative_classical_gram_schmidt(V, v, v_norm_0, max_iterations=2)

    tol = eps * v_norm_0
    unit_v, v_norm_1 = _safe_normalize(v, thresh=tol)
    V = tree_map(lambda X, y: X.at[..., k + 1].set(y), V, unit_v)

    h = h.at[k + 1].set(v_norm_1)
    H = H.at[k, :].set(h)
    breakdown = v_norm_1 == 0.
    return V, H, breakdown
df1ceaeeb11efc7c5af1ad2dd102857128c23b26
14
linalg.py
255
Deprecate jax.tree_util.tree_multimap
26,741
0
87
170
52
119,998
73
jax
29
jax/_src/scipy/sparse/linalg.py
Python
13
{ "docstring": "\n Performs a single (the k'th) step of the Arnoldi process. Thus,\n adds a new orthonormalized Krylov vector A(M(V[:, k])) to V[:, k+1],\n and that vectors overlaps with the existing Krylov vectors to\n H[k, :]. The tolerance 'tol' sets the threshold at which an invariant\n subspace is declared to have been found, in which case in which case the new\n vector is taken to be the zero vector.\n ", "language": "en", "n_whitespaces": 75, "n_words": 67, "vocab_size": 50 }
https://github.com/google/jax.git
10
correct_non_span
def _correct_non_span(self, line_str):
    words = line_str.split("</span>")
    line_str = ""
    for i in range(0, words.__len__()):
        if i != words.__len__() - 1:
            j = words[i].find("<span")
        else:
            j = words[i].__len__()
        temp = ""
        starti = -1
        for k in range(0, j):
            if words[i][k] == "\t" and starti == -1:
                continue
            else:
                if starti == -1:
                    starti = k
                temp = temp + words[i][k]
        if temp != "":
            if i != words.__len__() - 1:
                temp = (
                    '<span style="color:'
                    + self.default_color
                    + '">'
                    + words[i][starti:j]
                    + "</span>"
                )
            else:
                temp = (
                    '<span style="color:'
                    + self.default_color
                    + '">'
                    + words[i][starti:j]
                )
            temp = temp + words[i][j:]
            words[i] = temp
        if words[i] != "":
            line_str = line_str + words[i] + "</span>"
    return line_str
902e7eb4f0147b5882a613b67467e38a1d47f01e
18
code_mobject.py
375
Hide more private methods from the docs. (#2468) * hide privs from text_mobject.py * hide privs from tex_mobject.py * hide privs from code_mobject.py * hide privs from svg_mobject.py * remove SVGPath and utils from __init__.py * don't import string_to_numbers * hide privs from geometry.py * hide privs from matrix.py * hide privs from numbers.py * hide privs from three_dimensions.py * forgot underscore under set_stroke_width_from_length * there were more i missed * unhidea method that was used in docs * forgot other text2hash * remove svg_path from docs
46,063
0
728
223
46
189,455
118
manim
14
manim/mobject/svg/code_mobject.py
Python
38
{ "docstring": "Function put text color to those strings that don't have one according to background_color of displayed code.\n\n Parameters\n ---------\n line_str : :class:`str`\n Takes a html element's string to put color to it according to background_color of displayed code.\n\n Returns\n -------\n :class:`str`\n The generated html element's string with having color attributes.\n ", "language": "en", "n_whitespaces": 121, "n_words": 50, "vocab_size": 34 }
https://github.com/ManimCommunity/manim.git
3
get_named_beta_schedule
def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
    if schedule_name == "linear":
        # Linear schedule from Ho et al, extended to work for any number of
        # diffusion steps.
        scale = 1000 / num_diffusion_timesteps
        beta_start = scale * 0.0001
        beta_end = scale * 0.02
        return np.linspace(beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64)
    elif schedule_name == "cosine":
        return betas_for_alpha_bar(
            num_diffusion_timesteps,
            lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2)**2,
        )
    else:
        raise NotImplementedError(f"unknown beta schedule: {schedule_name}")
f4d6e64cdc132ae868699a0ba442f4ab1d304a14
19
gaussian_diffusion.py
147
add disco_diffusion_cnclip_vitb16 module
9,908
0
166
96
56
49,784
69
PaddleHub
16
modules/image/text_to_image/disco_diffusion_cnclip_vitb16/reverse_diffusion/model/gaussian_diffusion.py
Python
13
{ "docstring": "\n Get a pre-defined beta schedule for the given name.\n\n The beta schedule library consists of beta schedules which remain similar\n in the limit of num_diffusion_timesteps.\n Beta schedules may be added, but should not be removed or changed once\n they are committed to maintain backwards compatibility.\n ", "language": "en", "n_whitespaces": 64, "n_words": 45, "vocab_size": 38 }
https://github.com/PaddlePaddle/PaddleHub.git
5
call
def call(self, features, training=None):
    if not isinstance(features, dict):
        raise ValueError(
            "We expected a dictionary here. Instead we got: ", features
        )
    if training is None:
        training = backend.learning_phase()
    transformation_cache = (
        tf.__internal__.feature_column.FeatureTransformationCache(features)
    )
    output_tensors = []
    sequence_lengths = []

    for column in self._feature_columns:
        with backend.name_scope(column.name):
            try:
                (
                    dense_tensor,
                    sequence_length,
                ) = column.get_sequence_dense_tensor(
                    transformation_cache,
                    self._state_manager,
                    training=training,
                )
            except TypeError:
                (
                    dense_tensor,
                    sequence_length,
                ) = column.get_sequence_dense_tensor(
                    transformation_cache, self._state_manager
                )
            # Flattens the final dimension to produce a 3D Tensor.
            output_tensors.append(
                self._process_dense_tensor(column, dense_tensor)
            )
            sequence_lengths.append(sequence_length)

    # Check and process sequence lengths.
    kfc._verify_static_batch_size_equality(
        sequence_lengths, self._feature_columns
    )
    sequence_length = _assert_all_equal_and_return(sequence_lengths)

    return self._verify_and_concat_tensors(output_tensors), sequence_length
6fafb567af4e4d9f42974d0b6c55b18bc03e17eb
16
sequence_feature_column.py
264
resolve line-too-long in feature_column
82,375
0
677
167
73
278,117
98
keras
31
keras/feature_column/sequence_feature_column.py
Python
39
{ "docstring": "Returns sequence input corresponding to the `feature_columns`.\n\n Args:\n features: A dict mapping keys to tensors.\n training: Python boolean or None, indicating whether to the layer is\n being run in training mode. This argument is passed to the call\n method of any `FeatureColumn` that takes a `training` argument. For\n example, if a `FeatureColumn` performed dropout, the column could\n expose a `training` argument to control whether the dropout should\n be applied. If `None`, defaults to\n `tf.keras.backend.learning_phase()`.\n\n\n Returns:\n An `(input_layer, sequence_length)` tuple where:\n - input_layer: A float `Tensor` of shape `[batch_size, T, D]`.\n `T` is the maximum sequence length for this batch, which could\n differ from batch to batch. `D` is the sum of `num_elements` for\n all `feature_columns`.\n - sequence_length: An int `Tensor` of shape `[batch_size]`. The\n sequence length for each example.\n\n Raises:\n ValueError: If features are not a dictionary.\n ", "language": "en", "n_whitespaces": 335, "n_words": 137, "vocab_size": 99 }
https://github.com/keras-team/keras.git
9
validate_utf16_characters
def validate_utf16_characters(self, pair):
    if not self.first_half_surrogate_pair_detected_16be:
        if 0xD8 <= pair[0] <= 0xDB:
            self.first_half_surrogate_pair_detected_16be = True
        elif 0xDC <= pair[0] <= 0xDF:
            self.invalid_utf16be = True
    else:
        if 0xDC <= pair[0] <= 0xDF:
            self.first_half_surrogate_pair_detected_16be = False
        else:
            self.invalid_utf16be = True

    if not self.first_half_surrogate_pair_detected_16le:
        if 0xD8 <= pair[1] <= 0xDB:
            self.first_half_surrogate_pair_detected_16le = True
        elif 0xDC <= pair[1] <= 0xDF:
            self.invalid_utf16le = True
    else:
        if 0xDC <= pair[1] <= 0xDF:
            self.first_half_surrogate_pair_detected_16le = False
        else:
            self.invalid_utf16le = True
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
13
utf1632prober.py
199
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
4,096
0
316
128
23
21,965
73
pipenv
7
pipenv/patched/pip/_vendor/chardet/utf1632prober.py
Python
21
{ "docstring": "\n Validate if the pair of bytes is valid UTF-16.\n\n UTF-16 is valid in the range 0x0000 - 0xFFFF excluding 0xD800 - 0xFFFF\n with an exception for surrogate pairs, which must be in the range\n 0xD800-0xDBFF followed by 0xDC00-0xDFFF\n\n https://en.wikipedia.org/wiki/UTF-16\n ", "language": "en", "n_whitespaces": 83, "n_words": 39, "vocab_size": 31 }
https://github.com/pypa/pipenv.git
7
get_all_items
def get_all_items(date_range, company, field, limit=None):
    if field in ("available_stock_qty", "available_stock_value"):
        select_field = "sum(actual_qty)" if field == "available_stock_qty" else "sum(stock_value)"
        return frappe.db.get_all(
            "Bin",
            fields=["item_code as name", "{0} as value".format(select_field)],
            group_by="item_code",
            order_by="value desc",
            limit=limit,
        )
    else:
        if field == "total_sales_amount":
            select_field = "sum(order_item.base_net_amount)"
            select_doctype = "Sales Order"
        elif field == "total_purchase_amount":
            select_field = "sum(order_item.base_net_amount)"
            select_doctype = "Purchase Order"
        elif field == "total_qty_sold":
            select_field = "sum(order_item.stock_qty)"
            select_doctype = "Sales Order"
        elif field == "total_qty_purchased":
            select_field = "sum(order_item.stock_qty)"
            select_doctype = "Purchase Order"

        date_condition = get_date_condition(date_range, "sales_order.transaction_date")

        return frappe.db.sql(
            .format(
                select_field, select_doctype, date_condition
            ),
            (company, cint(limit)),
            as_dict=1,
        )  # nosec


@frappe.whitelist()
494bd9ef78313436f0424b918f200dab8fc7c20b
@frappe.whitelist()
14
leaderboard.py
284
style: format code with black
14,559
1
65
152
56
67,568
96
erpnext
20
erpnext/startup/leaderboard.py
Python
40
{ "docstring": "\n\t\t\tselect order_item.item_code as name, {0} as value\n\t\t\tfrom `tab{1}` sales_order join `tab{1} Item` as order_item\n\t\t\t\ton sales_order.name = order_item.parent\n\t\t\twhere sales_order.docstatus = 1\n\t\t\t\tand sales_order.company = %s {2}\n\t\t\tgroup by order_item.item_code\n\t\t\torder by value desc\n\t\t\tlimit %s\n\t\t", "language": "en", "n_whitespaces": 29, "n_words": 37, "vocab_size": 29 }
https://github.com/frappe/erpnext.git
21
findLibrary
def findLibrary(name):
    assert compat.is_unix, "Current implementation for Unix only (Linux, Solaris, AIX, FreeBSD)"

    # Look in the LD_LIBRARY_PATH according to platform.
    if compat.is_aix:
        lp = compat.getenv('LIBPATH', '')
    elif compat.is_darwin:
        lp = compat.getenv('DYLD_LIBRARY_PATH', '')
    else:
        lp = compat.getenv('LD_LIBRARY_PATH', '')
    lib = _which_library(name, filter(None, lp.split(os.pathsep)))

    # Look in /etc/ld.so.cache
    # Solaris does not have /sbin/ldconfig. Just check if this file exists.
    if lib is None:
        utils.load_ldconfig_cache()
        lib = utils.LDCONFIG_CACHE.get(name)
        if lib:
            assert os.path.isfile(lib)

    # Look in the known safe paths.
    if lib is None:
        # Architecture independent locations.
        paths = ['/lib', '/usr/lib']
        # Architecture dependent locations.
        if compat.architecture == '32bit':
            paths.extend(['/lib32', '/usr/lib32'])
        else:
            paths.extend(['/lib64', '/usr/lib64'])
        # Machine dependent locations.
        if compat.machine == 'intel':
            if compat.architecture == '32bit':
                paths.extend(['/usr/lib/i386-linux-gnu'])
            else:
                paths.extend(['/usr/lib/x86_64-linux-gnu'])

        # On Debian/Ubuntu /usr/bin/python is linked statically with libpython. Newer Debian/Ubuntu with multiarch
        # support puts the libpythonX.Y.so in paths like /usr/lib/i386-linux-gnu/.
        try:
            # Module available only in Python 2.7+
            import sysconfig

            # 'multiarchsubdir' works on Debian/Ubuntu only in Python 2.7 and 3.3+.
            arch_subdir = sysconfig.get_config_var('multiarchsubdir')
            # Ignore if None is returned.
            if arch_subdir:
                arch_subdir = os.path.basename(arch_subdir)
                paths.append(os.path.join('/usr/lib', arch_subdir))
            else:
                logger.debug('Multiarch directory not detected.')
        except ImportError:
            logger.debug('Multiarch directory not detected.')

        # Termux (a Ubuntu like subsystem for Android) has an additional libraries directory.
        if os.path.isdir('/data/data/com.termux/files/usr/lib'):
            paths.append('/data/data/com.termux/files/usr/lib')

        if compat.is_aix:
            paths.append('/opt/freeware/lib')
        elif compat.is_hpux:
            if compat.architecture == '32bit':
                paths.append('/usr/local/lib/hpux32')
            else:
                paths.append('/usr/local/lib/hpux64')
        elif compat.is_freebsd or compat.is_openbsd:
            paths.append('/usr/local/lib')
        lib = _which_library(name, paths)

    # Give up :(
    if lib is None:
        return None

    # Resolve the file name into the soname
    if compat.is_freebsd or compat.is_aix or compat.is_openbsd:
        # On FreeBSD objdump does not show SONAME, and on AIX objdump does not exist, so we just return the lib we
        # have found.
        return lib
    else:
        dir = os.path.dirname(lib)
        return os.path.join(dir, _get_so_name(lib))
57c520132b4d0ab7bfd5653383ec2602e40088af
16
bindepend.py
650
Bindepend: Add Termux-specific libraries search path. According to termux/termux-app#1595, this is all we need to change to faclitate using PyInstaller on Termux.
77,342
0
840
360
160
262,743
283
pyinstaller
40
PyInstaller/depend/bindepend.py
Python
54
{ "docstring": "\n Look for a library in the system.\n\n Emulate the algorithm used by dlopen. `name` must include the prefix, e.g., ``libpython2.4.so``.\n ", "language": "en", "n_whitespaces": 30, "n_words": 20, "vocab_size": 18 }
https://github.com/pyinstaller/pyinstaller.git
1
test_return_expanded
def test_return_expanded(self):
    self.assertEqual(StateFilter.all().return_expanded(), StateFilter.all())

    self.assertEqual(StateFilter.none().return_expanded(), StateFilter.none())

    # Concrete-only state filters stay the same
    # (Case: mixed filter)
    self.assertEqual(
        StateFilter.freeze(
            {
                EventTypes.Member: {"@wombat:test", "@alicia:test"},
                "some.other.state.type": {""},
            },
            include_others=False,
        ).return_expanded(),
        StateFilter.freeze(
            {
                EventTypes.Member: {"@wombat:test", "@alicia:test"},
                "some.other.state.type": {""},
            },
            include_others=False,
        ),
    )

    # Concrete-only state filters stay the same
    # (Case: non-member-only filter)
    self.assertEqual(
        StateFilter.freeze(
            {"some.other.state.type": {""}}, include_others=False
        ).return_expanded(),
        StateFilter.freeze({"some.other.state.type": {""}}, include_others=False),
    )

    # Concrete-only state filters stay the same
    # (Case: member-only filter)
    self.assertEqual(
        StateFilter.freeze(
            {
                EventTypes.Member: {"@wombat:test", "@alicia:test"},
            },
            include_others=False,
        ).return_expanded(),
        StateFilter.freeze(
            {
                EventTypes.Member: {"@wombat:test", "@alicia:test"},
            },
            include_others=False,
        ),
    )

    # Wildcard member-only state filters stay the same
    self.assertEqual(
        StateFilter.freeze(
            {EventTypes.Member: None},
            include_others=False,
        ).return_expanded(),
        StateFilter.freeze(
            {EventTypes.Member: None},
            include_others=False,
        ),
    )

    # If there is a wildcard in the non-member portion of the filter,
    # it's expanded to include ALL non-member events.
    # (Case: mixed filter)
    self.assertEqual(
        StateFilter.freeze(
            {
                EventTypes.Member: {"@wombat:test", "@alicia:test"},
                "some.other.state.type": None,
            },
            include_others=False,
        ).return_expanded(),
        StateFilter.freeze(
            {EventTypes.Member: {"@wombat:test", "@alicia:test"}},
            include_others=True,
        ),
    )

    # If there is a wildcard in the non-member portion of the filter,
    # it's expanded to include ALL non-member events.
    # (Case: non-member-only filter)
    self.assertEqual(
        StateFilter.freeze(
            {
                "some.other.state.type": None,
            },
            include_others=False,
        ).return_expanded(),
        StateFilter.freeze({EventTypes.Member: set()}, include_others=True),
    )
    self.assertEqual(
        StateFilter.freeze(
            {
                "some.other.state.type": None,
                "yet.another.state.type": {"wombat"},
            },
            include_others=False,
        ).return_expanded(),
        StateFilter.freeze({EventTypes.Member: set()}, include_others=True),
    )
eb609c65d0794dd49efcd924bdc8743fd4253a93
15
test_state.py
668
Fix bug in `StateFilter.return_expanded()` and add some tests. (#12016)
71,177
0
1,317
410
63
246,360
203
synapse
12
tests/storage/test_state.py
Python
81
{ "docstring": "\n Tests the behaviour of the return_expanded() function that expands\n StateFilters to include more state types (for the sake of cache hit rate).\n ", "language": "en", "n_whitespaces": 44, "n_words": 22, "vocab_size": 19 }
https://github.com/matrix-org/synapse.git
2
exit_single_process_silo_context
def exit_single_process_silo_context(cls) -> Generator[None, None, None]:
    old = _single_process_silo_mode_state.mode
    _single_process_silo_mode_state.mode = None
    try:
        yield
    finally:
        _single_process_silo_mode_state.mode = old
3bfb9420a7d80e395c250718d17419daaf021aa2
10
base.py
59
chore(hybrid-cloud): several endpoint tests e2e (#41691) This a major break through PR that gets several acceptance and api unit tests passing e2e with hybrid cloud. I want to explain what's going on in greater detail and get this merged next week, unfortunately I'm traveling for now. This only brings up our hybrid cloud test coverage to 77/4096, but here's the thing -- this is a break through, because its much much much easier now in general to add new endpoints. With these major issues out of the way, getting endpoint tests is some combination of: 1. Swapping out `user=user` for `user_id=user.id`, or similar (`organization=organization` for `organization_id=...`) in situations where a relationship would span a silo boundary, but the id is the only thing that matters. 2. Fixing serializers. Serializers LOVE to cross silo boundaries, but fixes don't have to be complicated. 3. Something else but I'm tired and I can't think straight. But honestly, I think it will be much easier to start banging out way more stable=True tests from this. Let's get this merged next week and figure out how to start smashing more boundaries down.
18,506
0
75
35
13
89,155
18
sentry
6
src/sentry/silo/base.py
Python
13
{ "docstring": "\n Used by silo endpoint decorators and other contexts to signal that a potential inter process interaction\n is being simulated locally for acceptance tests that validate the behavior of multiple endpoints with\n process boundaries in play. Call this inside of any RPC interaction to ensure that such acceptance tests\n can 'swap' the silo context on the fly.\n ", "language": "en", "n_whitespaces": 93, "n_words": 56, "vocab_size": 45 }
https://github.com/getsentry/sentry.git
2
redirect
def redirect(to, *args, permanent=False, **kwargs):
    redirect_class = (
        HttpResponsePermanentRedirect if permanent else HttpResponseRedirect
    )
    return redirect_class(resolve_url(to, *args, **kwargs))
9c19aff7c7561e3a82978a272ecdaad40dda5c00
10
shortcuts.py
60
Refs #33476 -- Reformatted code with Black.
51,392
0
37
39
17
206,167
18
django
9
django/shortcuts.py
Python
5
{ "docstring": "\n Return an HttpResponseRedirect to the appropriate URL for the arguments\n passed.\n\n The arguments could be:\n\n * A model: the model's `get_absolute_url()` function will be called.\n\n * A view name, possibly with arguments: `urls.reverse()` will be used\n to reverse-resolve the name.\n\n * A URL, which will be used as-is for the redirect location.\n\n Issues a temporary redirect by default; pass permanent=True to issue a\n permanent redirect.\n ", "language": "en", "n_whitespaces": 114, "n_words": 65, "vocab_size": 46 }
https://github.com/django/django.git
6
test_ppo_exploration_setup
def test_ppo_exploration_setup(self):
    config = copy.deepcopy(ppo.DEFAULT_CONFIG)
    config["num_workers"] = 0  # Run locally.
    config["env_config"] = {"is_slippery": False, "map_name": "4x4"}
    obs = np.array(0)

    # Test against all frameworks.
    for fw in framework_iterator(config):
        # Default Agent should be setup with StochasticSampling.
        trainer = ppo.PPOTrainer(config=config, env="FrozenLake-v1")
        # explore=False, always expect the same (deterministic) action.
        a_ = trainer.compute_single_action(
            obs, explore=False, prev_action=np.array(2), prev_reward=np.array(1.0)
        )
        # Test whether this is really the argmax action over the logits.
        if fw != "tf":
            last_out = trainer.get_policy().model.last_output()
            if fw == "torch":
                check(a_, np.argmax(last_out.detach().cpu().numpy(), 1)[0])
            else:
                check(a_, np.argmax(last_out.numpy(), 1)[0])
        for _ in range(50):
            a = trainer.compute_single_action(
                obs,
                explore=False,
                prev_action=np.array(2),
                prev_reward=np.array(1.0),
            )
            check(a, a_)

        # With explore=True (default), expect stochastic actions.
        actions = []
        for _ in range(300):
            actions.append(
                trainer.compute_single_action(
                    obs, prev_action=np.array(2), prev_reward=np.array(1.0)
                )
            )
        check(np.mean(actions), 1.5, atol=0.2)
        trainer.stop()
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
22
test_ppo.py
446
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
30,114
0
629
285
87
133,802
126
ray
37
rllib/agents/ppo/tests/test_ppo.py
Python
33
{ "docstring": "Tests, whether PPO runs with different exploration setups.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/ray-project/ray.git
3
logout
def logout(self, request, extra_context=None):
    from django.contrib.auth.views import LogoutView

    defaults = {
        "extra_context": {
            **self.each_context(request),
            # Since the user isn't logged out at this point, the value of
            # has_permission must be overridden.
            "has_permission": False,
            **(extra_context or {}),
        },
    }
    if self.logout_template is not None:
        defaults["template_name"] = self.logout_template
    request.current_app = self.name
    return LogoutView.as_view(**defaults)(request)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
13
sites.py
137
Refs #33476 -- Reformatted code with Black.
50,391
0
209
85
46
203,467
52
django
15
django/contrib/admin/sites.py
Python
13
{ "docstring": "\n Log out the user for the given HttpRequest.\n\n This should *not* assume the user is already logged in.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 15 }
https://github.com/django/django.git
7
register
def register(self, addon):
    api_changes = {
        # mitmproxy 6 -> mitmproxy 7
        "clientconnect": "client_connected",
        "clientdisconnect": "client_disconnected",
        "serverconnect": "server_connect and server_connected",
        "serverdisconnect": "server_disconnected",
    }
    for a in traverse([addon]):
        for old, new in api_changes.items():
            if hasattr(a, old):
                ctx.log.warn(f"The {old} event has been removed, use {new} instead. "
                             f"For more details, see https://docs.mitmproxy.org/stable/addons-events/.")
        name = _get_name(a)
        if name in self.lookup:
            raise exceptions.AddonManagerError(
                "An addon called '%s' already exists." % name
            )
    l = Loader(self.master)
    self.invoke_addon_sync(addon, LoadHook(l))
    for a in traverse([addon]):
        name = _get_name(a)
        self.lookup[name] = a
    for a in traverse([addon]):
        self.master.commands.collect_commands(a)
    self.master.options.process_deferred()
    return addon
ee4999e8e4380f7b67faef92f04c361deffba412
16
addonmanager.py
283
Rename new async helper functions. async_trigger -> trigger_event invoke_addon -> invoke_addon_sync (API breakage) async_invoke_addon -> invoke_addon
73,504
0
397
164
68
250,551
91
mitmproxy
27
mitmproxy/addonmanager.py
Python
26
{ "docstring": "\n Register an addon, call its load event, and then register all its\n sub-addons. This should be used by addons that dynamically manage\n addons.\n\n If the calling addon is already running, it should follow with\n running and configure events. Must be called within a current\n context.\n ", "language": "en", "n_whitespaces": 119, "n_words": 45, "vocab_size": 41 }
https://github.com/mitmproxy/mitmproxy.git
5
test_sql_create_database
def test_sql_create_database(self, db, subtests, request):
    db_data = request.getfixturevalue(db)
    db_type = db_data['type']
    db_creds = db_data['connection_data']
    queries = [
        {
            'create': 'CREATE DATABASE',
            'drop': 'DROP DATABASE'
        }, {
            'create': 'CREATE DATABASE',
            'drop': None
        }
    ]
    created_db_names = []
    for query in queries:
        create_query = query['create']
        drop_query = query['drop']
        db_name = db_type.upper()
        created_db_names.append(db_name)
        with subtests.test(msg=f'{db_type}', create_query=create_query, drop_query=drop_query, db_name=db_name):
            query = f
            self.sql_via_http(query, RESPONSE_TYPE.OK)
            assert db_name in self.show_databases()
            if drop_query is not None:
                self.sql_via_http(f'{drop_query} {db_name}', RESPONSE_TYPE.OK)
                assert db_name.upper() not in self.show_databases()

    resp = self.sql_via_http('show databases', RESPONSE_TYPE.TABLE)
    db_names = [x[0] for x in resp['data']]
    for name in created_db_names:
        assert name in db_names
13d267c409bf1cc65fca366d1aa4fc51438cbf71
15
test_http.py
367
It http test refactoring (#3959) * HTTP and company independent tests refactoring
25,949
0
431
200
65
117,299
97
mindsdb
30
tests/integration_tests/flows/test_http.py
Python
34
{ "docstring": " sql-via-http:\n 'create database' for each db\n 'drop database' for each db\n 'create database' for each db\n \n {create_query} {db_name}\n WITH ENGINE = '{db_type}',\n PARAMETERS = {json.dumps(db_creds)};\n ", "language": "en", "n_whitespaces": 139, "n_words": 25, "vocab_size": 15 }
https://github.com/mindsdb/mindsdb.git
2
get_log_file_handles
def get_log_file_handles(self, name, unique=False):
    if not self.should_redirect_logs():
        return None, None

    log_stdout, log_stderr = self._get_log_file_names(name, unique=unique)
    return open_log(log_stdout), open_log(log_stderr)
1971a08b7dadf98c337ed0067db5b59d805b31ae
9
node.py
77
[RFC] [Core] Support disabling log redirection via `RAY_LOG_TO_STDERR` environment variable. (#21767)
28,982
0
57
48
17
129,592
18
ray
9
python/ray/node.py
Python
5
{ "docstring": "Open log files with partially randomized filenames, returning the\n file handles. If output redirection has been disabled, no files will\n be opened and `(None, None)` will be returned.\n\n Args:\n name (str): descriptive string for this log file.\n unique (bool): if true, a counter will be attached to `name` to\n ensure the returned filename is not already used.\n\n Returns:\n A tuple of two file handles for redirecting (stdout, stderr), or\n `(None, None)` if output redirection is disabled.\n ", "language": "en", "n_whitespaces": 170, "n_words": 76, "vocab_size": 60 }
https://github.com/ray-project/ray.git
3
get_default_cache_location
def get_default_cache_location() -> str:
    if "LUDWIG_CACHE" in os.environ and os.environ["LUDWIG_CACHE"]:
        return os.environ["LUDWIG_CACHE"]
    else:
        return str(Path.home().joinpath(".ludwig_cache"))
e4fc06f986e03919d9aef3ab55c05fee5a6b9d3a
14
dataset_loader.py
81
Config-first Datasets API (ludwig.datasets refactor) (#2479) * Adds README and stub for reading dataset configs. * Adds __init__.py for configs, moves circular import into function scope in ludwig/datasets/__init__.py * Print config files in datasets folder. * First pass at automatic archive extraction. * Implemented downloading and extract. * Refactor DatasetConfig into its own file. * Fixed bugs downloading kaggle dataset. * Makes registry store dataset instances, not classes. Also comments out import_submodules for testing. * Typo fix. * Only pass data files on to load_unprocessed_dataframe, symlink directories. * Downloading dataset files into existing directory if exists. * Refactor: make datasets fully config-first, lazy load dataset loaders. * Implemented agnews custom loader. * Implements train/validation/test split by files, and globbing support * Adds _glob_multiple * Adds adult_census_income, agnews, allstate_claims_severity. * Implements sha256 verification, adds more datasets up to creditcard_fraud. * Adds checksums, dbpedia, electricity * Fixes gzip file name returned as string not list, adds up to forest_cover dataset. * Adds datasets up to reuters_r8 * Adds all datasets which don't require a custom class. * Restore dataset import behavior by implementing module __getattr__ * Adds KDD datasets. * Adds ieee_fraud. * Adds imbalanced_insurance, insurance_lite. * Adds mnist. * Completes implementation of all of the built-in datasets. * Made cache_dir optional, read from environment variable if set. * Upgrades datasets tests. * Adds test for new dataset config API. Also adds scripts for dataset link checking. * Fixes loading allstate claims severity dataset. * Use @lru_cache(1), @cache not supported in python < 3.9 * Deletes dataset registry, updates automl test utils * Fix imports of datasets API. * Adds more detail to sha256: docstring and basic README * Copy-paste link oops. * Fixes handling of nested archive types like .tar.bz Also adds a LUDWIG_CACHE and export to the README * Adds link for twitter bots. * Fix order of splits in README.md * typo * Adds verify as a phase in doc string. * Support .pqt, .pq extensions for parquet. * Handle nested archives with longer file extensions like .csv.zip * Handle nested .gz types properly too. Check all extensions with .endswith * Handle all archive types with .endswith * Update ludwig/datasets/loaders/split_loaders.py Co-authored-by: Joppe Geluykens <joppe@rvrie.com> * Adds explanation for export, fixes preserve_paths (should be relative to processed_dataset_dir) * Resolve preserved paths relative to raw dataset dir before move. * Catch runtime exception from extracting sub-archives. Co-authored-by: Daniel Treiman <daniel@predibase.com> Co-authored-by: Joppe Geluykens <joppe@rvrie.com>
1,336
0
38
44
14
8,086
15
ludwig
7
ludwig/datasets/loaders/dataset_loader.py
Python
6
{ "docstring": "Returns a path to the default LUDWIG_CACHE location, or $HOME/.ludwig_cache.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/ludwig-ai/ludwig.git
5
response_chunks
def response_chunks(response, chunk_size=CONTENT_CHUNK_SIZE):
    # type: (Response, int) -> Iterator[bytes]
    try:
        # Special case for urllib3.
        for chunk in response.raw.stream(
            chunk_size,
            # We use decode_content=False here because we don't
            # want urllib3 to mess with the raw bytes we get
            # from the server. If we decompress inside of
            # urllib3 then we cannot verify the checksum
            # because the checksum will be of the compressed
            # file. This breakage will only occur if the
            # server adds a Content-Encoding header, which
            # depends on how the server was configured:
            # - Some servers will notice that the file isn't a
            #   compressible file and will leave the file alone
            #   and with an empty Content-Encoding
            # - Some servers will notice that the file is
            #   already compressed and will leave the file
            #   alone and will add a Content-Encoding: gzip
            #   header
            # - Some servers won't notice anything at all and
            #   will take a file that's already been compressed
            #   and compress it again and set the
            #   Content-Encoding: gzip header
            #
            # By setting this not to decode automatically we
            # hope to eliminate problems with the second case.
            decode_content=False,
        ):
            yield chunk
    except AttributeError:
        # Standard file-like object.
        while True:
            chunk = response.raw.read(chunk_size)
            if not chunk:
                break
            yield chunk
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
14
utils.py
114
upd; format
12,341
0
600
54
119
60,915
214
transferlearning
10
.venv/lib/python3.8/site-packages/pip/_internal/network/utils.py
Python
13
{ "docstring": "Given a requests Response, provide the data chunks.\n ", "language": "en", "n_whitespaces": 11, "n_words": 8, "vocab_size": 8 }
https://github.com/jindongwang/transferlearning.git
4
result_list
def result_list(cl):
    headers = list(result_headers(cl))
    num_sorted_fields = 0
    for h in headers:
        if h["sortable"] and h["sorted"]:
            num_sorted_fields += 1
    return {
        "cl": cl,
        "result_hidden_fields": list(result_hidden_fields(cl)),
        "result_headers": headers,
        "num_sorted_fields": num_sorted_fields,
        "results": list(results(cl)),
    }


@register.tag(name="result_list")
9c19aff7c7561e3a82978a272ecdaad40dda5c00
@register.tag(name="result_list")
11
admin_list.py
142
Refs #33476 -- Reformatted code with Black.
50,407
1
103
72
31
203,489
33
django
12
django/contrib/admin/templatetags/admin_list.py
Python
13
{ "docstring": "\n Display the headers and data list together.\n ", "language": "en", "n_whitespaces": 14, "n_words": 7, "vocab_size": 7 }
https://github.com/django/django.git
1
test_single_path
def test_single_path(self):
    with extend_sys_path(self.base_location):
        with self.settings(INSTALLED_APPS=["nsapp"]):
            app_config = apps.get_app_config("nsapp")
            self.assertEqual(app_config.path, self.app_path)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
14
tests.py
84
Refs #33476 -- Reformatted code with Black.
49,877
0
66
46
10
201,109
11
django
12
tests/apps/tests.py
Python
5
{ "docstring": "\n A Py3.3+ namespace package can be an app if it has only one path.\n ", "language": "en", "n_whitespaces": 29, "n_words": 14, "vocab_size": 14 }
https://github.com/django/django.git
3
origins
def origins(self):
    if hasattr(self, '_path_objects'):
        return self.path_objects[0]
    return [
        path_node_to_object(node) for node in self.path[0]
    ]
6ff2e55ce408f0f7f2fe99129048421c25ecafe6
9
cables.py
60
Add origins, destinations properties on CablePath
77,907
0
65
37
14
264,909
15
netbox
7
netbox/dcim/models/cables.py
Python
6
{ "docstring": "\n Return the list of originating objects (from cache, if available).\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
https://github.com/netbox-community/netbox.git
1
to_state
def to_state(self) -> Tuple[str, Any]:
    # Must implement by each connector.
    return NotImplementedError
51aa429f4c2db2f5bb35064cedaf70c8df2828a8
6
connector.py
26
[RLlib] minor cleanup of connector to/from state APIs (#28884) * [RLlib] minor cleanup of connector to/from state APIs. Also better error messages. Signed-off-by: Jun Gong <jungong@anyscale.com> * wip Signed-off-by: Jun Gong <jungong@anyscale.com> * address review comments. Signed-off-by: Jun Gong <jungong@anyscale.com> * lint Signed-off-by: Jun Gong <jungong@anyscale.com>
28,611
0
34
15
13
128,083
13
ray
6
rllib/connectors/connector.py
Python
13
{ "docstring": "Serialize a connector into a JSON serializable Tuple.\n\n to_state is required, so that all Connectors are serializable.\n\n Returns:\n A tuple of connector's name and its serialized states.\n String should match the name used to register the connector,\n while state can be any single data structure that contains the\n serialized state of the connector. If a connector is stateless,\n state can simply be None.\n ", "language": "en", "n_whitespaces": 139, "n_words": 63, "vocab_size": 48 }
https://github.com/ray-project/ray.git
1
paired_cosine_distances
def paired_cosine_distances(X, Y): X, Y = check_paired_arrays(X, Y) return 0.5 * row_norms(normalize(X) - normalize(Y), squared=True) PAIRED_DISTANCES = { "cosine": paired_cosine_distances, "euclidean": paired_euclidean_distances, "l2": paired_euclidean_distances, "l1": paired_manhattan_distances, "manhattan": paired_manhattan_distances, "cityblock": paired_manhattan_distances, }
a5b70b3132467b5e3616178d9ecca6cb7316c400
11
pairwise.py
108
DOC Ensures that sklearn.metrics.pairwise.paired_cosine_distances passes numpydoc validation (#22141) Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com>
75,273
0
56
39
27
258,521
31
scikit-learn
10
sklearn/metrics/pairwise.py
Python
3
{ "docstring": "\n Compute the paired cosine distances between X and Y.\n\n Read more in the :ref:`User Guide <metrics>`.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n An array where each row is a sample and each column is a feature.\n\n Y : array-like of shape (n_samples, n_features)\n An array where each row is a sample and each column is a feature.\n\n Returns\n -------\n distances : ndarray of shape (n_samples,)\n Returns the distances between the row vectors of `X`\n and the row vectors of `Y`, where `distances[i]` is the\n distance between `X[i]` and `Y[i]`.\n\n Notes\n -----\n The cosine distance is equivalent to the half the squared\n euclidean distance if each sample is normalized to unit norm.\n ", "language": "en", "n_whitespaces": 192, "n_words": 114, "vocab_size": 57 }
https://github.com/scikit-learn/scikit-learn.git
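A minimal usage sketch for the public scikit-learn API shown above; the input arrays are made up for illustration:
import numpy as np
from sklearn.metrics.pairwise import paired_cosine_distances

X = np.array([[1.0, 0.0], [1.0, 1.0]])
Y = np.array([[0.0, 1.0], [1.0, 1.0]])
# distances[i] is the cosine distance between X[i] and Y[i] (row-wise pairing).
print(paired_cosine_distances(X, Y))  # approximately [1.0, 0.0]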
1
popular_tags_for_model
def popular_tags_for_model(model, count=10): content_type = ContentType.objects.get_for_model(model) return ( Tag.objects.filter(taggit_taggeditem_items__content_type=content_type) .annotate(item_count=Count("taggit_taggeditem_items")) .order_by("-item_count")[:count] )
d10f15e55806c6944827d801cd9c2d53f5da4186
15
models.py
88
Reformat with black
15,639
0
45
52
12
71,191
12
wagtail
14
wagtail/admin/models.py
Python
7
{ "docstring": "Return a queryset of the most frequently used tags used on this model class", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 13 }
https://github.com/wagtail/wagtail.git
1
test_create_remote_before_start
def test_create_remote_before_start(call_ray_start_shared): from ray.util.client import ray
297341e107daee1ea3aff991ae8ea8c90993c683
6
test_client.py
24
[Test][Client] Only start ray once in client tests (#28835) It looks like we're frequently starting and shutting down Ray in this test because `ray_start_client_server` isn't connecting to the Ray created by `ray_start_regular_shared`, and is instead starting a new Ray head process every time it launches. Ray client tests are failing frequently with: ``` [2022-10-06 07:31:46,253 E 13235 13751] core_worker_process.cc:277: The core worker has already been shutdown. This happens when the language frontend accesses the Ray's worker after it is shutdown. The process will exit ``` Which is probably caused by having multiple ray clusters running simultaneous, with some shutting down asynchronously. This refactor forces all of the tests in the module to use the same Ray cluster. Also fixes two other sources of potential flakiness: * Joins the thread in test_client_thread_safe (seems like this has a bad interaction when the client server is cleaned up) * Calls ray.get in `test_stdout_log_stream`, to make sure that the remote function is done running before we try searching for its output Should also have the happy side effect of speeding up test_client. Ran the `Small & Client` tests (regular and external redis) twice each, no flakes, and windows version of test_client.
30,157
0
12
77
6
133,938
6
ray
5
python/ray/tests/test_client.py
Python
12
{ "docstring": "Creates remote objects (as though in a library) before\n starting the client.\n ", "language": "en", "n_whitespaces": 18, "n_words": 12, "vocab_size": 12 }
https://github.com/ray-project/ray.git
10
tsqr
def tsqr(a): if len(a.shape) != 2: raise Exception( "tsqr requires len(a.shape) == 2, but a.shape is " "{}".format(a.shape) ) if a.num_blocks[1] != 1: raise Exception( "tsqr requires a.num_blocks[1] == 1, but a.num_blocks " "is {}".format(a.num_blocks) ) num_blocks = a.num_blocks[0] K = int(np.ceil(np.log2(num_blocks))) + 1 q_tree = np.empty((num_blocks, K), dtype=object) current_rs = [] for i in range(num_blocks): block = a.object_refs[i, 0] q, r = ra.linalg.qr.remote(block) q_tree[i, 0] = q current_rs.append(r) for j in range(1, K): new_rs = [] for i in range(int(np.ceil(1.0 * len(current_rs) / 2))): stacked_rs = ra.vstack.remote(*current_rs[(2 * i) : (2 * i + 2)]) q, r = ra.linalg.qr.remote(stacked_rs) q_tree[i, j] = q new_rs.append(r) current_rs = new_rs assert len(current_rs) == 1, "len(current_rs) = " + str(len(current_rs)) # handle the special case in which the whole DistArray "a" fits in one # block and has fewer rows than columns, this is a bit ugly so think about # how to remove it if a.shape[0] >= a.shape[1]: q_shape = a.shape else: q_shape = [a.shape[0], a.shape[0]] q_num_blocks = core.DistArray.compute_num_blocks(q_shape) q_object_refs = np.empty(q_num_blocks, dtype=object) q_result = core.DistArray(q_shape, q_object_refs) # reconstruct output for i in range(num_blocks): q_block_current = q_tree[i, 0] ith_index = i for j in range(1, K): if np.mod(ith_index, 2) == 0: lower = [0, 0] upper = [a.shape[1], core.BLOCK_SIZE] else: lower = [a.shape[1], 0] upper = [2 * a.shape[1], core.BLOCK_SIZE] ith_index //= 2 q_block_current = ra.dot.remote( q_block_current, ra.subarray.remote(q_tree[ith_index, j], lower, upper) ) q_result.object_refs[i] = q_block_current r = current_rs[0] return q_result, ray.get(r) # TODO(rkn): This is unoptimized, we really want a block version of this. # This is Algorithm 5 from # http://www.eecs.berkeley.edu/Pubs/TechRpts/2013/EECS-2013-175.pdf. @ray.remote(num_returns=3)
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
@ray.remote(num_returns=3)
18
linalg.py
749
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,340
1
649
473
154
130,746
261
ray
51
python/ray/experimental/array/distributed/linalg.py
Python
52
{ "docstring": "Perform a QR decomposition of a tall-skinny matrix.\n\n Args:\n a: A distributed matrix with shape MxN (suppose K = min(M, N)).\n\n Returns:\n A tuple of q (a DistArray) and r (a numpy array) satisfying the\n following.\n - If q_full = ray.get(DistArray, q).assemble(), then\n q_full.shape == (M, K).\n - np.allclose(np.dot(q_full.T, q_full), np.eye(K)) == True.\n - If r_val = ray.get(np.ndarray, r), then r_val.shape == (K, N).\n - np.allclose(r, np.triu(r)) == True.\n ", "language": "en", "n_whitespaces": 160, "n_words": 69, "vocab_size": 54 }
https://github.com/ray-project/ray.git
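The record above targets Ray's distributed arrays, so it is not runnable on its own; a single-process NumPy sketch of the same tall-skinny QR reduction (QR each row block, stack the R factors, QR the stack) illustrates why the tree reduction yields the correct R factor. Block count and matrix shape are arbitrary choices:
import numpy as np

def tsqr_r(a, num_blocks=4):
    # Factor each row block, then factor the stack of small R factors.
    blocks = np.array_split(a, num_blocks, axis=0)
    stacked_rs = np.vstack([np.linalg.qr(b)[1] for b in blocks])
    return np.linalg.qr(stacked_rs)[1]

a = np.random.rand(1000, 8)
# Up to the sign of each row, the reduced R matches a direct QR of the full matrix.
print(np.allclose(np.abs(tsqr_r(a)), np.abs(np.linalg.qr(a)[1])))  # True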
5
recognize_log_derivative
def recognize_log_derivative(a, d, DE, z=None): z = z or Dummy('z') a, d = a.cancel(d, include=True) _, a = a.div(d) pz = Poly(z, DE.t) Dd = derivation(d, DE) q = a - pz*Dd r, _ = d.resultant(q, includePRS=True) r = Poly(r, z) Np, Sp = splitfactor_sqf(r, DE, coefficientD=True, z=z) for s, _ in Sp: # TODO also consider the complex roots which should # turn the flag false a = real_roots(s.as_poly(z)) if not all(j.is_Integer for j in a): return False return True
f5e24ed39a88b645ca27d15d60d5098895785773
12
risch.py
226
fix nits
49,138
0
156
146
62
199,088
81
sympy
29
sympy/integrals/risch.py
Python
15
{ "docstring": "\n There exists a v in K(x)* such that f = dv/v\n where f a rational function if and only if f can be written as f = A/D\n where D is squarefree,deg(A) < deg(D), gcd(A, D) = 1,\n and all the roots of the Rothstein-Trager resultant are integers. In that case,\n any of the Rothstein-Trager, Lazard-Rioboo-Trager or Czichowski algorithm\n produces u in K(x) such that du/dx = uf.\n ", "language": "en", "n_whitespaces": 90, "n_words": 68, "vocab_size": 51 }
https://github.com/sympy/sympy.git
2
async_step_zeroconf
async def async_step_zeroconf(self, discovery_info): self.url = discovery_info.host self.uuid = await helpers.get_uuid(self.url) if self.uuid is None: return self.async_abort(reason="no_valid_uuid_set") await self.async_set_unique_id(self.uuid) self._abort_if_unique_id_configured() return await self.async_step_user()
3c5a667d9784bb5f2fab426b133b5582706c6e68
11
config_flow.py
111
Add Z-Wave.Me integration (#65473) * Add support of Z-Wave.Me Z-Way and RaZberry server (#61182) Co-authored-by: Paulus Schoutsen <paulus@home-assistant.io> Co-authored-by: Martin Hjelmare <marhje52@gmail.com> Co-authored-by: LawfulChaos <kerbalspacema@gmail.com> * Add switch platform to Z-Wave.Me integration (#64957) Co-authored-by: Martin Hjelmare <marhje52@gmail.com> Co-authored-by: Dmitry Vlasov <kerbalspacema@gmail.com> * Add button platform to Z-Wave.Me integration (#65109) Co-authored-by: epenet <6771947+epenet@users.noreply.github.com> Co-authored-by: Dmitry Vlasov <kerbalspacema@gmail.com> Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Fix button controller access (#65117) * Add lock platform to Z-Wave.Me integration #65109 (#65114) Co-authored-by: epenet <6771947+epenet@users.noreply.github.com> Co-authored-by: Dmitry Vlasov <kerbalspacema@gmail.com> Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Add sensor platform to Z-Wave.Me integration (#65132) * Sensor Entity * Sensor fixes * Apply suggestions from code review Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Inline descriotion according to review proposal * State Classes for sensor * Generic sensor * Generic sensor Co-authored-by: Dmitry Vlasov <kerbalspacema@gmail.com> Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Add binary sensor platform to Z-Wave.Me integration (#65306) * Binary Sensor Entity * Update docstring Co-authored-by: Dmitry Vlasov <kerbalspacema@gmail.com> Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Add Light Entity platform to Z-Wave.Me integration (#65331) * Light Entity * mypy fix * Fixes, ZWaveMePlatforms enum * Apply suggestions from code review Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Fixes * Fixes * Fixes Co-authored-by: Dmitry Vlasov <kerbalspacema@gmail.com> Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Add Thermostat platform to Z-Wave.Me integration #65331 (#65371) * Climate entity * Climate entity * Apply suggestions from code review Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Climate entity fix * Clean up * cleanup * Import order fix * Correct naming Co-authored-by: Dmitry Vlasov <kerbalspacema@gmail.com> Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Correct zwave_me .coveragerc (#65491) Co-authored-by: Martin Hjelmare <marhje52@gmail.com> Co-authored-by: Paulus Schoutsen <paulus@home-assistant.io> Co-authored-by: Martin Hjelmare <marhje52@gmail.com> Co-authored-by: LawfulChaos <kerbalspacema@gmail.com> Co-authored-by: epenet <6771947+epenet@users.noreply.github.com>
111,351
0
83
65
18
312,712
23
core
13
homeassistant/components/zwave_me/config_flow.py
Python
8
{ "docstring": "\n Handle a discovered Z-Wave accessory - get url to pass into user step.\n\n This flow is triggered by the discovery component.\n ", "language": "en", "n_whitespaces": 43, "n_words": 21, "vocab_size": 21 }
https://github.com/home-assistant/core.git
8
all_simple_paths
def all_simple_paths(G, source, target, cutoff=None): if source not in G: raise nx.NodeNotFound(f"source node {source} not in graph") if target in G: targets = {target} else: try: targets = set(target) except TypeError as err: raise nx.NodeNotFound(f"target node {target} not in graph") from err if source in targets: return _empty_generator() if cutoff is None: cutoff = len(G) - 1 if cutoff < 1: return _empty_generator() if G.is_multigraph(): return _all_simple_paths_multigraph(G, source, targets, cutoff) else: return _all_simple_paths_graph(G, source, targets, cutoff)
53f766aa94b5aa5d3f87178418e794c4cc5f77eb
15
simple_paths.py
205
Improved documentation for all_simple_paths (#5944) * Improved documentation for all_simple_paths Improved the documentation for all_simple_paths. * Update simple_paths.py Black code style compliance edits.
42,304
0
188
125
45
177,180
76
networkx
16
networkx/algorithms/simple_paths.py
Python
20
{ "docstring": "Generate all simple paths in the graph G from source to target.\n\n A simple path is a path with no repeated nodes.\n\n Parameters\n ----------\n G : NetworkX graph\n\n source : node\n Starting node for path\n\n target : nodes\n Single node or iterable of nodes at which to end path\n\n cutoff : integer, optional\n Depth to stop the search. Only paths of length <= cutoff are returned.\n\n Returns\n -------\n path_generator: generator\n A generator that produces lists of simple paths. If there are no paths\n between the source and target within the given cutoff the generator\n produces no output. If it is possible to traverse the same sequence of\n nodes in multiple ways, namely through parallel edges, then it will be\n returned multiple times (once for each viable edge combination).\n\n Examples\n --------\n This iterator generates lists of nodes::\n\n >>> G = nx.complete_graph(4)\n >>> for path in nx.all_simple_paths(G, source=0, target=3):\n ... print(path)\n ...\n [0, 1, 2, 3]\n [0, 1, 3]\n [0, 2, 1, 3]\n [0, 2, 3]\n [0, 3]\n\n You can generate only those paths that are shorter than a certain\n length by using the `cutoff` keyword argument::\n\n >>> paths = nx.all_simple_paths(G, source=0, target=3, cutoff=2)\n >>> print(list(paths))\n [[0, 1, 3], [0, 2, 3], [0, 3]]\n\n To get each path as the corresponding list of edges, you can use the\n :func:`networkx.utils.pairwise` helper function::\n\n >>> paths = nx.all_simple_paths(G, source=0, target=3)\n >>> for path in map(nx.utils.pairwise, paths):\n ... print(list(path))\n [(0, 1), (1, 2), (2, 3)]\n [(0, 1), (1, 3)]\n [(0, 2), (2, 1), (1, 3)]\n [(0, 2), (2, 3)]\n [(0, 3)]\n\n Pass an iterable of nodes as target to generate all paths ending in any of several nodes::\n\n >>> G = nx.complete_graph(4)\n >>> for path in nx.all_simple_paths(G, source=0, target=[3, 2]):\n ... print(path)\n ...\n [0, 1, 2]\n [0, 1, 2, 3]\n [0, 1, 3]\n [0, 1, 3, 2]\n [0, 2]\n [0, 2, 1, 3]\n [0, 2, 3]\n [0, 3]\n [0, 3, 1, 2]\n [0, 3, 2]\n\n Iterate over each path from the root nodes to the leaf nodes in a\n directed acyclic graph using a functional programming approach::\n\n >>> from itertools import chain\n >>> from itertools import product\n >>> from itertools import starmap\n >>> from functools import partial\n >>>\n >>> chaini = chain.from_iterable\n >>>\n >>> G = nx.DiGraph([(0, 1), (1, 2), (0, 3), (3, 2)])\n >>> roots = (v for v, d in G.in_degree() if d == 0)\n >>> leaves = (v for v, d in G.out_degree() if d == 0)\n >>> all_paths = partial(nx.all_simple_paths, G)\n >>> list(chaini(starmap(all_paths, product(roots, leaves))))\n [[0, 1, 2], [0, 3, 2]]\n\n The same list computed using an iterative approach::\n\n >>> G = nx.DiGraph([(0, 1), (1, 2), (0, 3), (3, 2)])\n >>> roots = (v for v, d in G.in_degree() if d == 0)\n >>> leaves = (v for v, d in G.out_degree() if d == 0)\n >>> all_paths = []\n >>> for root in roots:\n ... for leaf in leaves:\n ... paths = nx.all_simple_paths(G, root, leaf)\n ... all_paths.extend(paths)\n >>> all_paths\n [[0, 1, 2], [0, 3, 2]]\n\n Iterate over each path from the root nodes to the leaf nodes in a\n directed acyclic graph passing all leaves together to avoid unnecessary\n compute::\n\n >>> G = nx.DiGraph([(0, 1), (2, 1), (1, 3), (1, 4)])\n >>> roots = (v for v, d in G.in_degree() if d == 0)\n >>> leaves = [v for v, d in G.out_degree() if d == 0]\n >>> all_paths = []\n >>> for root in roots:\n ... paths = nx.all_simple_paths(G, root, leaves)\n ... 
all_paths.extend(paths)\n >>> all_paths\n [[0, 1, 3], [0, 1, 4], [2, 1, 3], [2, 1, 4]]\n\n If parallel edges offer multiple ways to traverse a given sequence of\n nodes, this sequence of nodes will be returned multiple times:\n\n >>> G = nx.MultiDiGraph([(0, 1), (0, 1), (1, 2)])\n >>> list(nx.all_simple_paths(G, 0, 2))\n [[0, 1, 2], [0, 1, 2]]\n\n Notes\n -----\n This algorithm uses a modified depth-first search to generate the\n paths [1]_. A single path can be found in $O(V+E)$ time but the\n number of simple paths in a graph can be very large, e.g. $O(n!)$ in\n the complete graph of order $n$.\n\n This function does not check that a path exists between `source` and\n `target`. For large graphs, this may result in very long runtimes.\n Consider using `has_path` to check that a path exists between `source` and\n `target` before calling this function on large graphs.\n\n References\n ----------\n .. [1] R. Sedgewick, \"Algorithms in C, Part 5: Graph Algorithms\",\n Addison Wesley Professional, 3rd ed., 2001.\n\n See Also\n --------\n all_shortest_paths, shortest_path, has_path\n\n ", "language": "en", "n_whitespaces": 1450, "n_words": 741, "vocab_size": 285 }
https://github.com/networkx/networkx.git
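A small runnable sketch of the guard recommended in the Notes of the docstring above (check `has_path` before enumerating, since the number of simple paths can explode); the graph is an arbitrary example:
import networkx as nx

G = nx.path_graph(5)  # 0 - 1 - 2 - 3 - 4
if nx.has_path(G, 0, 4):
    print(list(nx.all_simple_paths(G, 0, 4, cutoff=4)))  # [[0, 1, 2, 3, 4]]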
2
supports_numpy
def supports_numpy(self): if not self.supports_python(): return False self.interpreter_exec('console', 'python import numpy; print numpy') return "module \'numpy\' from" in self._captured.before.decode()
159f3667f4772680368cb7b0771c6d5e44416e3c
10
gdb_support.py
69
Numba gdb-python extension for printing This adds support for printing Numba types as their python equivalents from gdb by using its python extension.
39,119
0
58
36
18
161,988
19
numba
7
numba/tests/gdb_support.py
Python
5
{ "docstring": "Returns True if the underlying gdb implementation has NumPy support\n (and by extension Python support) False otherwise", "language": "en", "n_whitespaces": 26, "n_words": 17, "vocab_size": 17 }
https://github.com/numba/numba.git
3
_get_tcl_tk_info
def _get_tcl_tk_info(): try: import tkinter from _tkinter import TCL_VERSION, TK_VERSION except ImportError: # tkinter unavailable return None, None, None, False tcl = tkinter.Tcl() # Query the location of Tcl library/data directory. tcl_dir = tcl.eval("info library") # Check if Tcl/Tk is built with multi-threaded support (built with --enable-threads), as indicated by the presence # of optional `threaded` member in `tcl_platform` array. try: tcl.getvar("tcl_platform(threaded)") # Ignore the actual value. tcl_threaded = True except tkinter.TclError: tcl_threaded = False return tcl_dir, TCL_VERSION, TK_VERSION, tcl_threaded # Populate the variables. If `tkinter` is unavailable, the values are set to `None` or `False`. ( tcl_dir, tcl_version, tk_version, tcl_threaded, ) = _get_tcl_tk_info()
2b2559af1c7790596e7b2040f48e56baef608f9d
10
tcl_tk.py
141
hookutils: tcl/tk: port to PyInstaller.isolated framework
77,583
0
196
68
76
264,062
104
pyinstaller
15
PyInstaller/utils/hooks/tcl_tk.py
Python
14
{ "docstring": "\n Isolated-subprocess helper to retrieve the basic Tcl/Tk information:\n - tcl_dir = path to the Tcl library/data directory.\n - tcl_version = Tcl version\n - tk_version = Tk version\n - tcl_theaded = boolean indicating whether Tcl/Tk is built with multi-threading support.\n ", "language": "en", "n_whitespaces": 62, "n_words": 39, "vocab_size": 28 }
https://github.com/pyinstaller/pyinstaller.git
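The same probing can be tried interactively on a machine with Tcl/Tk installed; this hedged sketch mirrors the calls used in the record and prints the values instead of returning them:
import tkinter
from _tkinter import TCL_VERSION, TK_VERSION

tcl = tkinter.Tcl()
print("Tcl", TCL_VERSION, "Tk", TK_VERSION, "library dir:", tcl.eval("info library"))
try:
    tcl.getvar("tcl_platform(threaded)")  # presence of the member is what matters
    print("threaded Tcl build")
except tkinter.TclError:
    print("non-threaded Tcl build")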
1
lead_query
def lead_query(doctype, txt, searchfield, start, page_len, filters): fields = get_fields("Lead", ["name", "lead_name", "company_name"]) return frappe.db.sql( .format( **{"fields": ", ".join(fields), "key": searchfield, "mcond": get_match_cond(doctype)} ), {"txt": "%%%s%%" % txt, "_txt": txt.replace("%", ""), "start": start, "page_len": page_len}, ) # searches for customer @frappe.whitelist() @frappe.validate_and_sanitize_search_inputs
494bd9ef78313436f0424b918f200dab8fc7c20b
@frappe.whitelist() @frappe.validate_and_sanitize_search_inputs
15
queries.py
178
style: format code with black
13,968
1
31
92
39
65,646
42
erpnext
18
erpnext/controllers/queries.py
Python
21
{ "docstring": "select {fields} from `tabLead`\n\t\twhere docstatus < 2\n\t\t\tand ifnull(status, '') != 'Converted'\n\t\t\tand ({key} like %(txt)s\n\t\t\t\tor lead_name like %(txt)s\n\t\t\t\tor company_name like %(txt)s)\n\t\t\t{mcond}\n\t\torder by\n\t\t\tif(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),\n\t\t\tif(locate(%(_txt)s, lead_name), locate(%(_txt)s, lead_name), 99999),\n\t\t\tif(locate(%(_txt)s, company_name), locate(%(_txt)s, company_name), 99999),\n\t\t\tidx desc,\n\t\t\tname, lead_name\n\t\tlimit %(start)s, %(page_len)s", "language": "en", "n_whitespaces": 36, "n_words": 50, "vocab_size": 35 }
https://github.com/frappe/erpnext.git
3
update_work_queue_id_from_name
async def update_work_queue_id_from_name(self) -> bool: if not self.work_queue_name: raise ValueError("No work queue name provided.") try: work_queue = await self.client.read_work_queue_by_name(self.work_queue_name) self.work_queue_id = work_queue.id except httpx.HTTPStatusError: self.logger.warn(f'No work queue found named "{self.work_queue_name}"') self.work_queue_id = None
ca11b933b9187a0e1bf9008315eeec2155815ab3
13
agent.py
111
Support work_queue_name for agent
11,054
0
116
60
28
54,421
33
prefect
14
src/prefect/agent.py
Python
15
{ "docstring": "\n For agents that were provided a work_queue_name, rather than a work_queue_id,\n this function will retrieve the work queue ID corresponding to that name and assign\n it to `work_queue_id`. If no matching queue is found, a warning is logged\n and `work_queue_id = None`.\n ", "language": "en", "n_whitespaces": 78, "n_words": 42, "vocab_size": 35 }
https://github.com/PrefectHQ/prefect.git
2
on_chord_header_start
def on_chord_header_start(self, chord, **header) -> dict: if not isinstance(chord.tasks, group): chord.tasks = group(chord.tasks) return self.on_group_start(chord.tasks, **header)
1c4ff33bd22cf94e297bd6449a06b5a30c2c1fbc
11
canvas.py
73
Canvas Header Stamping (#7384) * Strip down the header-stamping PR to the basics. * Serialize groups. * Add groups to result backend meta data. * Fix spelling mistake. * Revert changes to canvas.py * Revert changes to app/base.py * Add stamping implementation to canvas.py * Send task to AMQP with groups. * Successfully pass single group to result. * _freeze_gid dict merge fixed * First draft of the visitor API. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * OptionsVisitor created * Fixed canvas.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added test for simple test for chord and fixed chord implementation * Changed _IMMUTABLE_OPTIONS * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed list order * Fixed tests (stamp test and chord test), fixed order in groups * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed lint and elements * Changed implementation of stamp API and fix lint * Added documentation to Stamping API. Added chord with groups test * Implemented stamping inside replace and added test for an implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Splitted into subtests * Group stamping rollback * group.id is None fixed * Added integration test * Added integration test * apply_async fixed * Integration test and test_chord fixed * Lint fixed * chord freeze fixed * Minor fixes. * Chain apply_async fixed and tests fixed * lint fixed * Added integration test for chord * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * type -> isinstance * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Redo header stamping (#7341) * _freeze_gid dict merge fixed * OptionsVisitor created * Fixed canvas.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added test for simple test for chord and fixed chord implementation * Changed _IMMUTABLE_OPTIONS * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed list order * Fixed tests (stamp test and chord test), fixed order in groups * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed lint and elements * Changed implementation of stamp API and fix lint * Added documentation to Stamping API. Added chord with groups test * Implemented stamping inside replace and added test for an implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Splitted into subtests * Group stamping rollback * group.id is None fixed * Added integration test * Added integration test * apply_async fixed * Integration test and test_chord fixed * Lint fixed * chord freeze fixed * Minor fixes. 
* Chain apply_async fixed and tests fixed * lint fixed * Added integration test for chord * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * type -> isinstance * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Omer Katz <omer.katz@omerkatz.com> * Added stamping mechanism * Manual stamping improved * flake8 fixed * Added subtests * Add comma. * Moved groups to stamps * Fixed chord and added test for that * Strip down the header-stamping PR to the basics. * Serialize groups. * Add groups to result backend meta data. * Fix spelling mistake. * Revert changes to canvas.py * Revert changes to app/base.py * Add stamping implementation to canvas.py * Send task to AMQP with groups. * Successfully pass single group to result. * _freeze_gid dict merge fixed * First draft of the visitor API. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * OptionsVisitor created * Fixed canvas.py * Added test for simple test for chord and fixed chord implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changed _IMMUTABLE_OPTIONS * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed list order * Fixed tests (stamp test and chord test), fixed order in groups * Fixed lint and elements * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changed implementation of stamp API and fix lint * Added documentation to Stamping API. Added chord with groups test * Implemented stamping inside replace and added test for an implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Splitted into subtests * Group stamping rollback * group.id is None fixed * Added integration test * Added integration test * apply_async fixed * Integration test and test_chord fixed * Lint fixed * chord freeze fixed * Minor fixes. * Chain apply_async fixed and tests fixed * lint fixed * Added integration test for chord * type -> isinstance * Added stamping mechanism * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Manual stamping improved * fail_ci_if_error uncommented * flake8 fixed * Added subtests * Changes * Add comma. * Fixed chord and added test for that * canvas.py fixed * Test chord.py fixed * Fixed stamped_headers * collections import fixed * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * collections import fixed * Update celery/backends/base.py Co-authored-by: Omer Katz <omer.katz@omerkatz.com> * ampq.py fixed * Refrain from using deprecated import path. * Fix test_complex_chain regression. Whenever we stamp a group we need to freeze it first if it wasn't already frozen. Somewhere along the line, the group id changed because we were freezing twice. This commit places the stamping operation after preparing the chain's steps which fixes the problem somehow. We don't know why yet. 
* Fixed integration tests * Fixed integration tests * Fixed integration tests * Fixed integration tests * Fixed issues with maybe_list. Add documentation * Fixed potential issue with integration tests * Fixed issues with _regen * Fixed issues with _regen * Fixed test_generator issues * Fixed _regen stamping * Fixed _regen stamping * Fixed TimeOut issue * Fixed TimeOut issue * Fixed TimeOut issue * Update docs/userguide/canvas.rst Co-authored-by: Omer Katz <omer.katz@omerkatz.com> * Fixed Couchbase * Better stamping intro * New GroupVisitor example * Adjust documentation. Co-authored-by: Naomi Elstein <naomi.els@omerkatz.com> Co-authored-by: Omer Katz <omer.katz@omerkatz.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Asif Saif Uddin <auvipy@gmail.com> Co-authored-by: Omer Katz <omer.katz@kcg.tech>
52,191
0
48
46
15
208,066
16
celery
9
celery/canvas.py
Python
12
{ "docstring": "Method that is called on сhord header stamping start.\n\n Arguments:\n chord (chord): chord that is stamped.\n headers (Dict): Partial headers that could be merged with existing headers.\n Returns:\n Dict: headers to update.\n ", "language": "en", "n_whitespaces": 92, "n_words": 32, "vocab_size": 26 }
https://github.com/celery/celery.git
1
test_collect_commands
async def test_collect_commands(): with taddons.context() as tctx: c = command.CommandManager(tctx.master) a = TCmds() c.collect_commands(a) assert "empty" in c.commands a = TypeErrAddon() c.collect_commands(a) await tctx.master.await_log("Could not load")
b3587b52b25077f68116b9852b041d33e7fc6601
11
test_command.py
113
make it black!
73,904
0
81
61
22
251,966
26
mitmproxy
14
test/mitmproxy/test_command.py
Python
9
{ "docstring": "\n This tests for errors thrown by getattr() or __getattr__ implementations\n that return an object for .command_name.\n ", "language": "en", "n_whitespaces": 26, "n_words": 16, "vocab_size": 15 }
https://github.com/mitmproxy/mitmproxy.git
7
mask
def mask(self, row_labels, col_labels): logger = get_logger() logger.debug(f"ENTER::Partition.mask::{self._identity}") new_obj = super().mask(row_labels, col_labels) if isinstance(row_labels, slice) and unidist.is_object_ref(self._length_cache): if row_labels == slice(None): # fast path - full axis take new_obj._length_cache = self._length_cache else: new_obj._length_cache = compute_sliced_len.remote( row_labels, self._length_cache ) if isinstance(col_labels, slice) and unidist.is_object_ref(self._width_cache): if col_labels == slice(None): # fast path - full axis take new_obj._width_cache = self._width_cache else: new_obj._width_cache = compute_sliced_len.remote( col_labels, self._width_cache ) logger.debug(f"EXIT::Partition.mask::{self._identity}") return new_obj
193505fdf0c984743397ba3df56262f30aee13a8
14
partition.py
238
FEAT-#5053: Add pandas on unidist execution with MPI backend (#5059) Signed-off-by: Igoshev, Iaroslav <iaroslav.igoshev@intel.com>
36,272
0
325
139
39
155,180
67
modin
18
modin/core/execution/unidist/implementations/pandas_on_unidist/partitioning/partition.py
Python
20
{ "docstring": "\n Lazily create a mask that extracts the indices provided.\n\n Parameters\n ----------\n row_labels : list-like, slice or label\n The row labels for the rows to extract.\n col_labels : list-like, slice or label\n The column labels for the columns to extract.\n\n Returns\n -------\n PandasOnUnidistDataframePartition\n A new ``PandasOnUnidistDataframePartition`` object.\n ", "language": "en", "n_whitespaces": 143, "n_words": 46, "vocab_size": 34 }
https://github.com/modin-project/modin.git
1
mixin_gateway_parser
def mixin_gateway_parser(parser): gp = add_arg_group(parser, title='Gateway') _add_host(gp) _add_proxy(gp) gp.add_argument( '--uses', type=str, default=None, # TODO: add Jina Hub Gateway help=, ) gp.add_argument( '--uses-with', action=KVAppendAction, metavar='KEY: VALUE', nargs='*', help=, ) gp.add_argument( '--py-modules', type=str, nargs='*', metavar='PATH', help=, ) mixin_base_runtime_parser(gp) gp.add_argument( '--port-expose', type=int, dest='port', default=helper.random_port(), help='The port that the gateway exposes for clients for GRPC connections.', ) parser.add_argument( '--graph-description', type=str, help='Routing graph for the gateway', default='{}', ) parser.add_argument( '--graph-conditions', type=str, help='Dictionary stating which filtering conditions each Executor in the graph requires to receive Documents.', default='{}', ) parser.add_argument( '--deployments-addresses', type=str, help='dictionary JSON with the input addresses of each Deployment', default='{}', ) parser.add_argument( '--deployments-disable-reduce', type=str, help='list JSON disabling the built-in merging mechanism for each Deployment listed', default='[]', ) gp.add_argument( '--compression', choices=['NoCompression', 'Deflate', 'Gzip'], help='The compression mechanism used when sending requests from the Head to the WorkerRuntimes. For more details, ' 'check https://grpc.github.io/grpc/python/grpc.html#compression.', ) gp.add_argument( '--timeout-send', type=int, default=None, help='The timeout in milliseconds used when sending data requests to Executors, -1 means no timeout, disabled by default', )
cdaf7f87ececf9e13b517379ca183b17f0d7b007
10
remote.py
404
feat: allow passing custom gateway in Flow (#5189)
2,555
0
543
237
108
13,120
160
jina
22
jina/parsers/orchestrate/runtimes/remote.py
Python
87
{ "docstring": "Add the options for remote expose at the Gateway\n :param parser: the parser\n \n The config of the gateway, it could be one of the followings:\n * the string literal of an Gateway class name\n * a Gateway YAML file (.yml, .yaml, .jaml)\n * a docker image (must start with `docker://`)\n * the string literal of a YAML config (must start with `!` or `jtype: `)\n * the string literal of a JSON config\n\n When use it under Python, one can use the following values additionally:\n - a Python dict that represents the config\n - a text file stream has `.read()` interface\n \n Dictionary of keyword arguments that will override the `with` configuration in `uses`\n \nThe customized python modules need to be imported before loading the gateway\n\nNote that the recommended way is to only import a single module - a simple python file, if your\ngateway can be defined in a single file, or an ``__init__.py`` file if you have multiple files,\nwhich should be structured as a python package.\n", "language": "en", "n_whitespaces": 249, "n_words": 169, "vocab_size": 102 }
https://github.com/jina-ai/jina.git
1
test_update_next_event
async def test_update_next_event(hass, calls, fake_schedule): event_data1 = fake_schedule.create_event( start=datetime.datetime.fromisoformat("2022-04-19 11:00:00+00:00"), end=datetime.datetime.fromisoformat("2022-04-19 11:15:00+00:00"), ) await create_automation(hass, EVENT_START) # No calls before event start await fake_schedule.fire_until( datetime.datetime.fromisoformat("2022-04-19 10:45:00+00:00") ) assert len(calls()) == 0 # Create a new event between now and when the event fires event_data2 = fake_schedule.create_event( start=datetime.datetime.fromisoformat("2022-04-19 10:55:00+00:00"), end=datetime.datetime.fromisoformat("2022-04-19 11:05:00+00:00"), ) # Advance past the end of the events await fake_schedule.fire_until( datetime.datetime.fromisoformat("2022-04-19 11:30:00+00:00") ) assert calls() == [ { "platform": "calendar", "event": EVENT_START, "calendar_event": event_data2, }, { "platform": "calendar", "event": EVENT_START, "calendar_event": event_data1, }, ]
a2c74b978664b627bafc4a43b26aa2be7b15b229
12
test_trigger.py
259
Add initial implementation of a calendar trigger (#68674) * Add initial implementation of calendar trigger This is an initial implementation of a calendar trigger, that supports triggering on calendar start time. See architecture proposal in: https://github.com/home-assistant/architecture/discussions/700 * Address reviewer feedback * Use f-strings for all tests * Apply suggestions from code review Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Remove logging f-strings, and move to main code * Remove mypy ignore * Apply suggestions from code review Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Update calendar triggers to use new calendar data model * Update tests/components/calendar/test_trigger.py Co-authored-by: Franck Nijhof <frenck@frenck.nl> * Rewrite tests using freezegun Rewrite tests using freezegun and improve edge case handling, and use utc consistently for all alarms. * Update homeassistant/components/calendar/trigger.py Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Update homeassistant/components/calendar/trigger.py Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Increase test coverage based on pr feedback Co-authored-by: Martin Hjelmare <marhje52@gmail.com> Co-authored-by: Franck Nijhof <frenck@frenck.nl>
95,806
0
269
149
59
296,832
85
core
15
tests/components/calendar/test_trigger.py
Python
29
{ "docstring": "Test detection of a new event after initial trigger is setup.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/home-assistant/core.git
1
encode
def encode(self, bboxes, gt_bboxes): bboxes = get_box_tensor(bboxes) gt_bboxes = get_box_tensor(gt_bboxes) assert bboxes.size(0) == gt_bboxes.size(0) assert bboxes.size(-1) == gt_bboxes.size(-1) == 4 encoded_bboxes = bbox2delta(bboxes, gt_bboxes, self.means, self.stds) return encoded_bboxes
d915740fa8228cf57741b27d9e5d66e358456b8e
9
delta_xywh_bbox_coder.py
112
[Refactor] Refactor anchor head and base head with boxlist (#8625) * Refactor anchor head * Update * Update * Update * Add a series of boxes tools * Fix box type to support n x box_dim boxes * revert box type changes * Add docstring * refactor retina_head * Update * Update * Fix comments * modify docstring of coder and ioucalculator * Replace with_boxlist with use_box_type
70,861
0
77
72
22
245,715
28
mmdetection
10
mmdet/models/task_modules/coders/delta_xywh_bbox_coder.py
Python
7
{ "docstring": "Get box regression transformation deltas that can be used to\n transform the ``bboxes`` into the ``gt_bboxes``.\n\n Args:\n bboxes (torch.Tensor or :obj:`BaseBoxes`): Source boxes,\n e.g., object proposals.\n gt_bboxes (torch.Tensor or :obj:`BaseBoxes`): Target of the\n transformation, e.g., ground-truth boxes.\n\n Returns:\n torch.Tensor: Box transformation deltas\n ", "language": "en", "n_whitespaces": 133, "n_words": 42, "vocab_size": 34 }
https://github.com/open-mmlab/mmdetection.git
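The coder above delegates to `bbox2delta`; a hedged NumPy sketch of the usual (dx, dy, dw, dh) parameterization it is based on (zero means and unit stds assumed), with made-up boxes:
import numpy as np

def bbox2delta_np(proposals, gt):
    # Boxes are (x1, y1, x2, y2); deltas follow the common R-CNN encoding.
    pw = proposals[:, 2] - proposals[:, 0]
    ph = proposals[:, 3] - proposals[:, 1]
    px = proposals[:, 0] + 0.5 * pw
    py = proposals[:, 1] + 0.5 * ph
    gw = gt[:, 2] - gt[:, 0]
    gh = gt[:, 3] - gt[:, 1]
    gx = gt[:, 0] + 0.5 * gw
    gy = gt[:, 1] + 0.5 * gh
    return np.stack([(gx - px) / pw, (gy - py) / ph,
                     np.log(gw / pw), np.log(gh / ph)], axis=1)

print(bbox2delta_np(np.array([[0.0, 0.0, 10.0, 10.0]]),
                   np.array([[1.0, 1.0, 11.0, 11.0]])))  # [[0.1 0.1 0.  0. ]]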
5
create_perspective_transform
def create_perspective_transform(src, dst, round=False, splat_args=False): try: transform_matrix = create_perspective_transform_matrix(src, dst) error = None except np.linalg.LinAlgError as e: transform_matrix = np.identity(3, dtype=np.float) error = "invalid input quads (%s and %s): %s" %(src, dst, e) error = error.replace("\n", "") to_eval = "def perspective_transform(%s):\n" %( splat_args and "*pt" or "pt", ) to_eval += " res = np.dot(transform_matrix, ((pt[0], ), (pt[1], ), (1, )))\n" to_eval += " res = res / res[2]\n" if round: to_eval += " return (int(round(res[0][0])), int(round(res[1][0])))\n" else: to_eval += " return (res[0][0], res[1][0])\n" locals = { "transform_matrix": transform_matrix, } locals.update(globals()) exec(to_eval,locals,locals) res = locals["perspective_transform"] res.matrix = transform_matrix res.error = error return res
7375ee364e0df2a417f92593e09557f1b2a3575a
13
align2stylegan.py
254
initialize ostec
1,631
0
220
144
67
9,551
102
insightface
23
reconstruction/ostec/utils/align2stylegan.py
Python
26
{ "docstring": " Returns a function which will transform points in quadrilateral\n ``src`` to the corresponding points on quadrilateral ``dst``::\n\n >>> transform = create_perspective_transform(\n ... [(0, 0), (10, 0), (10, 10), (0, 10)],\n ... [(50, 50), (100, 50), (100, 100), (50, 100)],\n ... )\n >>> transform((5, 5))\n (74.99999999999639, 74.999999999999957)\n\n If ``round`` is ``True`` then points will be rounded to the nearest\n integer and integer values will be returned.\n\n >>> transform = create_perspective_transform(\n ... [(0, 0), (10, 0), (10, 10), (0, 10)],\n ... [(50, 50), (100, 50), (100, 100), (50, 100)],\n ... round=True,\n ... )\n >>> transform((5, 5))\n (75, 75)\n\n If ``splat_args`` is ``True`` the function will accept two arguments\n instead of a tuple.\n\n >>> transform = create_perspective_transform(\n ... [(0, 0), (10, 0), (10, 10), (0, 10)],\n ... [(50, 50), (100, 50), (100, 100), (50, 100)],\n ... splat_args=True,\n ... )\n >>> transform(5, 5)\n (74.99999999999639, 74.999999999999957)\n\n If the input values yield an invalid transformation matrix an identity\n function will be returned and the ``error`` attribute will be set to a\n description of the error::\n\n >>> tranform = create_perspective_transform(\n ... np.zeros((4, 2)),\n ... np.zeros((4, 2)),\n ... )\n >>> transform((5, 5))\n (5.0, 5.0)\n >>> transform.error\n 'invalid input quads (...): Singular matrix\n ", "language": "en", "n_whitespaces": 606, "n_words": 194, "vocab_size": 84 }
https://github.com/deepinsight/insightface.git
1
limit
def limit(self, *args): return self.applyfunc(lambda x: x.limit(*args)) # https://github.com/sympy/sympy/pull/12854
59d22b6bb7287613d598611027f640d068ca5748
11
matrices.py
44
Moved imports to higher level
47,891
0
22
25
9
196,391
9
sympy
5
sympy/matrices/matrices.py
Python
2
{ "docstring": "Calculate the limit of each element in the matrix.\n ``args`` will be passed to the ``limit`` function.\n\n Examples\n ========\n\n >>> from sympy import Matrix\n >>> from sympy.abc import x, y\n >>> M = Matrix([[x, y], [1, 0]])\n >>> M.limit(x, 2)\n Matrix([\n [2, y],\n [1, 0]])\n\n See Also\n ========\n\n integrate\n diff\n ", "language": "en", "n_whitespaces": 155, "n_words": 50, "vocab_size": 39 }
https://github.com/sympy/sympy.git
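Beyond the substitution-like case shown in the docstring, a short sketch with genuine limits (expressions chosen arbitrarily):
from sympy import Matrix, sin, symbols

x = symbols("x")
M = Matrix([[sin(x) / x, (1 + x) ** (1 / x)]])
print(M.limit(x, 0))  # Matrix([[1, E]])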
4
difference
def difference(self, other, sort=None): self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) # Note: we do NOT call _deprecate_dti_setop here, as there # is no requirement that .difference be commutative, so it does # not cast to object. if self.equals(other): # Note: we do not (yet) sort even if sort=None GH#24959 return self[:0].rename(result_name) if len(other) == 0: # Note: we do not (yet) sort even if sort=None GH#24959 return self.rename(result_name) if not self._should_compare(other): # Nothing matches -> difference is everything return self.rename(result_name) result = self._difference(other, sort=sort) return self._wrap_difference_result(other, result)
4e034ec0006b6c05160ce67ea1420ce28f295c91
11
base.py
173
DEPR: DatetimeIndex.intersection with mixed timezones cast to UTC, not object (#45357) * DEPR: DatetimeIndex.intersection with mixed timezones cast to UTC instead of object * GH ref * mypy fixup Co-authored-by: Jeff Reback <jeff@reback.net>
39,453
0
239
105
57
163,521
87
pandas
15
pandas/core/indexes/base.py
Python
12
{ "docstring": "\n Return a new Index with elements of index not in `other`.\n\n This is the set difference of two Index objects.\n\n Parameters\n ----------\n other : Index or array-like\n sort : False or None, default None\n Whether to sort the resulting index. By default, the\n values are attempted to be sorted, but any TypeError from\n incomparable elements is caught by pandas.\n\n * None : Attempt to sort the result, but catch any TypeErrors\n from comparing incomparable elements.\n * False : Do not sort the result.\n\n Returns\n -------\n difference : Index\n\n Examples\n --------\n >>> idx1 = pd.Index([2, 1, 3, 4])\n >>> idx2 = pd.Index([3, 4, 5, 6])\n >>> idx1.difference(idx2)\n Int64Index([1, 2], dtype='int64')\n >>> idx1.difference(idx2, sort=False)\n Int64Index([2, 1], dtype='int64')\n ", "language": "en", "n_whitespaces": 310, "n_words": 115, "vocab_size": 81 }
https://github.com/pandas-dev/pandas.git
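The comments in the record stress that `difference` is not required to be commutative; a tiny sketch with arbitrary indexes makes that concrete:
import pandas as pd

a = pd.Index([1, 2, 3])
b = pd.Index([3, 4])
print(a.difference(b).tolist())  # [1, 2]
print(b.difference(a).tolist())  # [4]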
1
_on_frame_load_finished
def _on_frame_load_finished(self): page = self._widget.page() assert isinstance(page, webpage.BrowserPage), page self._on_load_finished(not page.error_occurred)
a20bb67a878b2e68abf8268c1b0a27f018d01352
9
webkittab.py
58
mypy: Upgrade to PyQt5-stubs 5.15.6.0 For some unknown reason, those new stubs cause a *lot* of things now to be checked by mypy which formerly probably got skipped due to Any being implied somewhere. The stubs themselves mainly improved, with a couple of regressions too. In total, there were some 337 (!) new mypy errors. This commit fixes almost all of them, and the next commit improves a fix to get things down to 0 errors again. Overview of the changes: ==== qutebrowser/app.py - Drop type ignore due to improved stubs. ==== qutebrowser/browser/browsertab.py - Specify the type of _widget members more closely than just QWidget. This is debatable: I suppose the abstract stuff shouldn't need to know anything about the concrete backends at all. But it seems like we cut some corners when initially implementing things, and put some code in browsertab.py just because the APIs of both backends happened to be compatible. Perhaps something to reconsider once we drop QtWebKit and hopefully implement a dummy backend. - Add an additional assertion in AbstractAction.run_string. This is already covered by the isinstance(member, self.action_base) above it, but that's too dynamic for mypy to understand. - Fix the return type of AbstractScroller.pos_px, which is a QPoint (with x and y components), not a single int. - Fix the return type of AbstractScroller.pos_perc, which is a Tuple (with x and y components), not a single int. - Fix the argument types of AbstractScroller.to_perc, as it's possible to pass fractional percentages too. - Specify the type for AbstractHistoryPrivate._history. See above (_widget) re this being debatable. - Fix the return type of AbstractTabPrivate.event_target(), which can be None (see #3888). - Fix the return type of AbstractTabPrivate.run_js_sync, which is Any (the JS return value), not None. - Fix the argument type for AbstractTabPrivate.toggle_inspector: position can be None to use the last used position. - Declare the type of sub-objects of AbstractTab. - Fix the return value of AbstractTab.icon(), which is the QIcon, not None. ==== qutebrowser/browser/commands.py - Make sure the active window is a MainWindow (with a .win_id attribute). ==== qutebrowser/browser/downloadview.py - Add _model() which makes sure that self.model() is a DownloadModel, not None or any other model. This is needed because other methods access a variety of custom attributes on it, e.g. last_index(). ==== qutebrowser/browser/greasemonkey.py - Add an ignore for AbstractDownload.requested_url which we patch onto the downloads. Probably would be nicer to add it as a proper attribute which always gets set by the DownloadManager. ==== qutebrowser/browser/hints.py - Remove type ignores for QUrl.toString(). - Add a new type ignore for combining different URL flags (which works, but is not exactly type safe... still probably a regression in the stubs). - Make sure the things we get back from self._get_keyparser are what we actually expect. Probably should introduce a TypedDict (and/or overloads for _get_keyparser with typing.Literal) to teach mypy about the exact return value. See #7098. This is needed because we access Hint/NormalKeyParser-specific attributes such as .set_inhibited_timout() or .update_bindings(). ==== qutebrowser/browser/inspector.py - Similar changes than in browsertab.py to make some types where we share API (e.g. .setPage()) more concrete. Didn't work out unfortunately, see next commit. ==== qutebrowser/browser/network/pac.py - Remove now unneeded type ignore for signal. 
==== qutebrowser/browser/qtnetworkdownloads.py - Make sure that downloads is a qtnetworkdownloads.DownloadItem (rather than an AbstractDownload), so that we can call ._uses_nam() on it. ==== qutebrowser/browser/qutescheme.py - Remove now unneeded type ignore for QUrl flags. ==== qutebrowser/browser/urlmarks.py - Specify the type of UrlMarkManager._lineparser, as those only get initialized in _init_lineparser of subclasses, so mypy doesn't know it's supposed to exist. ==== qutebrowser/browser/webelem.py - New casts to turn single KeyboardModifier (enum) entries into KeyboardModifiers (flags). Might not be needed anymore with Qt 6. - With that, casting the final value is now unneeded. ==== qutebrowser/browser/webengine/notification.py - Remove now unneeded type ignore for signal. - Make sure the self.sender() we get in HerbeNotificationAdapter._on_finished() is a QProcess, not just any QObject. ==== qutebrowser/browser/webengine/webenginedownloads.py - Remove now unneeded type ignores for signals. ==== qutebrowser/browser/webengine/webengineelem.py - Specify the type of WebEngineElement._tab. - Remove now unneeded type ignore for mixed flags. ==== qutebrowser/browser/webengine/webengineinspector.py - See changes to inspector.py and next commit. - Remove now unneeded type ignore for signal. ==== qutebrowser/browser/webengine/webenginequtescheme.py - Remove now unneeded type ignore for mixed flags. ==== qutebrowser/browser/webengine/webenginesettings.py - Ignore access of .setter attribute which we patch onto QWebEngineProfile. Would be nice to have a subclass or wrapper-class instead. ==== qutebrowser/browser/webengine/webenginetab.py - Specified the type of _widget members more closely than just QWidget. See browsertab.py changes for details. - Remove some now-unneeded type ignores for creating FindFlags. - Specify more concrete types for WebEngineTab members where we actually need to access WebEngine-specific attributes. - Make sure the page we get is our custom WebEnginePage subclass, not just any QWebEnginePage. This is needed because we access custom attributes on it. ==== qutebrowser/browser/webengine/webview.py - Make sure the page we get is our custom WebEnginePage subclass, not just any QWebEnginePage. This is needed because we access custom attributes on it. ==== qutebrowser/browser/webkit/network/networkreply.py - Remove now unneeded type ignores for signals. ==== qutebrowser/browser/webkit/webkitinspector.py - See changes to inspector.py and next commit. ==== qutebrowser/browser/webkit/webkittab.py - Specify the type of _widget members more closely than just QWidget. See browsertab.py changes for details. - Add a type ignore for WebKitAction because our workaround needs to treat them as ints (which is allowed by PyQt, even if not type-safe). - Add new ignores for findText calls: The text is a QString and can be None; the flags are valid despite mypy thinking they aren't (stubs regression?). - Specify the type for WebKitHistoryPrivate._history, because we access WebKit-specific attributes. See above (_widget) re this being debatable. - Make mypy aware that .currentFrame() and .frameAt() can return None (stubs regression?). - Make sure the .page() and .page().networkAccessManager() are our subclasses rather than the more generic QtWebKit objects, as we use custom attributes. - Add new type ignores for signals (stubs regression!) 
==== qutebrowser/browser/webkit/webpage.py - Make sure the .networkAccessManager() is our subclass rather than the more generic QtWebKit object, as we use custom attributes. - Replace a cast by a type ignore. The cast didn't work anymore. ==== qutebrowser/browser/webkit/webview.py - Make sure the .page() is our subclass rather than the more generic QtWebKit object, as we use custom attributes. ==== qutebrowser/commands/userscripts.py - Remove now unneeded type ignore for signal. ==== qutebrowser/completion/completer.py - Add a new _completion() getter (which ensures it actually gets the completion view) rather than accessing the .parent() directly (which could be any QObject). ==== qutebrowser/completion/completiondelegate.py - Make sure self.parent() is a CompletionView (no helper method as there is only one instance). - Remove a now-unneeded type ignore for adding QSizes. ==== qutebrowser/completion/completionwidget.py - Add a ._model() getter which ensures that we get a CompletionModel (with custom attributes) rather than Qt's .model() which can be any QAbstractItemModel (or None). - Removed a now-unneeded type ignore for OR-ing flags. ==== qutebrowser/completion/models/completionmodel.py - Remove now unneeded type ignores for signals. - Ignore a complaint about .set_pattern() not being defined. Completion categories don't share any common parent class, so it would be good to introduce a typing.Protocol for this. See #7098. ==== qutebrowser/components/misccommands.py - Removed a now-unneeded type ignore for OR-ing flags. ==== qutebrowser/components/readlinecommands.py - Make sure QApplication.instance() is a QApplication (and not just a QCoreApplication). This includes the former "not None" check. ==== qutebrowser/components/scrollcommands.py - Add basic annotation for "funcs" dict. Could have a callable protocol to specify it needs a count kwarg, see #7098. ==== qutebrowser/config/stylesheet.py - Correctly specify that stylesheet apply to QWidgets, not any QObject. - Ignore an attr-defined for obj.STYLESHEET. Perhaps could somehow teach mypy about this with overloads and protocols (stylesheet for set_register being None => STYLESHEET needs to be defined, otherwise anything goes), but perhaps not worth the troble. See #7098. ==== qutebrowser/keyinput/keyutils.py - Remove some now-unneeded type ignores and add a cast for using a single enum value as flags. Might need to look at this again with Qt 6 support. ==== qutebrowser/keyinput/modeman.py - Add a FIXME for using a TypedDict, see comments for hints.py above. ==== qutebrowser/mainwindow/mainwindow.py - Remove now-unneeded type ignores for calling with OR-ed flags. - Improve where we cast from WindowType to WindowFlags, no int needed - Use new .tab_bar() getter, see below. ==== qutebrowser/mainwindow/prompt.py - Remove now-unneeded type ignores for calling with OR-ed flags. ==== qutebrowser/mainwindow/statusbar/bar.py - Adjust type ignores around @pyqtProperty. The fact one is still needed seems like a stub regression. ==== qutebrowser/mainwindow/statusbar/command.py - Fix type for setText() override (from QLineEdit): text can be None (QString in C++). ==== qutebrowser/mainwindow/statusbar/url.py - Adjust type ignores around @pyqtProperty. The fact one is still needed seems like a stub regression. ==== qutebrowser/mainwindow/tabbedbrowser.py - Specify that TabDeque manages browser tabs, not any QWidgets. It accesses AbstractTab-specific attributes. - Make sure that the .tabBar() we get is a tabwidget.TabBar, as we access .maybe_hide. 
- Fix the annotations for stored marks: Scroll positions are a QPoint, not int. - Add _current_tab() and _tab_by_idx() wrappers for .currentWidget() and .widget(), which ensures that the return values are valid AbstractTabs (or None for _tab_by_idx). This is needed because we access AbstractTab-specific attributes. - For some places, where the tab can be None, continue using .currentTab() but add asserts. - Remove some now-unneeded [unreachable] ignores, as mypy knows about the None possibility now. ==== qutebrowser/mainwindow/tabwidget.py - Add new tab_bar() and _tab_by_idx() helpers which check that the .tabBar() and .widget() are of type TabBar and AbstractTab, respectively. - Add additional assertions where we expect ._tab_by_idx() to never be None. - Remove dead code in get_tab_fields for handling a None y scroll position. I was unable to find any place in the code where this could be set to None. - Remove some now-unneeded type ignores and casts, as mypy now knows that _type_by_idx() could be None. - Work around a strange instance where mypy complains about not being able to find the type of TabBar.drag_in_progress from TabWidget._toggle_visibility, despite it clearly being shown as a bool *inside* that class without any annotation. - Add a ._tab_widget() getter in TabBar which ensures that the .parent() is in fact a TabWidget. ==== qutebrowser/misc/crashsignal.py - Remove now unneeded type ignores for signals. ==== qutebrowser/misc/editor.py - Remove now unneeded type ignores for signals. ==== qutebrowser/misc/ipc.py - Remove now unneeded type ignores for signals. - Add new type ignores for .error() which is both a signal and a getter (stub regression?). Won't be relevant for Qt 6 anymore, as the signal was renamed to errorOccurred in 5.15. ==== qutebrowser/misc/objects.py - Make sure mypy knows that objects.app is our custom Application (with custom attributes) rather than any QApplication. ==== qutebrowser/utils/objreg.py - Ignore attr-defined for .win_id attributes. Maybe could add a typing.Protocol, but ideally, the whole objreg stuff should die one day anyways. ==== tests/unit/completion/test_completer.py - Make CompletionWidgetStub inherit from CompletionView so that it passes the new isinstance() asserts in completer.py (see above).
117,330
0
39
35
10
320,759
11
qutebrowser
9
qutebrowser/browser/webkit/webkittab.py
Python
4
{ "docstring": "Make sure we emit an appropriate status when loading finished.\n\n While Qt has a bool \"ok\" attribute for loadFinished, it always is True\n when using error pages... See\n https://github.com/qutebrowser/qutebrowser/issues/84\n ", "language": "en", "n_whitespaces": 57, "n_words": 29, "vocab_size": 28 }
https://github.com/qutebrowser/qutebrowser.git
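The qutebrowser commit message above keeps applying one pattern: wrap a generic Qt getter such as .parent() or .model() in a small helper that checks for the concrete subclass before returning it. A minimal, framework-free sketch of that idea (hypothetical Completer/CompletionView names standing in for the real qutebrowser classes) could look like:

class CompletionView:
    """Stands in for a widget subclass that exposes extra attributes."""

    def pattern(self) -> str:
        return "example"


class Completer:
    def __init__(self, parent: object) -> None:
        self._parent = parent

    def _completion(self) -> CompletionView:
        # Narrow the loosely typed parent to the concrete subclass we rely on,
        # so both a type checker and a runtime assert catch mismatches early.
        completion = self._parent
        assert isinstance(completion, CompletionView), completion
        return completion


completer = Completer(CompletionView())
print(completer._completion().pattern())  # -> "example"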
2
get_package
def get_package(package): # type: (Package) -> types.ModuleType resolved = resolve(package) if wrap_spec(resolved).submodule_search_locations is None: raise TypeError(f'{package!r} is not a package') return resolved
8198943edd73a363c266633e1aa5b2a9e9c9f526
11
_common.py
58
add python 3.10.4 for windows
55,184
0
44
30
20
218,182
22
XX-Net
7
python3.10.4/Lib/importlib/_common.py
Python
5
{ "docstring": "Take a package name or module object and return the module.\n\n Raise an exception if the resolved module is not a package.\n ", "language": "en", "n_whitespaces": 28, "n_words": 22, "vocab_size": 19 }
https://github.com/XX-net/XX-Net.git
6
get_content_charset
def get_content_charset(self, failobj=None): missing = object() charset = self.get_param('charset', missing) if charset is missing: return failobj if isinstance(charset, tuple): # RFC 2231 encoded, so decode it, and it better end up as ascii. pcharset = charset[0] or 'us-ascii' try: # LookupError will be raised if the charset isn't known to # Python. UnicodeError will be raised if the encoded text # contains a character not in the charset. as_bytes = charset[2].encode('raw-unicode-escape') charset = str(as_bytes, pcharset) except (LookupError, UnicodeError): charset = charset[2] # charset characters must be in us-ascii range try: charset.encode('us-ascii') except UnicodeError: return failobj # RFC 2046, $4.1.2 says charsets are not case sensitive return charset.lower()
8198943edd73a363c266633e1aa5b2a9e9c9f526
13
message.py
175
add python 3.10.4 for windows
57,095
0
345
101
75
223,835
107
XX-Net
16
python3.10.4/Lib/email/message.py
Python
17
{ "docstring": "Return the charset parameter of the Content-Type header.\n\n The returned string is always coerced to lower case. If there is no\n Content-Type header, or if that header has no charset parameter,\n failobj is returned.\n ", "language": "en", "n_whitespaces": 63, "n_words": 34, "vocab_size": 28 }
https://github.com/XX-net/XX-Net.git
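For reference, a short standard-library sketch of the charset lookup documented above; the printed values are what CPython's email package is expected to return:

from email.message import EmailMessage

msg = EmailMessage()
msg.set_content("hello", charset="utf-8")  # sets Content-Type: text/plain; charset="utf-8"

# The charset parameter is coerced to lower case; when the header or the
# parameter is missing, the supplied fallback is returned instead.
print(msg.get_content_charset())                    # -> "utf-8"
print(EmailMessage().get_content_charset("ascii"))  # -> "ascii"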
1
get_fws
def get_fws(value): newvalue = value.lstrip() fws = WhiteSpaceTerminal(value[:len(value)-len(newvalue)], 'fws') return fws, newvalue
8198943edd73a363c266633e1aa5b2a9e9c9f526
13
_header_value_parser.py
64
add python 3.10.4 for windows
56,997
0
24
37
10
223,601
12
XX-Net
7
python3.10.4/Lib/email/_header_value_parser.py
Python
4
{ "docstring": "FWS = 1*WSP\n\n This isn't the RFC definition. We're using fws to represent tokens where\n folding can be done, but when we are parsing the *un*folding has already\n been done so we don't need to watch out for CRLF.\n\n ", "language": "en", "n_whitespaces": 52, "n_words": 39, "vocab_size": 36 }
https://github.com/XX-net/XX-Net.git
3
_determine_base_url
def _determine_base_url(document, page_url): # type: (HTMLElement, str) -> str for base in document.findall(".//base"): href = base.get("href") if href is not None: return href return page_url
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
11
collector.py
63
upd; format
12,264
0
62
36
22
60,726
25
transferlearning
7
.venv/lib/python3.8/site-packages/pip/_internal/index/collector.py
Python
6
{ "docstring": "Determine the HTML document's base URL.\n\n This looks for a ``<base>`` tag in the HTML document. If present, its href\n attribute denotes the base URL of anchor tags in the document. If there is\n no such tag (or if it does not have a valid href attribute), the HTML\n file's URL is used as the base URL.\n\n :param document: An HTML document representation. The current\n implementation expects the result of ``html5lib.parse()``.\n :param page_url: The URL of the HTML document.\n ", "language": "en", "n_whitespaces": 107, "n_words": 79, "vocab_size": 51 }
https://github.com/jindongwang/transferlearning.git
1
binary_accuracy
def binary_accuracy(y_true, y_pred, threshold=0.5): y_pred = tf.convert_to_tensor(y_pred) threshold = tf.cast(threshold, y_pred.dtype) y_pred = tf.cast(y_pred > threshold, y_pred.dtype) return backend.mean(tf.equal(y_true, y_pred), axis=-1) @keras_export('keras.metrics.categorical_accuracy') @tf.__internal__.dispatch.add_dispatch_support
8bb1b365ca6bb21b32a1ee1654eecb02570970ac
@keras_export('keras.metrics.categorical_accuracy') @tf.__internal__.dispatch.add_dispatch_support
9
metrics.py
123
reverting binary accuracy to original
79,764
1
26
67
19
268,903
23
keras
16
keras/metrics/metrics.py
Python
5
{ "docstring": "Calculates how often predictions match binary labels.\n\n Standalone usage:\n >>> y_true = [[1], [1], [0], [0]]\n >>> y_pred = [[1], [1], [0], [0]]\n >>> m = tf.keras.metrics.binary_accuracy(y_true, y_pred)\n >>> assert m.shape == (4,)\n >>> m.numpy()\n array([1., 1., 1., 1.], dtype=float32)\n\n Args:\n y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.\n y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.\n threshold: (Optional) Float representing the threshold for deciding whether\n prediction values are 1 or 0.\n\n Returns:\n Binary accuracy values. shape = `[batch_size, d0, .. dN-1]`\n ", "language": "en", "n_whitespaces": 113, "n_words": 86, "vocab_size": 61 }
https://github.com/keras-team/keras.git
1
activate
def activate(self) -> str: load_kube_config_from_dict( config_dict=self.config, context=self.context, ) return self.current_context()
8f3ffd09dc47bfd2af6a635cc04c640febffd519
9
kubernetes.py
48
add test coerage for get_api_client and activate
11,603
0
60
29
10
56,999
10
prefect
8
src/prefect/blocks/kubernetes.py
Python
11
{ "docstring": "\n Convenience method for activating the k8s config stored in an instance of this block\n\n Returns current_context for sanity check\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 18 }
https://github.com/PrefectHQ/prefect.git
17
style_doc_files
def style_doc_files(*files, max_len=119, check_only=False): changed = [] black_errors = [] for file in files: # Treat folders if os.path.isdir(file): files = [os.path.join(file, f) for f in os.listdir(file)] files = [f for f in files if os.path.isdir(f) or f.endswith(".mdx") or f.endswith(".py")] changed += style_doc_files(*files, max_len=max_len, check_only=check_only) # Treat mdx elif file.endswith(".mdx"): try: diff, black_error = style_mdx_file(file, max_len=max_len, check_only=check_only) if diff: changed.append(file) if len(black_error) > 0: black_errors.append( f"There was a problem while formatting an example in {file} with black:\m{black_error}" ) except Exception: print(f"There is a problem in {file}.") raise # Treat python files elif file.endswith(".py"): try: diff, black_error = style_file_docstrings(file, max_len=max_len, check_only=check_only) if diff: changed.append(file) if len(black_error) > 0: black_errors.append( f"There was a problem while formatting an example in {file} with black:\m{black_error}" ) except Exception: print(f"There is a problem in {file}.") raise else: warnings.warn(f"Ignoring {file} because it's not a py or an mdx file or a folder.") if len(black_errors) > 0: black_message = "\n\n".join(black_errors) raise ValueError( "Some code examples can't be interpreted by black, which means they aren't regular python:\n\n" + black_message + "\n\nMake sure to fix the corresponding docstring or doc file, or remove the py/python after ``` if it " + "was not supposed to be a Python code sample." ) return changed
fb5ed62c102c0323486b89805e1888495de3db15
18
style_doc.py
474
Convert documentation to the new front (#271) * Main conversion * Doc styling * Style * New front deploy * Fixes * Fixes * Fix new docstrings * Style
121,025
0
733
264
110
337,312
203
accelerate
26
utils/style_doc.py
Python
43
{ "docstring": "\n Applies doc styling or checks everything is correct in a list of files.\n\n Args:\n files (several `str` or `os.PathLike`): The files to treat.\n max_len (`int`): The maximum number of characters per line.\n check_only (`bool`, *optional*, defaults to `False`):\n Whether to restyle file or just check if they should be restyled.\n\n Returns:\n List[`str`]: The list of files changed or that should be restyled.\n ", "language": "en", "n_whitespaces": 114, "n_words": 62, "vocab_size": 47 }
https://github.com/huggingface/accelerate.git
2
assuming
def assuming(*assumptions): old_global_assumptions = global_assumptions.copy() global_assumptions.update(assumptions) try: yield finally: global_assumptions.clear() global_assumptions.update(old_global_assumptions)
498015021131af4dbb07eb110e5badaba8250c7b
10
assume.py
67
Updated import locations
47,527
0
47
36
11
196,027
11
sympy
7
sympy/assumptions/assume.py
Python
8
{ "docstring": "\n Context manager for assumptions.\n\n Examples\n ========\n\n >>> from sympy import assuming, Q, ask\n >>> from sympy.abc import x, y\n >>> print(ask(Q.integer(x + y)))\n None\n >>> with assuming(Q.integer(x), Q.integer(y)):\n ... print(ask(Q.integer(x + y)))\n True\n ", "language": "en", "n_whitespaces": 71, "n_words": 33, "vocab_size": 25 }
https://github.com/sympy/sympy.git
10
update
def update(self, paddle, brickwall): self._xLoc += self.__xVel self._yLoc += self.__yVel # left screen wall bounce if self._xLoc <= self._radius: self.__xVel *= -1 # right screen wall bounce elif self._xLoc >= self.__width - self._radius: self.__xVel *= -1 # top wall bounce if self._yLoc <= self._radius: self.__yVel *= -1 # bottom drop out elif self._yLoc >= self.__width - self._radius: return True # for bouncing off the bricks. if brickwall.collide(self): self.__yVel *= -1 # collision detection between ball and paddle paddleY = paddle._yLoc paddleW = paddle._width paddleH = paddle._height paddleX = paddle._xLoc ballX = self._xLoc ballY = self._yLoc if ((ballX + self._radius) >= paddleX and ballX <= (paddleX + paddleW)) and ( (ballY + self._radius) >= paddleY and ballY <= (paddleY + paddleH) ): self.__yVel *= -1 return False
f0af0c43340763724f139fa68aa1e5a9ffe458b4
12
brickout-game.py
296
refactor: clean code Signed-off-by: slowy07 <slowy.arfy@gmail.com>
4,365
0
364
186
64
22,585
126
Python
19
brickout-game/brickout-game.py
Python
24
{ "docstring": "\n moves the ball at the screen.\n contains some collision detection.\n \n Simple class for representing a paddle\n", "language": "en", "n_whitespaces": 41, "n_words": 16, "vocab_size": 15 }
https://github.com/geekcomputers/Python.git
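The paddle check inside update() above is a plain bounding-box overlap test; factored into a standalone predicate (a hypothetical helper, not part of the game file) it reads:

def ball_hits_paddle(ball_x, ball_y, radius, pad_x, pad_y, pad_w, pad_h):
    # Same condition as in update(): the ball's right/bottom edge must reach
    # the paddle while its centre is still within the paddle's extent.
    return (ball_x + radius >= pad_x and ball_x <= pad_x + pad_w
            and ball_y + radius >= pad_y and ball_y <= pad_y + pad_h)


print(ball_hits_paddle(50, 95, 5, 40, 100, 30, 10))  # True: ball touches the paddle
print(ball_hits_paddle(10, 10, 5, 40, 100, 30, 10))  # False: nowhere near it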
1
get_metadata_distribution
def get_metadata_distribution(self) -> BaseDistribution: assert self.req.local_file_path, "Set as part of preparation during download" assert self.req.name, "Wheels are never unnamed" wheel = FilesystemWheel(self.req.local_file_path) return get_wheel_distribution(wheel, canonicalize_name(self.req.name))
f3166e673fe8d40277b804d35d77dcdb760fc3b3
11
wheel.py
79
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
3,125
0
60
48
24
19,880
25
pipenv
10
pipenv/patched/notpip/_internal/distributions/wheel.py
Python
9
{ "docstring": "Loads the metadata from the wheel file into memory and returns a\n Distribution that uses it, not relying on the wheel file or\n requirement.\n ", "language": "en", "n_whitespaces": 45, "n_words": 24, "vocab_size": 20 }
https://github.com/pypa/pipenv.git
4
no_batch_dim_reference_rnn_gru
def no_batch_dim_reference_rnn_gru(m, p, *args, **kwargs): if len(args) == 1: inp, = args h = None elif len(args) == 2: inp, h = args h = h.unsqueeze(1) batch_dim = 0 if kwargs['batch_first'] else 1 kwargs.pop('batch_first') inp = inp.unsqueeze(batch_dim) single_batch_input_args = (inp, h) with freeze_rng_state(): output = m(*single_batch_input_args, **kwargs) return (output[0].squeeze(batch_dim), output[1].squeeze(1))
6eba936082a641be8ece156f70c0f5c435f7a7aa
11
common_modules.py
192
[rnn/gru] no batch dim (#70442) Summary: Fixes https://github.com/pytorch/pytorch/issues/60585 TODO: * [x] Doc updates Pull Request resolved: https://github.com/pytorch/pytorch/pull/70442 Reviewed By: zou3519 Differential Revision: D33460427 Pulled By: jbschlosser fbshipit-source-id: c64d9624c305d90570c79d11a28557f9ec667b27
21,518
0
116
118
36
102,398
50
pytorch
15
torch/testing/_internal/common_modules.py
Python
14
{ "docstring": "Reference function for RNN and GRU supporting no batch dimensions.\n\n Unbatched inputs are unsqueezed to form a\n single batch input before passing them to the module.\n The output is squeezed to compare with the\n output of unbatched input to the module.\n ", "language": "en", "n_whitespaces": 56, "n_words": 41, "vocab_size": 32 }
https://github.com/pytorch/pytorch.git
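The unsqueeze, run, squeeze pattern this reference helper relies on can be reproduced with a stock GRU; a small sketch assuming the default batch_first=False layout:

import torch

gru = torch.nn.GRU(input_size=4, hidden_size=3)

seq = torch.randn(5, 4)        # unbatched input: (seq_len, features)
batched = seq.unsqueeze(1)     # insert a batch dimension of size 1

out, hidden = gru(batched)
print(out.squeeze(1).shape)     # torch.Size([5, 3]) - compare against an unbatched call
print(hidden.squeeze(1).shape)  # torch.Size([1, 3])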
3
get_years
def get_years(): year_list = frappe.db.sql_list( ) if not year_list: year_list = [getdate().year] return "\n".join(str(year) for year in year_list)
494bd9ef78313436f0424b918f200dab8fc7c20b
12
provident_fund_deductions.py
72
style: format code with black
14,458
0
12
41
16
67,257
18
erpnext
9
erpnext/regional/report/provident_fund_deductions/provident_fund_deductions.py
Python
7
{ "docstring": "select distinct YEAR(end_date) from `tabSalary Slip` ORDER BY YEAR(end_date) DESC", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
https://github.com/frappe/erpnext.git
2
rmul
def rmul(*args): rv = args[0] for i in range(1, len(args)): rv = args[i]*rv return rv
498015021131af4dbb07eb110e5badaba8250c7b
10
permutations.py
58
Updated import locations
47,659
0
54
36
12
196,159
15
sympy
6
sympy/combinatorics/permutations.py
Python
5
{ "docstring": "\n Return product of Permutations [a, b, c, ...] as the Permutation whose\n ith value is a(b(c(i))).\n\n a, b, c, ... can be Permutation objects or tuples.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Permutation\n\n >>> a, b = [1, 0, 2], [0, 2, 1]\n >>> a = Permutation(a); b = Permutation(b)\n >>> list(Permutation.rmul(a, b))\n [1, 2, 0]\n >>> [a(b(i)) for i in range(3)]\n [1, 2, 0]\n\n This handles the operands in reverse order compared to the ``*`` operator:\n\n >>> a = Permutation(a); b = Permutation(b)\n >>> list(a*b)\n [2, 0, 1]\n >>> [b(a(i)) for i in range(3)]\n [2, 0, 1]\n\n Notes\n =====\n\n All items in the sequence will be parsed by Permutation as\n necessary as long as the first item is a Permutation:\n\n >>> Permutation.rmul(a, [0, 2, 1]) == Permutation.rmul(a, b)\n True\n\n The reverse order of arguments will raise a TypeError.\n\n ", "language": "en", "n_whitespaces": 322, "n_words": 139, "vocab_size": 82 }
https://github.com/sympy/sympy.git
19
generate
def generate(self) -> dict[str, str]: primary = self.primary secondary = self.secondary or primary warning = self.warning or primary error = self.error or secondary success = self.success or secondary accent = self.accent or primary dark = self._dark luminosity_spread = self._luminosity_spread text_alpha = self._text_alpha if dark: background = self.background or Color.parse(DEFAULT_DARK_BACKGROUND) surface = self.surface or Color.parse(DEFAULT_DARK_SURFACE) else: background = self.background or Color.parse(DEFAULT_LIGHT_BACKGROUND) surface = self.surface or Color.parse(DEFAULT_LIGHT_SURFACE) if self.panel is None: panel = surface.blend(primary, luminosity_spread) else: panel = self.panel colors: dict[str, str] = {}
49764a3ec7e9525530e25465be0e1b0c7bffaf6c
12
design.py
248
improved color harmony
44,480
0
253
387
45
184,099
82
textual
27
src/textual/design.py
Python
72
{ "docstring": "Generate a mapping of color name on to a CSS color.\n\n Args:\n dark (bool, optional): Enable dark mode. Defaults to False.\n luminosity_spread (float, optional): Amount of luminosity to subtract and add to generate\n shades. Defaults to 0.2.\n text_alpha (float, optional): Alpha value for text. Defaults to 0.9.\n\n Returns:\n dict[str, str]: A mapping of color name on to a CSS-style encoded color\n\n ", "language": "en", "n_whitespaces": 141, "n_words": 61, "vocab_size": 40 }
https://github.com/Textualize/textual.git
3
cls_token
def cls_token(self) -> str: if self._cls_token is None: if self.verbose: logger.error("Using cls_token, but it is not set yet.") return None return str(self._cls_token)
3eed5530ec74bb60ad9f8f612717d0f6ccf820f2
12
tokenization_utils_base.py
61
Fix properties of unset special tokens in non verbose mode (#17797) Co-authored-by: SaulLu <55560583+SaulLu@users.noreply.github.com>
5,765
0
80
35
19
31,490
22
transformers
7
src/transformers/tokenization_utils_base.py
Python
10
{ "docstring": "\n `str`: Classification token, to extract a summary of an input sequence leveraging self-attention along the full\n depth of the model. Log an error if used while not having been set.\n ", "language": "en", "n_whitespaces": 52, "n_words": 30, "vocab_size": 27 }
https://github.com/huggingface/transformers.git
7
eye
def eye(N, chunks="auto", M=None, k=0, dtype=float): eye = {} if M is None: M = N if dtype is None: dtype = float if not isinstance(chunks, (int, str)): raise ValueError("chunks must be an int or string") vchunks, hchunks = normalize_chunks(chunks, shape=(N, M), dtype=dtype) chunks = vchunks[0] token = tokenize(N, chunks, M, k, dtype) name_eye = "eye-" + token for i, vchunk in enumerate(vchunks): for j, hchunk in enumerate(hchunks): if (j - i - 1) * chunks <= k <= (j - i + 1) * chunks: eye[name_eye, i, j] = ( np.eye, vchunk, hchunk, k - (j - i) * chunks, dtype, ) else: eye[name_eye, i, j] = (np.zeros, (vchunk, hchunk), dtype) return Array(eye, name_eye, shape=(N, M), chunks=(chunks, chunks), dtype=dtype) @derived_from(np)
e25284dced9749f02bd5d8c80b6225153aa282d8
@derived_from(np)
17
creation.py
342
Fix eye inconsistency with NumPy for dtype=None (#8669) (#8685)
36,471
1
343
230
80
155,800
121
dask
27
dask/array/creation.py
Python
25
{ "docstring": "\n Return a 2-D Array with ones on the diagonal and zeros elsewhere.\n\n Parameters\n ----------\n N : int\n Number of rows in the output.\n chunks : int, str\n How to chunk the array. Must be one of the following forms:\n\n - A blocksize like 1000.\n - A size in bytes, like \"100 MiB\" which will choose a uniform\n block-like shape\n - The word \"auto\" which acts like the above, but uses a configuration\n value ``array.chunk-size`` for the chunk size\n M : int, optional\n Number of columns in the output. If None, defaults to `N`.\n k : int, optional\n Index of the diagonal: 0 (the default) refers to the main diagonal,\n a positive value refers to an upper diagonal, and a negative value\n to a lower diagonal.\n dtype : data-type, optional\n Data-type of the returned array.\n\n Returns\n -------\n I : Array of shape (N,M)\n An array where all elements are equal to zero, except for the `k`-th\n diagonal, whose values are equal to one.\n ", "language": "en", "n_whitespaces": 295, "n_words": 162, "vocab_size": 103 }
https://github.com/dask/dask.git
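A brief usage sketch of the dask.array eye documented above; the chunk layout and the computed sum follow directly from the parameters shown:

import dask.array as da

# Lazily build a 1000x1000 identity matrix in 250x250 blocks; nothing is
# materialised until .compute() is called.
identity = da.eye(1000, chunks=250)
print(identity.chunks)           # ((250, 250, 250, 250), (250, 250, 250, 250))
print(identity.sum().compute())  # 1000.0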
1
test_null_annotation
def test_null_annotation(self): book = Book.objects.annotate( no_value=Value(None, output_field=IntegerField()) ).first() self.assertIsNone(book.no_value)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
16
tests.py
66
Refs #33476 -- Reformatted code with Black.
49,839
0
48
39
9
200,995
9
django
12
tests/annotations/tests.py
Python
5
{ "docstring": "\n Annotating None onto a model round-trips\n ", "language": "en", "n_whitespaces": 21, "n_words": 6, "vocab_size": 6 }
https://github.com/django/django.git
5
process
def process(self) -> None: logger.info("[CREATE ALIGNMENTS FROM FACES]") # Tidy up cli output skip_count = 0 d_align = {} for filename, meta in tqdm(read_image_meta_batch(self._filelist), desc="Generating Alignments", total=len(self._filelist), leave=False): if "itxt" not in meta or "alignments" not in meta["itxt"]: logger.verbose("skipping invalid file: '%s'", filename) skip_count += 1 continue align_fname = self._get_alignments_filename(meta["itxt"]["source"]) source_name, f_idx, alignment = self._extract_alignment(meta) full_info = (f_idx, alignment, filename, meta["itxt"]["source"]) d_align.setdefault(align_fname, {}).setdefault(source_name, []).append(full_info) alignments = self._sort_alignments(d_align) self._save_alignments(alignments) if skip_count > 1: logger.warning("%s of %s files skipped that do not contain valid alignment data", skip_count, len(self._filelist)) logger.warning("Run the process in verbose mode to see which files were skipped")
6437cd7ab0d6f18cdca0172ba281fd71967b86ac
14
jobs.py
306
alignments tool - Add from-faces job - Allows user to regenerate alignments file(s) from a folder of extracted faces
20,138
0
405
184
81
100,680
98
faceswap
29
tools/alignments/jobs.py
Python
23
{ "docstring": " Run the job to read faces from a folder to create alignments file(s). ", "language": "en", "n_whitespaces": 14, "n_words": 13, "vocab_size": 12 }
https://github.com/deepfakes/faceswap.git
1
method
def method(self): # type: () -> str raise NotImplementedError('Ansible has no built-in doas become plugin.')
24d91f552cad2a485f286f3c34cbba2005599ab4
8
become.py
24
ansible-test - Add support for more remotes.
78,936
0
30
11
15
267,516
15
ansible
3
test/lib/ansible_test/_internal/become.py
Python
2
{ "docstring": "The name of the Ansible become plugin that is equivalent to this.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
https://github.com/ansible/ansible.git
1
test_disposition_none
def test_disposition_none(self) -> None: channel = self._req(None) headers = channel.headers self.assertEqual( headers.getRawHeaders(b"Content-Type"), [self.test_image.content_type] ) self.assertEqual(headers.getRawHeaders(b"Content-Disposition"), None)
32c828d0f760492711a98b11376e229d795fd1b3
10
test_media_storage.py
90
Add type hints to `tests/rest`. (#12208) Co-authored-by: Patrick Cloke <clokep@users.noreply.github.com>
71,709
0
69
55
15
247,516
16
synapse
9
tests/rest/media/v1/test_media_storage.py
Python
11
{ "docstring": "\n If there is no filename, one isn't passed on in the Content-Disposition\n of the request.\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 14 }
https://github.com/matrix-org/synapse.git
2
is_on
def is_on(self) -> bool: # Note: wemo.get_standby_state is a @property. return super().is_on and self.wemo.get_standby_state == StandbyState.ON
cf5e21a996818d4273cb107f1de5c91ac69ab4e9
9
binary_sensor.py
42
Use properties of wemo Insight device (#72316)
100,041
0
37
24
16
301,193
16
core
8
homeassistant/components/wemo/binary_sensor.py
Python
3
{ "docstring": "Return true device connected to the Insight Switch is on.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/home-assistant/core.git
1
CircularUnitaryEnsemble
def CircularUnitaryEnsemble(sym, dim): sym, dim = _symbol_converter(sym), _sympify(dim) model = CircularUnitaryEnsembleModel(sym, dim) rmp = RandomMatrixPSpace(sym, model=model) return RandomMatrixSymbol(sym, dim, dim, pspace=rmp)
24f1e7730119fe958cc8e28411f790c9a5ec04eb
9
random_matrix_models.py
80
Fix various typos Found via `codespell -q 3 -L aboves,aline,ans,aother,arithmetics,assum,atleast,braket,clen,declar,declars,dorder,dum,enew,fo,fro,inout,iself,ist,ket,lamda,lightyear,lightyears,nd,numer,numers,orderd,ot,pring,rcall,rever,ro,ser,siz,splitted,sring,supercedes,te,tht,unequality,upto,vas,versin,whet`
49,657
0
36
52
18
200,451
21
sympy
11
sympy/stats/random_matrix_models.py
Python
5
{ "docstring": "\n Represents Circular Unitary Ensembles.\n\n Examples\n ========\n\n >>> from sympy.stats import CircularUnitaryEnsemble as CUE\n >>> from sympy.stats import joint_eigen_distribution\n >>> C = CUE('U', 1)\n >>> joint_eigen_distribution(C)\n Lambda(t[1], Product(Abs(exp(I*t[_j]) - exp(I*t[_k]))**2, (_j, _k + 1, 1), (_k, 1, 0))/(2*pi))\n\n Note\n ====\n\n As can be seen above in the example, density of CiruclarUnitaryEnsemble\n is not evaluated because the exact definition is based on haar measure of\n unitary group which is not unique.\n ", "language": "en", "n_whitespaces": 112, "n_words": 69, "vocab_size": 57 }
https://github.com/sympy/sympy.git
4
generate_invalid_param_val
def generate_invalid_param_val(constraint, constraints=None): if isinstance(constraint, StrOptions): return f"not {' or '.join(constraint.options)}" if not isinstance(constraint, Interval): raise NotImplementedError # constraint is an interval constraints = [constraint] if constraints is None else constraints return _generate_invalid_param_val_interval(constraint, constraints)
02cbe01e67165d7d38e5e441cfccd6b57b2207b6
12
_param_validation.py
96
FIX Param validation: fix generating invalid param when 2 interval constraints (#23513) Co-authored-by: Julien Jerphanion <git@jjerphan.xyz> Co-authored-by: Guillaume Lemaitre <g.lemaitre58@gmail.com>
76,092
0
66
50
27
260,151
34
scikit-learn
10
sklearn/utils/_param_validation.py
Python
7
{ "docstring": "Return a value that does not satisfy the constraint.\n\n Raises a NotImplementedError if there exists no invalid value for this constraint.\n\n This is only useful for testing purpose.\n\n Parameters\n ----------\n constraint : _Constraint instance\n The constraint to generate a value for.\n\n constraints : list of _Constraint instances or None, default=None\n The list of all constraints for this parameter. If None, the list only\n containing `constraint` is used.\n\n Returns\n -------\n val : object\n A value that does not satisfy the constraint.\n ", "language": "en", "n_whitespaces": 138, "n_words": 80, "vocab_size": 52 }
https://github.com/scikit-learn/scikit-learn.git
1
test_invalidate_cache_by_room_id
def test_invalidate_cache_by_room_id(self): with LoggingContext(name="test") as ctx: # Prime the cache with some values res = self.get_success( self.store.have_seen_events(self.room_id, self.event_ids) ) self.assertEqual(res, set(self.event_ids)) # That should result in a single db query to lookup self.assertEqual(ctx.get_resource_usage().db_txn_count, 1) # Clear the cache with any events associated with the `room_id` self.store.have_seen_event.invalidate((self.room_id,)) with LoggingContext(name="test") as ctx: res = self.get_success( self.store.have_seen_events(self.room_id, self.event_ids) ) self.assertEqual(res, set(self.event_ids)) # Since we cleared the cache, it should result in another db query to lookup self.assertEqual(ctx.get_resource_usage().db_txn_count, 1)
29269d9d3f3419a3d92cdd80dae4a37e2d99a395
13
test_events_worker.py
230
Fix `have_seen_event` cache not being invalidated (#13863) Fix https://github.com/matrix-org/synapse/issues/13856 Fix https://github.com/matrix-org/synapse/issues/13865 > Discovered while trying to make Synapse fast enough for [this MSC2716 test for importing many batches](https://github.com/matrix-org/complement/pull/214#discussion_r741678240). As an example, disabling the `have_seen_event` cache saves 10 seconds for each `/messages` request in that MSC2716 Complement test because we're not making as many federation requests for `/state` (speeding up `have_seen_event` itself is related to https://github.com/matrix-org/synapse/issues/13625) > > But this will also make `/messages` faster in general so we can include it in the [faster `/messages` milestone](https://github.com/matrix-org/synapse/milestone/11). > > *-- https://github.com/matrix-org/synapse/issues/13856* ### The problem `_invalidate_caches_for_event` doesn't run in monolith mode which means we never even tried to clear the `have_seen_event` and other caches. And even in worker mode, it only runs on the workers, not the master (AFAICT). Additionally there was bug with the key being wrong so `_invalidate_caches_for_event` never invalidates the `have_seen_event` cache even when it does run. Because we were using the `@cachedList` wrong, it was putting items in the cache under keys like `((room_id, event_id),)` with a `set` in a `set` (ex. `(('!TnCIJPKzdQdUlIyXdQ:test', '$Iu0eqEBN7qcyF1S9B3oNB3I91v2o5YOgRNPwi_78s-k'),)`) and we we're trying to invalidate with just `(room_id, event_id)` which did nothing.
72,989
0
261
137
44
249,552
75
synapse
17
tests/storage/databases/main/test_events_worker.py
Python
14
{ "docstring": "\n Test to make sure that all events associated with the given `(room_id,)`\n are invalidated in the `have_seen_event` cache.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 17 }
https://github.com/matrix-org/synapse.git
12
_find_alignments
def _find_alignments(self) -> str: fname = self._args.alignments_file frames = self._args.frames_dir if fname and os.path.isfile(fname) and os.path.splitext(fname)[-1].lower() == ".fsa": return fname if fname: logger.error("Not a valid alignments file: '%s'", fname) sys.exit(1) if not frames or not os.path.exists(frames): logger.error("Not a valid frames folder: '%s'. Can't scan for alignments.", frames) sys.exit(1) fname = "alignments.fsa" if os.path.isdir(frames) and os.path.exists(os.path.join(frames, fname)): return fname if os.path.isdir(frames) or os.path.splitext(frames)[-1] not in _video_extensions: logger.error("Can't find a valid alignments file in location: %s", frames) sys.exit(1) fname = f"{os.path.splitext(frames)[0]}_{fname}" if not os.path.exists(fname): logger.error("Can't find a valid alignments file for video: %s", frames) sys.exit(1) return fname
2d312a9db228c025d0bd2ea7a4f747a2c644b5d8
13
alignments.py
360
Minor updates and fixups - Mask Tool - Typing + BiSeNet mask update fix - Alignments Tool - Auto search for alignments file
21,043
0
289
204
50
101,635
95
faceswap
21
tools/alignments/alignments.py
Python
32
{ "docstring": " If an alignments folder is required and hasn't been provided, scan for a file based on\n the video folder.\n\n Exits if an alignments file cannot be located\n\n Returns\n -------\n str\n The full path to an alignments file\n ", "language": "en", "n_whitespaces": 91, "n_words": 37, "vocab_size": 31 }
https://github.com/deepfakes/faceswap.git
3
_has_arrow_table
def _has_arrow_table(self): if not isinstance(self._op, FrameNode): return False return all(p.arrow_table is not None for p in self._partitions.flatten())
027f92a7655ae5b473839b7956ff52bf7879f3cc
11
dataframe.py
63
FIX-#4022: Fixed empty data frame with index (#4910) Signed-off-by: Andrey Pavlenko <andrey.a.pavlenko@gmail.com>
36,150
0
49
39
15
154,793
17
modin
10
modin/experimental/core/execution/native/implementations/hdk_on_native/dataframe/dataframe.py
Python
4
{ "docstring": "\n Return True for materialized frame with Arrow table.\n\n Returns\n -------\n bool\n ", "language": "en", "n_whitespaces": 47, "n_words": 11, "vocab_size": 11 }
https://github.com/modin-project/modin.git
1
message_level_tag
def message_level_tag(message): return MESSAGE_TAGS.get(message.level) @register.simple_tag
1838fbfb1a720e0a286c989dbdea03dfde6af4a5
@register.simple_tag
8
wagtailadmin_tags.py
34
Prevent custom MESSAGE_TAGS settings from leaking into admin styles Fixes a test failure against Django main. In #2552, a fix was applied to ensure that the project-level MESSAGE_TAGS setting was ignored, allowing end-users to customise that setting for their own projects without it leaking into Wagtail admin styles. Unfortunately, the test was flawed (or was broken in a Django regression at some point): in Django <=4.0, MESSAGE_TAGS was not affected by override_settings after the first request, which meant that unless the test was run in isolation, the custom classname that was supposed to flag up the problem never got applied, and the test always succeeded. The change to SVG icons broke the intent of #2552, since it used message.level_tag for the icon's classname (and this picks up MESSAGE_TAGS customisations), but due to the broken test this went unnoticed. https://github.com/django/django/commit/24b316536a7ee4c54a54f632de1852aecb4038c0 fixed the override_settings behaviour, making the test fail as it should have done long ago. Here we adjust the test to not rely on override_settings (so that it does what it's supposed to do on all Django versions), fix a test that gets broken as a side effect (because it's unnecessarily checking message.level_tag), and fixes our SVG-icon-powered message include to bypass the MESSAGE_TAGS setting like the old implementation did. Confusing? Yes.
16,502
1
10
15
5
76,338
5
wagtail
7
wagtail/admin/templatetags/wagtailadmin_tags.py
Python
2
{ "docstring": "\n Return the tag for this message's level as defined in\n django.contrib.messages.constants.DEFAULT_TAGS, ignoring the project-level\n MESSAGE_TAGS setting (which end-users might customise).\n ", "language": "en", "n_whitespaces": 33, "n_words": 20, "vocab_size": 19 }
https://github.com/wagtail/wagtail.git
2
_has_nchw_support
def _has_nchw_support(): explicitly_on_cpu = _is_current_explicit_device("CPU") gpus_available = bool(_get_available_gpus()) return not explicitly_on_cpu and gpus_available # VARIABLE MANIPULATION
84afc5193d38057e2e2badf9c889ea87d80d8fbf
10
backend.py
47
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,226
0
27
24
13
269,606
16
keras
6
keras/backend.py
Python
4
{ "docstring": "Check whether the current scope supports NCHW ops.\n\n TensorFlow does not support NCHW on CPU. Therefore we check if we are not\n explicitly put on\n CPU, and have GPUs available. In this case there will be soft-placing on the\n GPU device.\n\n Returns:\n bool: if the current scope device placement would support nchw\n ", "language": "en", "n_whitespaces": 77, "n_words": 52, "vocab_size": 41 }
https://github.com/keras-team/keras.git
4
write
def write(self, fp, space_around_delimiters=True): if space_around_delimiters: d = " {} ".format(self._delimiters[0]) else: d = self._delimiters[0] if self._defaults: self._write_section(fp, self.default_section, self._defaults.items(), d) for section in self._sections: self._write_section(fp, section, self._sections[section].items(), d)
8198943edd73a363c266633e1aa5b2a9e9c9f526
13
configparser.py
140
add python 3.10.4 for windows
56,462
0
174
91
24
221,659
29
XX-Net
13
python3.10.4/Lib/configparser.py
Python
11
{ "docstring": "Write an .ini-format representation of the configuration state.\n\n If `space_around_delimiters' is True (the default), delimiters\n between keys and values are surrounded by spaces.\n\n Please note that comments in the original configuration file are not\n preserved when writing the configuration back.\n ", "language": "en", "n_whitespaces": 75, "n_words": 40, "vocab_size": 35 }
https://github.com/XX-net/XX-Net.git
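A small standard-library sketch of the write() behaviour described above, including the space-padded delimiters and the flag that disables them:

import configparser
import io

config = configparser.ConfigParser()
config["server"] = {"host": "localhost", "port": "8080"}

buffer = io.StringIO()
config.write(buffer)              # delimiters are padded: "host = localhost"
print(buffer.getvalue())

buffer = io.StringIO()
config.write(buffer, space_around_delimiters=False)  # "host=localhost"
print(buffer.getvalue())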
1
test_get_name_mixed_case
def test_get_name_mixed_case(): result = salt.utils.win_dacl.get_name("adMiniStrAtorS") expected = "Administrators" assert result == expected
3bb43882e727b1d36abe2e501759c9c5e9048ecf
10
test_get_name.py
46
Add tests, migrate some tests to pytest
54,127
0
24
24
9
215,733
12
salt
7
tests/pytests/unit/utils/win_dacl/test_get_name.py
Python
4
{ "docstring": "\n Test get_name when passing an account name with mixed case characters\n ", "language": "en", "n_whitespaces": 18, "n_words": 11, "vocab_size": 11 }
https://github.com/saltstack/salt.git
1
test_non_categorical_value_label_convert_categoricals_error
def test_non_categorical_value_label_convert_categoricals_error(): # Mapping more than one value to the same label is valid for Stata # labels, but can't be read with convert_categoricals=True value_labels = { "repeated_labels": {10: "Ten", 20: "More than ten", 40: "More than ten"} } data = DataFrame( { "repeated_labels": [10, 10, 20, 20, 40, 40], } ) with tm.ensure_clean() as path: data.to_stata(path, value_labels=value_labels) with StataReader(path, convert_categoricals=False) as reader: reader_value_labels = reader.value_labels() assert reader_value_labels == value_labels col = "repeated_labels" repeats = "-" * 80 + "\n" + "\n".join(["More than ten"]) msg = f with pytest.raises(ValueError, match=msg): read_stata(path, convert_categoricals=True) @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) @pytest.mark.parametrize( "dtype", [ pd.BooleanDtype, pd.Int8Dtype, pd.Int16Dtype, pd.Int32Dtype, pd.Int64Dtype, pd.UInt8Dtype, pd.UInt16Dtype, pd.UInt32Dtype, pd.UInt64Dtype, ], )
b48a73ff53a2c3414e38f5adf11f661dd7883cd1
@pytest.mark.parametrize("version", [114, 117, 118, 119, None]) @pytest.mark.parametrize( "dtype", [ pd.BooleanDtype, pd.Int8Dtype, pd.Int16Dtype, pd.Int32Dtype, pd.Int64Dtype, pd.UInt8Dtype, pd.UInt16Dtype, pd.UInt32Dtype, pd.UInt64Dtype, ], )
13
test_stata.py
334
TST: use `with` where possible instead of manual `close` (#48931) Coincidentally fixes some StataReaders being left open in tests.
40,445
1
304
131
90
169,679
112
pandas
33
pandas/tests/io/test_stata.py
Python
29
{ "docstring": "\nValue labels for column {col} are not unique. These cannot be converted to\npandas categoricals.\n\nEither read the file with `convert_categoricals` set to False or use the\nlow level interface in `StataReader` to separately read the values and the\nvalue_labels.\n\nThe repeated labels are:\n{repeats}\n", "language": "en", "n_whitespaces": 38, "n_words": 45, "vocab_size": 38 }
https://github.com/pandas-dev/pandas.git
4
memoize
def memoize(ttl=60, cache_key=None, track_function=False, cache=None): if cache_key and track_function: raise IllegalArgumentError("Can not specify cache_key when track_function is True") cache = cache or get_memoize_cache()
cfce31419d6fa5155e87f0d3faddd713e12210a2
10
common.py
61
Move the IS_TESTING method out of settings
17,296
0
39
41
21
82,019
23
awx
7
awx/main/utils/common.py
Python
6
{ "docstring": "\n Decorator to wrap a function and cache its result.\n ", "language": "en", "n_whitespaces": 16, "n_words": 9, "vocab_size": 9 }
https://github.com/ansible/awx.git
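The awx snippet above is truncated before its body completes, so as a stand-in here is a deliberately simplified TTL-cache decorator that illustrates the same idea; it is not the awx implementation and only handles hashable positional arguments:

import functools
import time


def simple_memoize(ttl=60):
    def decorator(func):
        cache = {}

        @functools.wraps(func)
        def wrapper(*args):
            now = time.monotonic()
            hit = cache.get(args)
            if hit is not None and now - hit[0] < ttl:
                return hit[1]          # cached value is still fresh
            value = func(*args)
            cache[args] = (now, value)
            return value

        return wrapper

    return decorator


@simple_memoize(ttl=5)
def slow_square(x):
    return x * x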
5
mixin_base_ppr_parser
def mixin_base_ppr_parser(parser): gp = add_arg_group(parser, title='Essential') gp.add_argument( '--name', type=str, help=, ) gp.add_argument( '--workspace', type=str, help='The working directory for any IO operations in this object. ' 'If not set, then derive from its parent `workspace`.', ) from jina import __resources_path__ gp.add_argument( '--log-config', type=str, default=os.path.join(__resources_path__, 'logging.default.yml'), help='The YAML config of the logger used in this object.', ) gp.add_argument( '--quiet', action='store_true', default=False, help='If set, then no log will be emitted from this object.', ) gp.add_argument( '--quiet-error', action='store_true', default=False, help='If set, then exception stack information will not be added to the log', ) gp.add_argument( '--workspace-id', type=str, default=random_identity(), help='the UUID for identifying the workspace. When not given a random id will be assigned.' 'Multiple Pod/Deployment/Flow will work under the same workspace if they share the same ' '`workspace-id`.' if _SHOW_ALL_ARGS else argparse.SUPPRESS, ) parser.add_argument( '--extra-search-paths', type=str, default=[], nargs='*', help='Extra search paths to be used when loading modules and finding YAML config files.' if _SHOW_ALL_ARGS else argparse.SUPPRESS, ) gp.add_argument( '--timeout-ctrl', type=int, default=int(os.getenv('JINA_DEFAULT_TIMEOUT_CTRL', '60')), help='The timeout in milliseconds of the control request, -1 for waiting forever', ) parser.add_argument( '--k8s-namespace', type=str, help='Name of the namespace where Kubernetes deployment should be deployed, to be filled by flow name' if _SHOW_ALL_ARGS else argparse.SUPPRESS, ) gp.add_argument( '--k8s-disable-connection-pool', action='store_false', dest='k8s_connection_pool', default=True, help='Defines if connection pooling for replicas should be disabled in K8s. This mechanism implements load balancing between replicas of the same executor. This should be disabled if a service mesh (like istio) is used for load balancing.' if _SHOW_ALL_ARGS else argparse.SUPPRESS, ) gp.add_argument( '--polling', type=str, default=PollingType.ANY.name, help=, )
13edc16d806fb5d77a6849551178ccc75937f25f
13
base.py
461
refactor: rename pod to deployment (#4230) * refactor: rename pod to deployment * style: fix overload and cli autocomplete * fix: undo daemon mistake * refactor: leftover cleanup * fix: more test fixes * fix: more fixes * fix: more fixes * fix: more fixes * fix: more tests * fix: fix more tests * refactor: fix more tests * refactor: more tests fixes * refactor: rename pea to pod * refactor: adjust docs * refactor: complete pea renaming * refactor: more fixes * fix: pea_type in k8s yamls * fix: adjust pod args name * refactor: rename peapods parser folder * fix: da init Co-authored-by: Jina Dev Bot <dev-bot@jina.ai>
1,996
0
700
278
147
10,921
247
jina
27
jina/parsers/orchestrate/base.py
Python
99
{ "docstring": "Mixing in arguments required by pod/deployment/runtime module into the given parser.\n :param parser: the parser instance to which we add arguments\n \nThe name of this object.\n\nThis will be used in the following places:\n- how you refer to this object in Python/YAML/CLI\n- visualization\n- log message header\n- ...\n\nWhen not given, then the default naming strategy will apply.\n \n The polling strategy of the Deployment and its endpoints (when `shards>1`).\n Can be defined for all endpoints of a Deployment or by endpoint.\n Define per Deployment:\n - ANY: only one (whoever is idle) Pod polls the message\n - ALL: all Pods poll the message (like a broadcast)\n Define per Endpoint:\n JSON dict, {endpoint: PollingType}\n {'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}\n \n ", "language": "en", "n_whitespaces": 172, "n_words": 121, "vocab_size": 90 }
https://github.com/jina-ai/jina.git
3
transform
def transform(self, X, copy=True): check_is_fitted(self) X = self._validate_data( X, copy=(copy and self._whiten), dtype=[np.float64, np.float32], reset=False ) if self._whiten: X -= self.mean_ return np.dot(X, self.components_.T)
d14fd82cf423c21ab6d01f7d0430083f9d7026be
12
_fastica.py
110
ENH Preserving dtypes for ICA (#22806) Co-authored-by: Olivier Grisel <olivier.grisel@ensta.org> Co-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com> Co-authored-by: Julien Jerphanion <git@jjerphan.xyz>
75,763
0
88
73
22
259,424
24
scikit-learn
16
sklearn/decomposition/_fastica.py
Python
8
{ "docstring": "Recover the sources from X (apply the unmixing matrix).\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Data to transform, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n copy : bool, default=True\n If False, data passed to fit can be overwritten. Defaults to True.\n\n Returns\n -------\n X_new : ndarray of shape (n_samples, n_components)\n Estimated sources obtained by transforming the data with the\n estimated unmixing matrix.\n ", "language": "en", "n_whitespaces": 183, "n_words": 72, "vocab_size": 52 }
https://github.com/scikit-learn/scikit-learn.git
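Typical scikit-learn usage of the transform shown above: fit the estimator, then recover the estimated sources (random data used purely as a placeholder):

import numpy as np
from sklearn.decomposition import FastICA

rng = np.random.RandomState(0)
mixed = rng.standard_normal((200, 3))        # stand-in for mixed observations

ica = FastICA(n_components=3, random_state=0)
sources = ica.fit(mixed).transform(mixed)    # estimated independent sources
print(sources.shape)                         # (200, 3)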
12
get_freq
def get_freq(self) -> str | None: if not self.is_monotonic or not self.index._is_unique: return None delta = self.deltas[0] ppd = periods_per_day(self._reso) if delta and _is_multiple(delta, ppd): return self._infer_daily_rule() # Business hourly, maybe. 17: one day / 65: one weekend if self.hour_deltas in ([1, 17], [1, 65], [1, 17, 65]): return "BH" # Possibly intraday frequency. Here we use the # original .asi8 values as the modified values # will not work around DST transitions. See #8772 if not self.is_unique_asi8: return None delta = self.deltas_asi8[0] pph = ppd // 24 ppm = pph // 60 pps = ppm // 60 if _is_multiple(delta, pph): # Hours return _maybe_add_count("H", delta / pph) elif _is_multiple(delta, ppm): # Minutes return _maybe_add_count("T", delta / ppm) elif _is_multiple(delta, pps): # Seconds return _maybe_add_count("S", delta / pps) elif _is_multiple(delta, (pps // 1000)): # Milliseconds return _maybe_add_count("L", delta / (pps // 1000)) elif _is_multiple(delta, (pps // 1_000_000)): # Microseconds return _maybe_add_count("U", delta / (pps // 1_000_000)) else: # Nanoseconds return _maybe_add_count("N", delta)
e9350a4affbb424aaecad279f638a0dd1584df68
13
frequencies.py
367
infer_freq handle non-nano (#47126) * infer_freq handle non-nano * remove unused import
39,834
0
487
210
94
166,591
162
pandas
20
pandas/tseries/frequencies.py
Python
35
{ "docstring": "\n Find the appropriate frequency string to describe the inferred\n frequency of self.i8values\n\n Returns\n -------\n str or None\n ", "language": "en", "n_whitespaces": 60, "n_words": 17, "vocab_size": 15 }
https://github.com/pandas-dev/pandas.git
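This machinery backs pandas' public pd.infer_freq; a quick sketch of the observable behaviour on a regular and an irregular index:

import pandas as pd

hourly = pd.date_range("2024-01-01", periods=6, freq="H")
print(pd.infer_freq(hourly))     # "H" (hourly); newer pandas spells it "h"

irregular = pd.DatetimeIndex(["2024-01-01", "2024-01-02", "2024-01-05"])
print(pd.infer_freq(irregular))  # None - no regular spacing detected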
2
guarded_deprecation_warning
def guarded_deprecation_warning(*args, **kwargs): if os.environ.get("SERVE_WARN_V1_DEPRECATIONS", "0") == "1": from ray._private.utils import deprecated return deprecated(*args, **kwargs) else:
f6d19ac7c03b12bbf839824381376e228d0fffad
10
utils.py
75
[Serve] Gate the deprecation warnings behind envvar (#27479)
28,208
0
39
47
16
126,641
16
ray
10
python/ray/serve/_private/utils.py
Python
7
{ "docstring": "Wrapper for deprecation warnings, guarded by a flag.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/ray-project/ray.git
2
current_headings
def current_headings(self): return {v['name']:('#' + v['label']) for v in self.custcols.values()}
9a95d8b0c26bdaea17ea9264ab45e8a81b6422f0
11
create_custom_column.py
57
More CreateNewCustomColumn stuff. - Improved documentation - Check column headings for duplicates - Method to return the current column headings as a dict - Improved exception handling
45,934
0
24
32
10
188,798
10
calibre
5
src/calibre/gui2/preferences/create_custom_column.py
Python
2
{ "docstring": "\n Return the currently defined column headings\n\n Return the column headings including the ones that haven't yet been\n created. It is a dict. The key is the heading, the value is the lookup\n name having that heading.\n ", "language": "en", "n_whitespaces": 72, "n_words": 36, "vocab_size": 25 }
https://github.com/kovidgoyal/calibre.git
8
_generate
def _generate(self, pset, min_, max_, condition, type_=None): if type_ is None: type_ = pset.ret expr = [] height = np.random.randint(min_, max_) stack = [(0, type_)] while len(stack) != 0: depth, type_ = stack.pop() # We've added a type_ parameter to the condition function if condition(height, depth, type_): try: term = np.random.choice(pset.terminals[type_]) except IndexError: _, _, traceback = sys.exc_info() raise IndexError( "The gp.generate function tried to add " "a terminal of type {}, but there is" "none available. {}".format(type_, traceback) ) if inspect.isclass(term): term = term() expr.append(term) else: try: prim = np.random.choice(pset.primitives[type_]) except IndexError: _, _, traceback = sys.exc_info() raise IndexError( "The gp.generate function tried to add " "a primitive of type {}, but there is" "none available. {}".format(type_, traceback) ) expr.append(prim) for arg in reversed(prim.args): stack.append((depth + 1, arg)) return expr
388616b6247ca4ea8de4e2f340d6206aee523541
19
base.py
357
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
43,607
0
683
221
83
181,829
131
tpot
34
tpot/base.py
Python
35
{ "docstring": "Generate a Tree as a list of lists.\n\n The tree is build from the root to the leaves, and it stop growing when\n the condition is fulfilled.\n\n Parameters\n ----------\n pset: PrimitiveSetTyped\n Primitive set from which primitives are selected.\n min_: int\n Minimum height of the produced trees.\n max_: int\n Maximum height of the produced trees.\n condition: function\n The condition is a function that takes two arguments,\n the height of the tree to build and the current\n depth in the tree.\n type_: class\n The type that should return the tree when called, when\n :obj:None (default) no return type is enforced.\n\n Returns\n -------\n individual: list\n A grown tree with leaves at possibly different depths\n depending on the condition function.\n ", "language": "en", "n_whitespaces": 317, "n_words": 116, "vocab_size": 75 }
https://github.com/EpistasisLab/tpot.git
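The condition argument described in the docstring is just a predicate over (height, depth, type_); below are sketches of the classic "full" and "grow" stopping rules (hypothetical helpers written for illustration, not TPOT's own functions):

import random


def full_condition(height, depth, type_=None):
    # "Full" trees: only stop expanding at exactly the sampled height.
    return depth == height


def grow_condition(height, depth, type_=None):
    # "Grow" trees: stop at the sampled height, or earlier at random,
    # which yields trees of varying shapes.
    return depth == height or (depth >= 1 and random.random() < 0.3)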
2
installed_by_distutils
def installed_by_distutils(self) -> bool: info_location = self.info_location if not info_location: return False return pathlib.Path(info_location).is_file()
f3166e673fe8d40277b804d35d77dcdb760fc3b3
9
base.py
52
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
3,148
0
53
30
13
19,917
14
pipenv
7
pipenv/patched/notpip/_internal/metadata/base.py
Python
11
{ "docstring": "Whether this distribution is installed with legacy distutils format.\n\n A distribution installed with \"raw\" distutils not patched by setuptools\n uses one single file at ``info_location`` to store metadata. We need to\n treat this specially on uninstallation.\n ", "language": "en", "n_whitespaces": 64, "n_words": 36, "vocab_size": 30 }
https://github.com/pypa/pipenv.git
13
_view
def _view(arr, dtype=None, type=None): lax_internal._check_user_dtype_supported(dtype, "view") if type is not None: raise NotImplementedError("`type` argument of array.view()") if dtype is None: return arr arr_dtype = _dtype(arr) if arr_dtype == dtype: return arr # bool is implemented as lax:PRED, which is not compatible with lax.bitcast_convert_type. # We work around this by casting bool to uint8. if arr_dtype == bool_: arr = arr.astype(uint8) nbits_in = 8 * arr_dtype.itemsize nbits_out = 8 * np.dtype(dtype).itemsize if nbits_in == nbits_out: if dtype == bool_: return lax.bitcast_convert_type(arr, uint8).astype(dtype) return lax.bitcast_convert_type(arr, dtype) if nbits_out > nbits_in and (shape(arr)[-1] * nbits_in) % nbits_out != 0: raise ValueError("When changing to a larger dtype, its size must be a divisor " "of the total size in bytes of the last axis of the array.") byte_dtypes = {8: uint8, 16: uint16, 32: uint32, 64: uint64} if nbits_in not in byte_dtypes: raise NotImplementedError(f"arr.view() for arr.dtype={arr_dtype}") if nbits_out not in byte_dtypes: raise NotImplementedError(f"arr.view(dtype) for dtype={dtype}") dt_in = byte_dtypes[nbits_in] dt_out = byte_dtypes[nbits_out] arr_bytes = lax.bitcast_convert_type(arr, dt_in) if nbits_in < nbits_out: arr_bytes = arr_bytes.reshape(arr.shape[:-1] + (-1, nbits_out // nbits_in)).astype(dt_out) shifts = expand_dims(arange(0, nbits_out, nbits_in, dtype=dt_out), tuple(range(arr_bytes.ndim - 1))) arr_bytes = (arr_bytes << shifts).sum(-1).astype(dt_out) else: shifts = lax.expand_dims(arange(0, nbits_in, nbits_out, dtype=dt_in), tuple(range(arr_bytes.ndim))) arr_bytes = ((arr_bytes[..., newaxis] >> shifts) & iinfo(dt_out).max).astype(dt_out) arr_bytes = arr_bytes.reshape(arr_bytes.shape[:-2] + (-1,)) if dtype == bool_: return lax.bitcast_convert_type(arr_bytes, uint8).astype(dtype) return lax.bitcast_convert_type(arr_bytes, dtype) ### track unimplemented functions _NOT_IMPLEMENTED_DESC =
e262c72b195d4f6b31d9b45c18a23a53d22be85c
16
lax_numpy.py
632
remove `_check_user_dtype_supported` from public `jax.lax` module
26,669
0
317
391
135
119,709
224
jax
39
jax/_src/numpy/lax_numpy.py
Python
39
{ "docstring": "\n*** This function is not yet implemented by jax.numpy, and will raise NotImplementedError ***\n", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 13 }
https://github.com/google/jax.git
2
_get_offset
def _get_offset(self) -> Dict[CenteringType, np.ndarray]: offset: Dict[CenteringType, np.ndarray] = dict(legacy=np.array([0.0, 0.0])) points: Dict[Literal["face", "head"], Tuple[float, ...]] = dict(head=(0.0, 0.0, -2.3), face=(0.0, -1.5, 4.2)) for key, pnts in points.items(): center = cv2.projectPoints(np.array([pnts]).astype("float32"), self._rotation, self._translation, self._camera_matrix, self._distortion_coefficients)[0].squeeze() logger.trace("center %s: %s", key, center) # type: ignore offset[key] = center - (0.5, 0.5) logger.trace("offset: %s", offset) # type: ignore return offset
a2de4a97985dc62db3b140a924aeac2be733abf8
18
aligned_face.py
258
lib.align.aligned_face updates - Typing - Legacy support for pre-aligned faces - Coverage support for pre-aligned faces - Standardized retrieval of sub-crops
20,610
0
357
190
47
101,189
57
faceswap
30
lib/align/aligned_face.py
Python
22
{ "docstring": " Obtain the offset between the original center of the extracted face to the new center\n of the head in 2D space.\n\n Returns\n -------\n :class:`numpy.ndarray`\n The x, y offset of the new center from the old center.\n ", "language": "en", "n_whitespaces": 83, "n_words": 36, "vocab_size": 24 }
https://github.com/deepfakes/faceswap.git
9
list_fonts
def list_fonts(directory, extensions): extensions = ["." + ext for ext in extensions] if sys.platform == 'win32' and directory == win32FontDirectory(): return [os.path.join(directory, filename) for filename in os.listdir(directory) if os.path.isfile(filename)] else: return [os.path.join(dirpath, filename) # os.walk ignores access errors, unlike Path.glob. for dirpath, _, filenames in os.walk(directory) for filename in filenames if Path(filename).suffix.lower() in extensions]
e8006163923564ea04f745a289e079b80afc6db8
16
font_manager.py
170
skip sub directories when finding fonts on windows Closes #22859 Co-authored-by: Tim Hoffmann <2836374+timhoffm@users.noreply.github.com>
23,132
0
170
108
38
108,279
54
matplotlib
20
lib/matplotlib/font_manager.py
Python
11
{ "docstring": "\n Return a list of all fonts matching any of the extensions, found\n recursively under the directory.\n ", "language": "en", "n_whitespaces": 26, "n_words": 16, "vocab_size": 14 }
https://github.com/matplotlib/matplotlib.git