query
stringlengths
9
60
language
stringclasses
1 value
code
stringlengths
105
25.7k
url
stringlengths
91
217
group by count
python
def count(self): """ Compute count of group, excluding missing values """ ids, _, ngroups = self.grouper.group_info val = self.obj.get_values() mask = (ids != -1) & ~isna(val) ids = ensure_platform_int(ids) minlength = ngroups or 0 out = np.bincount(ids[mask], minlength=minlength) return Series(out, index=self.grouper.result_index, name=self._selection_name, dtype='int64')
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/generic.py#L1183-L1196
group by count
python
def groups_count(self): """Number of all groups (get-only). :getter: Returns number of all groups :type: int """ if self._keyboard_description.contents.ctrls is not None: return self._keyboard_description.contents.ctrls.contents.num_groups else: groups_source = self._groups_source groups_count = 0 while (groups_count < XkbNumKbdGroups and groups_source[groups_count] != None_): groups_count += 1 return groups_count
https://github.com/hcpl/xkbgroup/blob/fcf4709a3c8221e0cdf62c09e5cccda232b0104c/xkbgroup/core.py#L242-L258
group by count
python
def ngroup(self, ascending=True): """ Number each group from 0 to the number of groups - 1. This is the enumerative complement of cumcount. Note that the numbers given to the groups match the order in which the groups would be seen when iterating over the groupby object, not the order they are first observed. .. versionadded:: 0.20.2 Parameters ---------- ascending : bool, default True If False, number in reverse, from number of group - 1 to 0. See Also -------- .cumcount : Number the rows in each group. Examples -------- >>> df = pd.DataFrame({"A": list("aaabba")}) >>> df A 0 a 1 a 2 a 3 b 4 b 5 a >>> df.groupby('A').ngroup() 0 0 1 0 2 0 3 1 4 1 5 0 dtype: int64 >>> df.groupby('A').ngroup(ascending=False) 0 1 1 1 2 1 3 0 4 0 5 1 dtype: int64 >>> df.groupby(["A", [1,1,2,3,2,1]]).ngroup() 0 0 1 0 2 1 3 3 4 2 5 0 dtype: int64 """ with _group_selection_context(self): index = self._selected_obj.index result = Series(self.grouper.group_info[0], index) if not ascending: result = self.ngroups - 1 - result return result
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L1780-L1843
group by count
python
def count(self): """ Compute count of group, excluding missing values """ from pandas.core.dtypes.missing import _isna_ndarraylike as _isna data, _ = self._get_data_to_aggregate() ids, _, ngroups = self.grouper.group_info mask = ids != -1 val = ((mask & ~_isna(np.atleast_2d(blk.get_values()))) for blk in data.blocks) loc = (blk.mgr_locs for blk in data.blocks) counter = partial( lib.count_level_2d, labels=ids, max_bin=ngroups, axis=1) blk = map(make_block, map(counter, val), loc) return self._wrap_agged_blocks(data.items, list(blk))
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/generic.py#L1481-L1497
group by count
python
def count_group(group_id, failures=False, cached=Conf.CACHED): """ Count the results in a group. :param str group_id: the group id :param bool failures: Returns failure count if True :param bool cached: run this against the cache backend :return: the number of tasks/results in a group :rtype: int """ if cached: return count_group_cached(group_id, failures) return Task.get_group_count(group_id, failures)
https://github.com/Koed00/django-q/blob/c84fd11a67c9a47d821786dfcdc189bb258c6f54/django_q/tasks.py#L312-L324
group by count
python
def get_groups_count(self, field=None): ''' Returns 'matches' from group response. If grouping on more than one field, provide the field argument to specify which count you are looking for. ''' field = field if field else self._determine_group_field(field) if 'matches' in self.data['grouped'][field]: return self.data['grouped'][field]['matches'] raise ValueError("group matches not found in response")
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/solrresp.py#L89-L98
group by count
python
def count(self): """ If result is True, then the count will process result set , if result if False, then only use condition to count """ if self._group_by or self._join or self.distinct_field: return self.do_(self.get_query().limit(None).order_by(None).offset(None).alias().count()).scalar() else: return self.do_(self.get_query().with_only_columns([func.count()]).limit(None).order_by(None).offset(None)).scalar()
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/orm/__init__.py#L2661-L2669
group by count
python
def countBy(self, val): """ Counts instances of an object that group by a certain criterion. Pass either a string attribute to count by, or a function that returns the criterion. """ def by(result, key, value): if key not in result: result[key] = 0 result[key] += 1 res = self._group(self.obj, val, by) return self._wrap(res)
https://github.com/serkanyersen/underscore.py/blob/07c25c3f0f789536e4ad47aa315faccc0da9602f/src/underscore.py#L445-L459
group by count
python
def cumcount(self, ascending=True): """ Number each item in each group from 0 to the length of that group - 1. Essentially this is equivalent to >>> self.apply(lambda x: pd.Series(np.arange(len(x)), x.index)) Parameters ---------- ascending : bool, default True If False, number in reverse, from length of group - 1 to 0. See Also -------- .ngroup : Number the groups themselves. Examples -------- >>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']], ... columns=['A']) >>> df A 0 a 1 a 2 a 3 b 4 b 5 a >>> df.groupby('A').cumcount() 0 0 1 1 2 2 3 0 4 1 5 3 dtype: int64 >>> df.groupby('A').cumcount(ascending=False) 0 3 1 2 2 1 3 1 4 0 5 0 dtype: int64 """ with _group_selection_context(self): index = self._selected_obj.index cumcounts = self._cumcount_array(ascending=ascending) return Series(cumcounts, index)
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L1846-L1897
group by count
python
def _cumcount_array(self, ascending=True): """ Parameters ---------- ascending : bool, default True If False, number in reverse, from length of group - 1 to 0. Notes ----- this is currently implementing sort=False (though the default is sort=True) for groupby in general """ ids, _, ngroups = self.grouper.group_info sorter = get_group_index_sorter(ids, ngroups) ids, count = ids[sorter], len(ids) if count == 0: return np.empty(0, dtype=np.int64) run = np.r_[True, ids[:-1] != ids[1:]] rep = np.diff(np.r_[np.nonzero(run)[0], count]) out = (~run).cumsum() if ascending: out -= np.repeat(out[run], rep) else: out = np.repeat(out[np.r_[run[1:], True]], rep) - out rev = np.empty(count, dtype=np.intp) rev[sorter] = np.arange(count, dtype=np.intp) return out[rev].astype(np.int64, copy=False)
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L724-L754
group by count
python
def list_groups(self, **kwargs): """List all groups in organisation. :param int limit: The number of groups to retrieve :param str order: The ordering direction, ascending (asc) or descending (desc) :param str after: Get groups after/starting at given group ID :returns: a list of :py:class:`Group` objects. :rtype: PaginatedResponse """ kwargs = self._verify_sort_options(kwargs) api = self._get_api(iam.DeveloperApi) return PaginatedResponse(api.get_all_groups, lwrap_type=Group, **kwargs)
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/account_management/account_management.py#L257-L268
group by count
python
def count(self, query): """ If query is Select object, this function will try to get count of select """ if self.manual: return self.total if isinstance(query, Select): q = query.with_only_columns([func.count()]).order_by(None).limit(None).offset(None) return do_(q).scalar() return query.count()
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/generic.py#L1967-L1978
group by count
python
def get_groupby_statistic(data): """Calculate value counts and distinct count of a variable (technically a Series). The result is cached by column name in a global variable to avoid recomputing. Parameters ---------- data : Series The data type of the Series. Returns ------- list value count and distinct count """ if data.name is not None and data.name in _VALUE_COUNTS_MEMO: return _VALUE_COUNTS_MEMO[data.name] value_counts_with_nan = data.value_counts(dropna=False) value_counts_without_nan = value_counts_with_nan.reset_index().dropna().set_index('index').iloc[:,0] distinct_count_with_nan = value_counts_with_nan.count() # When the inferred type of the index is just "mixed" probably the types within the series are tuple, dict, list and so on... if value_counts_without_nan.index.inferred_type == "mixed": raise TypeError('Not supported mixed type') result = [value_counts_without_nan, distinct_count_with_nan] if data.name is not None: _VALUE_COUNTS_MEMO[data.name] = result return result
https://github.com/pandas-profiling/pandas-profiling/blob/003d236daee8b7aca39c62708b18d59bced0bc03/pandas_profiling/base.py#L29-L60
group by count
python
def count(self): """ Returns the number of rows after aggregation. """ sql = u'SELECT count() FROM (%s)' % self.as_sql() raw = self._database.raw(sql) return int(raw) if raw else 0
https://github.com/Infinidat/infi.clickhouse_orm/blob/595f2023e334e3925a5c3fbfdd6083a5992a7169/src/infi/clickhouse_orm/query.py#L588-L594
group by count
python
def UNIFAC_groups(self): r'''Dictionary of UNIFAC subgroup: count groups for the original UNIFAC subgroups, as determined by `DDBST's online service <http://www.ddbst.com/unifacga.html>`_. Examples -------- >>> pprint(Chemical('Cumene').UNIFAC_groups) {1: 2, 9: 5, 13: 1} ''' if self.__UNIFAC_groups: return self.__UNIFAC_groups else: load_group_assignments_DDBST() if self.InChI_Key in DDBST_UNIFAC_assignments: self.__UNIFAC_groups = DDBST_UNIFAC_assignments[self.InChI_Key] return self.__UNIFAC_groups else: return None
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/chemical.py#L1352-L1369
group by count
python
def groups(self): """Returns dict {group name -> group labels}.""" self._prep_pandas_groupby() def extract_group_labels(frame): return (frame[0], frame[1].index.values) return self._mergedRDD.map(extract_group_labels).collectAsMap()
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/groupby.py#L119-L126
group by count
python
def groups(self, labels, collect=None): """Group rows by multiple columns, count or aggregate others. Args: ``labels``: list of column names (or indices) to group on ``collect``: a function applied to values in other columns for each group Returns: A Table with each row corresponding to a unique combination of values in the columns specified in ``labels``, where the first columns are those specified in ``labels``, followed by a column of counts for each of the unique values. If ``collect`` is provided, a Table is returned with all original columns, each containing values calculated by first grouping rows according to to values in the ``labels`` column, then applying ``collect`` to each set of grouped values in the other columns. Note: The grouped columns will appear first in the result table. If ``collect`` does not accept arguments with one of the column types, that column will be empty in the resulting table. >>> marbles = Table().with_columns( ... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"), ... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"), ... "Amount", make_array(4, 6, 12, 7, 9, 2), ... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00)) >>> marbles Color | Shape | Amount | Price Red | Round | 4 | 1.3 Green | Rectangular | 6 | 1.3 Blue | Rectangular | 12 | 2 Red | Round | 7 | 1.75 Green | Rectangular | 9 | 1.4 Green | Round | 2 | 1 >>> marbles.groups(["Color", "Shape"]) Color | Shape | count Blue | Rectangular | 1 Green | Rectangular | 2 Green | Round | 1 Red | Round | 2 >>> marbles.groups(["Color", "Shape"], sum) Color | Shape | Amount sum | Price sum Blue | Rectangular | 12 | 2 Green | Rectangular | 15 | 2.7 Green | Round | 2 | 1 Red | Round | 11 | 3.05 """ # Assume that a call to groups with one label is a call to group if not _is_non_string_iterable(labels): return self.group(labels, collect=collect) collect = _zero_on_type_error(collect) columns = [] labels = self._as_labels(labels) for label in labels: if label not in self.labels: raise ValueError("All labels must exist in the table") columns.append(self._get_column(label)) grouped = self.group(list(zip(*columns)), lambda s: s) grouped._columns.popitem(last=False) # Discard the column of tuples # Flatten grouping values and move them to front counts = [len(v) for v in grouped[0]] for label in labels[::-1]: grouped[label] = grouped.apply(_assert_same, label) grouped.move_to_start(label) # Aggregate other values if collect is None: count = 'count' if 'count' not in labels else self._unused_label('count') return grouped.select(labels).with_column(count, counts) else: for label in grouped.labels: if label in labels: continue column = [collect(v) for v in grouped[label]] del grouped[label] grouped[_collected_label(collect, label)] = column return grouped
https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L954-L1032
group by count
python
def group_by_count(iterable: List[Any], count: int, default_value: Any) -> List[List[Any]]: """ Takes a list and groups it into sublists of size ``count``, using ``default_value`` to pad the list at the end if the list is not divisable by ``count``. For example: >>> group_by_count([1, 2, 3, 4, 5, 6, 7], 3, 0) [[1, 2, 3], [4, 5, 6], [7, 0, 0]] This is a short method, but it's complicated and hard to remember as a one-liner, so we just make a function out of it. """ return [list(l) for l in zip_longest(*[iter(iterable)] * count, fillvalue=default_value)]
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/common/util.py#L83-L95
group by count
python
def groupcountdistinctvalues(table, key, value): """Group by the `key` field then count the number of distinct values in the `value` field.""" s1 = cut(table, key, value) s2 = distinct(s1) s3 = aggregate(s2, key, len) return s3
https://github.com/petl-developers/petl/blob/1d33ca055f7e04e0d28a772041c9fd30c8d415d6/petl/transform/reductions.py#L335-L342
group by count
python
def groups(self): """ dict {group name -> group labels} """ if len(self.groupings) == 1: return self.groupings[0].groups else: to_groupby = lzip(*(ping.grouper for ping in self.groupings)) to_groupby = Index(to_groupby) return self.axis.groupby(to_groupby)
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/ops.py#L257-L264
group by count
python
def count(self, *columns): """ Retrieve the "count" result of the query :param columns: The columns to get :type columns: tuple :return: The count :rtype: int """ if not columns and self.distinct_: columns = self.columns if not columns: columns = ["*"] return int(self.aggregate("count", *columns))
https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/query/builder.py#L1244-L1260
group by count
python
def compute_groups_matrix(groups): """Generate matrix which notes factor membership of groups Computes a k-by-g matrix which notes factor membership of groups where: k is the number of variables (factors) g is the number of groups Also returns a g-length list of unique group_names whose positions correspond to the order of groups in the k-by-g matrix Arguments --------- groups : list Group names corresponding to each variable Returns ------- tuple containing group matrix assigning parameters to groups and a list of unique group names """ if not groups: return None num_vars = len(groups) # Get a unique set of the group names unique_group_names = list(OrderedDict.fromkeys(groups)) number_of_groups = len(unique_group_names) indices = dict([(x, i) for (i, x) in enumerate(unique_group_names)]) output = np.zeros((num_vars, number_of_groups), dtype=np.int) for parameter_row, group_membership in enumerate(groups): group_index = indices[group_membership] output[parameter_row, group_index] = 1 return output, unique_group_names
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/util/__init__.py#L248-L286
group by count
python
def get_group(self, t, i): """Get group number.""" try: value = [] if t in _DIGIT and t != '0': value.append(t) t = next(i) if t in _DIGIT: value.append(t) else: i.rewind(1) except StopIteration: pass return ''.join(value) if value else None
https://github.com/facelessuser/backrefs/blob/3b3d60f5d57b02044f880aa29c9c5add0e31a34f/backrefs/_bre_parse.py#L1078-L1092
group by count
python
def total_count(self, seqs, nreport=100, scan_rc=True): """ count the number of matches above the cutoff returns an iterator of lists containing integer counts """ count_table = [counts for counts in self.count(seqs, nreport, scan_rc)] return np.sum(np.array(count_table), 0)
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/scanner.py#L524-L531
group by count
python
def count_group_cached(group_id, failures=False, broker=None): """ Count the results in a group in the cache backend """ if not broker: broker = get_broker() group_list = broker.cache.get('{}:{}:keys'.format(broker.list_key, group_id)) if group_list: if not failures: return len(group_list) failure_count = 0 for task_key in group_list: task = SignedPackage.loads(broker.cache.get(task_key)) if not task['success']: failure_count += 1 return failure_count
https://github.com/Koed00/django-q/blob/c84fd11a67c9a47d821786dfcdc189bb258c6f54/django_q/tasks.py#L327-L342
group by count
python
def compute_group_count(self, pattern): """Compute the number of regexp match groups when the pattern is provided to the :func:`Cardinality.make_pattern()` method. :param pattern: Item regexp pattern (as string). :return: Number of regexp match groups in the cardinality pattern. """ group_count = self.group_count pattern_repeated = 1 if self.is_many(): pattern_repeated = 2 return group_count + pattern_repeated * pattern_group_count(pattern)
https://github.com/jenisys/parse_type/blob/7cad3a67a5ca725cb786da31f656fd473084289f/parse_type/cardinality.py#L67-L78
group by count
python
def count(self, metric_name='count'): """ Convenience function for computing the group sizes (number of rows per group) given a grouped table. Parameters ---------- metric_name : string, default 'count' Name to use for the row count metric Returns ------- aggregated : TableExpr The aggregated table """ metric = self.table.count().name(metric_name) return self.table.aggregate([metric], by=self.by, having=self._having)
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/expr/groupby.py#L269-L285
group by count
python
def totals(iter, keyfunc, sumfunc): """groups items by field described in keyfunc and counts totals using value from sumfunc """ data = sorted(iter, key=keyfunc) res = {} for k, group in groupby(data, keyfunc): res[k] = sum([sumfunc(entry) for entry in group]) return res
https://github.com/projecthamster/hamster/blob/ca5254eff53172796ddafc72226c394ed1858245/src/hamster/lib/stuff.py#L224-L234
group by count
python
def query_admins_by_group_ids(cls, groups_ids=None): """Get count of admins per group.""" assert groups_ids is None or isinstance(groups_ids, list) query = db.session.query( Group.id, func.count(GroupAdmin.id) ).join( GroupAdmin ).group_by( Group.id ) if groups_ids: query = query.filter(Group.id.in_(groups_ids)) return query
https://github.com/inveniosoftware-contrib/invenio-groups/blob/109481d6b02701db00b72223dd4a65e167c589a6/invenio_groups/models.py#L789-L804
group by count
python
def list_groups(self, filtr=None): """ Get the groups the logged in user is a member of. Optionally filter by 'member' or 'maintainer'. Args: filtr (optional[string|None]): ['member'|'maintainer'] Returns: (list[string]): List of group names. Raises: requests.HTTPError on failure. """ self.project_service.set_auth(self._token_project) return self.project_service.list_groups(filtr)
https://github.com/jhuapl-boss/intern/blob/d8fc6df011d8f212c87e6a1fd4cc21cfb5d103ed/intern/remote/boss/remote.py#L217-L233
group by count
python
def count(self, cls, filters, quick=True, sort_by=None, select=None): """ Get the number of results that would be returned in this query """ query = "select count(*) from `%s` %s" % (self.domain.name, self._build_filter_part(cls, filters, sort_by, select)) count = 0 for row in self.domain.select(query): count += int(row['Count']) if quick: return count return count
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/sdb/db/manager/sdbmanager.py#L503-L514
group by count
python
def list_groups( self, name, children_of_group=None, ancestors_of_group=None, descendants_of_group=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Lists the existing groups. Example: >>> from google.cloud import monitoring_v3 >>> >>> client = monitoring_v3.GroupServiceClient() >>> >>> name = client.project_path('[PROJECT]') >>> >>> # Iterate over all results >>> for element in client.list_groups(name): ... # process element ... pass >>> >>> >>> # Alternatively: >>> >>> # Iterate over results one page at a time >>> for page in client.list_groups(name).pages: ... for element in page: ... # process element ... pass Args: name (str): The project whose groups are to be listed. The format is ``"projects/{project_id_or_number}"``. children_of_group (str): A group name: ``"projects/{project_id_or_number}/groups/{group_id}"``. Returns groups whose ``parentName`` field contains the group name. If no groups have this parent, the results are empty. ancestors_of_group (str): A group name: ``"projects/{project_id_or_number}/groups/{group_id}"``. Returns groups that are ancestors of the specified group. The groups are returned in order, starting with the immediate parent and ending with the most distant ancestor. If the specified group has no immediate parent, the results are empty. descendants_of_group (str): A group name: ``"projects/{project_id_or_number}/groups/{group_id}"``. Returns the descendants of the specified group. This is a superset of the results returned by the ``childrenOfGroup`` filter, and includes children-of-children, and so forth. page_size (int): The maximum number of resources contained in the underlying API response. If page streaming is performed per- resource, this parameter does not affect the return value. If page streaming is performed per-page, this determines the maximum number of resources in a page. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.gax.PageIterator` instance. By default, this is an iterable of :class:`~google.cloud.monitoring_v3.types.Group` instances. This object can also be configured to iterate over the pages of the response through the `options` parameter. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ if metadata is None: metadata = [] metadata = list(metadata) # Wrap the transport method to add retry and timeout logic. if "list_groups" not in self._inner_api_calls: self._inner_api_calls[ "list_groups" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_groups, default_retry=self._method_configs["ListGroups"].retry, default_timeout=self._method_configs["ListGroups"].timeout, client_info=self._client_info, ) # Sanity check: We have some fields which are mutually exclusive; # raise ValueError if more than one is sent. 
google.api_core.protobuf_helpers.check_oneof( children_of_group=children_of_group, ancestors_of_group=ancestors_of_group, descendants_of_group=descendants_of_group, ) request = group_service_pb2.ListGroupsRequest( name=name, children_of_group=children_of_group, ancestors_of_group=ancestors_of_group, descendants_of_group=descendants_of_group, page_size=page_size, ) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls["list_groups"], retry=retry, timeout=timeout, metadata=metadata, ), request=request, items_field="group", request_token_field="page_token", response_token_field="next_page_token", ) return iterator
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/monitoring/google/cloud/monitoring_v3/gapic/group_service_client.py#L206-L338
group by count
python
def get_groups(self, table_name): """ Return list of all groups for a particular data type """ df = self.dm[table_name] return list(df['group'].unique())
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/data_model3.py#L225-L230
group by count
python
def data_groups(self, groups, entity_count): """Process Group data. Args: groups (list): The list of groups to process. Returns: list: A list of groups including associations """ data = [] # process group objects for xid in groups.keys(): # get association from group data assoc_group_data = self.data_group_association(xid) data += assoc_group_data entity_count += len(assoc_group_data) if entity_count >= self._batch_max_chunk: break return data, entity_count
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_ti_batch.py#L558-L577
group by count
python
def classificationgroup(self): """List with (subject group ID, number of documents)-tuples.""" path = ['author-profile', 'classificationgroup', 'classifications', 'classification'] out = [(item['$'], item['@frequency']) for item in listify(chained_get(self._json, path, []))] return out or None
https://github.com/scopus-api/scopus/blob/27ce02dd3095bfdab9d3e8475543d7c17767d1ab/scopus/author_retrieval.py#L45-L51
group by count
python
def count(self, seqs, nreport=100, scan_rc=True): """ count the number of matches above the cutoff returns an iterator of lists containing integer counts """ for matches in self.scan(seqs, nreport, scan_rc): counts = [len(m) for m in matches] yield counts
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/scanner.py#L515-L522
group by count
python
def groups(self, query=None, exclude=None, maxResults=9999): """Return a list of groups matching the specified criteria. :param query: filter groups by name with this string :type query: Optional[str] :param exclude: filter out groups by name with this string :type exclude: Optional[Any] :param maxResults: maximum results to return. (Default: 9999) :type maxResults: int :rtype: List[str] """ params = {} groups = [] if query is not None: params['query'] = query if exclude is not None: params['exclude'] = exclude if maxResults is not None: params['maxResults'] = maxResults for group in self._get_json('groups/picker', params=params)['groups']: groups.append(group['name']) return sorted(groups)
https://github.com/pycontribs/jira/blob/397db5d78441ed6a680a9b7db4c62030ade1fd8a/jira/client.py#L1128-L1150
group by count
python
def count(self, value=True): """Returns the number of present/absent members.""" if value not in (True, False): raise ValueError('can only count True or False, not %r' % (value,)) return bin(self)[2:].count('01'[value])
https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/bases.py#L116-L120
group by count
python
def group_by(self, by): """ Return a new ``GroupBy`` object using this frame and the desired grouping columns. The returned groups are sorted by the natural group-by column sort. :param by: The columns to group on (either a single column name, or a list of column names, or a list of column indices). """ assert_is_type(by, str, int, [str, int]) return GroupBy(self, by)
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L1830-L1840
group by count
python
def group(self, column_or_label, collect=None): """Group rows by unique values in a column; count or aggregate others. Args: ``column_or_label``: values to group (column label or index, or array) ``collect``: a function applied to values in other columns for each group Returns: A Table with each row corresponding to a unique value in ``column_or_label``, where the first column contains the unique values from ``column_or_label``, and the second contains counts for each of the unique values. If ``collect`` is provided, a Table is returned with all original columns, each containing values calculated by first grouping rows according to ``column_or_label``, then applying ``collect`` to each set of grouped values in the other columns. Note: The grouped column will appear first in the result table. If ``collect`` does not accept arguments with one of the column types, that column will be empty in the resulting table. >>> marbles = Table().with_columns( ... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"), ... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"), ... "Amount", make_array(4, 6, 12, 7, 9, 2), ... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00)) >>> marbles Color | Shape | Amount | Price Red | Round | 4 | 1.3 Green | Rectangular | 6 | 1.3 Blue | Rectangular | 12 | 2 Red | Round | 7 | 1.75 Green | Rectangular | 9 | 1.4 Green | Round | 2 | 1 >>> marbles.group("Color") # just gives counts Color | count Blue | 1 Green | 3 Red | 2 >>> marbles.group("Color", max) # takes the max of each grouping, in each column Color | Shape max | Amount max | Price max Blue | Rectangular | 12 | 2 Green | Round | 9 | 1.4 Red | Round | 7 | 1.75 >>> marbles.group("Shape", sum) # sum doesn't make sense for strings Shape | Color sum | Amount sum | Price sum Rectangular | | 27 | 4.7 Round | | 13 | 4.05 """ # Assume that a call to group with a list of labels is a call to groups if _is_non_string_iterable(column_or_label) and \ len(column_or_label) != self._num_rows: return self.groups(column_or_label, collect) self = self.copy(shallow=True) collect = _zero_on_type_error(collect) # Remove column used for grouping column = self._get_column(column_or_label) if isinstance(column_or_label, str) or isinstance(column_or_label, numbers.Integral): column_label = self._as_label(column_or_label) del self[column_label] else: column_label = self._unused_label('group') # Group by column groups = self.index_by(column) keys = sorted(groups.keys()) # Generate grouped columns if collect is None: labels = [column_label, 'count' if column_label != 'count' else self._unused_label('count')] columns = [keys, [len(groups[k]) for k in keys]] else: columns, labels = [], [] for i, label in enumerate(self.labels): labels.append(_collected_label(collect, label)) c = [collect(np.array([row[i] for row in groups[k]])) for k in keys] columns.append(c) grouped = type(self)().with_columns(zip(labels, columns)) assert column_label == self._unused_label(column_label) grouped[column_label] = keys grouped.move_to_start(column_label) return grouped
https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L868-L952
group by count
python
def rank_genes_groups( adata, groupby, use_raw=True, groups: Union[str, Iterable[str]] = 'all', reference='rest', n_genes=100, rankby_abs=False, key_added=None, copy=False, method='t-test_overestim_var', corr_method='benjamini-hochberg', log_transformed=True, **kwds ): """Rank genes for characterizing groups. Parameters ---------- adata : :class:`~anndata.AnnData` Annotated data matrix. groupby : `str` The key of the observations grouping to consider. use_raw : `bool`, optional (default: `True`) Use `raw` attribute of `adata` if present. groups Subset of groups, e.g. `['g1', 'g2', 'g3']`, to which comparison shall be restricted, or `'all'` (default), for all groups. reference : `str`, optional (default: `'rest'`) If `'rest'`, compare each group to the union of the rest of the group. If a group identifier, compare with respect to this group. n_genes : `int`, optional (default: 100) The number of genes that appear in the returned tables. method : `{'logreg', 't-test', 'wilcoxon', 't-test_overestim_var'}`, optional (default: 't-test_overestim_var') If 't-test', uses t-test, if 'wilcoxon', uses Wilcoxon-Rank-Sum. If 't-test_overestim_var', overestimates variance of each group. If 'logreg' uses logistic regression, see [Ntranos18]_, `here <https://github.com/theislab/scanpy/issues/95>`__ and `here <http://www.nxn.se/valent/2018/3/5/actionable-scrna-seq-clusters>`__, for why this is meaningful. corr_method : `{'benjamini-hochberg', 'bonferroni'}`, optional (default: 'benjamini-hochberg') p-value correction method. Used only for 't-test', 't-test_overestim_var', and 'wilcoxon' methods. rankby_abs : `bool`, optional (default: `False`) Rank genes by the absolute value of the score, not by the score. The returned scores are never the absolute values. **kwds : keyword parameters Are passed to test methods. Currently this affects only parameters that are passed to `sklearn.linear_model.LogisticRegression <http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html>`__. For instance, you can pass `penalty='l1'` to try to come up with a minimal set of genes that are good predictors (sparse solution meaning few non-zero fitted coefficients). Returns ------- **names** : structured `np.ndarray` (`.uns['rank_genes_groups']`) Structured array to be indexed by group id storing the gene names. Ordered according to scores. **scores** : structured `np.ndarray` (`.uns['rank_genes_groups']`) Structured array to be indexed by group id storing the z-score underlying the computation of a p-value for each gene for each group. Ordered according to scores. **logfoldchanges** : structured `np.ndarray` (`.uns['rank_genes_groups']`) Structured array to be indexed by group id storing the log2 fold change for each gene for each group. Ordered according to scores. Only provided if method is 't-test' like. Note: this is an approximation calculated from mean-log values. **pvals** : structured `np.ndarray` (`.uns['rank_genes_groups']`) p-values. **pvals_adj** : structured `np.ndarray` (`.uns['rank_genes_groups']`) Corrected p-values. Notes ----- There are slight inconsistencies depending on whether sparse or dense data are passed. See `here <https://github.com/theislab/scanpy/blob/master/scanpy/tests/test_rank_genes_groups.py>`__. 
Examples -------- >>> adata = sc.datasets.pbmc68k_reduced() >>> sc.tl.rank_genes_groups(adata, 'bulk_labels', method='wilcoxon') # to visualize the results >>> sc.pl.rank_genes_groups(adata) """ if 'only_positive' in kwds: rankby_abs = not kwds.pop('only_positive') # backwards compat logg.info('ranking genes', r=True) avail_methods = {'t-test', 't-test_overestim_var', 'wilcoxon', 'logreg'} if method not in avail_methods: raise ValueError('Method must be one of {}.'.format(avail_methods)) avail_corr = {'benjamini-hochberg', 'bonferroni'} if corr_method not in avail_corr: raise ValueError('Correction method must be one of {}.'.format(avail_corr)) adata = adata.copy() if copy else adata utils.sanitize_anndata(adata) # for clarity, rename variable groups_order = groups if isinstance(groups, str) else list(groups) if isinstance(groups_order, list) and isinstance(groups_order[0], int): groups_order = [str(n) for n in groups_order] if reference != 'rest' and reference not in set(groups_order): groups_order += [reference] if (reference != 'rest' and reference not in set(adata.obs[groupby].cat.categories)): raise ValueError('reference = {} needs to be one of groupby = {}.' .format(reference, adata.obs[groupby].cat.categories.tolist())) groups_order, groups_masks = utils.select_groups( adata, groups_order, groupby) if key_added is None: key_added = 'rank_genes_groups' adata.uns[key_added] = {} adata.uns[key_added]['params'] = { 'groupby': groupby, 'reference': reference, 'method': method, 'use_raw': use_raw, 'corr_method': corr_method, } # adata_comp mocks an AnnData object if use_raw is True # otherwise it's just the AnnData object adata_comp = adata if adata.raw is not None and use_raw: adata_comp = adata.raw X = adata_comp.X # for clarity, rename variable n_genes_user = n_genes # make sure indices are not OoB in case there are less genes than n_genes if n_genes_user > X.shape[1]: n_genes_user = X.shape[1] # in the following, n_genes is simply another name for the total number of genes n_genes = X.shape[1] n_groups = groups_masks.shape[0] ns = np.zeros(n_groups, dtype=int) for imask, mask in enumerate(groups_masks): ns[imask] = np.where(mask)[0].size logg.msg('consider \'{}\' groups:'.format(groupby), groups_order, v=4) logg.msg('with sizes:', ns, v=4) if reference != 'rest': ireference = np.where(groups_order == reference)[0][0] reference_indices = np.arange(adata_comp.n_vars, dtype=int) rankings_gene_scores = [] rankings_gene_names = [] rankings_gene_logfoldchanges = [] rankings_gene_pvals = [] rankings_gene_pvals_adj = [] if method in {'t-test', 't-test_overestim_var'}: from scipy import stats from statsmodels.stats.multitest import multipletests # loop over all masks and compute means, variances and sample numbers means = np.zeros((n_groups, n_genes)) vars = np.zeros((n_groups, n_genes)) for imask, mask in enumerate(groups_masks): means[imask], vars[imask] = _get_mean_var(X[mask]) # test each either against the union of all other groups or against a # specific group for igroup in range(n_groups): if reference == 'rest': mask_rest = ~groups_masks[igroup] else: if igroup == ireference: continue else: mask_rest = groups_masks[ireference] mean_group, var_group = means[igroup], vars[igroup] mean_rest, var_rest = _get_mean_var(X[mask_rest]) ns_group = ns[igroup] # number of observations in group if method == 't-test': ns_rest = np.where(mask_rest)[0].size elif method == 't-test_overestim_var': ns_rest = ns[igroup] # hack for overestimating the variance for small groups else: raise ValueError('Method 
does not exist.') scores, pvals = stats.ttest_ind_from_stats( mean1=mean_group, std1=np.sqrt(var_group), nobs1=ns_group, mean2=mean_rest, std2=np.sqrt(var_rest), nobs2=ns_rest, equal_var=False # Welch's ) # Fold change foldchanges = (np.expm1(mean_group) + 1e-9) / (np.expm1(mean_rest) + 1e-9) # add small value to remove 0's scores[np.isnan(scores)] = 0 # I think it's only nan when means are the same and vars are 0 pvals[np.isnan(pvals)] = 1 # This also has to happen for Benjamini Hochberg if corr_method == 'benjamini-hochberg': _, pvals_adj, _, _ = multipletests(pvals, alpha=0.05, method='fdr_bh') elif corr_method == 'bonferroni': pvals_adj = np.minimum(pvals * n_genes, 1.0) scores_sort = np.abs(scores) if rankby_abs else scores partition = np.argpartition(scores_sort, -n_genes_user)[-n_genes_user:] partial_indices = np.argsort(scores_sort[partition])[::-1] global_indices = reference_indices[partition][partial_indices] rankings_gene_scores.append(scores[global_indices]) rankings_gene_logfoldchanges.append(np.log2(foldchanges[global_indices])) rankings_gene_names.append(adata_comp.var_names[global_indices]) rankings_gene_pvals.append(pvals[global_indices]) rankings_gene_pvals_adj.append(pvals_adj[global_indices]) elif method == 'logreg': # if reference is not set, then the groups listed will be compared to the rest # if reference is set, then the groups listed will be compared only to the other groups listed from sklearn.linear_model import LogisticRegression reference = groups_order[0] if len(groups) == 1: raise Exception('Cannot perform logistic regression on a single cluster.') adata_copy = adata[adata.obs[groupby].isin(groups_order)] adata_comp = adata_copy if adata.raw is not None and use_raw: adata_comp = adata_copy.raw X = adata_comp.X clf = LogisticRegression(**kwds) clf.fit(X, adata_copy.obs[groupby].cat.codes) scores_all = clf.coef_ for igroup, group in enumerate(groups_order): if len(groups_order) <= 2: # binary logistic regression scores = scores_all[0] else: scores = scores_all[igroup] partition = np.argpartition(scores, -n_genes_user)[-n_genes_user:] partial_indices = np.argsort(scores[partition])[::-1] global_indices = reference_indices[partition][partial_indices] rankings_gene_scores.append(scores[global_indices]) rankings_gene_names.append(adata_comp.var_names[global_indices]) if len(groups_order) <= 2: break elif method == 'wilcoxon': from scipy import stats from statsmodels.stats.multitest import multipletests CONST_MAX_SIZE = 10000000 means = np.zeros((n_groups, n_genes)) vars = np.zeros((n_groups, n_genes)) # initialize space for z-scores scores = np.zeros(n_genes) # First loop: Loop over all genes if reference != 'rest': for imask, mask in enumerate(groups_masks): means[imask], vars[imask] = _get_mean_var(X[mask]) # for fold-change only if imask == ireference: continue else: mask_rest = groups_masks[ireference] ns_rest = np.where(mask_rest)[0].size mean_rest, var_rest = _get_mean_var(X[mask_rest]) # for fold-change only if ns_rest <= 25 or ns[imask] <= 25: logg.hint('Few observations in a group for ' 'normal approximation (<=25). 
Lower test accuracy.') n_active = ns[imask] m_active = ns_rest # Now calculate gene expression ranking in chunkes: chunk = [] # Calculate chunk frames n_genes_max_chunk = floor(CONST_MAX_SIZE / (n_active + m_active)) if n_genes_max_chunk < n_genes: chunk_index = n_genes_max_chunk while chunk_index < n_genes: chunk.append(chunk_index) chunk_index = chunk_index + n_genes_max_chunk chunk.append(n_genes) else: chunk.append(n_genes) left = 0 # Calculate rank sums for each chunk for the current mask for chunk_index, right in enumerate(chunk): # Check if issparse is true: AnnData objects are currently sparse.csr or ndarray. if issparse(X): df1 = pd.DataFrame(data=X[mask, left:right].todense()) df2 = pd.DataFrame(data=X[mask_rest, left:right].todense(), index=np.arange(start=n_active, stop=n_active + m_active)) else: df1 = pd.DataFrame(data=X[mask, left:right]) df2 = pd.DataFrame(data=X[mask_rest, left:right], index=np.arange(start=n_active, stop=n_active + m_active)) df1 = df1.append(df2) ranks = df1.rank() # sum up adjusted_ranks to calculate W_m,n scores[left:right] = np.sum(ranks.loc[0:n_active, :]) left = right scores = (scores - (n_active * (n_active + m_active + 1) / 2)) / sqrt( (n_active * m_active * (n_active + m_active + 1) / 12)) scores[np.isnan(scores)] = 0 pvals = 2 * stats.distributions.norm.sf(np.abs(scores)) if corr_method == 'benjamini-hochberg': pvals[np.isnan(pvals)] = 1 # set Nan values to 1 to properly convert using Benhjamini Hochberg _, pvals_adj, _, _ = multipletests(pvals, alpha=0.05, method='fdr_bh') elif corr_method == 'bonferroni': pvals_adj = np.minimum(pvals * n_genes, 1.0) # Fold change foldchanges = (np.expm1(means[imask]) + 1e-9) / (np.expm1(mean_rest) + 1e-9) # add small value to remove 0's scores_sort = np.abs(scores) if rankby_abs else scores partition = np.argpartition(scores_sort, -n_genes_user)[-n_genes_user:] partial_indices = np.argsort(scores_sort[partition])[::-1] global_indices = reference_indices[partition][partial_indices] rankings_gene_scores.append(scores[global_indices]) rankings_gene_names.append(adata_comp.var_names[global_indices]) rankings_gene_logfoldchanges.append(np.log2(foldchanges[global_indices])) rankings_gene_pvals.append(pvals[global_indices]) rankings_gene_pvals_adj.append(pvals_adj[global_indices]) # If no reference group exists, ranking needs only to be done once (full mask) else: scores = np.zeros((n_groups, n_genes)) chunk = [] n_cells = X.shape[0] n_genes_max_chunk = floor(CONST_MAX_SIZE / n_cells) if n_genes_max_chunk < n_genes: chunk_index = n_genes_max_chunk while chunk_index < n_genes: chunk.append(chunk_index) chunk_index = chunk_index + n_genes_max_chunk chunk.append(n_genes) else: chunk.append(n_genes) left = 0 for chunk_index, right in enumerate(chunk): # Check if issparse is true if issparse(X): df1 = pd.DataFrame(data=X[:, left:right].todense()) else: df1 = pd.DataFrame(data=X[:, left:right]) ranks = df1.rank() # sum up adjusted_ranks to calculate W_m,n for imask, mask in enumerate(groups_masks): scores[imask, left:right] = np.sum(ranks.loc[mask, :]) left = right for imask, mask in enumerate(groups_masks): mask_rest = ~groups_masks[imask] means[imask], vars[imask] = _get_mean_var(X[mask]) #for fold-change mean_rest, var_rest = _get_mean_var(X[mask_rest]) # for fold-change scores[imask, :] = (scores[imask, :] - (ns[imask] * (n_cells + 1) / 2)) / sqrt( (ns[imask] * (n_cells - ns[imask]) * (n_cells + 1) / 12)) scores[np.isnan(scores)] = 0 pvals = 2 * stats.distributions.norm.sf(np.abs(scores[imask,:])) if corr_method == 
'benjamini-hochberg': pvals[np.isnan(pvals)] = 1 # set Nan values to 1 to properly convert using Benhjamini Hochberg _, pvals_adj, _, _ = multipletests(pvals, alpha=0.05, method='fdr_bh') elif corr_method == 'bonferroni': pvals_adj = np.minimum(pvals * n_genes, 1.0) # Fold change foldchanges = (np.expm1(means[imask]) + 1e-9) / (np.expm1(mean_rest) + 1e-9) # add small value to remove 0's scores_sort = np.abs(scores) if rankby_abs else scores partition = np.argpartition(scores_sort[imask, :], -n_genes_user)[-n_genes_user:] partial_indices = np.argsort(scores_sort[imask, partition])[::-1] global_indices = reference_indices[partition][partial_indices] rankings_gene_scores.append(scores[imask, global_indices]) rankings_gene_names.append(adata_comp.var_names[global_indices]) rankings_gene_logfoldchanges.append(np.log2(foldchanges[global_indices])) rankings_gene_pvals.append(pvals[global_indices]) rankings_gene_pvals_adj.append(pvals_adj[global_indices]) groups_order_save = [str(g) for g in groups_order] if (reference != 'rest' and method != 'logreg') or (method == 'logreg' and len(groups) == 2): groups_order_save = [g for g in groups_order if g != reference] adata.uns[key_added]['scores'] = np.rec.fromarrays( [n for n in rankings_gene_scores], dtype=[(rn, 'float32') for rn in groups_order_save]) adata.uns[key_added]['names'] = np.rec.fromarrays( [n for n in rankings_gene_names], dtype=[(rn, 'U50') for rn in groups_order_save]) if method in {'t-test', 't-test_overestim_var', 'wilcoxon'}: adata.uns[key_added]['logfoldchanges'] = np.rec.fromarrays( [n for n in rankings_gene_logfoldchanges], dtype=[(rn, 'float32') for rn in groups_order_save]) adata.uns[key_added]['pvals'] = np.rec.fromarrays( [n for n in rankings_gene_pvals], dtype=[(rn, 'float64') for rn in groups_order_save]) adata.uns[key_added]['pvals_adj'] = np.rec.fromarrays( [n for n in rankings_gene_pvals_adj], dtype=[(rn, 'float64') for rn in groups_order_save]) logg.info(' finished', time=True, end=' ' if _settings_verbosity_greater_or_equal_than(3) else '\n') logg.hint( 'added to `.uns[\'{}\']`\n' ' \'names\', sorted np.recarray to be indexed by group ids\n' ' \'scores\', sorted np.recarray to be indexed by group ids\n' .format(key_added) + (' \'logfoldchanges\', sorted np.recarray to be indexed by group ids\n' ' \'pvals\', sorted np.recarray to be indexed by group ids\n' ' \'pvals_adj\', sorted np.recarray to be indexed by group ids' if method in {'t-test', 't-test_overestim_var', 'wilcoxon'} else '')) return adata if copy else None
https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/tools/_rank_genes_groups.py#L16-L422
group by count
python
def group_num(self): """Current group number. :getter: Returns current group number :setter: Sets current group number :type: int """ xkb_state = XkbStateRec() XkbGetState(self._display, XkbUseCoreKbd, byref(xkb_state)) return xkb_state.group
https://github.com/hcpl/xkbgroup/blob/fcf4709a3c8221e0cdf62c09e5cccda232b0104c/xkbgroup/core.py#L303-L312
group by count
python
def by_group(self): # pragma: no cover """ Display group membership sorted by group. Returns: Array with a dictionary of group membership. For example: {'testgroup': ['test.user', 'test.user2']} """ group_membership = {} for record in self.__get_groups_with_membership(): group_membership[record.cn.value] = [ i for i in record.memberUid.values ] return group_membership
https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/audit.py#L36-L50
group by count
python
def grouped_count_sizes(fileslist, fgrouped): # pragma: no cover '''Compute the total size per group and total number of files. Useful to check that everything is OK.''' fsizes = {} total_files = 0 allitems = None if isinstance(fgrouped, dict): allitems = fgrouped.iteritems() elif isinstance(fgrouped, list): allitems = enumerate(fgrouped) for fkey, cluster in allitems: fsizes[fkey] = [] for subcluster in cluster: tot = 0 if subcluster is not None: for fname in subcluster: tot += fileslist[fname] total_files += 1 fsizes[fkey].append(tot) return fsizes, total_files
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/aux_funcs.py#L338-L356
group by count
python
def groups(self): """ dict {group name -> group labels} """ # this is mainly for compat # GH 3881 result = {key: value for key, value in zip(self.binlabels, self.bins) if key is not NaT} return result
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/ops.py#L698-L705
group by count
python
def list_groups(self, filtr=None): """Get the groups the logged in user is a member of. Optionally filter by 'member' or 'maintainer'. Args: filtr (optional[string|None]): ['member'|'maintainer'] or defaults to None. Returns: (list[string]): List of group names. Raises: requests.HTTPError on failure. """ return self.service.list_groups( filtr, self.url_prefix, self.auth, self.session, self.session_send_opts)
https://github.com/jhuapl-boss/intern/blob/d8fc6df011d8f212c87e6a1fd4cc21cfb5d103ed/intern/service/boss/project.py#L40-L55
group by count
python
def group(self, *args): """Returns one or more subgroups of the match. Each argument is either a group index or a group name.""" if len(args) == 0: args = (0,) grouplist = [] for group in args: grouplist.append(self._get_slice(self._get_index(group), None)) if len(grouplist) == 1: return grouplist[0] else: return tuple(grouplist)
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pypy/_sre.py#L317-L328
group by count
python
def group_min(groups, data): """ Given a list of groups, find the minimum element of data within each group Parameters ----------- groups : (n,) sequence of (q,) int Indexes of each group corresponding to each element in data data : (m,) The data that groups indexes reference Returns ----------- minimums : (n,) Minimum value of data per group """ # sort with major key groups, minor key data order = np.lexsort((data, groups)) groups = groups[order] # this is only needed if groups is unsorted data = data[order] # construct an index which marks borders between groups index = np.empty(len(groups), 'bool') index[0] = True index[1:] = groups[1:] != groups[:-1] return data[index]
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/grouping.py#L710-L735
group by count
python
def group(self): """(re-)group all logevents by the given group.""" if hasattr(self, 'group_by'): group_by = self.group_by else: group_by = self.default_group_by if self.args['group'] is not None: group_by = self.args['group'] self.groups = Grouping(self.logevents, group_by) self.groups.move_items(None, 'others') self.groups.sort_by_size(group_limit=self.args['group_limit'], discard_others=self.args['no_others'])
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mplotqueries/plottypes/base_type.py#L79-L91
group by count
python
def group(self, name): """GROUP command. """ args = name code, message = self.command("GROUP", args) if code != 211: raise NNTPReplyError(code, message) parts = message.split(None, 4) try: total = int(parts[0]) first = int(parts[1]) last = int(parts[2]) group = parts[3] except (IndexError, ValueError): raise NNTPDataError("Invalid GROUP status '%s'" % message) return total, first, last, group
https://github.com/greenbender/pynntp/blob/991a76331cdf5d8f9dbf5b18f6e29adc80749a2f/nntp/nntp.py#L951-L969
group by count
python
def count(self): """Total count of the matching items. It sums up the count of partial results, and returns the total count of matching items in the table. """ count = 0 operation = self._get_operation() kwargs = self.kwargs.copy() kwargs['select'] = 'COUNT' limit = kwargs.get('limit', None) while True: result = operation(self.model.get_table_name(), **kwargs) count += result['Count'] if limit is not None: limit -= result['Count'] last_evaluated_key = result.get('LastEvaluatedKey', None) if not self._prepare_next_fetch(kwargs, last_evaluated_key, limit): break return count
https://github.com/teddychoi/BynamoDB/blob/9b143d0554c89fb8edbfb99db5542e48bd126b39/bynamodb/results.py#L25-L45
group by count
python
def count(cls, cur, table:str, where_keys: list=None): """ gives the number of records in the table Args: table: a string indicating the name of the table Returns: an integer indicating the number of records in the table """ if where_keys: where_clause, values = cls._get_where_clause_with_values(where_keys) query = cls._count_query_where.format(table, where_clause) q, t = query, values else: query = cls._count_query.format(table) q, t = query, () yield from cur.execute(q, t) result = yield from cur.fetchone() return int(result[0])
https://github.com/nerandell/cauldron/blob/d363bac763781bb2da18debfa0fdd4be28288b92/cauldron/sql.py#L200-L221
group by count
python
def get_group_list(self): """ 获取分组列表 返回JSON示例:: { "groups": [ { "cnt": 8, "id": 0, "name": "未分组" }, { "cnt": 0, "id": 1, "name": "黑名单" }, { "cnt": 0, "id": 2, "name": "星标组" } ] } :return: 返回的 JSON 数据 :raises NeedLoginError: 操作未执行成功, 需要再次尝试登录, 异常内容为服务器返回的错误数据 """ url = 'https://mp.weixin.qq.com/cgi-bin/contactmanage?t=user/index&pagesize=10&pageidx=0&type=0&groupid=0&lang=zh_CN&f=json&token={token}'.format( token=self.__token, ) headers = { 'x-requested-with': 'XMLHttpRequest', 'referer': 'https://mp.weixin.qq.com/cgi-bin/contactmanage?t=user/index&pagesize=10&pageidx=0&type=0&groupid=0&lang=zh_CN&token='.format( token=self.__token, ), 'cookie': self.__cookies, } r = requests.get(url, headers=headers) try: message = json.loads(r.text)['group_list'] except (KeyError, ValueError): raise NeedLoginError(r.text) return message
https://github.com/doraemonext/wechat-python-sdk/blob/bf6f6f3d4a5440feb73a51937059d7feddc335a0/wechat_sdk/ext.py#L366-L412
group by count
python
def ngroups(self): """Number of groups.""" if self._can_use_new_school(): return self._grouped_spark_sql.count() self._prep_pandas_groupby() return self._mergedRDD.count()
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/groupby.py#L129-L134
group by count
python
def PSRK_groups(self): r'''Dictionary of PSRK subgroup: count groups for the PSRK subgroups, as determined by `DDBST's online service <http://www.ddbst.com/unifacga.html>`_. Examples -------- >>> pprint(Chemical('Cumene').PSRK_groups) {1: 2, 9: 5, 13: 1} ''' if self.__PSRK_groups: return self.__PSRK_groups else: load_group_assignments_DDBST() if self.InChI_Key in DDBST_PSRK_assignments: self.__PSRK_groups = DDBST_PSRK_assignments[self.InChI_Key] return self.__PSRK_groups else: return None
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/chemical.py#L1392-L1409
group by count
python
def get_descendants_group_count(cls, parent=None): """ Helper for a very common case: get a group of siblings and the number of *descendants* (not only children) in every sibling. :param parent: The parent of the siblings to return. If no parent is given, the root nodes will be returned. :returns: A `list` (**NOT** a Queryset) of node objects with an extra attribute: `descendants_count`. """ if parent is None: qset = cls.get_root_nodes() else: qset = parent.get_children() nodes = list(qset) for node in nodes: node.descendants_count = node.get_descendant_count() return nodes
https://github.com/django-treebeard/django-treebeard/blob/8042ee939cb45394909237da447f8925e3cc6aa3/treebeard/models.py#L205-L227
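A hedged usage sketch, assuming a treebeard-backed Category model that is already populated:

# Category is a hypothetical treebeard node model
for node in Category.get_descendants_group_count():
    print(node, node.descendants_count)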
group by count
python
def hist_of_counts(self, *columns, overlay=True, bins=None, bin_column=None,
                   group=None, side_by_side=False, width=6, height=4,
                   **vargs):
    """
    Plots one count-based histogram for each column in columns. The
    heights of each bar will represent the counts, and all the bins
    must be of equal size.

    If no column is specified, plot all columns.

    Kwargs:
        overlay (bool): If True, plots 1 chart with all the histograms
            overlaid on top of each other (instead of the default behavior
            of one histogram for each column in the table). Also adds a
            legend that matches each bar color to its column. Note that
            if the histograms are not overlaid, they are not forced to the
            same scale.

        bins (array or int): Lower bound for each bin in the
            histogram or number of bins. If None, bins will
            be chosen automatically.

        bin_column (column name or index): A column of bin lower bounds.
            All other columns are treated as counts of these bins.
            If None, each value in each row is assigned a count of 1.

        group (column name or index): A column of categories. The rows are
            grouped by the values in this column, and a separate histogram
            is generated for each group. The histograms are overlaid or
            plotted separately depending on the overlay argument. If None,
            no such grouping is done.

        side_by_side (bool): Whether histogram bins should be plotted side
            by side (instead of directly overlaid). Makes sense only when
            plotting multiple histograms, either by passing several columns
            or by using the group option.

        vargs: Additional arguments that get passed into :func:plt.hist.
            See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hist
            for additional arguments that can be passed into vargs. These
            include: `range`, `cumulative`, and `orientation`, to name a few.

    >>> t = Table().with_columns(
    ...     'count',  make_array(9, 3, 3, 1),
    ...     'points', make_array(1, 2, 2, 10))
    >>> t
    count | points
    9     | 1
    3     | 2
    3     | 2
    1     | 10
    >>> t.hist_of_counts() # doctest: +SKIP
    <histogram of values in count with counts on y-axis>
    <histogram of values in points with counts on y-axis>

    >>> t = Table().with_columns(
    ...     'value', make_array(101, 102, 103),
    ...     'count', make_array(5, 10, 5))
    >>> t.hist_of_counts(bin_column='value') # doctest: +SKIP
    <histogram of values weighted by corresponding counts>

    >>> t = Table().with_columns(
    ...     'value',    make_array(1, 2, 3, 2, 5),
    ...     'category', make_array('a', 'a', 'a', 'b', 'b'))
    >>> t.hist('value', group='category') # doctest: +SKIP
    <two overlaid histograms of the data [1, 2, 3] and [2, 5]>
    """
    if bin_column is not None and bins is None:
        bins = np.unique(self.column(bin_column))

    # TODO ensure counts are integers even when `columns` is empty
    for column in columns:
        if not _is_array_integer(self.column(column)):
            raise ValueError('The column {0} contains non-integer values '
                             'When using hist_of_counts with bin_columns, '
                             'all columns should contain counts.'
                             .format(column))

    if vargs.get('normed', False) or vargs.get('density', False):
        raise ValueError("hist_of_counts is for displaying counts only, "
                         "and should not be used with the normed or "
                         "density keyword arguments")
    vargs['density'] = False

    if bins is not None:
        if len(bins) < 2:
            raise ValueError("bins must have at least two items")
        diffs = np.diff(sorted(bins))
        # Diffs should all be equal (up to floating point error)
        normalized_diff_deviances = np.abs((diffs - diffs[0])/diffs[0])
        if np.any(normalized_diff_deviances > 1e-11):
            raise ValueError("Bins of unequal size should not be used "
                             "with hist_of_counts. Please use hist() and "
                             "make sure to set normed=True")
    return self.hist(*columns, overlay=overlay, bins=bins,
                     bin_column=bin_column, group=group,
                     side_by_side=side_by_side, width=width, height=height,
                     **vargs)
https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L2572-L2666
group by count
python
def search_group(self, search_query):
    """
    Searches for public groups using a query
    Results will be returned using the on_group_search_response() callback

    :param search_query: The query that contains some of the desired groups' name.
    """
    log.info("[+] Initiating a search for groups using the query '{}'".format(search_query))
    return self._send_xmpp_element(roster.GroupSearchRequest(search_query))
https://github.com/tomer8007/kik-bot-api-unofficial/blob/2ae5216bc05e7099a41895382fc8e428a7a5c3ac/kik_unofficial/client.py#L349-L357
group by count
python
def _group_by_batches(samples, check_fn):
    """Group calls by batches, processing families together during ensemble calling.
    """
    batch_groups = collections.defaultdict(list)
    extras = []
    for data in [x[0] for x in samples]:
        if check_fn(data):
            batch_groups[multi.get_batch_for_key(data)].append(data)
        else:
            extras.append([data])
    return batch_groups, extras
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/ensemble.py#L133-L143
group by count
python
def get_groups(self, username):
    """Get all groups of a user"""
    username = ldap.filter.escape_filter_chars(self._byte_p2(username))
    userdn = self._get_user(username, NO_ATTR)

    searchfilter = self.group_filter_tmpl % {
        'userdn': userdn,
        'username': username
    }

    groups = self._search(searchfilter, NO_ATTR, self.groupdn)
    ret = []
    for entry in groups:
        ret.append(self._uni(entry[0]))
    return ret
https://github.com/kakwa/ldapcherry/blob/b5e7cb6a44065abc30d164e72981b3713a172dda/ldapcherry/backend/backendLdap.py#L651-L665
group by count
python
def count(self):
    """
    Count the number of records in the table, subject to the query.
    """
    cmd = ("select COUNT(*) from {table} "
           "{join_clause}{where_clause}{order_clause}").format(
        table=self.table_name,
        where_clause=self.where_clause,
        join_clause=self.join_clause,
        order_clause=self.order_clause).rstrip()
    return Repo.db.execute(cmd, self.where_values)
https://github.com/ECESeniorDesign/lazy_record/blob/929d3cc7c2538b0f792365c0d2b0e0d41084c2dd/lazy_record/repo.py#L226-L236
group by count
python
def aggregate_groups(self, ct_agg, nr_groups, skip_key,
                     carray_factor, groupby_cols, agg_ops,
                     dtype_dict, bool_arr=None):
    '''Perform aggregation and place the result in the given ctable.

    Args:
        ct_agg (ctable): the table to hold the aggregation
        nr_groups (int): the number of groups (number of rows in output table)
        skip_key (int): index of the output row to remove from results
            (used for filtering)
        carray_factor: the carray holding, for each row in the table,
            a reference to the unique group index
        groupby_cols: the list of 'dimension' columns that are used to
            perform the groupby over
        agg_ops (list): list of tuples of the form:
            (input_col, output_col, agg_op)
                input_col (string): name of the column to act on
                agg_op: aggregation operation to perform
        bool_arr: a boolean array containing the filter

    '''
    # this creates the groupby columns
    for col in groupby_cols:
        result_array = ctable_ext.groupby_value(self[col], carray_factor,
                                                nr_groups, skip_key)
        if bool_arr is not None:
            result_array = np.delete(result_array, skip_key)
        ct_agg.addcol(result_array, name=col)
        del result_array

    # this creates the aggregation columns
    for input_col_name, output_col_name, agg_op in agg_ops:
        input_col = self[input_col_name]
        output_col_dtype = dtype_dict[output_col_name]

        input_buffer = np.empty(input_col.chunklen, dtype=input_col.dtype)
        output_buffer = np.zeros(nr_groups, dtype=output_col_dtype)

        if agg_op == 'sum':
            ctable_ext.aggregate_sum(input_col, carray_factor, nr_groups,
                                     skip_key, input_buffer, output_buffer)
        elif agg_op == 'mean':
            ctable_ext.aggregate_mean(input_col, carray_factor, nr_groups,
                                      skip_key, input_buffer, output_buffer)
        elif agg_op == 'std':
            ctable_ext.aggregate_std(input_col, carray_factor, nr_groups,
                                     skip_key, input_buffer, output_buffer)
        elif agg_op == 'count':
            ctable_ext.aggregate_count(input_col, carray_factor, nr_groups,
                                       skip_key, input_buffer, output_buffer)
        elif agg_op == 'count_distinct':
            ctable_ext.aggregate_count_distinct(input_col, carray_factor,
                                                nr_groups, skip_key,
                                                input_buffer, output_buffer)
        elif agg_op == 'sorted_count_distinct':
            ctable_ext.aggregate_sorted_count_distinct(input_col,
                                                       carray_factor,
                                                       nr_groups, skip_key,
                                                       input_buffer,
                                                       output_buffer)
        else:
            raise KeyError('Unknown aggregation operation ' + str(agg_op))

        if bool_arr is not None:
            output_buffer = np.delete(output_buffer, skip_key)

        ct_agg.addcol(output_buffer, name=output_col_name)
        del output_buffer

    ct_agg.delcol('tmp_col_bquery__')
https://github.com/visualfabriq/bquery/blob/3702e974696e22876944a3339affad2f29e1ee06/bquery/ctable.py#L194-L260
group by count
python
def count_channel_groups(stream, include_channels=False):
    """ count all channel groups as fast as possible. This is used to provide
    reliable progress information when loading a file using the GUI

    Parameters
    ----------
    stream : file handle
        opened file handle
    include_channels : bool
        also count channels

    Returns
    -------
    count, ch_count : (int, int)
        channel group count and channel count (the latter is 0 unless
        `include_channels` is True)

    """
    count = 0
    ch_count = 0
    stream.seek(64)
    blk_id = stream.read(2)
    if blk_id == b"HD":
        version = 3
    else:
        blk_id += stream.read(2)
        if blk_id == b"##HD":
            version = 4
        else:
            raise MdfException(f'"{stream.name}" is not a valid MDF file')

    if version >= 4:
        stream.seek(88, 0)
        dg_addr = UINT64_u(stream.read(8))[0]
        while dg_addr:
            stream.seek(dg_addr + 32)
            cg_addr = UINT64_u(stream.read(8))[0]
            while cg_addr:
                count += 1
                if include_channels:
                    stream.seek(cg_addr + 32)
                    ch_addr = UINT64_u(stream.read(8))[0]
                    while ch_addr:
                        ch_count += 1
                        stream.seek(ch_addr + 24)
                        ch_addr = UINT64_u(stream.read(8))[0]
                stream.seek(cg_addr + 24)
                cg_addr = UINT64_u(stream.read(8))[0]

            stream.seek(dg_addr + 24)
            dg_addr = UINT64_u(stream.read(8))[0]

    else:
        stream.seek(68, 0)
        dg_addr = UINT32_u(stream.read(4))[0]
        while dg_addr:
            stream.seek(dg_addr + 8)
            cg_addr = UINT32_u(stream.read(4))[0]
            while cg_addr:
                count += 1
                if include_channels:
                    stream.seek(cg_addr + 8)
                    ch_addr = UINT32_u(stream.read(4))[0]
                    while ch_addr:
                        ch_count += 1
                        stream.seek(ch_addr + 4)
                        ch_addr = UINT32_u(stream.read(4))[0]
                stream.seek(cg_addr + 4)
                cg_addr = UINT32_u(stream.read(4))[0]

            stream.seek(dg_addr + 4)
            dg_addr = UINT32_u(stream.read(4))[0]

    return count, ch_count
https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/utils.py#L646-L720
group by count
python
def UNIFAC_Dortmund_groups(self):
    r'''Dictionary of Dortmund UNIFAC subgroup: count groups for the
    Dortmund UNIFAC subgroups, as determined by `DDBST's online service
    <http://www.ddbst.com/unifacga.html>`_.

    Examples
    --------
    >>> pprint(Chemical('Cumene').UNIFAC_Dortmund_groups)
    {1: 2, 9: 5, 13: 1}
    '''
    if self.__UNIFAC_Dortmund_groups:
        return self.__UNIFAC_Dortmund_groups
    else:
        load_group_assignments_DDBST()
        if self.InChI_Key in DDBST_MODIFIED_UNIFAC_assignments:
            self.__UNIFAC_Dortmund_groups = DDBST_MODIFIED_UNIFAC_assignments[self.InChI_Key]
            return self.__UNIFAC_Dortmund_groups
        else:
            return None
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/chemical.py#L1372-L1389
group by count
python
def group_by(self, *args):
    """
    Specifies the fields to group by.
    """
    if len(args) == 1:
        self.raw_fields_group = args[0].split(',')
    else:
        self.raw_fields_group = list(args)
    return self
https://github.com/josegomezr/pqb/blob/a600cc6e4e9acdaaf2cff171d13eb85c9ed1757c/pqb/queries.py#L98-L106
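Both call forms accepted by group_by above end up with the same field list; a quick check (a and b are hypothetical pqb query instances):

a.group_by('city,country')      # one comma-separated string is split
b.group_by('city', 'country')   # several arguments are taken as-is
assert a.raw_fields_group == b.raw_fields_group == ['city', 'country']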
group by count
python
def count(self, **query_dict):
    """Return the number of entries that match query."""
    param_dict = query_dict.copy()
    param_dict['count'] = 0
    resp_dict = self._get_query(**param_dict)
    return resp_dict['response']['numFound']
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_client/src/d1_client/solr_client.py#L194-L199
group by count
python
def count(cls, user_id):
    """ Count sessions with user_id """
    return cls.query.with_entities(
        cls.user_id).filter_by(user_id=user_id).count()
https://github.com/AtomHash/evernode/blob/b2fb91555fb937a3f3eba41db56dee26f9b034be/evernode/models/session_model.py#L40-L43
group by count
python
def classify(in_x, groups, labels, k):
    """k-nearest-neighbour classification"""
    # compute the Euclidean distance to every sample
    gl = array_len(groups)
    tmp = tile(in_x, (gl, 1)) - groups
    tmp = exponential_operation(tmp, 2)
    tmp = array_sum(tmp)
    tmp = exponential_operation(tmp, 0.5)
    # indices that would sort the distance array
    arg = argsort(tmp)
    # tally the class labels of the k nearest samples
    cc = odict()
    for i in range(k):
        # count this sample's label
        la = labels[arg[i]]
        cc[la] = cc.get(la, 0) + 1
    # the original returned max(cc), i.e. the largest *key*; the label
    # with the highest vote count is what a kNN classifier wants
    return max(cc, key=cc.get)
https://github.com/shi-cong/PYSTUDY/blob/c8da7128ea18ecaa5849f2066d321e70d6f97f70/PYSTUDY/ml/knnlib.py#L27-L45
group by count
python
def count_star(self) -> int:
    """
    Implements the ``COUNT(*)`` specialization.
    """
    count_query = (self.statement.with_only_columns([func.count()])
                   .order_by(None))
    return self.session.execute(count_query).scalar()
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/orm_query.py#L202-L208
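For comparison, a self-contained COUNT(*) with plain SQLAlchemy Core, no ORM mixin involved; a minimal sketch against an in-memory SQLite database:

from sqlalchemy import create_engine, text

engine = create_engine('sqlite://')
with engine.connect() as conn:
    conn.execute(text("CREATE TABLE t (x INTEGER)"))
    conn.execute(text("INSERT INTO t VALUES (1), (2), (2)"))
    # scalar() unwraps the single COUNT(*) value
    print(conn.execute(text("SELECT COUNT(*) FROM t")).scalar())  # 3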
group by count
python
def count(self):
    """
    Returns the number of rows matched by this query.

    *Note: This function executes a SELECT COUNT() and has a performance
    cost on large datasets*
    """
    if self._batch:
        raise CQLEngineException("Only inserts, updates, and deletes are available in batch mode")

    if self._count is None:
        query = self._select_query()
        query.count = True
        result = self._execute(query)
        count_row = result.one().popitem()
        self._count = count_row[1]
    return self._count
https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/cqlengine/query.py#L831-L846
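A typical call site for the cqlengine count above, assuming a Person model already defined and synced to the cluster:

# runs a SELECT COUNT(*) against Cassandra and caches the result
n = Person.objects.filter(last_name='Smith').count()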
group by count
python
def groupby(
        self,
        by=None,
        axis=0,
        level=None,
        as_index=True,
        sort=False,
        group_keys=False,
        squeeze=False,
        **kwargs
):
    """Mimics DataFrame.groupby, but pins `by` to the code and
    datetime index levels.

    Keyword Arguments:
        by {[type]} -- [description] (default: {None})
        axis {int} -- [description] (default: {0})
        level {[type]} -- [description] (default: {None})
        as_index {bool} -- [description] (default: {True})
        sort {bool} -- [description] (default: {False})
        group_keys {bool} -- [description] (default: {False})
        squeeze {bool} -- [description] (default: {False})

    Note: extra **kwargs are accepted but not forwarded to pandas.

    Returns:
        [type] -- [description]
    """
    if by == self.index.names[1]:
        by = None
        level = 1
    elif by == self.index.names[0]:
        by = None
        level = 0
    return self.data.groupby(
        by=by,
        axis=axis,
        level=level,
        as_index=as_index,
        sort=sort,
        group_keys=group_keys,
        squeeze=squeeze
    )
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAData/base_datastruct.py#L802-L843
group by count
python
def count(self):
    """
    This method retrieves the total number of records resulting from a
    given query.

    This attribute can be used compounded with query, filter,
    sort, order and facet methods.

    Examples:
        >>> from crossref.restful import Works
        >>> Works().query('zika').count()
        3597
        >>> Works().query('zika').filter(prefix='10.1590').count()
        61
        >>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').filter(has_abstract='true').count()
        14
        >>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').filter(has_abstract='true').query(author='Marli').count()
        1
    """
    request_params = dict(self.request_params)
    request_url = str(self.request_url)
    request_params['rows'] = 0

    result = self.do_http_request(
        'get',
        request_url,
        data=request_params,
        custom_header=str(self.etiquette)
    ).json()

    return int(result['message']['total-results'])
https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L186-L215
group by count
python
def group_consecutives(data, stepsize=1):
    """
    Return list of consecutive lists of numbers from data (number list).

    References:
        http://stackoverflow.com/questions/7352684/how-to-find-the-groups-of-consecutive-elements-from-an-array-in-numpy
    """
    run = []
    result = [run]
    expect = None
    for item in data:
        if (item == expect) or (expect is None):
            run.append(item)
        else:
            run = [item]
            result.append(run)
        expect = item + stepsize
    return result
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L2266-L2283
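A quick check of the behaviour, plus the classic itertools equivalent that groups by the value-minus-index offset (constant within a consecutive run):

from itertools import groupby

data = [1, 2, 3, 7, 8, 10]
print(group_consecutives(data))  # [[1, 2, 3], [7, 8], [10]]

# enumerate pairs each value with its position; v - i is the same
# for every element of a consecutive run, so groupby splits on runs
runs = [[v for _, v in g]
        for _, g in groupby(enumerate(data), lambda iv: iv[1] - iv[0])]
print(runs)                      # [[1, 2, 3], [7, 8], [10]]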
group by count
python
def count_protein_group_hits(lineproteins, groups):
    """Takes a list of protein accessions and a list of
    protein groups content from DB. Counts for each group
    in list how many proteins are found in lineproteins.
    Returns list of str amounts.
    """
    hits = []
    for group in groups:
        hits.append(0)
        for protein in lineproteins:
            if protein in group:
                hits[-1] += 1
    return [str(x) for x in hits]
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/mzidtsv/proteingrouping.py#L57-L68
group by count
python
def build_groups(self):
    """
    Generates the sql for the GROUP BY portion of the query

    :return: the GROUP BY portion of the query
    :rtype: str
    """
    # check if there are any groupings
    if len(self.groups):
        groups = []

        # get the group sql for each grouping
        for group in self.groups:
            groups.append(group.get_name())
        return 'GROUP BY {0} '.format(', '.join(groups))
    return ''
https://github.com/ambitioninc/django-query-builder/blob/113a7d845d3ddc6a45621b9880308e756f87c5bf/querybuilder/query.py#L1443-L1458
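On the section's "group by count" theme, the clause built above typically sits next to a COUNT aggregate; a minimal hand-assembled sketch (the users table and its columns are placeholders):

group_cols = ['city', 'country']
sql = 'SELECT {cols}, COUNT(*) AS n FROM users GROUP BY {cols}'.format(
    cols=', '.join(group_cols))
print(sql)
# SELECT city, country, COUNT(*) AS n FROM users GROUP BY city, country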
group by count
python
def count(self):
    """Approximate number of results, according to the API"""
    if self._total_count is None:
        self._total_count = self._get_total_count()
    return self._total_count
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/pagination.py#L124-L128
group by count
python
def count(self, field='*'):
    """
    Returns a COUNT of the query by wrapping the query and performing a
    COUNT aggregate of the specified field

    :param field: the field to pass to the COUNT aggregate. Defaults to '*'
    :type field: str

    :return: The number of rows that the query will return
    :rtype: int
    """
    rows = self.get_count_query().select(bypass_safe_limit=True)
    return list(rows[0].values())[0]
https://github.com/ambitioninc/django-query-builder/blob/113a7d845d3ddc6a45621b9880308e756f87c5bf/querybuilder/query.py#L1823-L1835
group by count
python
def show_group(self, group_id):
    """
    Get information about a group

    :type group_id: int
    :param group_id: Group ID Number

    :rtype: dict
    :return: a dictionary containing group information
    """
    res = self.post('loadGroups', {'groupId': group_id})
    if isinstance(res, list):
        return _fix_group(res[0])
    else:
        return _fix_group(res)
https://github.com/galaxy-genome-annotation/python-apollo/blob/2bc9991302abe4402ec2885dcaac35915475b387/apollo/groups/__init__.py#L41-L55
group by count
python
def _guess_group(info):
    """Guess a group label so the report has some factor to group on."""
    value = "fake"
    if "metadata" in info:
        if info["metadata"]:
            return ",".join(map(str, info["metadata"].values()))
    return value
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/srna/group.py#L175-L181
group by count
python
def count_with_multiplier(groups, multiplier):
    """ Update group counts with multiplier

    This is for handling atom counts on groups like (OH)2

    :param groups: iterable of Group/Element
    :param multiplier: the number to multiply by
    """
    counts = collections.defaultdict(float)
    for group in groups:
        for element, count in group.count().items():
            counts[element] += count*multiplier
    return counts
https://github.com/Ex-Mente/auxi.0/blob/2dcdae74154f136f8ca58289fe5b20772f215046/auxi/tools/chemistry/stoichiometry.py#L57-L70
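A worked sketch of the (OH)2 case with a stub standing in for the library's Group/Element types (the stub is an assumption; only the count() interface matters):

import collections

class StubGroup:
    """Minimal stand-in exposing the count() interface used above."""
    def __init__(self, counts):
        self._counts = counts

    def count(self):
        return self._counts

oh = StubGroup({'O': 1.0, 'H': 1.0})
print(dict(count_with_multiplier([oh], 2)))  # {'O': 2.0, 'H': 2.0}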
group by count
python
def getgroupmembers(self, group_id, page=1, per_page=20):
    """
    Lists the members of a given group id

    :param group_id: the group id
    :param page: which page to return (default is 1)
    :param per_page: number of items to return per page (default is 20)
    :return: the group's members
    """
    data = {'page': page, 'per_page': per_page}
    request = requests.get(
        '{0}/{1}/members'.format(self.groups_url, group_id),
        params=data, headers=self.headers, verify=self.verify_ssl,
        auth=self.auth, timeout=self.timeout)
    if request.status_code == 200:
        return request.json()
    else:
        return False
https://github.com/pyapi-gitlab/pyapi-gitlab/blob/f74b6fb5c13cecae9524997847e928905cc60acf/gitlab/__init__.py#L1695-L1713
group by count
python
def parse_groups(output):
    """Parse ``make_ndx`` output and return groups as a list of dicts."""
    groups = []
    for line in output.split('\n'):
        m = NDXGROUP.match(line)
        if m:
            d = m.groupdict()
            groups.append({'name': d['GROUPNAME'],
                           'nr': int(d['GROUPNUMBER']),
                           'natoms': int(d['NATOMS'])})
    return groups
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/cbook.py#L1154-L1164
group by count
python
def list_groups(self, **kwargs):
    """List all groups.

    :param kwargs: arbitrary search filters

    :returns: list of groups
    :rtype: list[:class:`marathon.models.group.MarathonGroup`]
    """
    response = self._do_request('GET', '/v2/groups')
    groups = self._parse_response(
        response, MarathonGroup, is_list=True, resource_name='groups')
    for k, v in kwargs.items():
        groups = [o for o in groups if getattr(o, k) == v]
    return groups
https://github.com/thefactory/marathon-python/blob/592b253aa8edf2475c97ca438ad7b6936652caf2/marathon/client.py#L400-L413
group by count
python
def get_group_members(group_name, region=None, key=None, keyid=None, profile=None):
    '''
    Get group member information.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt myminion boto_iam.get_group_members mygroup
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        marker = None
        truncated = True
        users = []
        while truncated:
            info = conn.get_group(group_name, marker=marker, max_items=1000)
            if not info:
                return False
            truncated = bool(info['get_group_response']['get_group_result']['is_truncated'])
            if truncated and 'marker' in info['get_group_response']['get_group_result']:
                marker = info['get_group_response']['get_group_result']['marker']
            else:
                marker = None
                truncated = False
            users += info['get_group_response']['get_group_result']['users']
        return users
    except boto.exception.BotoServerError as e:
        log.debug(e)
        log.error('Failed to get members for IAM group %s.', group_name)
        return False
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_iam.py#L408-L440
group by count
python
def count(self, *criterion, **kwargs):
    """
    Count the number of models matching some criterion.
    """
    query = self._query(*criterion)
    query = self._filter(query, **kwargs)
    return query.count()
https://github.com/globality-corp/microcosm-postgres/blob/43dd793b1fc9b84e4056700f350e79e0df5ff501/microcosm_postgres/store.py#L160-L167
group by count
python
def _find_all_groups(items, require_bam=True):
    """Find all groups
    """
    all_groups = []
    for data in items:
        batches = _get_batches(data, require_bam)
        all_groups.append(batches)
    return all_groups
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/multi.py#L57-L64
group by count
python
def count(self):
    '''
    Will return the total count of the objects that match the specified
    filters.::

        # counts the number of users created in the last 24 hours
        User.query.filter(created_at=(time.time()-86400, time.time())).count()
    '''
    filters = self._filters
    if self._order_by:
        filters += (self._order_by.lstrip('-'),)
    if not filters:
        # We can actually count entities here...
        size = _connect(self._model).hlen(self._model._namespace + '::')
        limit = self._limit or (0, 2**64)
        size = max(size - max(limit[0], 0), 0)
        return min(size, limit[1])
    return self._model._gindex.count(_connect(self._model), filters)
https://github.com/josiahcarlson/rom/blob/8b5607a856341df85df33422accc30ba9294dbdb/rom/query.py#L402-L420
group by count
python
def count(self, min, max):
    """ -> #int number of elements in the sorted set with a score
        between @min and @max.
    """
    return self._client.zcount(self.key_prefix, min, max)
https://github.com/jaredLunde/redis_structures/blob/b9cce5f5c85db5e12c292633ff8d04e3ae053294/redis_structures/__init__.py#L2120-L2124
group by count
python
def get_descendants_group_count(cls, parent=None):
    """
    Helper for a very common case: get a group of siblings and the number
    of *descendants* in every sibling.
    """

    #~
    # disclaimer: this is the FOURTH implementation I wrote for this
    # function. I really tried to make it return a queryset, but doing so
    # with a *single* query isn't trivial with Django's ORM.

    # ok, I DID manage to make Django's ORM return a queryset here,
    # defining two querysets, passing one subquery in the tables parameters
    # of .extra() of the second queryset, using the undocumented order_by
    # feature, and using a HORRIBLE hack to avoid django quoting the
    # subquery as a table, BUT (and there is always a but) the hack didn't
    # survive turning the QuerySet into a ValuesQuerySet, so I just used
    # good old SQL.
    # NOTE: in case there is interest, the hack to avoid django quoting the
    # subquery as a table, was adding the subquery to the alias cache of
    # the queryset's query object:
    #
    #     qset.query.quote_cache[subquery] = subquery
    #
    # If there is a better way to do this in an UNMODIFIED django 1.0, let
    # me know.
    #~

    cls = get_result_class(cls)

    vendor = cls.get_database_vendor('write')
    if parent:
        depth = parent.depth + 1
        params = cls._get_children_path_interval(parent.path)
        extrand = 'AND path BETWEEN %s AND %s'
    else:
        depth = 1
        params = []
        extrand = ''
    subpath = sql_substr("path", "1", "%(subpathlen)s", vendor=vendor)
    sql = (
        'SELECT * FROM %(table)s AS t1 INNER JOIN '
        ' (SELECT '
        ' ' + subpath + ' AS subpath, '
        ' COUNT(1)-1 AS count '
        ' FROM %(table)s '
        ' WHERE depth >= %(depth)s %(extrand)s'
        ' GROUP BY ' + subpath + ') AS t2 '
        ' ON t1.path=t2.subpath '
        ' ORDER BY t1.path'
    ) % {
        'table': connection.ops.quote_name(cls._meta.db_table),
        'subpathlen': depth * cls.steplen,
        'depth': depth,
        'extrand': extrand}
    cursor = cls._get_database_cursor('write')
    cursor.execute(sql, params)

    ret = []
    field_names = [field[0] for field in cursor.description]
    for node_data in cursor.fetchall():
        node = cls(**dict(zip(field_names, node_data[:-2])))
        node.descendants_count = node_data[-1]
        ret.append(node)
    return ret
https://github.com/django-treebeard/django-treebeard/blob/8042ee939cb45394909237da447f8925e3cc6aa3/treebeard/mp_tree.py#L848-L914
group by count
python
def count(self, q):
    """
    Shorthand for counting the results of a specific query.

    ## Arguments

    * `q` (str): The query to count. This will be executed as:
      `"SELECT COUNT(*) %s" % q`.

    ## Returns

    * `count` (int): The resulting count.
    """
    q = "SELECT COUNT(*) %s" % q
    return int(self.quick(q).split("\n")[1])
https://github.com/dfm/casjobs/blob/1cc3f5511cc254d776082909221787e3c037ac16/casjobs.py#L333-L348
group by count
python
def group(self):
    """returns the community.Group class for the current group"""
    split_count = self._url.lower().find("/content/")
    len_count = len('/content/')
    gURL = self._url[:self._url.lower().find("/content/")] + \
           "/community/" + self._url[split_count + len_count:]
    return CommunityGroup(url=gURL,
                          securityHandler=self._securityHandler,
                          proxy_url=self._proxy_url,
                          proxy_port=self._proxy_port)
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageorg/_content.py#L3149-L3159
group by count
python
def get_groups(self, filterTerm=None, groupName=None, groupDomain=None):
    """
    Return groups from the database
    """
    if groupDomain:
        groupDomain = groupDomain.split('.')[0].upper()

    cur = self.conn.cursor()

    if self.is_group_valid(filterTerm):
        cur.execute("SELECT * FROM groups WHERE id=? LIMIT 1", [filterTerm])
    elif groupName and groupDomain:
        cur.execute("SELECT * FROM groups WHERE LOWER(name)=LOWER(?) AND LOWER(domain)=LOWER(?)", [groupName, groupDomain])
    elif filterTerm and filterTerm != "":
        cur.execute("SELECT * FROM groups WHERE LOWER(name) LIKE LOWER(?)", ['%{}%'.format(filterTerm)])
    else:
        cur.execute("SELECT * FROM groups")

    results = cur.fetchall()
    cur.close()

    logging.debug('get_groups(filterTerm={}, groupName={}, groupDomain={}) => {}'.format(filterTerm, groupName, groupDomain, results))
    return results
https://github.com/byt3bl33d3r/CrackMapExec/blob/333f1c4e06884e85b2776459963ef85d182aba8e/cme/protocols/smb/database.py#L429-L453
group by count
python
def list(self, page=1, per_page=10, omit=None):
    """List groups by page.

    The API allows certain fields to be excluded from the results so
    that very large groups can be fetched without exceeding the
    maximum response size. At the time of this writing, only
    'memberships' is supported.

    :param int page: page number
    :param int per_page: number of groups per page
    :param str omit: a comma-separated list of fields to exclude
    :return: a list of groups
    :rtype: :class:`~groupy.pagers.GroupList`
    """
    return pagers.GroupList(self, self._raw_list, page=page,
                            per_page=per_page, omit=omit)
https://github.com/rhgrant10/Groupy/blob/ffd8cac57586fa1c218e3b4bfaa531142c3be766/groupy/api/groups.py#L23-L38
group by count
python
def handle_group(self, text, capture=None, is_format=False):
    """Handle groups."""

    if capture is None:
        capture = tuple() if self.is_bytes else ''

    if len(self.result) > 1:
        self.literal_slots.append("".join(self.result))
        if is_format:
            self.literal_slots.extend(["\\g<", text, ">"])
        else:
            self.literal_slots.append(text)
        del self.result[:]
        self.result.append("")
        self.slot += 1
    elif is_format:
        self.literal_slots.extend(["\\g<", text, ">"])
    else:
        self.literal_slots.append(text)

    self.group_slots.append(
        (
            self.slot,
            (
                (self.span_stack[-1] if self.span_stack else None),
                self.get_single_stack(),
                capture
            )
        )
    )
    self.slot += 1
https://github.com/facelessuser/backrefs/blob/3b3d60f5d57b02044f880aa29c9c5add0e31a34f/backrefs/_bre_parse.py#L1307-L1337
group by count
python
def group(self):
    """
    :rtype: list
    """
    lowerbound = self.lowerbound
    age_group_list = []

    for age_group in self.et.findall("group"):
        upperbound = age_group.attrib["upperbound"]
        try:
            poppercent = age_group.attrib["poppercent"]
        except KeyError:
            poppercent = None
        age_group_list.append({"lowerbound": lowerbound,
                               "upperbound": upperbound,
                               "poppercent": poppercent})
        lowerbound = upperbound

    return age_group_list
https://github.com/vecnet/vecnet.openmalaria/blob/795bc9d1b81a6c664f14879edda7a7c41188e95a/vecnet/openmalaria/scenario/monitoring.py#L22-L38
group by count
python
def get_group_gn(dim, dim_per_gp, num_groups):
    """get number of groups used by GroupNorm, based on number of channels."""
    assert dim_per_gp == -1 or num_groups == -1, \
        "GroupNorm: can only specify G or C/G."

    if dim_per_gp > 0:
        assert dim % dim_per_gp == 0, \
            "dim: {}, dim_per_gp: {}".format(dim, dim_per_gp)
        group_gn = dim // dim_per_gp
    else:
        assert dim % num_groups == 0, \
            "dim: {}, num_groups: {}".format(dim, num_groups)
        group_gn = num_groups

    return group_gn
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/object_detection/pytorch/maskrcnn_benchmark/modeling/make_layers.py#L14-L28
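Two quick worked calls, one per mode (the channel widths are chosen for illustration):

print(get_group_gn(256, -1, 32))  # fixed G: 32 groups
print(get_group_gn(256, 8, -1))   # fixed C/G: 256 // 8 = 32 groups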
group by count
python
def _get_groups(self, data):
    """ Get all groups defined """
    groups = []
    for attribute in SOURCE_KEYS:
        for k, v in data[attribute].items():
            if k is None:
                k = 'Sources'
            if k not in groups:
                groups.append(k)
    for k, v in data['include_files'].items():
        if k is None:
            k = 'Includes'
        if k not in groups:
            groups.append(k)
    return groups
https://github.com/project-generator/project_generator/blob/a361be16eeb5a8829ff5cd26850ddd4b264296fe/project_generator/tools/tool.py#L177-L191
group by count
python
def groupBy(groups_in, classifier, fun_desc='?', keep_uniques=False,
            *args, **kwargs):
    """Subdivide groups of paths according to a function.

    :param groups_in: Grouped sets of paths.
    :type groups_in: :class:`~__builtins__.dict` of iterables

    :param classifier: Function to group a list of paths by some attribute.
    :type classifier: ``function(list, *args, **kwargs) -> str``

    :param fun_desc: Human-readable term for what the classifier operates on.
        (Used in log messages)
    :type fun_desc: :class:`~__builtins__.str`

    :param keep_uniques: If ``False``, discard groups with only one member.
    :type keep_uniques: :class:`~__builtins__.bool`

    :returns: A dict mapping classifier keys to groups of matches.
    :rtype: :class:`~__builtins__.dict`

    :attention: Grouping functions generally use a :class:`~__builtins__.set`
        ``groups`` as extra protection against accidentally counting a
        given file twice. (Complimentary to use of :func:`os.path.realpath`
        in :func:`~fastdupes.getPaths`)

    .. todo:: Find some way to bring back the file-by-file status text
    """
    groups, count, group_count = {}, 0, len(groups_in)
    for pos, paths in enumerate(groups_in.values()):
        out.write("Subdividing group %d of %d by %s... (%d files examined, %d "
                  "in current group)" % (
                      pos + 1, group_count, fun_desc, count, len(paths)
                  ))

        for key, group in classifier(paths, *args, **kwargs).items():
            groups.setdefault(key, set()).update(group)
            count += len(group)

    if not keep_uniques:
        # Return only the groups with more than one file.
        groups = dict([(x, groups[x]) for x in groups if len(groups[x]) > 1])

    out.write("Found %s sets of files with identical %s. (%d files examined)"
              % (len(groups), fun_desc, count), newline=True)
    return groups
https://github.com/ssokolow/fastdupes/blob/0334545885445834307c075a445fba9fe6f0c9e7/fastdupes.py#L217-L263
group by count
python
def countDistinct(col, *cols):
    """Returns a new :class:`Column` for distinct count of ``col`` or ``cols``.

    >>> df.agg(countDistinct(df.age, df.name).alias('c')).collect()
    [Row(c=2)]

    >>> df.agg(countDistinct("age", "name").alias('c')).collect()
    [Row(c=2)]
    """
    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.countDistinct(_to_java_column(col),
                                         _to_seq(sc, cols, _to_java_column))
    return Column(jc)
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L421-L432
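For the section's literal "group by count", the idiomatic PySpark form is DataFrame.groupBy(...).count(); a small self-contained example (group row order may vary):

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([("a",), ("a",), ("b",)], ["key"])
df.groupBy("key").count().show()
# +---+-----+
# |key|count|
# +---+-----+
# |  a|    2|
# |  b|    1|
# +---+-----+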
group by count
python
def group(self, labels):
    """ group as list """
    unique_labels, groupxs = self.group_indicies(labels)
    groups = [self.take(idxs) for idxs in groupxs]
    return unique_labels, groups
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dev.py#L2887-L2891