query: string (lengths 9–60)
language: string (1 class)
code: string (lengths 105–25.7k)
url: string (lengths 91–217)
binomial distribution
python
def rbinomial(n, p, size=None):
    """
    Random binomial variates.
    """
    if not size:
        size = None
    return np.random.binomial(np.ravel(n), np.ravel(p), size)
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L858-L864
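A minimal usage sketch for the wrapper above (not from the dataset row; it only assumes NumPy, which the function body already requires as np):

import numpy as np

def rbinomial(n, p, size=None):
    """Random binomial variates, as in the snippet above."""
    if not size:
        size = None
    return np.random.binomial(np.ravel(n), np.ravel(p), size)

print(rbinomial(10, 0.5))             # e.g. array([6]); np.ravel makes scalars 1-element arrays
print(rbinomial([10, 20], [0.5, 0.1]))  # one draw per (n, p) pair, e.g. array([4, 3])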
binomial distribution
python
def binomial(n, k):
    """
    Binomial coefficient

    >>> binomial(5, 2)
    10
    >>> binomial(10, 5)
    252
    """
    if n == k:
        return 1
    assert n > k, "Attempting to call binomial(%d,%d)" % (n, k)
    return factorial(n) // (factorial(k) * factorial(n - k))
https://github.com/chemlab/chemlab/blob/c8730966316d101e24f39ac3b96b51282aba0abe/chemlab/qc/utils.py#L30-L40
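A quick cross-check of the factorial-based coefficient above against the standard library (math.comb, Python 3.8+). Note the snippet asserts n > k whenever n != k, so k > n raises instead of returning 0:

from math import comb, factorial

def binomial(n, k):
    if n == k:
        return 1
    assert n > k, "Attempting to call binomial(%d,%d)" % (n, k)
    return factorial(n) // (factorial(k) * factorial(n - k))

for n, k in [(5, 2), (10, 5), (7, 0)]:
    assert binomial(n, k) == comb(n, k)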
binomial distribution
python
def multinomial_pdf(n, p):
    r"""
    Returns the PDF of the multinomial distribution
    :math:`\operatorname{Multinomial}(N, n, p) = \frac{N!}{n_1!\cdots n_k!} p_1^{n_1}\cdots p_k^{n_k}`

    :param np.ndarray n: Array of outcome integers of shape ``(sides, ...)``
        where sides is the number of sides on the dice and summing over this
        index indicates the number of rolls for the given experiment.
    :param np.ndarray p: Array of (assumed) probabilities of shape
        ``(sides, ...)`` or ``(sides-1, ...)`` with the rest of the dimensions
        the same as ``n``. If ``sides-1``, the last probability is chosen so
        that the probabilities of all sides sum to 1. If ``sides`` is the
        last index, these probabilities are assumed to sum to 1.

    Note that the numbers of experiments don't need to be given because
    they are implicit in the sum over the 0 index of ``n``.
    """
    # work in log space to avoid overflow
    log_N_fac = gammaln(np.sum(n, axis=0) + 1)[np.newaxis, ...]
    log_n_fac_sum = np.sum(gammaln(n + 1), axis=0)

    # since working in log space, we need special
    # consideration at p=0. deal with p=0, n>0 later.
    def nlogp(n, p):
        result = np.zeros(p.shape)
        mask = p != 0
        result[mask] = n[mask] * np.log(p[mask])
        return result

    if p.shape[0] == n.shape[0] - 1:
        ep = np.empty(n.shape)
        ep[:p.shape[0], ...] = p
        ep[-1, ...] = 1 - np.sum(p, axis=0)
    else:
        ep = p
    log_p_sum = np.sum(nlogp(n, ep), axis=0)

    probs = np.exp(log_N_fac - log_n_fac_sum + log_p_sum)

    # if n_k > 0 but p_k = 0, the whole probability must be 0
    mask = np.sum(np.logical_and(n != 0, ep == 0), axis=0) == 0
    probs = mask * probs

    return probs[0, ...]
https://github.com/QInfer/python-qinfer/blob/8170c84a0be1723f8c6b09e0d3c7a40a886f1fe3/src/qinfer/utils.py#L113-L161
binomial distribution
python
def Binomial(n, p, tag=None):
    """
    A Binomial random variate

    Parameters
    ----------
    n : int
        The number of trials
    p : scalar
        The probability of success
    """
    assert (
        int(n) == n and n > 0
    ), 'Binomial number of trials "n" must be an integer greater than zero'
    assert (
        0 < p < 1
    ), 'Binomial probability "p" must be between zero and one, non-inclusive'
    return uv(ss.binom(n, p), tag=tag)
https://github.com/tisimst/mcerp/blob/2bb8260c9ad2d58a806847f1b627b6451e407de1/mcerp/__init__.py#L1150-L1167
binomial distribution
python
def pdf(self):
    r"""
    Generate the vector of probabilities for the Beta-binomial
    (n, a, b) distribution.

    The Beta-binomial distribution takes the form

    .. math::
        p(k \,|\, n, a, b) =
        {n \choose k} \frac{B(k + a, n - k + b)}{B(a, b)},
        \qquad k = 0, \ldots, n,

    where :math:`B` is the beta function.

    Parameters
    ----------
    n : scalar(int)
        First parameter to the Beta-binomial distribution
    a : scalar(float)
        Second parameter to the Beta-binomial distribution
    b : scalar(float)
        Third parameter to the Beta-binomial distribution

    Returns
    -------
    probs : array_like(float)
        Vector of probabilities over k
    """
    n, a, b = self.n, self.a, self.b
    k = np.arange(n + 1)
    probs = binom(n, k) * beta(k + a, n - k + b) / beta(a, b)
    return probs
https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/distributions.py#L64-L96
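A self-contained sanity check of the Beta-binomial formula above, using scipy.special (which provides both binom and beta); the probabilities over k = 0..n should sum to 1:

import numpy as np
from scipy.special import binom, beta

n, a, b = 10, 2.0, 3.0
k = np.arange(n + 1)
probs = binom(n, k) * beta(k + a, n - k + b) / beta(a, b)
assert np.isclose(probs.sum(), 1.0)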
binomial distribution
python
def multinomial(data, shape=_Null, get_prob=True, dtype='int32', **kwargs):
    """Concurrent sampling from multiple multinomial distributions.

    .. note:: The input distribution must be normalized, i.e. `data` must sum to
              1 along its last dimension.

    Parameters
    ----------
    data : Symbol
        An *n* dimensional array whose last dimension has length `k`, where
        `k` is the number of possible outcomes of each multinomial distribution.
        For example, data with shape `(m, n, k)` specifies `m*n` multinomial
        distributions each with `k` possible outcomes.
    shape : int or tuple of ints, optional
        The number of samples to draw from each distribution. If shape is empty
        one sample will be drawn from each distribution.
    get_prob : bool, optional
        If true, a second array containing log likelihood of the drawn
        samples will also be returned.
        This is usually used for reinforcement learning, where you can provide
        reward as head gradient w.r.t. this array to estimate gradient.
    dtype : str or numpy.dtype, optional
        Data type of the sample output array. The default is int32.
        Note that the data type of the log likelihood array is the same as that of `data`.

    Returns
    -------
    Symbol
        For input `data` with `n` dimensions and shape `(d1, d2, ..., dn-1, k)`, and
        input `shape` with shape `(s1, s2, ..., sx)`, returns a Symbol that resolves
        to shape `(d1, d2, ..., dn-1, s1, s2, ..., sx)`. The `s1, s2, ..., sx` dimensions
        of the returned Symbol's resolved value will consist of 0-indexed values sampled
        from each respective multinomial distribution provided in the `k` dimension of
        `data`.

        For the case `n`=1, and `x`=1 (one shape dimension), returned Symbol will
        resolve to shape `(s1,)`.

        If `get_prob` is set to True, this function returns a Symbol that will resolve
        to a list of outputs: `[ndarray_output, log_likelihood_output]`, where
        `log_likelihood_output` will resolve to the same shape as the sampled outputs.
    """
    return _internal._sample_multinomial(data, shape, get_prob, dtype=dtype, **kwargs)
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/symbol/random.py#L284-L325
binomial distribution
python
def generalized_negative_binomial(mu=1, alpha=1, shape=_Null, dtype=_Null, **kwargs):
    """Draw random samples from a generalized negative binomial distribution.

    Samples are distributed according to a generalized negative binomial
    distribution parametrized by *mu* (mean) and *alpha* (dispersion).
    *alpha* is defined as *1/k* where *k* is the failure limit of the
    number of unsuccessful experiments (generalized to real numbers).
    Samples will always be returned as a floating point data type.

    Parameters
    ----------
    mu : float or Symbol, optional
        Mean of the negative binomial distribution.
    alpha : float or Symbol, optional
        Alpha (dispersion) parameter of the negative binomial distribution.
    shape : int or tuple of ints, optional
        The number of samples to draw. If shape is, e.g., `(m, n)` and `mu` and
        `alpha` are scalars, output shape will be `(m, n)`. If `mu` and `alpha`
        are Symbols with shape, e.g., `(x, y)`, then output will have shape
        `(x, y, m, n)`, where `m*n` samples are drawn for each `(mu, alpha)` pair.
    dtype : {'float16', 'float32', 'float64'}, optional
        Data type of output samples. Default is 'float32'

    Returns
    -------
    Symbol
        If input `shape` has dimensions, e.g., `(m, n)`, and `mu` and `alpha` are
        scalars, returned Symbol will resolve to shape `(m, n)`. If `mu` and `alpha`
        are Symbols with shape, e.g., `(x, y)`, returned Symbol will resolve to shape
        `(x, y, m, n)`, where `m*n` samples are drawn for each `(mu, alpha)` pair.
    """
    return _random_helper(_internal._random_generalized_negative_binomial,
                          _internal._sample_generalized_negative_binomial,
                          [mu, alpha], shape, dtype, kwargs)
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/symbol/random.py#L248-L281
binomial distribution
python
def multinomial(data, shape=_Null, get_prob=False, out=None, dtype='int32', **kwargs):
    """Concurrent sampling from multiple multinomial distributions.

    .. note:: The input distribution must be normalized, i.e. `data` must sum to
              1 along its last dimension.

    Parameters
    ----------
    data : NDArray
        An *n* dimensional array whose last dimension has length `k`, where
        `k` is the number of possible outcomes of each multinomial distribution.
        For example, data with shape `(m, n, k)` specifies `m*n` multinomial
        distributions each with `k` possible outcomes.
    shape : int or tuple of ints, optional
        The number of samples to draw from each distribution. If shape is empty
        one sample will be drawn from each distribution.
    get_prob : bool, optional
        If true, a second array containing log likelihood of the drawn
        samples will also be returned.
        This is usually used for reinforcement learning, where you can provide
        reward as head gradient w.r.t. this array to estimate gradient.
    out : NDArray, optional
        Store output to an existing NDArray.
    dtype : str or numpy.dtype, optional
        Data type of the sample output array. The default is int32.
        Note that the data type of the log likelihood array is the same as that of `data`.

    Returns
    -------
    List, or NDArray
        For input `data` with `n` dimensions and shape `(d1, d2, ..., dn-1, k)`, and
        input `shape` with shape `(s1, s2, ..., sx)`, returns an NDArray with shape
        `(d1, d2, ..., dn-1, s1, s2, ..., sx)`. The `s1, s2, ..., sx` dimensions of the
        returned NDArray consist of 0-indexed values sampled from each respective
        multinomial distribution provided in the `k` dimension of `data`.

        For the case `n`=1, and `x`=1 (one shape dimension), returned NDArray has
        shape `(s1,)`.

        If `get_prob` is set to True, this function returns a list of format:
        `[ndarray_output, log_likelihood_output]`, where `log_likelihood_output` is an
        NDArray of the same shape as the sampled outputs.

    Examples
    --------
    >>> probs = mx.nd.array([0, 0.1, 0.2, 0.3, 0.4])
    >>> mx.nd.random.multinomial(probs)
    [3]
    <NDArray 1 @cpu(0)>
    >>> probs = mx.nd.array([[0, 0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1, 0]])
    >>> mx.nd.random.multinomial(probs)
    [3 1]
    <NDArray 2 @cpu(0)>
    >>> mx.nd.random.multinomial(probs, shape=2)
    [[4 4]
     [1 2]]
    <NDArray 2x2 @cpu(0)>
    >>> mx.nd.random.multinomial(probs, get_prob=True)
    [3 2]
    <NDArray 2 @cpu(0)>
    [-1.20397282 -1.60943794]
    <NDArray 2 @cpu(0)>
    """
    return _internal._sample_multinomial(data, shape, get_prob, out=out, dtype=dtype, **kwargs)
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/random.py#L500-L562
binomial distribution
python
def Bernstein(n, k):
    """Bernstein polynomial.
    """
    coeff = binom(n, k)

    def _bpoly(x):
        return coeff * x ** k * (1 - x) ** (n - k)

    return _bpoly
https://github.com/matplotlib/viscm/blob/cb31d0a6b95bcb23fd8f48d23e28e415db5ddb7c/viscm/bezierbuilder.py#L299-L308
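The Bernstein polynomials of a given degree form a partition of unity, which gives a cheap test of the closure above (scipy.special.binom assumed, as in the snippet):

import numpy as np
from scipy.special import binom

def Bernstein(n, k):
    coeff = binom(n, k)
    def _bpoly(x):
        return coeff * x ** k * (1 - x) ** (n - k)
    return _bpoly

x = np.linspace(0, 1, 7)
total = sum(Bernstein(5, k)(x) for k in range(6))
assert np.allclose(total, 1.0)  # sum over k of B_{k,5}(x) == 1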
binomial distribution
python
def negative_binomial(k=1, p=1, shape=_Null, dtype=_Null, ctx=None,
                      out=None, **kwargs):
    """Draw random samples from a negative binomial distribution.

    Samples are distributed according to a negative binomial distribution
    parametrized by *k* (limit of unsuccessful experiments) and *p* (failure
    probability in each experiment). Samples will always be returned as a
    floating point data type.

    Parameters
    ----------
    k : float or NDArray, optional
        Limit of unsuccessful experiments, > 0.
    p : float or NDArray, optional
        Failure probability in each experiment, >= 0 and <= 1.
    shape : int or tuple of ints, optional
        The number of samples to draw. If shape is, e.g., `(m, n)` and `k` and
        `p` are scalars, output shape will be `(m, n)`. If `k` and `p`
        are NDArrays with shape, e.g., `(x, y)`, then output will have shape
        `(x, y, m, n)`, where `m*n` samples are drawn for each `(k, p)` pair.
    dtype : {'float16', 'float32', 'float64'}, optional
        Data type of output samples. Default is 'float32'
    ctx : Context, optional
        Device context of output. Default is current context. Overridden by
        `k.context` when `k` is an NDArray.
    out : NDArray, optional
        Store output to an existing NDArray.

    Returns
    -------
    NDArray
        If input `shape` has shape, e.g., `(m, n)` and `k` and `p` are scalars,
        output shape will be `(m, n)`. If `k` and `p` are NDArrays with shape,
        e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n`
        samples are drawn for each `(k, p)` pair.

    Examples
    --------
    >>> mx.nd.random.negative_binomial(10, 0.5)
    [ 4.]
    <NDArray 1 @cpu(0)>
    >>> mx.nd.random.negative_binomial(10, 0.5, shape=(2,))
    [ 3.  4.]
    <NDArray 2 @cpu(0)>
    >>> k = mx.nd.array([1,2,3])
    >>> p = mx.nd.array([0.2,0.4,0.6])
    >>> mx.nd.random.negative_binomial(k, p, shape=2)
    [[ 3.  2.]
     [ 4.  4.]
     [ 0.  5.]]
    <NDArray 3x2 @cpu(0)>
    """
    return _random_helper(_internal._random_negative_binomial,
                          _internal._sample_negative_binomial,
                          [k, p], shape, dtype, ctx, out, kwargs)
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/random.py#L386-L439
binomial distribution
python
def Distribution(pos, size, counts, dtype):
    """
    Returns an array of length size and type dtype that is everywhere 0,
    except in the indices listed in sequence pos. The non-zero indices
    contain a normalized distribution based on the counts.

    :param pos: A single integer or sequence of integers that specify
        the position of ones to be set.
    :param size: The total size of the array to be returned.
    :param counts: The number of times we have observed each index.
    :param dtype: The element type (compatible with NumPy array())
        of the array to be returned.
    :returns: An array of length size and element type dtype.
    """
    x = numpy.zeros(size, dtype=dtype)
    if hasattr(pos, '__iter__'):
        # calculate normalization constant
        total = 0
        for i in pos:
            total += counts[i]
        total = float(total)
        # set included positions to normalized probability
        for i in pos:
            x[i] = counts[i] / total
    # If we don't have a set of positions, assume there's only one position
    else:
        x[pos] = 1
    return x
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/math/stats.py#L156-L183
binomial distribution
python
def binomial(n):
    """
    Return all binomial coefficients for a given order.

    For n > 5, scipy.special.binom is used, below we hardcode
    to avoid the scipy.special dependency.

    Parameters
    --------------
    n : int
        Order

    Returns
    ---------------
    binom : (n + 1,) int
        Binomial coefficients of a given order
    """
    if n == 1:
        return [1, 1]
    elif n == 2:
        return [1, 2, 1]
    elif n == 3:
        return [1, 3, 3, 1]
    elif n == 4:
        return [1, 4, 6, 4, 1]
    elif n == 5:
        return [1, 5, 10, 10, 5, 1]
    else:
        from scipy.special import binom
        return binom(n, np.arange(n + 1))
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/path/curve.py#L100-L129
binomial distribution
python
def distribution(self, limit=1024):
    """
    Build the distribution of distinct values
    """
    res = self._qexec("%s, count(*) as __cnt" % self.name(),
                      group="%s" % self.name(),
                      order="__cnt DESC LIMIT %d" % limit)
    dist = []
    cnt = self._table.size()
    for i, r in enumerate(res):
        dist.append(list(r) + [i, r[1] / float(cnt)])

    self._distribution = pd.DataFrame(dist, columns=["value", "cnt", "r", "fraction"])
    self._distribution.index = self._distribution.r
    return self._distribution
https://github.com/grundprinzip/pyxplorer/blob/34c1d166cfef4a94aeb6d5fcb3cbb726d48146e2/pyxplorer/types.py#L91-L105
binomial distribution
python
def normal_distribution(self, pos, sample):
    """Returns the value of the normal distribution, given the weight's sample
    and target position.

    Parameters
    ----------
    pos : int
        the epoch number of the position you want to predict
    sample : list
        sample is a (1 * NUM_OF_FUNCTIONS) matrix, representing {w1, w2, ..., wk}

    Returns
    -------
    float
        the value of the normal distribution
    """
    curr_sigma_sq = self.sigma_sq(sample)
    delta = self.trial_history[pos - 1] - self.f_comb(pos, sample)
    return np.exp(np.square(delta) / (-2.0 * curr_sigma_sq)) / np.sqrt(2 * np.pi * np.sqrt(curr_sigma_sq))
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/src/sdk/pynni/nni/curvefitting_assessor/model_factory.py#L204-L221
binomial distribution
python
def build_distribution():
    """Build distributions of the code."""
    result = invoke.run('python setup.py sdist bdist_egg bdist_wheel',
                        warn=True, hide=True)
    if result.ok:
        print("[{}GOOD{}] Distribution built without errors."
              .format(GOOD_COLOR, RESET_COLOR))
    else:
        print('[{}ERROR{}] Something broke trying to package your '
              'code...'.format(ERROR_COLOR, RESET_COLOR))
        print(result.stderr)
        sys.exit(1)
https://github.com/MinchinWeb/minchin.releaser/blob/cfc7f40ac4852b46db98aa1bb8fcaf138a6cdef4/minchin/releaser/make_release.py#L171-L182
binomial distribution
python
def binom(n, k):
    r"""Binomial coefficients for :math:`n \choose k`

    :param n,k: non-negative integers
    :complexity: O(k)
    """
    prod = 1
    for i in range(k):
        prod = (prod * (n - i)) // (i + 1)
    return prod
https://github.com/jilljenn/tryalgo/blob/89a4dd9655e7b6b0a176f72b4c60d0196420dfe1/tryalgo/arithm.py#L43-L52
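The integer division in the loop above is exact at every step: after processing index i, the running product equals C(n, i+1), which is an integer. A brief check against math.comb (Python 3.8+):

from math import comb

def binom(n, k):
    prod = 1
    for i in range(k):
        prod = (prod * (n - i)) // (i + 1)
    return prod

assert all(binom(20, k) == comb(20, k) for k in range(21))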
binomial distribution
python
def distribution(self, start=None, end=None, normalized=True, mask=None):
    """Calculate the distribution of values over the given time range from
    `start` to `end`.

    Args:
        start (orderable, optional): The lower time bound of when to
            calculate the distribution. By default, the first time point
            will be used.
        end (orderable, optional): The upper time bound of when to
            calculate the distribution. By default, the last time point
            will be used.
        normalized (bool): If True, distribution will sum to one. If False
            and the time values of the TimeSeries are datetimes, the units
            will be seconds.
        mask (:obj:`TimeSeries`, optional): A domain on which to calculate
            the distribution.

    Returns:
        :obj:`Histogram` with the results.
    """
    start, end, mask = self._check_boundaries(start, end, mask=mask)

    counter = histogram.Histogram()
    for start, end, _ in mask.iterperiods(value=True):
        for t0, t1, value in self.iterperiods(start, end):
            duration = utils.duration_to_number(
                t1 - t0,
                units='seconds',
            )
            try:
                counter[value] += duration
            except histogram.UnorderableElements as e:
                counter = histogram.Histogram.from_dict(
                    dict(counter), key=hash)
                counter[value] += duration

    # divide by total duration if result needs to be normalized
    if normalized:
        return counter.normalized()
    else:
        return counter
https://github.com/datascopeanalytics/traces/blob/420611151a05fea88a07bc5200fefffdc37cc95b/traces/timeseries.py#L553-L601
binomial distribution
python
def zipf_distribution(nbr_symbols, alpha):
    """Helper function: Create a Zipf distribution.

    Args:
        nbr_symbols: number of symbols to use in the distribution.
        alpha: float, Zipf's Law Distribution parameter. Default = 1.5.
            Usually for modelling natural text distribution is in
            the range [1.1-1.6].

    Returns:
        distr_map: list of float, Zipf's distribution over nbr_symbols.
    """
    tmp = np.power(np.arange(1, nbr_symbols + 1), -alpha)
    zeta = np.r_[0.0, np.cumsum(tmp)]
    return [x / zeta[-1] for x in zeta]
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/algorithmic.py#L208-L223
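Reading the body above, the helper returns the normalized cumulative sums (a CDF with a leading 0, of length nbr_symbols + 1) rather than a pmf, which is convenient for inverse-CDF sampling. A small check of that reading:

import numpy as np

def zipf_distribution(nbr_symbols, alpha):
    tmp = np.power(np.arange(1, nbr_symbols + 1), -alpha)
    zeta = np.r_[0.0, np.cumsum(tmp)]
    return [x / zeta[-1] for x in zeta]

cdf = zipf_distribution(5, 1.5)
assert len(cdf) == 6 and cdf[0] == 0.0 and np.isclose(cdf[-1], 1.0)
pmf = np.diff(cdf)  # successive differences recover the Zipf pmf
assert np.isclose(pmf.sum(), 1.0)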
binomial distribution
python
def plot_distribution(samples, label, figure=None):
    """ Plot a distribution and print statistics about it"""
    from scipy import stats
    import matplotlib.pyplot as plt

    quant = [16, 50, 84]
    quantiles = dict(six.moves.zip(quant, np.percentile(samples, quant)))
    std = np.std(samples)

    if isinstance(samples[0], u.Quantity):
        unit = samples[0].unit
    else:
        unit = ""

    if isinstance(std, u.Quantity):
        std = std.value

    dist_props = "{label} distribution properties:\n \
$-$ median: ${median}$ {unit}, std: ${std}$ {unit}\n \
$-$ Median with uncertainties based on \n \
  the 16th and 84th percentiles ($\sim$1$\sigma$):\n\
      {label} = {value_error} {unit}".format(
        label=label,
        median=_latex_float(quantiles[50]),
        std=_latex_float(std),
        value_error=_latex_value_error(
            quantiles[50],
            quantiles[50] - quantiles[16],
            quantiles[84] - quantiles[50],
        ),
        unit=unit,
    )

    if figure is None:
        f = plt.figure()
    else:
        f = figure

    ax = f.add_subplot(111)
    f.subplots_adjust(bottom=0.40, top=0.93, left=0.06, right=0.95)
    f.text(0.2, 0.27, dist_props, ha="left", va="top")

    histnbins = min(max(25, int(len(samples) / 100.0)), 100)
    xlabel = "" if label is None else label
    if isinstance(samples, u.Quantity):
        samples_nounit = samples.value
    else:
        samples_nounit = samples

    n, x, _ = ax.hist(
        samples_nounit,
        histnbins,
        histtype="stepfilled",
        color=color_cycle[0],
        lw=0,
        density=True,
    )

    kde = stats.kde.gaussian_kde(samples_nounit)
    ax.plot(x, kde(x), color="k", label="KDE")

    ax.axvline(
        quantiles[50],
        ls="--",
        color="k",
        alpha=0.5,
        lw=2,
        label="50% quantile",
    )
    ax.axvspan(
        quantiles[16],
        quantiles[84],
        color=(0.5,) * 3,
        alpha=0.25,
        label="68% CI",
        lw=0,
    )
    # ax.legend()
    for l in ax.get_xticklabels():
        l.set_rotation(45)
    # [l.set_rotation(45) for l in ax.get_yticklabels()]

    if unit != "":
        xlabel += " [{0}]".format(unit)
    ax.set_xlabel(xlabel)

    ax.set_title("posterior distribution of {0}".format(label))
    ax.set_ylim(top=n.max() * 1.05)

    return f
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/plot.py#L1379-L1469
binomial distribution
python
def marginal_distribution(self, variables, inplace=True):
    """
    Returns the marginal distribution over variables.

    Parameters
    ----------
    variables: string, list, tuple, set, dict
        Variable or list of variables over which marginal distribution needs
        to be calculated
    inplace: Boolean (default True)
        If False return a new instance of JointProbabilityDistribution

    Examples
    --------
    >>> import numpy as np
    >>> from pgmpy.factors.discrete import JointProbabilityDistribution
    >>> values = np.random.rand(12)
    >>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 3, 2], values/np.sum(values))
    >>> prob.marginal_distribution(['x1', 'x2'])
    >>> print(prob)
    x1    x2      P(x1,x2)
    ----  ----  ----------
    x1_0  x2_0      0.1502
    x1_0  x2_1      0.1626
    x1_0  x2_2      0.1197
    x1_1  x2_0      0.2339
    x1_1  x2_1      0.1996
    x1_1  x2_2      0.1340
    """
    return self.marginalize(
        list(set(list(self.variables)) -
             set(variables if isinstance(variables, (list, set, dict, tuple)) else [variables])),
        inplace=inplace)
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/factors/discrete/JointProbabilityDistribution.py#L101-L133
binomial distribution
python
def _flat_sample_distributions(self, sample_shape=(), seed=None, value=None):
    """Executes `model`, creating both samples and distributions."""
    ds = []
    values_out = []
    seed = seed_stream.SeedStream('JointDistributionCoroutine', seed)
    gen = self._model()
    index = 0
    d = next(gen)
    try:
        while True:
            actual_distribution = d.distribution if isinstance(d, self.Root) else d
            ds.append(actual_distribution)
            if (value is not None and len(value) > index and
                    value[index] is not None):
                seed()
                next_value = value[index]
            else:
                next_value = actual_distribution.sample(
                    sample_shape=sample_shape if isinstance(d, self.Root) else (),
                    seed=seed())
            values_out.append(next_value)
            index += 1
            d = gen.send(next_value)
    except StopIteration:
        pass
    return ds, values_out
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/joint_distribution_coroutine.py#L170-L195
binomial distribution
python
def rnegative_binomial(mu, alpha, size=None):
    """
    Random negative binomial variates.
    """
    # Using gamma-poisson mixture rather than numpy directly
    # because numpy apparently rounds
    mu = np.asarray(mu, dtype=float)
    pois_mu = np.random.gamma(alpha, mu / alpha, size)
    return np.random.poisson(pois_mu, size)
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L2065-L2073
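For the gamma-Poisson mixture used above, the marginal distribution is negative binomial with mean mu and variance mu + mu**2/alpha. A quick empirical check of those moments (only NumPy assumed):

import numpy as np

np.random.seed(0)
mu, alpha, size = 5.0, 2.0, 200_000
pois_mu = np.random.gamma(alpha, mu / alpha, size)  # mean mu, variance mu**2/alpha
draws = np.random.poisson(pois_mu, size)

assert abs(draws.mean() - mu) < 0.05
assert abs(draws.var() - (mu + mu ** 2 / alpha)) < 0.5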
binomial distribution
python
def sample_multinomial(N, p, size=None):
    r"""
    Draws fixed number of samples N from different multinomial distributions
    (with the same number of dice sides).

    :param int N: How many samples to draw from each distribution.
    :param np.ndarray p: Probabilities specifying each distribution.
        Sum along axis 0 should be 1.
    :param size: Output shape. ``int`` or tuple of ``int``s. If the given
        shape is, e.g., ``(m, n, k)``, then m * n * k samples are drawn
        for each distribution. Default is None, in which case a single
        value is returned for each distribution.

    :rtype: np.ndarray
    :return: Array of shape ``(p.shape, size)`` or p.shape if size is ``None``.
    """
    # ensure s is array
    s = np.array([1]) if size is None else np.array([size]).flatten()

    def take_samples(ps):
        # we have to flatten to make apply_along_axis work.
        return np.random.multinomial(N, ps, np.prod(s)).flatten()

    # should have shape (prod(size)*ps.shape[0], ps.shape[1:])
    samples = np.apply_along_axis(take_samples, 0, p)

    # should have shape (size, p.shape)
    samples = samples.reshape(np.concatenate([s, p.shape]))

    # should have shape (p.shape, size)
    samples = samples.transpose(np.concatenate(
        [np.arange(s.ndim, p.ndim + s.ndim), np.arange(s.ndim)]
    ))

    if size is None:
        # get rid of trailing singleton dimension.
        samples = samples[..., 0]

    return samples
https://github.com/QInfer/python-qinfer/blob/8170c84a0be1723f8c6b09e0d3c7a40a886f1fe3/src/qinfer/utils.py#L163-L202
binomial distribution
python
def pore_size_distribution(im, bins=10, log=True, voxel_size=1):
    r"""
    Calculate a pore-size distribution based on the image produced by the
    ``porosimetry`` or ``local_thickness`` functions.

    Parameters
    ----------
    im : ND-array
        The array containing the sizes of the largest sphere that overlaps
        each voxel. Obtained from either ``porosimetry`` or ``local_thickness``.
    bins : scalar or array_like
        Either an array of bin sizes to use, or the number of bins that
        should be automatically generated that span the data range.
    log : boolean
        If ``True`` (default) the size data is converted to log (base-10)
        values before processing. This can help
    voxel_size : scalar
        The size of a voxel side in preferred units. The default is 1, so
        the user can apply the scaling to the returned results after the fact.

    Returns
    -------
    result : named_tuple
        A named-tuple containing several values:

        *R* or *logR* - radius, equivalent to ``bin_centers``
        *pdf* - probability density function
        *cdf* - cumulative density function
        *satn* - phase saturation in differential form. For the cumulative
        saturation, just use *cdf* which is already normalized to 1.
        *bin_centers* - the center point of each bin
        *bin_edges* - locations of bin divisions, including 1 more value than
        the number of bins
        *bin_widths* - useful for passing to the ``width`` argument of
        ``matplotlib.pyplot.bar``

    Notes
    -----
    (1) To ensure the returned values represent actual sizes be sure to scale
    the distance transform by the voxel size first (``dt *= voxel_size``)

    plt.bar(psd.R, psd.satn, width=psd.bin_widths, edgecolor='k')
    """
    im = im.flatten()
    vals = im[im > 0] * voxel_size
    if log:
        vals = sp.log10(vals)
    h = _parse_histogram(sp.histogram(vals, bins=bins, density=True))
    psd = namedtuple('pore_size_distribution',
                     (log * 'log' + 'R', 'pdf', 'cdf', 'satn',
                      'bin_centers', 'bin_edges', 'bin_widths'))
    return psd(h.bin_centers, h.pdf, h.cdf, h.relfreq,
               h.bin_centers, h.bin_edges, h.bin_widths)
https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/metrics/__funcs__.py#L374-L434
binomial distribution
python
def binom_modulo(n, k, p):
    r"""Binomial coefficients for :math:`n \choose k`, modulo p

    :param n,k: non-negative integers
    :complexity: O(k)
    """
    prod = 1
    for i in range(k):
        prod = (prod * (n - i) * inv(i + 1, p)) % p
    return prod
https://github.com/jilljenn/tryalgo/blob/89a4dd9655e7b6b0a176f72b4c60d0196420dfe1/tryalgo/arithm.py#L57-L66
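The helper inv is not shown in the snippet above; under the usual assumption that p is prime, a modular inverse via Fermat's little theorem (Python's three-argument pow) makes it self-contained:

from math import comb

def inv(a, p):
    # modular inverse of a mod a prime p, by Fermat's little theorem
    return pow(a, p - 2, p)

def binom_modulo(n, k, p):
    prod = 1
    for i in range(k):
        prod = (prod * (n - i) * inv(i + 1, p)) % p
    return prod

p = 10 ** 9 + 7
assert binom_modulo(50, 20, p) == comb(50, 20) % p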
binomial distribution
python
def choose(n, k):
    """
    A fast way to calculate binomial coefficients by Andrew Dalke (contrib).
    """
    if 0 <= k <= n:
        ntok = 1
        ktok = 1
        for t in xrange(1, min(k, n - k) + 1):
            ntok *= n
            ktok *= t
            n -= 1
        return ntok // ktok
    else:
        return 0
https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/belief_utils.py#L173-L186
binomial distribution
python
def _random_bernoulli(shape, probs, dtype=tf.int32, seed=None, name=None):
    """Returns samples from a Bernoulli distribution."""
    with tf.compat.v1.name_scope(name, "random_bernoulli", [shape, probs]):
        probs = tf.convert_to_tensor(value=probs)
        random_uniform = tf.random.uniform(shape, dtype=probs.dtype, seed=seed)
        return tf.cast(tf.less(random_uniform, probs), dtype)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/no_u_turn_sampler/nuts.py#L510-L515
binomial distribution
python
def equal_distribution_folds(y, folds=2):
    """Creates `folds` number of indices that have roughly balanced multi-label distribution.

    Args:
        y: The multi-label outputs.
        folds: The number of folds to create.

    Returns:
        `folds` number of indices that have roughly equal multi-label distributions.
    """
    n, classes = y.shape

    # Compute sample distribution over classes
    dist = y.sum(axis=0).astype('float')
    dist /= dist.sum()

    index_list = []
    fold_dist = np.zeros((folds, classes), dtype='float')
    for _ in range(folds):
        index_list.append([])
    for i in range(n):
        if i < folds:
            target_fold = i
        else:
            normed_folds = fold_dist.T / fold_dist.sum(axis=1)
            how_off = normed_folds.T - dist
            target_fold = np.argmin(
                np.dot((y[i] - .5).reshape(1, -1), how_off.T))
        fold_dist[target_fold] += y[i]
        index_list[target_fold].append(i)

    logger.debug("Fold distributions:")
    logger.debug(fold_dist)
    return index_list
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/utils/sampling.py#L11-L44
binomial distribution
python
def qnwnorm(n, mu=None, sig2=None, usesqrtm=False):
    """
    Computes nodes and weights for multivariate normal distribution

    Parameters
    ----------
    n : int or array_like(float)
        A length-d iterable of the number of nodes in each dimension
    mu : scalar or array_like(float), optional(default=zeros(d))
        The means of each dimension of the random variable. If a scalar
        is given, that constant is repeated d times, where d is the
        number of dimensions
    sig2 : array_like(float), optional(default=eye(d))
        A d x d array representing the variance-covariance matrix of the
        multivariate normal distribution.

    Returns
    -------
    nodes : np.ndarray(dtype=float)
        Quadrature nodes
    weights : np.ndarray(dtype=float)
        Weights for quadrature nodes

    Notes
    -----
    Based on original function ``qnwnorm`` in CompEcon toolbox by
    Miranda and Fackler

    References
    ----------
    Miranda, Mario J, and Paul L Fackler. Applied Computational
    Economics and Finance, MIT Press, 2002.
    """
    n = np.atleast_1d(n)
    d = n.size

    if mu is None:
        mu = np.zeros(d)
    else:
        mu = np.atleast_1d(mu)

    if sig2 is None:
        sig2 = np.eye(d)
    else:
        sig2 = np.atleast_1d(sig2).reshape(d, d)

    if all([x.size == 1 for x in [n, mu, sig2]]):
        nodes, weights = _qnwnorm1(n[0])
    else:
        nodes = []
        weights = []

        for i in range(d):
            _1d = _qnwnorm1(n[i])
            nodes.append(_1d[0])
            weights.append(_1d[1])

        nodes = gridmake(*nodes)
        weights = ckron(*weights[::-1])

    if usesqrtm:
        new_sig2 = la.sqrtm(sig2)
    else:  # cholesky
        new_sig2 = la.cholesky(sig2)

    if d > 1:
        nodes = nodes.dot(new_sig2) + mu  # Broadcast ok
    else:  # nodes.dot(sig) will not be aligned in scalar case.
        nodes = nodes * new_sig2 + mu

    return nodes.squeeze(), weights
https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/quad.py#L225-L299
binomial distribution
python
def distributions(self, _args):
    """Lists all distributions currently available (i.e. that have already
    been built)."""
    ctx = self.ctx
    dists = Distribution.get_distributions(ctx)

    if dists:
        print('{Style.BRIGHT}Distributions currently installed are:'
              '{Style.RESET_ALL}'.format(Style=Out_Style, Fore=Out_Fore))
        pretty_log_dists(dists, print)
    else:
        print('{Style.BRIGHT}There are no dists currently built.'
              '{Style.RESET_ALL}'.format(Style=Out_Style))
https://github.com/kivy/python-for-android/blob/8e0e8056bc22e4d5bd3398a6b0301f38ff167933/pythonforandroid/toolchain.py#L1075-L1087
binomial distribution
python
def distribution_from_path(cls, path, name=None):
    """Return a distribution from a path.

    If name is provided, find the distribution. If none is found matching the
    name, return None. If name is not provided and there is unambiguously a
    single distribution, return that distribution otherwise None.
    """
    # Monkeypatch pkg_resources finders should it not already be so.
    register_finders()
    if name is None:
        distributions = set(find_distributions(path))
        if len(distributions) == 1:
            return distributions.pop()
    else:
        for dist in find_distributions(path):
            if dist.project_name == name:
                return dist
https://github.com/pantsbuild/pex/blob/87b2129d860250d3b9edce75b9cb62f9789ee521/pex/util.py#L87-L103
binomial distribution
python
def sample_from_distribution(self, distribution, k, proportions=False):
    """Return a new table with the same number of rows and a new column.
    The values in the distribution column define a multinomial.
    They are replaced by sample counts/proportions in the output.

    >>> sizes = Table(['size', 'count']).with_rows([
    ...     ['small', 50],
    ...     ['medium', 100],
    ...     ['big', 50],
    ... ])
    >>> sizes.sample_from_distribution('count', 1000) # doctest: +SKIP
    size   | count | count sample
    small  | 50    | 239
    medium | 100   | 496
    big    | 50    | 265
    >>> sizes.sample_from_distribution('count', 1000, True) # doctest: +SKIP
    size   | count | count sample
    small  | 50    | 0.24
    medium | 100   | 0.51
    big    | 50    | 0.25
    """
    dist = self._get_column(distribution)
    total = sum(dist)
    assert total > 0 and np.all(dist >= 0), 'Counts or a distribution required'
    dist = dist / sum(dist)
    sample = np.random.multinomial(k, dist)
    if proportions:
        sample = sample / sum(sample)
    label = self._unused_label(self._as_label(distribution) + ' sample')
    return self.with_column(label, sample)
https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L1430-L1459
binomial distribution
python
def prior(self, samples):
    """Prior distribution

    Parameters
    ----------
    samples : list
        a collection of samples, it's a (NUM_OF_INSTANCE * NUM_OF_FUNCTIONS) matrix,
        representing {{w11, w12, ..., w1k}, {w21, w22, ..., w2k}, ..., {wk1, wk2, ..., wkk}}

    Returns
    -------
    float
        prior distribution
    """
    ret = np.ones(NUM_OF_INSTANCE)
    for i in range(NUM_OF_INSTANCE):
        for j in range(self.effective_model_num):
            if not samples[i][j] > 0:
                ret[i] = 0
        if self.f_comb(1, samples[i]) >= self.f_comb(self.target_pos, samples[i]):
            ret[i] = 0
    return ret
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/src/sdk/pynni/nni/curvefitting_assessor/model_factory.py#L242-L263
binomial distribution
python
def angular_distribution(labels, resolution=100, weights=None):
    '''For each object in labels, compute the angular distribution
    around the centers of mass.  Returns an i x j matrix, where i is
    the number of objects in the label matrix, and j is the resolution
    of the distribution (default 100), mapped from -pi to pi.

    Optionally, the distributions can be weighted by pixel.

    The algorithm approximates the angular width of pixels relative to
    the object centers, in an attempt to be accurate for small objects.

    The ChordRatio of an object can be approximated by
    >>> angdist = angular_distribution(labels, resolution)
    >>> angdist2 = angdist[:, :resolution//2] + angdist[:, resolution//2:]  # map to widths, rather than radii
    >>> chord_ratio = np.sqrt(angdist2.max(axis=1) / angdist2.min(axis=1))  # sqrt because these are sectors, not triangles
    '''
    if weights is None:
        weights = np.ones(labels.shape)
    maxlabel = labels.max()
    ci, cj = centers_of_labels(labels)
    j, i = np.meshgrid(np.arange(labels.shape[0]), np.arange(labels.shape[1]))
    # compute deltas from pixels to object centroids, and mask to labels
    di = i[labels > 0] - ci[labels[labels > 0] - 1]
    dj = j[labels > 0] - cj[labels[labels > 0] - 1]
    weights = weights[labels > 0]
    labels = labels[labels > 0]
    # find angles, and angular width of pixels
    angle = np.arctan2(di, dj)
    # Use pixels of width 2 to get some smoothing
    width = np.arctan(1.0 / np.sqrt(di ** 2 + dj ** 2 + np.finfo(float).eps))
    # create an onset/offset array of size 3 * resolution
    lo = np.clip((angle - width) * resolution / (2 * np.pi),
                 -resolution, 2 * resolution).astype(int) + resolution
    hi = np.clip((angle + width) * resolution / (2 * np.pi),
                 -resolution, 2 * resolution).astype(int) + resolution
    # make sure every pixel counts at least once
    hi[lo == hi] += 1
    # normalize weights by their angular width (adding a bit to avoid 0 / 0)
    weights /= (hi - lo)
    onset = scipy.sparse.coo_matrix((weights, (labels - 1, lo)),
                                    (maxlabel, 3 * resolution)).toarray()
    offset = scipy.sparse.coo_matrix((weights, (labels - 1, hi)),
                                     (maxlabel, 3 * resolution)).toarray()
    # sum onset/offset to get actual distribution
    onoff = onset - offset
    dist = np.cumsum(onoff, axis=1)
    dist = dist[:, :resolution] + dist[:, resolution:2 * resolution] + dist[:, 2 * resolution:]
    return dist
https://github.com/CellProfiler/centrosome/blob/7bd9350a2d4ae1b215b81eabcecfe560bbb1f32a/centrosome/cpmorphology.py#L4262-L4306
binomial distribution
python
def MarginalBeta(self, i):
    """Computes the marginal distribution of the ith element.

    See http://en.wikipedia.org/wiki/Dirichlet_distribution#Marginal_distributions

    i: int

    Returns: Beta object
    """
    alpha0 = self.params.sum()
    alpha = self.params[i]
    return Beta(alpha, alpha0 - alpha)
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1790-L1802
binomial distribution
python
def binomial_coefficient(k, i):
    """ Computes the binomial coefficient (denoted by *k choose i*).

    Please see the following website for details:
    http://mathworld.wolfram.com/BinomialCoefficient.html

    :param k: size of the set of distinct elements
    :type k: int
    :param i: size of the subsets
    :type i: int
    :return: combination of *k* and *i*
    :rtype: float
    """
    # Special case
    if i > k:
        return float(0)
    # Compute binomial coefficient
    k_fact = math.factorial(k)
    i_fact = math.factorial(i)
    k_i_fact = math.factorial(k - i)
    return float(k_fact / (k_i_fact * i_fact))
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/linalg.py#L419-L438
binomial distribution
python
def binom(n, k):
    """
    Returns binomial coefficient (n choose k).
    """
    # http://blog.plover.com/math/choose.html
    if k > n:
        return 0
    if k == 0:
        return 1
    result = 1
    for denom in range(1, k + 1):
        result *= n
        result /= denom
        n -= 1
    return result
https://github.com/cathalgarvey/deadlock/blob/30099b476ff767611ce617150a0c574fc03fdf79/deadlock/passwords/zxcvbn/scoring.py#L7-L21
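Note that result /= denom is true division under Python 3, so the snippet above returns a float. Each intermediate value is mathematically an integer (a smaller binomial coefficient), so the result stays exact for arguments small enough to fit in a float's 53-bit mantissa:

from math import comb

def binom(n, k):
    if k > n:
        return 0
    if k == 0:
        return 1
    result = 1
    for denom in range(1, k + 1):
        result *= n
        result /= denom
        n -= 1
    return result

assert binom(10, 3) == 120.0 == comb(10, 3)  # float, but exact here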
binomial distribution
python
def distribution(self, **slice_kwargs):
    """
    Calculates the number of papers in each slice, as defined by
    ``slice_kwargs``.

    Examples
    --------
    .. code-block:: python

       >>> corpus.distribution(step_size=1, window_size=1)
       [5, 5]

    Parameters
    ----------
    slice_kwargs : kwargs
        Keyword arguments to be passed to :meth:`.Corpus.slice`\.

    Returns
    -------
    list
    """
    values = []
    keys = []
    for key, size in self.slice(count_only=True, **slice_kwargs):
        values.append(size)
        keys.append(key)
    return keys, values
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/classes/corpus.py#L595-L622
binomial distribution
python
def plot_pdf(self, names=None, Nbest=5, lw=2):
    """Plots probability density functions of the distributions

    :param str,list names: names can be a single distribution name, or a list
        of distribution names, or kept as None, in which case, the first Nbest
        distributions will be taken (default to best 5)
    """
    assert Nbest > 0
    if Nbest > len(self.distributions):
        Nbest = len(self.distributions)

    if isinstance(names, list):
        for name in names:
            pylab.plot(self.x, self.fitted_pdf[name], lw=lw, label=name)
    elif names:
        pylab.plot(self.x, self.fitted_pdf[names], lw=lw, label=names)
    else:
        try:
            names = self.df_errors.sort_values(
                by="sumsquare_error").index[0:Nbest]
        except Exception:
            names = self.df_errors.sort("sumsquare_error").index[0:Nbest]

        for name in names:
            if name in self.fitted_pdf.keys():
                pylab.plot(self.x, self.fitted_pdf[name], lw=lw, label=name)
            else:
                print("%s was not fitted. no parameters available" % name)
    pylab.grid(True)
    pylab.legend()
https://github.com/cokelaer/fitter/blob/1f07a42ad44b38a1f944afe456b7c8401bd50402/src/fitter/fitter.py#L246-L277
binomial distribution
python
def qnwlogn(n, mu=None, sig2=None):
    """
    Computes nodes and weights for multivariate lognormal distribution

    Parameters
    ----------
    n : int or array_like(float)
        A length-d iterable of the number of nodes in each dimension
    mu : scalar or array_like(float), optional(default=zeros(d))
        The means of each dimension of the random variable. If a scalar
        is given, that constant is repeated d times, where d is the
        number of dimensions
    sig2 : array_like(float), optional(default=eye(d))
        A d x d array representing the variance-covariance matrix of the
        multivariate normal distribution.

    Returns
    -------
    nodes : np.ndarray(dtype=float)
        Quadrature nodes
    weights : np.ndarray(dtype=float)
        Weights for quadrature nodes

    Notes
    -----
    Based on original function ``qnwlogn`` in CompEcon toolbox by
    Miranda and Fackler

    References
    ----------
    Miranda, Mario J, and Paul L Fackler. Applied Computational
    Economics and Finance, MIT Press, 2002.
    """
    nodes, weights = qnwnorm(n, mu, sig2)
    return np.exp(nodes), weights
https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/quad.py#L302-L340
binomial distribution
python
def initial_distribution_samples(self):
    r""" Samples of the initial distribution """
    res = np.empty((self.nsamples, self.nstates), dtype=config.dtype)
    for i in range(self.nsamples):
        res[i, :] = self._sampled_hmms[i].stationary_distribution
    return res
https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/hmm/generic_sampled_hmm.py#L70-L75
binomial distribution
python
def uniform_distribution(number_of_nodes):
    """
    Return the uniform distribution for a set of binary nodes, indexed by state
    (so there is one dimension per node, the size of which is the number of
    possible states for that node).

    Args:
        nodes (np.ndarray): A set of indices of binary nodes.

    Returns:
        np.ndarray: The uniform distribution over the set of nodes.
    """
    # The size of the state space for binary nodes is 2^(number of nodes).
    number_of_states = 2 ** number_of_nodes
    # Generate the maximum entropy distribution
    # TODO extend to nonbinary nodes
    return (np.ones(number_of_states) /
            number_of_states).reshape([2] * number_of_nodes)
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/distribution.py#L30-L47
binomial distribution
python
def logProbability(self, distn):
    """Form of distribution must be an array of counts in order of self.keys."""
    x = numpy.asarray(distn)
    n = x.sum()
    return (logFactorial(n) -
            numpy.sum([logFactorial(k) for k in x]) +
            numpy.sum(x * numpy.log(self.dist.pmf)))
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/math/dist.py#L106-L111
binomial distribution
python
def rmultinomial(n, p, size=None):
    """
    Random multinomial variates.
    """
    # Leaving size=None as the default means return value is 1d array
    # if not specified-- nicer.

    # Single value for p:
    if len(np.shape(p)) == 1:
        return np.random.multinomial(n, p, size)

    # Multiple values for p:
    if np.isscalar(n):
        n = n * np.ones(np.shape(p)[0], dtype=np.int)
    out = np.empty(np.shape(p))
    for i in xrange(np.shape(p)[0]):
        out[i, :] = np.random.multinomial(n[i], p[i, :], size)
    return out
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L1773-L1790
binomial distribution
python
def k_s(X):
    """
    Kolmogorov-Smirnov statistic. Finds the probability that the data are
    distributed as func - used method of Numerical Recipes (Press et al., 1986)
    """
    xbar, sigma = pmag.gausspars(X)
    d, f = 0, 0.
    for i in range(1, len(X) + 1):
        b = old_div(float(i), float(len(X)))
        a = gaussfunc(X[i - 1], xbar, sigma)
        if abs(f - a) > abs(b - a):
            delta = abs(f - a)
        else:
            delta = abs(b - a)
        if delta > d:
            d = delta
        f = b
    return d, xbar, sigma
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmagplotlib.py#L198-L216
binomial distribution
python
def plot_dist_normal(s, mu, sigma):
    """
    plot distribution
    """
    import matplotlib.pyplot as plt
    count, bins, ignored = plt.hist(s, 30, normed=True)
    plt.plot(bins,
             1 / (sigma * np.sqrt(2 * np.pi)) *
             np.exp(-(bins - mu) ** 2 / (2 * sigma ** 2)),
             linewidth=2, color='r')
    plt.show()
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/shuffle_genome.py#L16-L25
binomial distribution
python
def pueyo_bins(data):
    """
    Binning method based on Pueyo (2006)

    Parameters
    ----------
    data : array-like
        Data to be binned

    Returns
    -------
    : tuple of arrays
        binned data, empirical probability density

    Notes
    -----
    Bins the data into bins of length 2**i, i = 0, 1, 2, ...
    The empirical probability densities will sum to 1 if multiplied by the
    respective 2**i.
    """
    log_ub = np.ceil(np.log2(np.max(data)))
    bins = 2 ** np.arange(log_ub + 1)
    binned_data = np.histogram(data, bins=bins)[0]
    epdf = (1 / bins[:-1]) * binned_data / len(data)
    return binned_data, epdf
https://github.com/jkitzes/macroeco/blob/ee5fac5560a2d64de3a64738b5bc6833e2d7ff2e/macroeco/compare/_compare.py#L443-L468
binomial distribution
python
def get_random(self, size=10):
    """Returns random variates from the histogram.

    Note this assumes the histogram is an 'events per bin', not a pdf.
    Inside the bins, a uniform distribution is assumed.
    """
    bin_i = np.random.choice(np.arange(len(self.bin_centers)),
                             size=size, p=self.normalized_histogram)
    return self.bin_centers[bin_i] + \
        np.random.uniform(-0.5, 0.5, size=size) * self.bin_volumes()[bin_i]
https://github.com/JelleAalbers/multihist/blob/072288277f807e7e388fdf424c3921c80576f3ab/multihist.py#L187-L193
binomial distribution
python
def dyno_hist(x, window=None, probability=True, edge_weight=1.):
    """ Probability distribution function from values

    Arguments:
        probability (bool): whether the values should be min/max scaled to lie on the range [0, 1]

    Like `hist` but smoother, more accurate/useful

    Double-Normalization:
        The x values are min/max normalized to lie in the range 0-1 inclusive
        The pdf is normalized to integrate/sum to 1.0

    >>> h = dyno_hist(np.arange(100), window=5)
    >>> abs(sum(np.diff(h.index.values) * h.values[1:]) - 1.) < 0.00001
    True
    >>> h = dyno_hist(np.arange(50), window=12)
    >>> abs(sum(np.diff(h.index.values) * h.values[1:]) - 1.) < 0.00001
    True
    >>> h = dyno_hist(np.random.randn(1000), window=42)
    >>> abs(sum(np.diff(h.index.values) * h.values[1:]) - 1.0) < 0.04
    True
    """
    x = np.sort(x)
    if probability:
        # normalize x first
        x = x - x[0]
        x = x / float(x[-1] or 1)
    window = window or 1
    window = min(max(window, 1), int(len(x) / 1.5))
    window += 1
    # Empirical Densities (PDF) based on diff of sorted values
    delta = x[(window - 1):] - x[:(1 - window)]
    densities = float(window - 1) / (len(delta) + window - 2) / delta
    h = pd.Series(densities, index=x[window // 2:][:len(delta)])
    if probability:
        if h.index[0] > 0:
            h = pd.Series(edge_weight * densities[0], index=[0]).append(h)
        if h.index[-1] < 1:
            h = h.append(pd.Series(edge_weight * densities[-1], index=[1.]))
    return h
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/stats.py#L962-L997
binomial distribution
python
def negative_binomial_like(x, mu, alpha):
    R"""
    Negative binomial log-likelihood.

    The negative binomial distribution describes a Poisson random variable
    whose rate parameter is gamma distributed.
    PyMC's chosen parameterization is based on this mixture interpretation.

    .. math::
        f(x \mid \mu, \alpha) = \frac{\Gamma(x+\alpha)}{x! \Gamma(\alpha)} (\alpha/(\mu+\alpha))^\alpha (\mu/(\mu+\alpha))^x

    :Parameters:
      - `x` : x = 0,1,2,...
      - `mu` : mu > 0
      - `alpha` : alpha > 0

    .. note::
      - :math:`E[x]=\mu`
      - In Wikipedia's parameterization,
        :math:`r=\alpha`,
        :math:`p=\mu/(\mu+\alpha)`,
        :math:`\mu=rp/(1-p)`
    """
    alpha = np.array(alpha)
    if (alpha > 1e10).any():
        if (alpha > 1e10).all():
            # Return Poisson when alpha gets very large
            return flib.poisson(x, mu)

        # Split big and small dispersion values
        big = alpha > 1e10
        return flib.poisson(x[big], mu[big]) + flib.negbin2(
            x[big - True], mu[big - True], alpha[big - True])
    return flib.negbin2(x, mu, alpha)
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L2084-L2119
binomial distribution
python
def rejection_sampling(X, e, bn, N):
    """Estimate the probability distribution of variable X given
    evidence e in BayesNet bn, using N samples.  [Fig. 14.14]
    Raises a ZeroDivisionError if all the N samples are rejected,
    i.e., inconsistent with e.

    >>> seed(47)
    >>> rejection_sampling('Burglary', dict(JohnCalls=T, MaryCalls=T),
    ...   burglary, 10000).show_approx()
    'False: 0.7, True: 0.3'
    """
    counts = dict((x, 0) for x in bn.variable_values(X))  # bold N in Fig. 14.14
    for j in xrange(N):
        sample = prior_sample(bn)  # boldface x in Fig. 14.14
        if consistent_with(sample, e):
            counts[sample[X]] += 1
    return ProbDist(X, counts)
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/probability.py#L397-L412
binomial distribution
python
def call(self, inputs):
    """Runs the model to generate a distribution p(x_t | z_t, f).

    Args:
      inputs: A tuple of (z_{1:T}, f), where `z_{1:T}` is a tensor of shape
        [..., batch_size, timesteps, latent_size_dynamic], and `f` is of shape
        [..., batch_size, latent_size_static].

    Returns:
      A batched Independent distribution wrapping a set of Normal distributions
      over the pixels of x_t, where the Independent distribution has event
      shape [height, width, channels], batch shape [batch_size, timesteps], and
      sample shape [sample_shape, batch_size, timesteps, height, width,
      channels].
    """
    # We explicitly broadcast f to the same shape as z other than the final
    # dimension, because `tf.concat` can't automatically do this.
    dynamic, static = inputs
    timesteps = tf.shape(input=dynamic)[-2]
    static = static[..., tf.newaxis, :] + tf.zeros([timesteps, 1])
    latents = tf.concat([dynamic, static], axis=-1)  # (sample, N, T, latents)
    out = self.dense(latents)
    out = tf.reshape(out, (-1, 1, 1, self.hidden_size))
    out = self.conv_transpose1(out)
    out = self.conv_transpose2(out)
    out = self.conv_transpose3(out)
    out = self.conv_transpose4(out)  # (sample*N*T, h, w, c)
    expanded_shape = tf.concat(
        (tf.shape(input=latents)[:-1], tf.shape(input=out)[1:]), axis=0)
    out = tf.reshape(out, expanded_shape)  # (sample, N, T, h, w, c)
    return tfd.Independent(
        distribution=tfd.Normal(loc=out, scale=1.),
        reinterpreted_batch_ndims=3,  # wrap (h, w, c)
        name="decoded_image")
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/disentangled_vae.py#L358-L391
binomial distribution
python
def binomial_coefficient(n, k):
    """ Calculate the binomial coefficient indexed by n and k.

    Args:
        n (int): positive integer
        k (int): positive integer

    Returns:
        The binomial coefficient indexed by n and k

    Raises:
        TypeError: If either n or k is not an integer
        ValueError: If either n or k is negative, or if k is strictly greater than n
    """
    if not isinstance(k, int) or not isinstance(n, int):
        raise TypeError("Expecting positive integers")
    if k > n:
        raise ValueError("k must be less than or equal to n")
    if k < 0 or n < 0:
        raise ValueError("Expecting positive integers")
    return factorial(n) // (factorial(k) * factorial(n - k))
https://github.com/julienc91/utools/blob/6b2f18a5cb30a9349ba25a20c720c737f0683099/utools/math.py#L202-L226
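A short exercise of both the happy path and the validation paths of the function above (only the standard library assumed):

from math import factorial

def binomial_coefficient(n, k):
    if not isinstance(k, int) or not isinstance(n, int):
        raise TypeError("Expecting positive integers")
    if k > n:
        raise ValueError("k must be less than or equal to n")
    if k < 0 or n < 0:
        raise ValueError("Expecting positive integers")
    return factorial(n) // (factorial(k) * factorial(n - k))

assert binomial_coefficient(6, 2) == 15
try:
    binomial_coefficient(2, 6)
except ValueError as err:
    print(err)  # k must be less than or equal to n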
binomial distribution
python
def pdf(self):
    """
    Returns the probability density function (pdf).

    Returns
    -------
    function: The probability density function of the distribution.

    Examples
    --------
    >>> from pgmpy.factors.distributions import GaussianDistribution
    >>> dist = GD(variables=['x1', 'x2', 'x3'],
    ...           mean=[1, -3, 4],
    ...           cov=[[4, 2, -2],
    ...                [2, 5, -5],
    ...                [-2, -5, 8]])
    >>> dist.pdf
    <function pgmpy.factors.distributions.GaussianDistribution.GaussianDistribution.pdf.<locals>.<lambda>>
    >>> dist.pdf([0, 0, 0])
    0.0014805631279234139
    """
    return lambda *args: multivariate_normal.pdf(
        args, self.mean.reshape(1, len(self.variables))[0], self.covariance)
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/factors/distributions/GaussianDistribution.py#L73-L95
binomial distribution
python
def get_distbins(start=100, bins=2500, ratio=1.01):
    """ Get exponentially sized
    """
    b = np.ones(bins, dtype="float64")
    b[0] = 100
    for i in range(1, bins):
        b[i] = b[i - 1] * ratio
    bins = np.around(b).astype(dtype="int")
    binsizes = np.diff(bins)
    return bins, binsizes
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/hic.py#L685-L694
binomial distribution
python
def proportions_from_distribution(table, label, sample_size,
                                  column_name='Random Sample'):
    """
    Adds a column named ``column_name`` containing the proportions of a random
    draw using the distribution in ``label``.

    This method uses ``np.random.multinomial`` to draw ``sample_size`` samples
    from the distribution in ``table.column(label)``, then divides by
    ``sample_size`` to create the resulting column of proportions.

    Args:
        ``table``: An instance of ``Table``.

        ``label``: Label of column in ``table``. This column must contain a
            distribution (the values must sum to 1).

        ``sample_size``: The size of the sample to draw from the distribution.

        ``column_name``: The name of the new column that contains the sampled
            proportions. Defaults to ``'Random Sample'``.

    Returns:
        A copy of ``table`` with a column ``column_name`` containing the
        sampled proportions. The proportions will sum to 1.

    Throws:
        ``ValueError``: If the ``label`` is not in the table, or if
            ``table.column(label)`` does not sum to 1.
    """
    proportions = sample_proportions(sample_size, table.column(label))
    return table.with_column('Random Sample', proportions)
https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/util.py#L128-L158
binomial distribution
python
def show_G_distribution(data):
    '''Show the distribution of the G function.'''
    Xs, t = fitting.preprocess_data(data)

    Theta, Phi = np.meshgrid(np.linspace(0, np.pi, 50),
                             np.linspace(0, 2 * np.pi, 50))
    G = []

    for i in range(len(Theta)):
        G.append([])
        for j in range(len(Theta[i])):
            w = fitting.direction(Theta[i][j], Phi[i][j])
            G[-1].append(fitting.G(w, Xs))

    plt.imshow(G, extent=[0, np.pi, 0, 2 * np.pi], origin='lower')
    plt.show()
https://github.com/xingjiepan/cylinder_fitting/blob/f96d79732bc49cbc0cb4b39f008af7ce42aeb213/cylinder_fitting/visualize.py#L11-L25
binomial distribution
python
def networks_distribution(df, filepath=None):
    """
    Generates two alternative plots describing the distribution of variables
    `mse` and `size`. It is intended to be used over a list of logical networks.

    Parameters
    ----------
    df : `pandas.DataFrame`_
        DataFrame with columns `mse` and `size`
    filepath : str
        Absolute path to a folder where to write the plots

    Returns
    -------
    tuple
        Generated plots

    .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
    """
    df.mse = df.mse.map(lambda f: "%.4f" % f)

    g = sns.JointGrid(x="mse", y="size", data=df)

    g.plot_joint(sns.violinplot, scale='count')
    g.ax_joint.set_yticks(range(df['size'].min(), df['size'].max() + 1))
    g.ax_joint.set_yticklabels(range(df['size'].min(), df['size'].max() + 1))

    for tick in g.ax_joint.get_xticklabels():
        tick.set_rotation(90)

    g.ax_joint.set_xlabel("MSE")
    g.ax_joint.set_ylabel("Size")

    for i, t in enumerate(g.ax_joint.get_xticklabels()):
        c = df[df['mse'] == t.get_text()].shape[0]
        g.ax_marg_x.annotate(c, xy=(i, 0.5), va="center", ha="center", size=20, rotation=90)

    for i, t in enumerate(g.ax_joint.get_yticklabels()):
        s = int(t.get_text())
        c = df[df['size'] == s].shape[0]
        g.ax_marg_y.annotate(c, xy=(0.5, s), va="center", ha="center", size=20)

    if filepath:
        g.savefig(os.path.join(filepath, 'networks-distribution.pdf'))

    plt.figure()

    counts = df[["size", "mse"]].reset_index(level=0).groupby(["size", "mse"], as_index=False).count()
    cp = counts.pivot("size", "mse", "index").sort_index()

    ax = sns.heatmap(cp, annot=True, fmt=".0f", linewidths=.5)
    ax.set_xlabel("MSE")
    ax.set_ylabel("Size")

    if filepath:
        plt.savefig(os.path.join(filepath, 'networks-heatmap.pdf'))

    return g, ax
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/visualize.py#L94-L156
binomial distribution
python
def cumulative_distribution(self, X):
    """Computes the integral of a 1-D pdf between two bounds

    Args:
        X (numpy.array): Shaped (1, n), containing the datapoints.

    Returns:
        numpy.array: estimated cumulative distribution.
    """
    self.check_fit()
    low_bounds = self.model.dataset.mean() - (5 * self.model.dataset.std())

    result = []
    for value in X:
        result.append(self.model.integrate_box_1d(low_bounds, value))

    return np.array(result)
https://github.com/DAI-Lab/Copulas/blob/821df61c3d36a6b81ef2883935f935c2eaaa862c/copulas/univariate/gaussian_kde.py#L20-L37
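The module path suggests the model attribute here wraps scipy.stats.gaussian_kde, whose integrate_box_1d does the integral directly; a standalone sketch under that assumption:

import numpy as np
from scipy.stats import gaussian_kde

np.random.seed(1)
data = np.random.randn(500)
kde = gaussian_kde(data)
low = data.mean() - 5 * data.std()  # same "far left" lower bound as above

cdf = np.array([kde.integrate_box_1d(low, v) for v in [-1.0, 0.0, 1.0]])
print(cdf)  # roughly [0.16, 0.5, 0.84] for standard-normal data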
binomial distribution
python
def distributions_for_instances(self, data):
    """
    Performs predictions, returning the class distributions.

    :param data: the Instances to get the class distributions for
    :type data: Instances
    :return: the class distribution matrix, None if not a batch predictor
    :rtype: ndarray
    """
    if self.is_batchpredictor:
        return arrays.double_matrix_to_ndarray(self.__distributions(data.jobject))
    else:
        return None
https://github.com/fracpete/python-weka-wrapper/blob/e865915146faf40d3bbfedb440328d1360541633/python/weka/classifiers.py#L120-L132
binomial distribution
python
def _bin_exp(self, n_bin, scale=1.0): """ Calculate the bin locations to approximate exponential distribution. It breaks the cumulative probability of exponential distribution into n_bin equal bins, each covering 1 / n_bin probability. Then it calculates the center of mass in each bins and returns the centers of mass. So, it approximates the exponential distribution with n_bin of Delta function weighted by 1 / n_bin, at the locations of these centers of mass. Parameters: ----------- n_bin: int The number of bins to approximate the exponential distribution scale: float. The scale parameter of the exponential distribution, defined in the same way as scipy.stats. It does not influence the ratios between the bins, but just controls the spacing between the bins. So generally users should not change its default. Returns: -------- bins: numpy array of size [n_bin,] The centers of mass for each segment of the exponential distribution. """ boundaries = np.flip(scipy.stats.expon.isf( np.linspace(0, 1, n_bin + 1), scale=scale), axis=0) bins = np.empty(n_bin) for i in np.arange(n_bin): bins[i] = utils.center_mass_exp( (boundaries[i], boundaries[i + 1]), scale=scale) return bins
https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/reprsimil/brsa.py#L4085-L4115
binomial distribution
python
def get_distributions(self): """ Returns a dictionary of name and its distribution. Distribution is a ndarray. The ndarray is stored in the standard way such that the rightmost variable changes most often. Consider a CPD of variable 'd' which has parents 'b' and 'c' (distribution['CONDSET'] = ['b', 'c']) | d_0 d_1 --------------------------- b_0, c_0 | 0.8 0.2 b_0, c_1 | 0.9 0.1 b_1, c_0 | 0.7 0.3 b_1, c_1 | 0.05 0.95 The value of distribution['d']['DPIS'] for the above example will be: array([[ 0.8 , 0.2 ], [ 0.9 , 0.1 ], [ 0.7 , 0.3 ], [ 0.05, 0.95]]) Examples -------- >>> reader = XBNReader('xbn_test.xml') >>> reader.get_distributions() {'a': {'TYPE': 'discrete', 'DPIS': array([[ 0.2, 0.8]])}, 'e': {'TYPE': 'discrete', 'DPIS': array([[ 0.8, 0.2], [ 0.6, 0.4]]), 'CONDSET': ['c'], 'CARDINALITY': [2]}, 'b': {'TYPE': 'discrete', 'DPIS': array([[ 0.8, 0.2], [ 0.2, 0.8]]), 'CONDSET': ['a'], 'CARDINALITY': [2]}, 'c': {'TYPE': 'discrete', 'DPIS': array([[ 0.2 , 0.8 ], [ 0.05, 0.95]]), 'CONDSET': ['a'], 'CARDINALITY': [2]}, 'd': {'TYPE': 'discrete', 'DPIS': array([[ 0.8 , 0.2 ], [ 0.9 , 0.1 ], [ 0.7 , 0.3 ], [ 0.05, 0.95]]), 'CONDSET': ['b', 'c']}, 'CARDINALITY': [2, 2]} """ distribution = {} for dist in self.bnmodel.find('DISTRIBUTIONS'): variable_name = dist.find('PRIVATE').get('NAME') distribution[variable_name] = {'TYPE': dist.get('TYPE')} if dist.find('CONDSET') is not None: distribution[variable_name]['CONDSET'] = [var.get('NAME') for var in dist.find('CONDSET').findall('CONDELEM')] distribution[variable_name]['CARDINALITY'] = np.array( [len(set(np.array([list(map(int, dpi.get('INDEXES').split())) for dpi in dist.find('DPIS')])[:, i])) for i in range(len(distribution[variable_name]['CONDSET']))]) distribution[variable_name]['DPIS'] = np.array( [list(map(float, dpi.text.split())) for dpi in dist.find('DPIS')]) return distribution
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/readwrite/XMLBeliefNetwork.py#L137-L184
binomial distribution
python
def binned_entropy(x, max_bins):
    """
    First bins the values of x into max_bins equidistant bins.
    Then calculates the value of

    .. math::

        - \\sum_{k=0}^{min(max\\_bins, len(x))} p_k log(p_k) \\cdot \\mathbf{1}_{(p_k > 0)}

    where :math:`p_k` is the percentage of samples in bin :math:`k`.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param max_bins: the maximal number of bins
    :type max_bins: int
    :return: the value of this feature
    :return type: float
    """
    if not isinstance(x, (np.ndarray, pd.Series)):
        x = np.asarray(x)
    hist, bin_edges = np.histogram(x, bins=max_bins)
    probs = hist / x.size
    # Vectorized entropy: np.math.log and summing over a generator are
    # deprecated, so filter the empty bins and use np.log directly.
    probs = probs[probs > 0]
    return -np.sum(probs * np.log(probs))
https://github.com/blue-yonder/tsfresh/blob/c72c9c574371cf7dd7d54e00a466792792e5d202/tsfresh/feature_extraction/feature_calculators.py#L1439-L1461
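A standalone check with plain NumPy; for roughly uniform data the binned entropy approaches log(max_bins):

import numpy as np

x = np.random.rand(10000)
hist, _ = np.histogram(x, bins=10)
probs = hist / x.size
probs = probs[probs > 0]                  # drop empty bins before taking logs
entropy = -np.sum(probs * np.log(probs))  # ~ log(10) ~ 2.30 here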
binomial distribution
python
def gaussian_distribution(mean, stdev, num_pts=50): """ get an x and y numpy.ndarray that spans the +/- 4 standard deviation range of a gaussian distribution with a given mean and standard deviation. useful for plotting Parameters ---------- mean : float the mean of the distribution stdev : float the standard deviation of the distribution num_pts : int the number of points in the returned ndarrays. Default is 50 Returns ------- x : numpy.ndarray the x-values of the distribution y : numpy.ndarray the y-values of the distribution """ warnings.warn("pyemu.helpers.gaussian_distribution() has moved to plot_utils",PyemuWarning) from pyemu import plot_utils return plot_utils.gaussian_distribution(mean=mean,stdev=stdev,num_pts=num_pts)
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/utils/helpers.py#L3855-L3880
binomial distribution
python
def distributions_for_instances(self, data):
    """
    Performs predictions, returning the class distributions.

    :param data: the Instances to get the class distributions for
    :type data: Instances
    :return: the class distribution matrix, None if not a batch predictor
    :rtype: ndarray
    """
    if self.is_batchpredictor:
        return typeconv.double_matrix_to_ndarray(self.__distributions(data.jobject))
    else:
        return None
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/classifiers.py#L120-L132
binomial distribution
python
def random_histogram(counts, nbins, seed):
    """
    Distribute a total number of counts over a set of bins homogeneously.

    >>> random_histogram(1, 2, 42)
    array([1, 0])
    >>> random_histogram(100, 5, 42)
    array([28, 18, 17, 19, 18])
    >>> random_histogram(10000, 5, 42)
    array([2043, 2015, 2050, 1930, 1962])
    """
    numpy.random.seed(seed)
    return numpy.histogram(numpy.random.random(counts), nbins, (0, 1))[0]
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/baselib/general.py#L988-L1000
binomial distribution
python
def normal_distribution(mean, variance, minimum=None, maximum=None, weight_count=23):
    """
    Return a list of weights approximating a normal distribution.

    Args:
        mean (float): The mean of the distribution
        variance (float): The variance of the distribution
        minimum (float): The minimum outcome possible to
            bound the output distribution to
        maximum (float): The maximum outcome possible to
            bound the output distribution to
        weight_count (int): The number of weights that will
            be used to approximate the distribution

    Returns:
        list: a list of ``(float, float)`` weight tuples
        approximating a normal distribution.

    Raises:
        ValueError: ``if maximum < minimum``
        TypeError: if both ``minimum`` and ``maximum`` are ``None``

    Example:
        >>> weights = normal_distribution(10, 3,
        ...                               minimum=0, maximum=20,
        ...                               weight_count=5)
        >>> rounded_weights = [(round(value, 2), round(strength, 2))
        ...                    for value, strength in weights]
        >>> rounded_weights
        [(1.34, 0.0), (4.8, 0.0), (8.27, 0.14), (11.73, 0.14), (15.2, 0.0)]
    """
    # Sample the curve across +/- 5 sigma around the mean; the optional
    # minimum and maximum are applied afterwards via bound_weights()
    standard_deviation = math.sqrt(variance)
    min_x = (standard_deviation * -5) + mean
    max_x = (standard_deviation * 5) + mean
    step = (max_x - min_x) / weight_count
    current_x = min_x
    weights = []
    while current_x < max_x:
        weights.append(
            (current_x, _normal_function(current_x, mean, variance))
        )
        current_x += step
    if minimum is not None or maximum is not None:
        return bound_weights(weights, minimum, maximum)
    else:
        return weights
https://github.com/ajyoon/blur/blob/25fcf083af112bb003956a7a7e1c6ff7d8fef279/blur/rand.py#L252-L300
binomial distribution
python
def binomial_prefactor(s,ia,ib,xpa,xpb): """ The integral prefactor containing the binomial coefficients from Augspurger and Dykstra. >>> binomial_prefactor(0,0,0,0,0) 1 """ total= 0 for t in range(s+1): if s-ia <= t <= ib: total += binomial(ia,s-t)*binomial(ib,t)* \ pow(xpa,ia-s+t)*pow(xpb,ib-t) return total
https://github.com/chemlab/chemlab/blob/c8730966316d101e24f39ac3b96b51282aba0abe/chemlab/qc/one.py#L133-L144
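binomial_prefactor(s, ia, ib, xpa, xpb) is the coefficient of x**s in (x + xpa)**ia * (x + xpb)**ib, which can be cross-checked with NumPy's polynomial helpers (the values below are arbitrary, and binomial_prefactor is assumed in scope):

import numpy as np
from numpy.polynomial import polynomial as P

ia, ib, xpa, xpb = 2, 3, 0.5, -1.0
pa = P.polypow([xpa, 1.0], ia)   # coefficients of (x + xpa)**ia, low order first
pb = P.polypow([xpb, 1.0], ib)
coeffs = P.polymul(pa, pb)
# coeffs[s] should match binomial_prefactor(s, ia, ib, xpa, xpb) for each s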
binomial distribution
python
def binary(self, name): """Returns the path to the command of the given name for this distribution. For example: :: >>> d = Distribution() >>> jar = d.binary('jar') >>> jar '/usr/bin/jar' >>> If this distribution has no valid command of the given name raises Distribution.Error. If this distribution is a JDK checks both `bin` and `jre/bin` for the binary. """ if not isinstance(name, str): raise ValueError('name must be a binary name, given {} of type {}'.format(name, type(name))) self.validate() return self._validated_executable(name)
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/java/distribution/distribution.py#L172-L189
binomial distribution
python
def normal(target, seeds, scale, loc):
    r"""
    Produces values from a normal distribution given a set of random numbers.

    Parameters
    ----------
    target : OpenPNM Object
        The object with which this function is associated.  This argument
        is required to (1) set number of values to generate (geom.Np or
        geom.Nt) and (2) provide access to other necessary values
        (i.e. geom['pore.seed']).

    seeds : string, optional
        The dictionary key on the Geometry object containing random seed values
        (between 0 and 1) to use in the statistical distribution.

    scale : float
        The standard deviation of the Normal distribution

    loc : float
        The mean of the Normal distribution

    Examples
    --------
    The following code illustrates the inner workings of this function,
    which uses the 'norm' method of the scipy.stats module.  This can be
    used to find suitable values of 'scale' and 'loc'.

    >>> import scipy
    >>> func = scipy.stats.norm(scale=.0001, loc=0.001)
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.hist(func.ppf(q=scipy.rand(10000)), bins=50)

    """
    seeds = target[seeds]
    value = spts.norm.ppf(q=seeds, scale=scale, loc=loc)
    return value
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/models/misc/misc.py#L253-L289
binomial distribution
python
def fisher(x, k):
    """Fisher distribution: density of the polar angle x on the sphere
    for concentration parameter k.
    """
    return k/(2*np.sinh(k)) * np.exp(k*np.cos(x))*np.sin(x)
https://github.com/timothydmorton/obliquity/blob/ae0a237ae2ca7ba0f7c71f0ee391f52e809da235/obliquity/kappa_inference.py#L20-L23
binomial distribution
python
def prior_sample(bn): """Randomly sample from bn's full joint distribution. The result is a {variable: value} dict. [Fig. 14.13]""" event = {} for node in bn.nodes: event[node.variable] = node.sample(event) return event
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/probability.py#L387-L393
binomial distribution
python
def _qnwbeta1(n, a=1.0, b=1.0): """ Computes nodes and weights for quadrature on the beta distribution. Default is a=b=1 which is just a uniform distribution NOTE: For now I am just following compecon; would be much better to find a different way since I don't know what they are doing. Parameters ---------- n : scalar : int The number of quadrature points a : scalar : float, optional(default=1) First Beta distribution parameter b : scalar : float, optional(default=1) Second Beta distribution parameter Returns ------- nodes : np.ndarray(dtype=float, ndim=1) The quadrature points weights : np.ndarray(dtype=float, ndim=1) The quadrature weights that correspond to nodes Notes ----- Based of original function ``_qnwbeta1`` in CompEcon toolbox by Miranda and Fackler References ---------- Miranda, Mario J, and Paul L Fackler. Applied Computational Economics and Finance, MIT Press, 2002. """ # We subtract one and write a + 1 where we actually want a, and a # where we want a - 1 a = a - 1 b = b - 1 maxiter = 25 # Allocate empty space nodes = np.zeros(n) weights = np.zeros(n) # Find "reasonable" starting values. Why these numbers? for i in range(n): if i == 0: an = a/n bn = b/n r1 = (1+a) * (2.78/(4+n*n) + .768*an/n) r2 = 1 + 1.48*an + .96*bn + .452*an*an + .83*an*bn z = 1 - r1/r2 elif i == 1: r1 = (4.1+a) / ((1+a)*(1+0.156*a)) r2 = 1 + 0.06 * (n-8) * (1+0.12*a)/n r3 = 1 + 0.012*b * (1+0.25*abs(a))/n z = z - (1-z) * r1 * r2 * r3 elif i == 2: r1 = (1.67+0.28*a)/(1+0.37*a) r2 = 1+0.22*(n-8)/n r3 = 1+8*b/((6.28+b)*n*n) z = z-(nodes[0]-z)*r1*r2*r3 elif i == n - 2: r1 = (1+0.235*b)/(0.766+0.119*b) r2 = 1/(1+0.639*(n-4)/(1+0.71*(n-4))) r3 = 1/(1+20*a/((7.5+a)*n*n)) z = z+(z-nodes[-4])*r1*r2*r3 elif i == n - 1: r1 = (1+0.37*b) / (1.67+0.28*b) r2 = 1 / (1+0.22*(n-8)/n) r3 = 1 / (1+8*a/((6.28+a)*n*n)) z = z+(z-nodes[-3])*r1*r2*r3 else: z = 3*nodes[i-1] - 3*nodes[i-2] + nodes[i-3] ab = a+b # Root finding its = 0 z1 = -100 while abs(z - z1) > 1e-10 and its < maxiter: temp = 2 + ab p1 = (a-b + temp*z)/2 p2 = 1 for j in range(2, n+1): p3 = p2 p2 = p1 temp = 2*j + ab aa = 2*j * (j+ab)*(temp-2) bb = (temp-1) * (a*a - b*b + temp*(temp-2) * z) c = 2 * (j - 1 + a) * (j - 1 + b) * temp p1 = (bb*p2 - c*p3)/aa pp = (n*(a-b-temp*z) * p1 + 2*(n+a)*(n+b)*p2)/(temp*(1 - z*z)) z1 = z z = z1 - p1/pp if abs(z - z1) < 1e-12: break its += 1 if its == maxiter: raise ValueError("Max Iteration reached. Failed to converge") nodes[i] = z weights[i] = temp/(pp*p2) nodes = (1-nodes)/2 weights = weights * math.exp(gammaln(a+n) + gammaln(b+n) - gammaln(n+1) - gammaln(n+ab+1)) weights = weights / (2*math.exp(gammaln(a+1) + gammaln(b+1) - gammaln(ab+2))) return nodes, weights
https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/quad.py#L978-L1098
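A quick sanity check, assuming _qnwbeta1 is importable: with the default a = b = 1 the rule should integrate against the uniform distribution on [0, 1]:

nodes, weights = _qnwbeta1(10)   # a=b=1: uniform on [0, 1]
print(weights.sum())             # ~ 1.0
print((weights * nodes).sum())   # ~ 0.5, the uniform mean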
binomial distribution
python
def sample_normal(mean, var, rng): """Sample from independent normal distributions Each element is an independent normal distribution. Parameters ---------- mean : numpy.ndarray Means of the normal distribution. Shape --> (batch_num, sample_dim) var : numpy.ndarray Variance of the normal distribution. Shape --> (batch_num, sample_dim) rng : numpy.random.RandomState Returns ------- ret : numpy.ndarray The sampling result. Shape --> (batch_num, sample_dim) """ ret = numpy.sqrt(var) * rng.randn(*mean.shape) + mean return ret
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/reinforcement-learning/dqn/utils.py#L157-L176
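Typical usage, assuming sample_normal is in scope:

import numpy as np

rng = np.random.RandomState(0)
mean = np.zeros((4, 3))
var = 2.0 * np.ones((4, 3))
samples = sample_normal(mean, var, rng)   # shape (4, 3), per-element N(0, 2)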
binomial distribution
python
def pickByDistribution(distribution, r=None): """ Pick a value according to the provided distribution. Example: :: pickByDistribution([.2, .1]) Returns 0 two thirds of the time and 1 one third of the time. :param distribution: Probability distribution. Need not be normalized. :param r: Instance of random.Random. Uses the system instance if one is not provided. """ if r is None: r = random x = r.uniform(0, sum(distribution)) for i, d in enumerate(distribution): if x <= d: return i x -= d
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/math/stats.py#L36-L60
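Typical usage, assuming pickByDistribution is in scope; over many draws the empirical frequencies track the unnormalized weights:

import random

r = random.Random(42)
counts = [0, 0]
for _ in range(3000):
    counts[pickByDistribution([.2, .1], r)] += 1
# counts ~ [2000, 1000]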
binomial distribution
python
def sample_poly(self, poly, scalar=None, bias_range=1, poly_range=None, ignored_terms=None, **parameters): """Scale and sample from the given binary polynomial. If scalar is not given, problem is scaled based on bias and polynomial ranges. See :meth:`.BinaryPolynomial.scale` and :meth:`.BinaryPolynomial.normalize` Args: poly (obj:`.BinaryPolynomial`): A binary polynomial. scalar (number, optional): Value by which to scale the energy range of the binary polynomial. bias_range (number/pair, optional, default=1): Value/range by which to normalize the all the biases, or if `poly_range` is provided, just the linear biases. poly_range (number/pair, optional): Value/range by which to normalize the higher order biases. ignored_terms (iterable, optional): Biases associated with these terms are not scaled. **parameters: Other parameters for the sampling method, specified by the child sampler. """ if ignored_terms is None: ignored_terms = set() else: ignored_terms = {frozenset(term) for term in ignored_terms} # scale and normalize happen in-place so we need to make a copy original, poly = poly, poly.copy() if scalar is not None: poly.scale(scalar, ignored_terms=ignored_terms) else: poly.normalize(bias_range=bias_range, poly_range=poly_range, ignored_terms=ignored_terms) # we need to know how much we scaled by, which we can do by looking # at the biases try: v = next(v for v, bias in original.items() if bias and v not in ignored_terms) except StopIteration: # nothing to scale scalar = 1 else: scalar = poly[v] / original[v] sampleset = self.child.sample_poly(poly, **parameters) if ignored_terms: # we need to recalculate the energy sampleset.record.energy = original.energies((sampleset.record.sample, sampleset.variables)) else: sampleset.record.energy /= scalar return sampleset
https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/reference/composites/higherordercomposites.py#L283-L347
binomial distribution
python
def ndist(data, Xs):
    """
    given some data and a list of X positions, return the normal
    distribution curve evaluated as a Y point at each of those Xs.
    """
    sigma = np.sqrt(np.var(data))
    center = np.average(data)
    curve = mlab.normpdf(Xs, center, sigma)
    curve *= len(data) * HIST_RESOLUTION
    return curve
https://github.com/swharden/SWHLab/blob/a86c3c65323cec809a4bd4f81919644927094bf5/doc/uses/EPSCs-and-IPSCs/variance method/2016-12-16 tryout2.py#L42-L51
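mlab.normpdf has been removed from recent Matplotlib releases, so an equivalent sketch with scipy.stats may be useful; HIST_RESOLUTION, a module-level constant in the original, is passed explicitly here:

import numpy as np
from scipy.stats import norm

def ndist_scipy(data, Xs, hist_resolution):
    """Normal curve fitted to data, evaluated at each X, scaled to counts."""
    sigma = np.sqrt(np.var(data))
    center = np.average(data)
    return norm.pdf(Xs, center, sigma) * len(data) * hist_resolution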
binomial distribution
python
def distributions_impl(self, tag, run): """Result of the form `(body, mime_type)`, or `ValueError`.""" (histograms, mime_type) = self._histograms_plugin.histograms_impl( tag, run, downsample_to=self.SAMPLE_SIZE) return ([self._compress(histogram) for histogram in histograms], mime_type)
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/distribution/distributions_plugin.py#L71-L76
binomial distribution
python
def rand(self, n=1): """ Generate random samples from the distribution Parameters ---------- n : int, optional(default=1) The number of samples to generate Returns ------- out : array_like The generated samples """ if n == 1: return self._rand1() else: out = np.empty((n, self._p, self._p)) for i in range(n): out[i] = self._rand1() return out
https://github.com/sglyon/distcan/blob/7e2a4c810c18e8292fa3c50c2f47347ee2707d58/distcan/matrix.py#L176-L198
binomial distribution
python
def histogram(a, bins=10, range=None, normed=False, weights=None, axis=None, strategy=None): """histogram(a, bins=10, range=None, normed=False, weights=None, axis=None) -> H, dict Return the distribution of sample. :Stochastics: `a` : Array sample. `bins` : Number of bins, or an array of bin edges, in which case the range is not used. If 'Scott' or 'Freeman' is passed, then the named method is used to find the optimal number of bins. `range` : Lower and upper bin edges, default: [min, max]. `normed` :Boolean, if False, return the number of samples in each bin, if True, return the density. `weights` : Sample weights. The weights are normed only if normed is True. Should weights.sum() not equal len(a), the total bin count will not be equal to the number of samples. `axis` : Specifies the dimension along which the histogram is computed. Defaults to None, which aggregates the entire sample array. `strategy` : Histogramming method (binsize, searchsorted or digitize). :Return: `H` : The number of samples in each bin. If normed is True, H is a frequency distribution. dict{ 'edges': The bin edges, including the rightmost edge. 'upper': Upper outliers. 'lower': Lower outliers. 'bincenters': Center of bins. 'strategy': the histogramming method employed.} :Examples: >>> x = random.rand(100,10) >>> H, D = histogram(x, bins=10, range=[0,1], normed=True) >>> H2, D = histogram(x, bins=10, range=[0,1], normed=True, axis=0) :SeeAlso: histogramnd """ weighted = weights is not None a = asarray(a) if axis is None: a = atleast_1d(a.ravel()) if weighted: weights = atleast_1d(weights.ravel()) axis = 0 # Define the range if range is None: mn, mx = a.min(), a.max() if mn == mx: mn = mn - .5 mx = mx + .5 range = [mn, mx] # Find the optimal number of bins. if bins is None or isinstance(bins, str): bins = _optimize_binning(a, range, bins) # Compute the bin edges if they are not given explicitely. # For the rightmost bin, we want values equal to the right # edge to be counted in the last bin, and not as an outlier. # Hence, we shift the last bin by a tiny amount. if not iterable(bins): dr = diff(range) / bins * 1e-10 edges = linspace(range[0], range[1] + dr, bins + 1, endpoint=True) else: edges = asarray(bins, float) dedges = diff(edges) bincenters = edges[:-1] + dedges / 2. # Number of bins nbin = len(edges) - 1 # Measure of bin precision. decimal = int(-log10(dedges.min()) + 10) # Choose the fastest histogramming method even = (len(set(around(dedges, decimal))) == 1) if strategy is None: if even: strategy = 'binsize' else: if nbin > 30: # approximative threshold strategy = 'searchsort' else: strategy = 'digitize' else: if strategy not in ['binsize', 'digitize', 'searchsort']: raise ValueError('Unknown histogramming strategy.', strategy) if strategy == 'binsize' and not even: raise ValueError( 'This binsize strategy cannot be used for uneven bins.') # Stochastics for the fixed_binsize functions. 
start = float(edges[0]) binwidth = float(dedges[0]) # Looping to reduce memory usage block = 66600 slices = [slice(None)] * a.ndim for i in arange(0, len(a), block): slices[axis] = slice(i, i + block) at = a[slices] if weighted: at = concatenate((at, weights[slices]), axis) if strategy == 'binsize': count = apply_along_axis(_splitinmiddle, axis, at, flib.weighted_fixed_binsize, start, binwidth, nbin) elif strategy == 'searchsort': count = apply_along_axis(_splitinmiddle, axis, at, _histogram_searchsort_weighted, edges) elif strategy == 'digitize': count = apply_along_axis(_splitinmiddle, axis, at, _histogram_digitize, edges, normed) else: if strategy == 'binsize': count = apply_along_axis( flib.fixed_binsize, axis, at, start, binwidth, nbin) elif strategy == 'searchsort': count = apply_along_axis( _histogram_searchsort, axis, at, edges) elif strategy == 'digitize': count = apply_along_axis( _histogram_digitize, axis, at, None, edges, normed) if i == 0: total = count else: total += count # Outlier count upper = total.take(array([-1]), axis) lower = total.take(array([0]), axis) # Non-outlier count core = a.ndim * [slice(None)] core[axis] = slice(1, -1) hist = total[core] if normed: normalize = lambda x: atleast_1d(x / (x * dedges).sum()) hist = apply_along_axis(normalize, axis, hist) return hist, {'edges': edges, 'lower': lower, 'upper': upper, 'bincenters': bincenters, 'strategy': strategy}
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/utils.py#L172-L327
binomial distribution
python
def generic_distribution(target, seeds, func): r""" Accepts an 'rv_frozen' object from the Scipy.stats submodule and returns values from the distribution for the given seeds This uses the ``ppf`` method of the stats object Parameters ---------- target : OpenPNM Object The object which this model is associated with. This controls the length of the calculated array, and also provides access to other necessary properties. seeds : string, optional The dictionary key on the Geometry object containing random seed values (between 0 and 1) to use in the statistical distribution. func : object An 'rv_frozen' object from the Scipy.stats library with all of the parameters pre-specified. Examples -------- The following code illustrates the process of obtaining a 'frozen' Scipy stats object and adding it as a model: >>> import scipy >>> import openpnm as op >>> pn = op.network.Cubic(shape=[3, 3, 3]) >>> geo = op.geometry.GenericGeometry(network=pn, pores=pn.Ps, throats=pn.Ts) >>> geo.add_model(propname='pore.seed', ... model=op.models.geometry.pore_seed.random) Now retrieve the stats distribution and add to ``geo`` as a model: >>> stats_obj = scipy.stats.weibull_min(c=2, scale=.0001, loc=0) >>> geo.add_model(propname='pore.size', ... model=op.models.geometry.pore_size.generic_distribution, ... seeds='pore.seed', ... func=stats_obj) >>> import matplotlib.pyplot as plt >>> fig = plt.hist(stats_obj.ppf(q=scipy.rand(1000)), bins=50) """ seeds = target[seeds] value = func.ppf(seeds) return value
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/models/misc/misc.py#L292-L341
binomial distribution
python
def cumulative_distribution(self, X): """Computes the cumulative distribution function for the copula Args: X: `numpy.ndarray` or `pandas.DataFrame` Returns: np.array: cumulative probability """ self.check_fit() # Wrapper for pdf to accept vector as args def func(*args): return self.probability_density(list(args)) # Lower bound for integral, to split significant part from tail lower_bound = self.get_lower_bound() ranges = [[lower_bound, val] for val in X] return integrate.nquad(func, ranges)[0]
https://github.com/DAI-Lab/Copulas/blob/821df61c3d36a6b81ef2883935f935c2eaaa862c/copulas/multivariate/gaussian.py#L174-L193
binomial distribution
python
def randindex(lo, hi, n = 1.): """ Yields integers in the range [lo, hi) where 0 <= lo < hi. Each return value is a two-element tuple. The first element is the random integer, the second is the natural logarithm of the probability with which that integer will be chosen. The CDF for the distribution from which the integers are drawn goes as [integer]^{n}, where n > 0. Specifically, it's CDF(x) = (x^{n} - lo^{n}) / (hi^{n} - lo^{n}) n = 1 yields a uniform distribution; n > 1 favours larger integers, n < 1 favours smaller integers. """ if not 0 <= lo < hi: raise ValueError("require 0 <= lo < hi: lo = %d, hi = %d" % (lo, hi)) if n <= 0.: raise ValueError("n <= 0: %g" % n) elif n == 1.: # special case for uniform distribution try: lnP = math.log(1. / (hi - lo)) except ValueError: raise ValueError("[lo, hi) domain error") hi -= 1 rnd = random.randint while 1: yield rnd(lo, hi), lnP # CDF evaluated at index boundaries lnP = numpy.arange(lo, hi + 1, dtype = "double")**n lnP -= lnP[0] lnP /= lnP[-1] # differences give probabilities lnP = tuple(numpy.log(lnP[1:] - lnP[:-1])) if numpy.isinf(lnP).any(): raise ValueError("[lo, hi) domain error") beta = lo**n / (hi**n - lo**n) n = 1. / n alpha = hi / (1. + beta)**n flr = math.floor rnd = random.random while 1: index = int(flr(alpha * (rnd() + beta)**n)) # the tuple look-up provides the second part of the # range safety check on index assert index >= lo yield index, lnP[index - lo]
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/iterutils.py#L337-L386
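Typical usage, assuming randindex and its math/numpy/random dependencies are imported; each draw is an (integer, log-probability) pair:

gen = randindex(1, 100, n=2.)   # CDF ~ x**2 favours larger integers
idx, lnp = next(gen)            # idx in [1, 100), lnp = log of its probability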
binomial distribution
python
def weibull(target, seeds, shape, scale, loc):
    r"""
    Produces values from a Weibull distribution given a set of random numbers.

    Parameters
    ----------
    target : OpenPNM Object
        The object which this model is associated with. This controls the
        length of the calculated array, and also provides access to other
        necessary properties.

    seeds : string, optional
        The dictionary key on the Geometry object containing random seed values
        (between 0 and 1) to use in the statistical distribution.

    shape : float
        This controls the skewness of the distribution, with 'shape' < 1 giving
        values clustered on the low end of the range with a long tail, and
        'shape' > 1 giving a more symmetrical distribution.

    scale : float
        This controls the width of the distribution with most of values falling
        below this number.

    loc : float
        Applies an offset to the distribution such that the smallest values are
        above this number.

    Examples
    --------
    The following code illustrates the inner workings of this function,
    which uses the 'weibull_min' method of the scipy.stats module.  This can
    be used to find suitable values of 'shape', 'scale' and 'loc'.  Note that
    'shape' is represented by 'c' in the actual function call.

    >>> import scipy
    >>> func = scipy.stats.weibull_min(c=1.5, scale=0.0001, loc=0)
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.hist(func.ppf(q=scipy.rand(10000)), bins=50)

    """
    seeds = target[seeds]
    value = spts.weibull_min.ppf(q=seeds, c=shape, scale=scale, loc=loc)
    return value
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/models/misc/misc.py#L207-L250
binomial distribution
python
def _ndtri(y): """ Port of cephes ``ndtri.c``: inverse normal distribution function. See https://github.com/jeremybarnes/cephes/blob/master/cprob/ndtri.c """ # approximation for 0 <= abs(z - 0.5) <= 3/8 P0 = [ -5.99633501014107895267E1, 9.80010754185999661536E1, -5.66762857469070293439E1, 1.39312609387279679503E1, -1.23916583867381258016E0, ] Q0 = [ 1.95448858338141759834E0, 4.67627912898881538453E0, 8.63602421390890590575E1, -2.25462687854119370527E2, 2.00260212380060660359E2, -8.20372256168333339912E1, 1.59056225126211695515E1, -1.18331621121330003142E0, ] # Approximation for interval z = sqrt(-2 log y ) between 2 and 8 # i.e., y between exp(-2) = .135 and exp(-32) = 1.27e-14. P1 = [ 4.05544892305962419923E0, 3.15251094599893866154E1, 5.71628192246421288162E1, 4.40805073893200834700E1, 1.46849561928858024014E1, 2.18663306850790267539E0, -1.40256079171354495875E-1, -3.50424626827848203418E-2, -8.57456785154685413611E-4, ] Q1 = [ 1.57799883256466749731E1, 4.53907635128879210584E1, 4.13172038254672030440E1, 1.50425385692907503408E1, 2.50464946208309415979E0, -1.42182922854787788574E-1, -3.80806407691578277194E-2, -9.33259480895457427372E-4, ] # Approximation for interval z = sqrt(-2 log y ) between 8 and 64 # i.e., y between exp(-32) = 1.27e-14 and exp(-2048) = 3.67e-890. P2 = [ 3.23774891776946035970E0, 6.91522889068984211695E0, 3.93881025292474443415E0, 1.33303460815807542389E0, 2.01485389549179081538E-1, 1.23716634817820021358E-2, 3.01581553508235416007E-4, 2.65806974686737550832E-6, 6.23974539184983293730E-9, ] Q2 = [ 6.02427039364742014255E0, 3.67983563856160859403E0, 1.37702099489081330271E0, 2.16236993594496635890E-1, 1.34204006088543189037E-2, 3.28014464682127739104E-4, 2.89247864745380683936E-6, 6.79019408009981274425E-9, ] sign_flag = 1 if y > (1 - EXP_NEG2): y = 1 - y sign_flag = 0 # Shortcut case where we don't need high precision # between -0.135 and 0.135 if y > EXP_NEG2: y -= 0.5 y2 = y ** 2 x = y + y * (y2 * _polevl(y2, P0, 4) / _p1evl(y2, Q0, 8)) x = x * ROOT_2PI return x x = math.sqrt(-2.0 * math.log(y)) x0 = x - math.log(x) / x z = 1.0 / x if x < 8.0: # y > exp(-32) = 1.2664165549e-14 x1 = z * _polevl(z, P1, 8) / _p1evl(z, Q1, 8) else: x1 = z * _polevl(z, P2, 8) / _p1evl(z, Q2, 8) x = x0 - x1 if sign_flag != 0: x = -x return x
https://github.com/dougthor42/PyErf/blob/cf38a2c62556cbd4927c9b3f5523f39b6a492472/pyerf/pyerf.py#L183-L287
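_ndtri is a port of the inverse standard-normal CDF, so it can be spot-checked against scipy:

from scipy.stats import norm

print(_ndtri(0.975))     # ~ 1.959964
print(norm.ppf(0.975))   # same value from scipy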
binomial distribution
python
def marginal(repertoire, node_index): """Get the marginal distribution for a node.""" index = tuple(i for i in range(repertoire.ndim) if i != node_index) return repertoire.sum(index, keepdims=True)
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/distribution.py#L58-L62
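Typical usage with a toy repertoire; keepdims=True preserves the number of axes so the marginal still broadcasts against the original array:

import numpy as np

repertoire = np.random.rand(2, 2, 2)
repertoire /= repertoire.sum()
m = marginal(repertoire, node_index=0)   # shape (2, 1, 1), sums to 1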
binomial distribution
python
def random_real_solution(solution_size, lower_bounds, upper_bounds): """Make a list of random real numbers between lower and upper bounds.""" return [ random.uniform(lower_bounds[i], upper_bounds[i]) for i in range(solution_size) ]
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/common.py#L34-L39
binomial distribution
python
def nb_fit(data, P_init=None, R_init=None, epsilon=1e-8, max_iters=100):
    """
    Fits the NB distribution to data using method of moments.

    Args:
        data (array): genes x cells
        P_init (array, optional): NB success prob param - genes x 1
        R_init (array, optional): NB stopping param - genes x 1

    Returns:
        P, R - fit to data
    """
    means = data.mean(1)
    variances = data.var(1)
    if (means > variances).any():
        raise ValueError("For NB fit, means must be less than variances")
    genes, cells = data.shape
    # method of moments
    P = 1.0 - means/variances
    R = means*(1-P)/P
    for i in range(genes):
        result = minimize(nb_ll_row, [P[i], R[i]], args=(data[i,:],),
                bounds = [(0, 1), (epsilon, None)])  # use the epsilon argument, not an undefined name
        params = result.x
        P[i] = params[0]
        R[i] = params[1]
        #R[i] = fsolve(nb_r_deriv, R[i], args = (data[i,:],))
        #P[i] = data[i,:].mean()/(data[i,:].mean() + R[i])
    return P,R
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/nb_clustering.py#L105-L133
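The moment-matching step above can be sanity-checked on synthetic data; NumPy's negative_binomial(n, p) has mean n(1-p)/p and variance n(1-p)/p**2, so the estimates should recover R ~ n and P ~ 1 - p:

import numpy as np

data = np.random.negative_binomial(n=5, p=0.4, size=(1, 100000)).astype(float)
mu, var = data.mean(1), data.var(1)
P = 1.0 - mu / var      # ~ 0.6
R = mu * (1 - P) / P    # ~ 5.0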
binomial distribution
python
def from_config(cls, cp, section, variable_args): """Returns a distribution based on a configuration file. The parameters for the distribution are retrieved from the section titled "[`section`-`variable_args`]" in the config file. By default, only the name of the distribution (`uniform_angle`) needs to be specified. This will results in a uniform prior on `[0, 2pi)`. To make the domain cyclic, add `cyclic_domain =`. To specify boundaries that are not `[0, 2pi)`, add `(min|max)-var` arguments, where `var` is the name of the variable. For example, this will initialize a variable called `theta` with a uniform distribution on `[0, 2pi)` without cyclic boundaries: .. code-block:: ini [{section}-theta] name = uniform_angle This will make the domain cyclic on `[0, 2pi)`: .. code-block:: ini [{section}-theta] name = uniform_angle cyclic_domain = Parameters ---------- cp : pycbc.workflow.WorkflowConfigParser A parsed configuration file that contains the distribution options. section : str Name of the section in the configuration file. variable_args : str The names of the parameters for this distribution, separated by ``VARARGS_DELIM``. These must appear in the "tag" part of the section header. Returns ------- UniformAngle A distribution instance from the pycbc.inference.prior module. """ # we'll retrieve the setting for cyclic_domain directly additional_opts = {'cyclic_domain': cp.has_option_tag(section, 'cyclic_domain', variable_args)} return bounded.bounded_from_config(cls, cp, section, variable_args, bounds_required=False, additional_opts=additional_opts)
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/distributions/angular.py#L129-L178
binomial distribution
python
def _qnwnorm1(n): """ Compute nodes and weights for quadrature of univariate standard normal distribution Parameters ---------- n : int The number of nodes Returns ------- nodes : np.ndarray(dtype=float) An n element array of nodes nodes : np.ndarray(dtype=float) An n element array of weights Notes ----- Based of original function ``qnwnorm1`` in CompEcon toolbox by Miranda and Fackler References ---------- Miranda, Mario J, and Paul L Fackler. Applied Computational Economics and Finance, MIT Press, 2002. """ maxit = 100 pim4 = 1 / np.pi**(0.25) m = int(fix((n + 1) / 2)) nodes = np.zeros(n) weights = np.zeros(n) for i in range(m): if i == 0: z = np.sqrt(2*n+1) - 1.85575 * ((2 * n + 1)**(-1 / 6.1)) elif i == 1: z = z - 1.14 * (n ** 0.426) / z elif i == 2: z = 1.86 * z + 0.86 * nodes[0] elif i == 3: z = 1.91 * z + 0.91 * nodes[1] else: z = 2 * z + nodes[i-2] its = 0 while its < maxit: its += 1 p1 = pim4 p2 = 0 for j in range(1, n+1): p3 = p2 p2 = p1 p1 = z * math.sqrt(2.0/j) * p2 - math.sqrt((j - 1.0) / j) * p3 pp = math.sqrt(2 * n) * p2 z1 = z z = z1 - p1/pp if abs(z - z1) < 1e-14: break if its == maxit: raise ValueError("Failed to converge in _qnwnorm1") nodes[n - 1 - i] = z nodes[i] = -z weights[i] = 2 / (pp*pp) weights[n - 1 - i] = weights[i] weights /= math.sqrt(math.pi) nodes = nodes * math.sqrt(2.0) return nodes, weights
https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/quad.py#L805-L880
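A quick sanity check, assuming _qnwnorm1 is importable: the weights should reproduce the moments of the standard normal:

nodes, weights = _qnwnorm1(11)
print(weights.sum())                # ~ 1.0
print((weights * nodes**2).sum())   # ~ 1.0, the variance of N(0, 1)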
binomial distribution
python
def divide(self, other, inplace=True):
    """
    Returns the division of two gaussian distributions.

    Parameters
    ----------
    other: GaussianDistribution
        The GaussianDistribution to be divided.

    inplace: boolean
        If True, modifies the distribution itself, otherwise returns a new
        GaussianDistribution object.

    Returns
    -------
    GaussianDistribution or None:
            if inplace=True (default) returns None.
            if inplace=False returns a new GaussianDistribution instance.

    Examples
    --------
    >>> import numpy as np
    >>> from pgmpy.factors.distributions import GaussianDistribution as GD
    >>> dis1 = GD(['x1', 'x2', 'x3'], np.array([[1], [-3], [4]]),
    ...           np.array([[4, 2, -2], [2, 5, -5], [-2, -5, 8]]))
    >>> dis2 = GD(['x3', 'x4'], [1, 2], [[2, 3], [5, 6]])
    >>> dis3 = dis1.divide(dis2, inplace=False)
    >>> dis3.covariance
    array([[ 3.6,  1. , -0.4, -0.6],
           [ 1. ,  2.5, -1. , -1.5],
           [-0.4, -1. ,  1.6,  2.4],
           [-1. , -2.5,  4. ,  4.5]])
    >>> dis3.mean
    array([[ 1.6],
           [-1.5],
           [ 1.6],
           [ 3.5]])
    """
    return self._operate(other, operation='divide', inplace=inplace)
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/factors/distributions/GaussianDistribution.py#L508-L546
binomial distribution
python
def _bdtr(k, n, p): """The binomial cumulative distribution function. Args: k: floating point `Tensor`. n: floating point `Tensor`. p: floating point `Tensor`. Returns: `sum_{j=0}^k p^j (1 - p)^(n - j)`. """ # Trick for getting safe backprop/gradients into n, k when # betainc(a = 0, ..) = nan # Write: # where(unsafe, safe_output, betainc(where(unsafe, safe_input, input))) ones = tf.ones_like(n - k) k_eq_n = tf.equal(k, n) safe_dn = tf.where(k_eq_n, ones, n - k) dk = tf.math.betainc(a=safe_dn, b=k + 1, x=1 - p) return tf.where(k_eq_n, ones, dk)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/binomial.py#L43-L62
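The incomplete-beta identity behind _bdtr can be verified with scipy (choosing k < n so the safe branch is exercised):

import numpy as np
from scipy.special import betainc
from scipy.stats import binom

k, n, p = 3.0, 10.0, 0.4
lhs = betainc(n - k, k + 1, 1 - p)   # incomplete-beta form used above
rhs = binom.cdf(k, n, p)             # direct binomial CDF
print(np.isclose(lhs, rhs))          # True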
binomial distribution
python
def rweibull(alpha, beta, size=None): """ Weibull random variates. """ tmp = -np.log(runiform(0, 1, size)) return beta * (tmp ** (1. / alpha))
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L2761-L2766
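This is inverse-CDF sampling: if U ~ Uniform(0, 1), then beta * (-log U)**(1/alpha) has CDF 1 - exp(-(x/beta)**alpha). A NumPy equivalent for comparison:

import numpy as np

alpha, beta = 1.5, 2.0
u = np.random.uniform(0, 1, 10000)
x = beta * (-np.log(u)) ** (1.0 / alpha)
# equivalently: beta * np.random.weibull(alpha, 10000)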
binomial distribution
python
def sample_outcomes(probs, n): """ For a discrete probability distribution ``probs`` with outcomes 0, 1, ..., k-1 draw ``n`` random samples. :param list probs: A list of probabilities. :param Number n: The number of random samples to draw. :return: An array of samples drawn from distribution probs over 0, ..., len(probs) - 1 :rtype: numpy.ndarray """ dist = np.cumsum(probs) rs = np.random.rand(n) return np.array([(np.where(r < dist)[0][0]) for r in rs])
https://github.com/rigetti/grove/blob/dc6bf6ec63e8c435fe52b1e00f707d5ce4cdb9b3/grove/tomography/utils.py#L139-L151
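Typical usage, assuming sample_outcomes is in scope; the empirical frequencies converge on probs:

import numpy as np

samples = sample_outcomes([0.1, 0.2, 0.7], 10000)
print(np.bincount(samples, minlength=3) / 10000.0)   # ~ [0.1, 0.2, 0.7]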
binomial distribution
python
def distributions(self, complexes, counts, volume, maxstates=1e7,
                  ordered=False, temp=37.0):
    '''Runs the \'distributions\' NUPACK command. Note: this is intended
    for a relatively small number of species (on the order of ~20 total
    strands for complex size ~14).

    :param complexes: A list of the type returned by the complexes()
                      method.
    :type complexes: list
    :param counts: A list of the exact number of molecules of each initial
                   species (the strands in the complexes command).
    :type counts: list of ints
    :param volume: The volume, in liters, of the container.
    :type volume: float
    :param maxstates: Maximum number of states to be enumerated, needed
                      as allowing too many states can lead to a segfault.
                      In NUPACK, this is referred to as lambda.
    :type maxstates: float
    :param ordered: Consider distinct ordered complexes - all distinct
                    circular permutations of each complex.
    :type ordered: bool
    :param temp: Temperature in C.
    :type temp: float
    :returns: A list of dictionaries containing (at least) a 'complexes'
              key for the unique complex, an 'ev' key for the expected
              value of the complex population and a 'probcols' list
              indicating the probability that a given complex has
              population 0, 1, ... max(pop) at equilibrium.
    :rtype: list
    :raises: LambdaError if maxstates is exceeded.

    '''
    # Check inputs
    nstrands = len(complexes[0]['strands'])
    if len(counts) != nstrands:
        raise ValueError('counts argument not same length as strands.')

    # Set up command-line arguments
    cmd_args = []
    if ordered:
        cmd_args.append('-ordered')

    # Write .count file (one count per line, then the volume)
    countpath = os.path.join(self._tempdir, 'distributions.count')
    with open(countpath, 'w') as f:
        f.writelines([str(c) + '\n' for c in counts] + [str(volume) + '\n'])

    # Write .cx or .ocx file
    header = ['%t Number of strands: {}'.format(nstrands),
              '%\tid\tsequence']
    # Strand definitions live on the first complex entry
    for i, strand in enumerate(complexes[0]['strands']):
        header.append('%\t{}\t{}'.format(i + 1, strand))
    header.append('%\tT = {}'.format(temp))
    body = []
    for i, cx in enumerate(complexes):
        # Each row describes one complex, not the whole list
        permutation = '\t'.join(str(p) for p in cx['complex'])
        line = '{}\t{}\t{}'.format(i + 1, permutation, cx['energy'])
        body.append(line)
    if ordered:
        cxfile = os.path.join(self._tempdir, 'distributions.ocx')
    else:
        cxfile = os.path.join(self._tempdir, 'distributions.cx')
    # Open for writing (not the default read mode)
    with open(cxfile, 'w') as f:
        f.writelines([l + '\n' for l in header + body])

    # Run 'distributions'
    stdout = self._run('distributions', cmd_args, None)

    # Parse STDOUT
    stdout_lines = stdout.split('\n')
    if stdout_lines[0].startswith('Exceeded maximum number'):
        raise LambdaError('Exceeded maxstates combinations.')
    # pop_search = re.search('There are (*) pop', stdout_lines[0]).group(1)
    # populations = int(pop_search)
    # kT_search = re.search('of the box: (*) kT', stdout_lines[1]).group(1)
    # kT = float(kT_search)

    # Parse .dist file (comments header + TSV)
    dist_lines = self._read_tempfile('distributions.dist').split('\n')
    tsv_lines = [l for l in dist_lines if not l.startswith('%')]
    tsv_lines.pop()

    output = []
    for i, line in enumerate(tsv_lines):
        data = line.split('\t')
        # Column 0 is an index
        # Columns 1 through nstrands are the complex's strand counts
        cx = [int(d) for d in data[1:nstrands + 1]]
        # Column nstrands + 1 is expected value of complex
        ev = float(data[nstrands + 1])
        # Columns nstrands + 2 and on are probability columns
        probcols = [float(d) for d in data[nstrands + 2:]]
        # Append a dict per row rather than indexing an empty list
        output.append({'complex': cx, 'ev': ev, 'probcols': probcols})
    return output
https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_structure/nupack.py#L1245-L1343
binomial distribution
python
def gaussian_distribution(mean, stdev, num_pts=50): """ get an x and y numpy.ndarray that spans the +/- 4 standard deviation range of a gaussian distribution with a given mean and standard deviation. useful for plotting Parameters ---------- mean : float the mean of the distribution stdev : float the standard deviation of the distribution num_pts : int the number of points in the returned ndarrays. Default is 50 Returns ------- x : numpy.ndarray the x-values of the distribution y : numpy.ndarray the y-values of the distribution """ xstart = mean - (4.0 * stdev) xend = mean + (4.0 * stdev) x = np.linspace(xstart,xend,num_pts) y = (1.0/np.sqrt(2.0*np.pi*stdev*stdev)) * np.exp(-1.0 * ((x - mean)**2)/(2.0*stdev*stdev)) return x,y
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/plot/plot_utils.py#L141-L168
binomial distribution
python
def conditional_distribution(self, values, inplace=True): """ Returns Conditional Probability Distribution after setting values to 1. Parameters ---------- values: list or array_like A list of tuples of the form (variable_name, variable_state). The values on which to condition the Joint Probability Distribution. inplace: Boolean (default True) If False returns a new instance of JointProbabilityDistribution Examples -------- >>> import numpy as np >>> from pgmpy.factors.discrete import JointProbabilityDistribution >>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 2, 2], np.ones(8)/8) >>> prob.conditional_distribution([('x1', 1)]) >>> print(prob) x2 x3 P(x2,x3) ---- ---- ---------- x2_0 x3_0 0.2500 x2_0 x3_1 0.2500 x2_1 x3_0 0.2500 x2_1 x3_1 0.2500 """ JPD = self if inplace else self.copy() JPD.reduce(values) JPD.normalize() if not inplace: return JPD
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/factors/discrete/JointProbabilityDistribution.py#L238-L268
binomial distribution
python
def Bernoulli(cls, mean: 'TensorFluent', batch_size: Optional[int] = None) -> Tuple[Distribution, 'TensorFluent']: '''Returns a TensorFluent for the Bernoulli sampling op with given mean parameter. Args: mean: The mean parameter of the Bernoulli distribution. batch_size: The size of the batch (optional). Returns: The Bernoulli distribution and a TensorFluent sample drawn from the distribution. ''' probs = mean.tensor dist = tf.distributions.Bernoulli(probs=probs, dtype=tf.bool) batch = mean.batch if not batch and batch_size is not None: t = dist.sample(batch_size) batch = True else: t = dist.sample() scope = mean.scope.as_list() return (dist, TensorFluent(t, scope, batch=batch))
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L85-L106
binomial distribution
python
def get_marginal_distribution(self, index_points=None): """Compute the marginal of this GP over function values at `index_points`. Args: index_points: `float` `Tensor` representing finite (batch of) vector(s) of points in the index set over which the GP is defined. Shape has the form `[b1, ..., bB, e, f1, ..., fF]` where `F` is the number of feature dimensions and must equal `kernel.feature_ndims` and `e` is the number (size) of index points in each batch. Ultimately this distribution corresponds to a `e`-dimensional multivariate normal. The batch shape must be broadcastable with `kernel.batch_shape` and any batch dims yielded by `mean_fn`. Returns: marginal: a `Normal` or `MultivariateNormalLinearOperator` distribution, according to whether `index_points` consists of one or many index points, respectively. """ with self._name_scope('get_marginal_distribution'): # TODO(cgs): consider caching the result here, keyed on `index_points`. index_points = self._get_index_points(index_points) covariance = self._compute_covariance(index_points) loc = self._mean_fn(index_points) # If we're sure the number of index points is 1, we can just construct a # scalar Normal. This has computational benefits and supports things like # CDF that aren't otherwise straightforward to provide. if self._is_univariate_marginal(index_points): scale = tf.sqrt(covariance) # `loc` has a trailing 1 in the shape; squeeze it. loc = tf.squeeze(loc, axis=-1) return normal.Normal( loc=loc, scale=scale, validate_args=self._validate_args, allow_nan_stats=self._allow_nan_stats, name='marginal_distribution') else: scale = tf.linalg.LinearOperatorLowerTriangular( tf.linalg.cholesky(_add_diagonal_shift(covariance, self.jitter)), is_non_singular=True, name='GaussianProcessScaleLinearOperator') return mvn_linear_operator.MultivariateNormalLinearOperator( loc=loc, scale=scale, validate_args=self._validate_args, allow_nan_stats=self._allow_nan_stats, name='marginal_distribution')
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/gaussian_process.py#L320-L366
binomial distribution
python
def ks_unif_pelz_good(samples, statistic): """ Approximates the statistic distribution by a transformed Li-Chien formula. This ought to be a bit more accurate than using the Kolmogorov limit, but should only be used with large squared sample count times statistic. See: doi:10.18637/jss.v039.i11 and http://www.jstor.org/stable/2985019. """ x = 1 / statistic r2 = 1 / samples rx = sqrt(r2) * x r2x = r2 * x r2x2 = r2x * x r4x = r2x * r2 r4x2 = r2x2 * r2 r4x3 = r2x2 * r2x r5x3 = r4x2 * rx r5x4 = r4x3 * rx r6x3 = r4x2 * r2x r7x5 = r5x4 * r2x r9x6 = r7x5 * r2x r11x8 = r9x6 * r2x2 a1 = rx * (-r6x3 / 108 + r4x2 / 18 - r4x / 36 - r2x / 3 + r2 / 6 + 2) a2 = pi2 / 3 * r5x3 * (r4x3 / 8 - r2x2 * 5 / 12 - r2x * 4 / 45 + x + 1 / 6) a3 = pi4 / 9 * r7x5 * (-r4x3 / 6 + r2x2 / 4 + r2x * 53 / 90 - 1 / 2) a4 = pi6 / 108 * r11x8 * (r2x2 / 6 - 1) a5 = pi2 / 18 * r5x3 * (r2x / 2 - 1) a6 = -pi4 * r9x6 / 108 w = -pi2 / 2 * r2x2 return hpi1d2 * ((a1 + (a2 + (a3 + a4 * hs2) * hs2) * hs2) * exp(w * hs2) + (a5 + a6 * is2) * is2 * exp(w * is2)).sum()
https://github.com/wrwrwr/scikit-gof/blob/b950572758b9ebe38b9ea954ccc360d55cdf9c39/skgof/ksdist.py#L172-L202