query (string, 9-60 chars) | language (string, 1 class: python) | code (string, 105-25.7k chars) | url (string, 91-217 chars)
---|---|---|---|
k means clustering
|
python
|
def k_means_clustering(data,K):
"""
K-means clustering is an algorithm that takes a data set and
a number of clusters K and returns labels assigning each sample
to one of the K clusters of mutually similar points.
Parameters
--------------------
data: array-like, shape= (m_samples,n_samples)
K: integer
number of K clusters
Returns
-------
labels: array-like, shape (1,n_samples)
"""
N = data.shape[0]
centroids, data_norms = orthogonal_initialization(data,K)
old_centroids= np.zeros((N,K))
labels = []
# Run the main k-means algorithm
while not _has_converged(centroids, old_centroids):
labels = get_labels(data, centroids, K)
# remember the previous centroids before recomputing them, so the convergence check compares old vs. new
old_centroids = centroids
centroids = get_centroids(data, K, labels, centroids, data_norms)
return labels
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/utils/k_means_clustering.py#L21-L47
|
k means clustering
|
python
|
def k_means(
X,
n_clusters,
init="k-means||",
precompute_distances="auto",
n_init=1,
max_iter=300,
verbose=False,
tol=1e-4,
random_state=None,
copy_x=True,
n_jobs=-1,
algorithm="full",
return_n_iter=False,
oversampling_factor=2,
init_max_iter=None,
):
"""K-means algorithm for clustering
Differences from scikit-learn:
* init='k-means||'
* oversampling_factor keyword
* n_jobs=-1
"""
labels, inertia, centers, n_iter = _kmeans_single_lloyd(
X,
n_clusters,
max_iter=max_iter,
init=init,
verbose=verbose,
tol=tol,
random_state=random_state,
oversampling_factor=oversampling_factor,
init_max_iter=init_max_iter,
)
if return_n_iter:
return labels, centers, inertia, n_iter
else:
return labels, centers, inertia
|
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/cluster/k_means.py#L236-L275
|
k means clustering
|
python
|
def kmean_clustering(network, n_clusters=10, load_cluster=False,
line_length_factor=1.25,
remove_stubs=False, use_reduced_coordinates=False,
bus_weight_tocsv=None, bus_weight_fromcsv=None,
n_init=10, max_iter=300, tol=1e-4,
n_jobs=1):
""" Main function of the k-mean clustering approach. Maps an original
network to a new one with adjustable number of nodes and new coordinates.
Parameters
----------
network : :class:`pypsa.Network`
Container for all network components.
n_clusters : int
Desired number of clusters.
load_cluster : boolean
Loads cluster coordinates from a former calculation.
line_length_factor : float
Factor to multiply the crow-flies distance between new buses in order
to get new line lengths.
remove_stubs: boolean
Removes stubs and stubby trees (i.e. sequentially reducing dead-ends).
use_reduced_coordinates: boolean
If True, do not average cluster coordinates, but take from busmap.
bus_weight_tocsv : str
Creates a bus weighting based on conventional generation and load
and save it to a csv file.
bus_weight_fromcsv : str
Loads a bus weighting from a csv file to apply it to the clustering
algorithm.
Returns
-------
network : pypsa.Network object
Container for all network components.
"""
def weighting_for_scenario(x, save=None):
"""
"""
b_i = x.index
g = normed(gen.reindex(b_i, fill_value=0))
l = normed(load.reindex(b_i, fill_value=0))
w = g + l
weight = ((w * (100000. / w.max())).astype(int)
).reindex(network.buses.index, fill_value=1)
if save:
weight.to_csv(save)
return weight
def normed(x):
return (x / x.sum()).fillna(0.)
print('start k-mean clustering')
# prepare k-mean
# k-means clustering (first try)
network.generators.control = "PV"
network.storage_units.control[network.storage_units.carrier == \
'extendable_storage'] = "PV"
network.buses['v_nom'] = 380.
# problem our lines have no v_nom. this is implicitly defined by the
# connected buses:
network.lines["v_nom"] = network.lines.bus0.map(network.buses.v_nom)
# adjust the x of the lines which are not 380.
lines_v_nom_b = network.lines.v_nom != 380
network.lines.loc[lines_v_nom_b, 'x'] *= \
(380. / network.lines.loc[lines_v_nom_b, 'v_nom'])**2
network.lines.loc[lines_v_nom_b, 'v_nom'] = 380.
trafo_index = network.transformers.index
transformer_voltages = \
pd.concat([network.transformers.bus0.map(network.buses.v_nom),
network.transformers.bus1.map(network.buses.v_nom)], axis=1)
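# replace transformers by equivalent 380 kV lines (reactance rescaled by the squared voltage ratio), then drop them from the transformer table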
network.import_components_from_dataframe(
network.transformers.loc[:, [
'bus0', 'bus1', 'x', 's_nom', 'capital_cost', 'sub_network', 's_nom_total']]
.assign(x=network.transformers.x * (380. /
transformer_voltages.max(axis=1))**2, length = 1)
.set_index('T' + trafo_index),
'Line')
network.transformers.drop(trafo_index, inplace=True)
for attr in network.transformers_t:
network.transformers_t[attr] = network.transformers_t[attr]\
.reindex(columns=[])
# remove stubs
if remove_stubs:
network.determine_network_topology()
busmap = busmap_by_stubs(network)
network.generators['weight'] = network.generators['p_nom']
aggregate_one_ports = components.one_port_components.copy()
aggregate_one_ports.discard('Generator')
# reset coordinates to the new reduced guys, rather than taking an
# average (copied from pypsa.networkclustering)
if use_reduced_coordinates:
# TODO : FIX THIS HACK THAT HAS UNEXPECTED SIDE-EFFECTS,
# i.e. network is changed in place!!
network.buses.loc[busmap.index, ['x', 'y']
] = network.buses.loc[busmap, ['x', 'y']].values
clustering = get_clustering_from_busmap(
network,
busmap,
aggregate_generators_weighted=True,
aggregate_one_ports=aggregate_one_ports,
line_length_factor=line_length_factor)
network = clustering.network
# define weighting based on conventional 'old' generator spatial
# distribution
non_conv_types = {
'biomass',
'wind_onshore',
'wind_offshore',
'solar',
'geothermal',
'load shedding',
'extendable_storage'}
# Attention: network.generators.carrier.unique()
gen = (network.generators.loc[(network.generators.carrier
.isin(non_conv_types) == False)]
.groupby('bus').p_nom.sum()
.reindex(network.buses.index, fill_value=0.) +
network.storage_units
.loc[(network.storage_units.carrier
.isin(non_conv_types) == False)]
.groupby('bus').p_nom.sum()
.reindex(network.buses.index, fill_value=0.))
load = network.loads_t.p_set.mean().groupby(network.loads.bus).sum()
# k-mean clustering
# busmap = busmap_by_kmeans(network, bus_weightings=pd.Series(np.repeat(1,
# len(network.buses)), index=network.buses.index) , n_clusters= 10)
# State whether to create a bus weighting and save it, create or not save
# it, or use a bus weighting from a csv file
if bus_weight_tocsv is not None:
weight = weighting_for_scenario(x=network.buses, save=bus_weight_tocsv)
elif bus_weight_fromcsv is not None:
weight = pd.Series.from_csv(bus_weight_fromcsv)
weight.index = weight.index.astype(str)
else:
weight = weighting_for_scenario(x=network.buses, save=False)
busmap = busmap_by_kmeans(
network,
bus_weightings=pd.Series(weight),
n_clusters=n_clusters,
load_cluster=load_cluster,
n_init=n_init,
max_iter=max_iter,
tol=tol,
n_jobs=n_jobs)
# ToDo change function in order to use bus_strategies or similar
network.generators['weight'] = network.generators['p_nom']
aggregate_one_ports = components.one_port_components.copy()
aggregate_one_ports.discard('Generator')
clustering = get_clustering_from_busmap(
network,
busmap,
aggregate_generators_weighted=True,
aggregate_one_ports=aggregate_one_ports)
return clustering
|
https://github.com/openego/eTraGo/blob/2a8fc6d4368d0e9abe6fe0d0c39baf66ea0126b9/etrago/cluster/networkclustering.py#L389-L566
|
k means clustering
|
python
|
def kmeans_clustering(self, numc, X=None, npcs=15):
"""Performs k-means clustering.
Parameters
----------
numc - int
Number of clusters
npcs - int, optional, default 15
Number of principal components to use as input for k-means
clustering.
"""
from sklearn.cluster import KMeans
from sklearn.preprocessing import Normalizer  # used below; assumed to be available at module level in the original source
if X is None:
D_sub = self.adata.uns['X_processed']
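# centre the processed data and project it onto the top `npcs` principal components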
X = (
D_sub -
D_sub.mean(0)).dot(
self.adata.uns['pca_obj'].components_[
:npcs,
:].T)
save = True
else:
save = False
cl = KMeans(n_clusters=numc).fit_predict(Normalizer().fit_transform(X))
if save:
self.adata.obs['kmeans_clusters'] = pd.Categorical(cl)
else:
return cl
|
https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L1318-L1350
|
k means clustering
|
python
|
def spectral_clustering(geom, K, eigen_solver = 'dense', random_state = None, solver_kwds = None,
renormalize = True, stabalize = True, additional_vectors = 0):
"""
Spectral clustering finds K clusters by using the eigenvectors of a
matrix which is derived from a set of similarities S.
Parameters
-----------
S: array-like,shape(n_sample,n_sample)
similarity matrix
K: integer
number of K clusters
eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
'auto' :
algorithm will attempt to choose the best method for input data
'dense' :
use standard dense matrix operations for the eigenvalue decomposition.
For this method, M must be an array or matrix type. This method should be avoided for large problems.
'arpack' :
use arnoldi iteration in shift-invert mode. For this method,
M may be a dense matrix, sparse matrix, or general linear operator.
Warning: ARPACK can be unstable for some problems. It is best to
try several random seeds in order to check results.
'lobpcg' :
Locally Optimal Block Preconditioned Conjugate Gradient Method.
A preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.
'amg' :
AMG requires pyamg to be installed. It can be faster on very large,
sparse problems, but may also lead to instabilities.
random_state : numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.RandomState
solver_kwds : any additional keyword arguments to pass to the selected eigen_solver
renormalize : (bool) whether or not to set the rows of the eigenvectors to have norm 1
this can improve label quality
stabalize : (bool) whether or not to compute the (more stable) eigenvectors of L = D^-1/2*S*D^-1/2
instead of P = D^-1*S
additional_vectors : (int) compute additional eigen vectors when computing eigen decomposition.
When eigen_solver = 'amg' or 'lobpcg' and only a small number of eigenvalues is sought, the
largest eigenvalue returned is often *not* equal to 1 (it should be). This can usually be fixed
by requesting more than K eigenvalues until the first eigenvalue is close to 1 and then
omitting it. The remaining K-1 eigenvectors should be informative.
Returns
-------
labels: array-like, shape (1,n_samples)
"""
# Step 1: get similarity matrix
if geom.affinity_matrix is None:
S = geom.compute_affinity_matrix()
else:
S = geom.affinity_matrix
# Check for stability method, symmetric solvers require this
if eigen_solver in ['lobpcg', 'amg']:
stabalize = True
if stabalize:
geom.laplacian_type = 'symmetricnormalized'
return_lapsym = True
else:
geom.laplacian_type = 'randomwalk'
return_lapsym = False
# Step 2: get the Laplacian matrix
P = geom.compute_laplacian_matrix(return_lapsym = return_lapsym)
# by default the Laplacian is subtracted from the identity matrix (this step may not be needed)
P += identity(P.shape[0])
# Step 3: Compute the top K eigenvectors and drop the first
if eigen_solver in ['auto', 'amg', 'lobpcg']:
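# request roughly 2*log(N)*K extra eigenvectors so the leading eigenvalue (which should be ~1) is reliably captured; see the additional_vectors note above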
n_components = 2*int(np.log(P.shape[0]))*K + 1
n_components += int(additional_vectors)
else:
n_components = K
n_components = min(n_components, P.shape[0])
(lambdas, eigen_vectors) = eigen_decomposition(P, n_components=n_components, eigen_solver=eigen_solver,
random_state=random_state, drop_first = True,
solver_kwds=solver_kwds)
# the first vector is usually uninformative
if eigen_solver in ['auto', 'lobpcg', 'amg']:
if np.abs(lambdas[0] - 1) > 1e-4:
warnings.warn("largest eigenvalue not equal to 1. Results may be poor. Try increasing additional_vectors parameter")
eigen_vectors = eigen_vectors[:, 1:K]
lambdas = lambdas[1:K]
# If stability method chosen, adjust eigenvectors
if stabalize:
w = np.array(geom.laplacian_weights)
eigen_vectors /= np.sqrt(w[:,np.newaxis])
eigen_vectors /= np.linalg.norm(eigen_vectors, axis = 0)
# If renormalize: set each data point to unit length
if renormalize:
norms = np.linalg.norm(eigen_vectors, axis=1)
eigen_vectors /= norms[:,np.newaxis]
# Step 4: run k-means clustering
labels = k_means_clustering(eigen_vectors,K)
return labels, eigen_vectors, P
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/utils/spectral_clustering.py#L94-L193
|
k means clustering
|
python
|
def _bisect_kmeans(X, n_clusters, n_trials, max_iter, tol):
""" Apply Bisecting Kmeans clustering
to reach n_clusters number of clusters
"""
membs = np.empty(shape=X.shape[0], dtype=int)
centers = dict() #np.empty(shape=(n_clusters,X.shape[1]), dtype=float)
sse_arr = dict() #-1.0*np.ones(shape=n_clusters, dtype=float)
## data structure to store cluster hierarchies
tree = treelib.Tree()
tree = _add_tree_node(tree, 0, ilev=0, X=X)
km = _kmeans.KMeans(n_clusters=2, n_trials=n_trials, max_iter=max_iter, tol=tol)
for i in range(1,n_clusters):
sel_clust_id,sel_memb_ids = _select_cluster_2_split(membs, tree)
X_sub = X[sel_memb_ids,:]
km.fit(X_sub)
#print("Bisecting Step %d :"%i, sel_clust_id, km.sse_arr_, km.centers_)
## Updating the clusters & properties
#sse_arr[[sel_clust_id,i]] = km.sse_arr_
#centers[[sel_clust_id,i]] = km.centers_
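# record the two new child clusters in the tree; they receive the labels 2*i-1 and 2*i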
tree = _add_tree_node(tree, 2*i-1, i, \
size=np.sum(km.labels_ == 0), center=km.centers_[0], \
sse=km.sse_arr_[0], parent= sel_clust_id)
tree = _add_tree_node(tree, 2*i, i, \
size=np.sum(km.labels_ == 1), center=km.centers_[1], \
sse=km.sse_arr_[1], parent= sel_clust_id)
pred_labels = km.labels_
pred_labels[np.where(pred_labels == 1)[0]] = 2*i
pred_labels[np.where(pred_labels == 0)[0]] = 2*i - 1
#if sel_clust_id == 1:
# pred_labels[np.where(pred_labels == 0)[0]] = sel_clust_id
# pred_labels[np.where(pred_labels == 1)[0]] = i
#else:
# pred_labels[np.where(pred_labels == 1)[0]] = i
# pred_labels[np.where(pred_labels == 0)[0]] = sel_clust_id
membs[sel_memb_ids] = pred_labels
for n in tree.leaves():
label = n.data['label']
centers[label] = n.data['center']
sse_arr[label] = n.data['sse']
return(centers, membs, sse_arr, tree)
|
https://github.com/vmirly/pyclust/blob/bdb12be4649e70c6c90da2605bc5f4b314e2d07e/pyclust/_bisect_kmeans.py#L110-L157
|
k means clustering
|
python
|
def process(self):
"""!
@brief Performs cluster analysis in line with rules of K-Means algorithm.
@return (kmeans) Returns itself (K-Means instance).
@remark Results of clustering can be obtained using corresponding get methods.
@see get_clusters()
@see get_centers()
"""
if len(self.__pointer_data[0]) != len(self.__centers[0]):
raise ValueError("Dimension of the input data and dimension of the initial cluster centers must be equal.")
if self.__ccore is True:
self.__process_by_ccore()
else:
self.__process_by_python()
return self
|
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/kmeans.py#L365-L386
|
k means clustering
|
python
|
def cluster_kmeans(data, n_clusters, **kwargs):
"""
Identify clusters using the K-Means algorithm.
Parameters
----------
data : array_like
array of size [n_samples, n_features].
n_clusters : int
The number of clusters expected in the data.
Returns
-------
labels : array
Cluster label for each sample; a placeholder list ([np.nan]) is returned in
place of a core-sample mask.
"""
km = cl.KMeans(n_clusters, **kwargs)
kmf = km.fit(data)
labels = kmf.labels_
return labels, [np.nan]
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/filtering/clustering.py#L35-L56
|
k means clustering
|
python
|
def cluster_kmeans(data=None, k=None, max_iter=10, tolerance=1e-5, stride=1,
metric='euclidean', init_strategy='kmeans++', fixed_seed=False,
n_jobs=None, chunksize=None, skip=0, keep_data=False, clustercenters=None, **kwargs):
r"""k-means clustering
If data is given, it performs a k-means clustering and then assigns the
data using a Voronoi discretization. It returns a :class:`KmeansClustering <pyemma.coordinates.clustering.KmeansClustering>`
object that can be used to extract the discretized data sequences, or to
assign other data points to the same partition. If data is not given, an
empty :class:`KmeansClustering <pyemma.coordinates.clustering.KmeansClustering>`
will be created that still needs to be parametrized, e.g. in a :func:`pipeline`.
Parameters
----------
data: ndarray (T, d) or list of ndarray (T_i, d) or a reader created by :func:`source`
input data, if available in memory
k: int
the number of cluster centers. When not specified (None), min(sqrt(N), 5000) is chosen as default value,
where N denotes the number of data points
max_iter : int
maximum number of iterations before stopping.
tolerance : float
stop iteration when the relative change in the cost function
:math:`C(S) = \sum_{i=1}^{k} \sum_{\mathbf x \in S_i} \left\| \mathbf x - \boldsymbol\mu_i \right\|^2`
is smaller than tolerance.
stride : int, optional, default = 1
If set to 1, all input data will be used for estimation. Note that this
could cause this calculation to be very slow for large data sets. Since
molecular dynamics data is usually correlated at short timescales, it
is often sufficient to estimate transformations at a longer stride.
Note that the stride option in the get_output() function of the returned
object is independent, so you can parametrize at a long stride, and
still map all frames through the transformer.
metric : str
metric to use during clustering ('euclidean', 'minRMSD')
init_strategy : str
determines if the initial cluster centers are chosen according to the kmeans++-algorithm
or drawn uniformly distributed from the provided data set
fixed_seed : bool or (positive) integer
if set to true, the random seed gets fixed resulting in deterministic behavior; default is false.
If an integer >= 0 is given, use this to initialize the random generator.
n_jobs : int or None, default None
Number of threads to use during assignment of the data.
If None, all available CPUs will be used.
chunksize: int, default=None
Number of data frames to process at once. Choose a higher value here,
to optimize thread usage and gain processing speed. If None is passed,
use the default value of the underlying reader/data source. Choose zero to
disable chunking at all.
skip : int, default=0
skip the first initial n frames per trajectory.
keep_data: boolean, default=False
if you intend to quickly resume a non-converged kmeans iteration, set this to True.
Otherwise the linear memory array will have to be re-created. Note that the data will also be deleted,
if and only if the estimation converged within the given tolerance parameter.
clustercenters: ndarray (k, dim), default=None
if passed, the init_strategy is ignored and these centers will be iterated.
Returns
-------
kmeans : a :class:`KmeansClustering <pyemma.coordinates.clustering.KmeansClustering>` clustering object
Object for kmeans clustering.
It holds discrete trajectories and cluster center information.
Examples
--------
>>> import numpy as np
>>> from pyemma.util.contexts import settings
>>> import pyemma.coordinates as coor
>>> traj_data = [np.random.random((100, 3)), np.random.random((100,3))]
>>> with settings(show_progress_bars=False):
... cluster_obj = coor.cluster_kmeans(traj_data, k=20, stride=1)
... cluster_obj.get_output() # doctest: +ELLIPSIS
[array([...
.. seealso:: **Theoretical background**: `Wiki page <http://en.wikipedia.org/wiki/K-means_clustering>`_
.. autoclass:: pyemma.coordinates.clustering.kmeans.KmeansClustering
:members:
:undoc-members:
.. rubric:: Methods
.. autoautosummary:: pyemma.coordinates.clustering.kmeans.KmeansClustering
:methods:
.. rubric:: Attributes
.. autoautosummary:: pyemma.coordinates.clustering.kmeans.KmeansClustering
:attributes:
References
----------
The k-means algorithm was invented in [1]_. The term k-means was
first used in [2]_.
.. [1] Steinhaus, H. (1957).
Sur la division des corps materiels en parties.
Bull. Acad. Polon. Sci. (in French) 4, 801-804.
.. [2] MacQueen, J. B. (1967).
Some Methods for classification and Analysis of Multivariate Observations.
Proceedings of 5th Berkeley Symposium on Mathematical Statistics and
Probability 1. University of California Press. pp. 281-297
"""
from pyemma.coordinates.clustering.kmeans import KmeansClustering
res = KmeansClustering(n_clusters=k, max_iter=max_iter, metric=metric, tolerance=tolerance,
init_strategy=init_strategy, fixed_seed=fixed_seed, n_jobs=n_jobs, skip=skip,
keep_data=keep_data, clustercenters=clustercenters, stride=stride)
from pyemma.util.reflection import get_default_args
cs = _check_old_chunksize_arg(chunksize, get_default_args(cluster_kmeans)['chunksize'], **kwargs)
if data is not None:
res.estimate(data, chunksize=cs)
else:
res.chunksize = cs
return res
|
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/api.py#L1695-L1829
|
k means clustering
|
python
|
def create(dataset, num_clusters=None, features=None, label=None,
initial_centers=None, max_iterations=10, batch_size=None,
verbose=True):
"""
Create a k-means clustering model. The KmeansModel object contains the
computed cluster centers and the cluster assignment for each instance in
the input 'dataset'.
Given a number of clusters, k-means iteratively chooses the best cluster
centers and assigns nearby points to the best cluster. If no points change
cluster membership between iterations, the algorithm terminates.
Parameters
----------
dataset : SFrame
Each row in the SFrame is an observation.
num_clusters : int
Number of clusters. This is the 'k' in k-means.
features : list[str], optional
Names of feature columns to use in computing distances between
observations and cluster centers. 'None' (the default) indicates that
all columns should be used as features. Columns may be of the following
types:
- *Numeric*: values of numeric type integer or float.
- *Array*: list of numeric (int or float) values. Each list element
is treated as a distinct feature in the model.
- *Dict*: dictionary of keys mapped to numeric values. Each unique key
is treated as a distinct feature in the model.
Note that columns of type *list* are not supported. Convert them to
array columns if all entries in the list are of numeric types.
label : str, optional
Name of the column to use as row labels in the Kmeans output. The
values in this column must be integers or strings. If not specified,
row numbers are used by default.
initial_centers : SFrame, optional
Initial centers to use when starting the K-means algorithm. If
specified, this parameter overrides the *num_clusters* parameter. The
'initial_centers' SFrame must contain the same features used in the
input 'dataset'.
If not specified (the default), initial centers are chosen
intelligently with the K-means++ algorithm.
max_iterations : int, optional
The maximum number of iterations to run. Prints a warning if the
algorithm does not converge after max_iterations iterations. If set to
0, the model returns clusters defined by the initial centers and
assignments to those centers.
batch_size : int, optional
Number of randomly-chosen data points to use in each iteration. If
'None' (the default) or greater than the number of rows in 'dataset',
then this parameter is ignored: all rows of `dataset` are used in each
iteration and model training terminates once point assignments stop
changing or `max_iterations` is reached.
verbose : bool, optional
If True, print model training progress to the screen.
Returns
-------
out : KmeansModel
A Model object containing a cluster id for each vertex, and the centers
of the clusters.
See Also
--------
KmeansModel
Notes
-----
- Integer features in the 'dataset' or 'initial_centers' inputs are
converted internally to float type, and the corresponding features in the
output centers are float-typed.
- It can be important for the K-means model to standardize the features so
they have the same scale. This function does *not* standardize
automatically.
References
----------
- `Wikipedia - k-means clustering
<http://en.wikipedia.org/wiki/K-means_clustering>`_
- Arthur, D. and Vassilvitskii, S. (2007) `k-means++: The Advantages of
Careful Seeding <http://ilpubs.stanford.edu:8090/778/1/2006-13.pdf>`_. In
Proceedings of the Eighteenth Annual ACM-SIAM Symposium on Discrete
Algorithms. pp. 1027-1035.
- Elkan, C. (2003) `Using the triangle inequality to accelerate k-means
<http://www.aaai.org/Papers/ICML/2003/ICML03-022.pdf>`_. In Proceedings
of the Twentieth International Conference on Machine Learning, Volume 3,
pp. 147-153.
- Sculley, D. (2010) `Web Scale K-Means Clustering
<http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf>`_. In
Proceedings of the 19th International Conference on World Wide Web. pp.
1177-1178
Examples
--------
>>> sf = turicreate.SFrame({
... 'x1': [0.6777, -9.391, 7.0385, 2.2657, 7.7864, -10.16, -8.162,
... 8.8817, -9.525, -9.153, 2.0860, 7.6619, 6.5511, 2.7020],
... 'x2': [5.6110, 8.5139, 5.3913, 5.4743, 8.3606, 7.8843, 2.7305,
... 5.1679, 6.7231, 3.7051, 1.7682, 7.4608, 3.1270, 6.5624]})
...
>>> model = turicreate.kmeans.create(sf, num_clusters=3)
"""
opts = {'model_name': 'kmeans',
'max_iterations': max_iterations,
}
## Validate the input dataset and initial centers.
_validate_dataset(dataset)
if initial_centers is not None:
_validate_initial_centers(initial_centers)
## Validate and determine the correct number of clusters.
opts['num_clusters'] = _validate_num_clusters(num_clusters,
initial_centers,
dataset.num_rows())
## Validate the row label
col_type_map = {c: dataset[c].dtype for c in dataset.column_names()}
if label is not None:
_validate_row_label(label, col_type_map)
if label in ['cluster_id', 'distance']:
raise ValueError("Row label column name cannot be 'cluster_id' " +
"or 'distance'; these are reserved for other " +
"columns in the Kmeans model's output.")
opts['row_labels'] = dataset[label]
opts['row_label_name'] = label
else:
opts['row_labels'] = _tc.SArray.from_sequence(dataset.num_rows())
opts['row_label_name'] = 'row_id'
## Validate the features relative to the input dataset.
if features is None:
features = dataset.column_names()
valid_features = _validate_features(features, col_type_map,
valid_types=[_array, dict, int, float],
label=label)
sf_features = dataset.select_columns(valid_features)
opts['features'] = sf_features
## Validate the features in the initial centers (if provided)
if initial_centers is not None:
try:
initial_centers = initial_centers.select_columns(valid_features)
except:
raise ValueError("Specified features cannot be extracted from " +
"the provided initial centers.")
if initial_centers.column_types() != sf_features.column_types():
raise TypeError("Feature types are different in the dataset and " +
"initial centers.")
else:
initial_centers = _tc.SFrame()
opts['initial_centers'] = initial_centers
## Validate the batch size and determine the training method.
if batch_size is None:
opts['method'] = 'elkan'
opts['batch_size'] = dataset.num_rows()
else:
opts['method'] = 'minibatch'
opts['batch_size'] = batch_size
## Create and return the model
with _QuietProgress(verbose):
params = _tc.extensions._kmeans.train(opts)
return KmeansModel(params['model'])
|
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/clustering/kmeans.py#L410-L602
|
k means clustering
|
python
|
def _kmeans(X, n_clusters, max_iter, n_trials, tol):
""" Run multiple trials of k-means clustering,
and output the best centers and cluster labels
"""
n_samples, n_features = X.shape[0], X.shape[1]
centers_best = np.empty(shape=(n_clusters,n_features), dtype=float)
labels_best = np.empty(shape=n_samples, dtype=int)
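# run n_trials independent k-means runs and keep the solution with the lowest total SSE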
for i in range(n_trials):
centers, labels, sse_tot, sse_arr, n_iter = _kmeans_run(X, n_clusters, max_iter, tol)
if i==0:
sse_tot_best = sse_tot
sse_arr_best = sse_arr
n_iter_best = n_iter
centers_best = centers.copy()
labels_best = labels.copy()
if sse_tot < sse_tot_best:
sse_tot_best = sse_tot
sse_arr_best = sse_arr
n_iter_best = n_iter
centers_best = centers.copy()
labels_best = labels.copy()
return(centers_best, labels_best, sse_arr_best, n_iter_best)
|
https://github.com/vmirly/pyclust/blob/bdb12be4649e70c6c90da2605bc5f4b314e2d07e/pyclust/_kmeans.py#L78-L101
|
k means clustering
|
python
|
def filter_clustering(self, analytes, filt=False, normalise=True,
method='meanshift', include_time=False,
sort=None, min_data=10, **kwargs):
"""
Applies an n-dimensional clustering filter to the data.
Available Clustering Algorithms
* 'meanshift': The `sklearn.cluster.MeanShift` algorithm.
Automatically determines number of clusters
in data based on the `bandwidth` of expected
variation.
* 'kmeans': The `sklearn.cluster.KMeans` algorithm. Determines
the characteristics of a known number of clusters
within the data. Must provide `n_clusters` to specify
the expected number of clusters.
* 'DBSCAN': The `sklearn.cluster.DBSCAN` algorithm. Automatically
determines the number and characteristics of clusters
within the data based on the 'connectivity' of the
data (i.e. how far apart each data point is in a
multi-dimensional parameter space). Requires you to
set `eps`, the minimum distance point must be from
another point to be considered in the same cluster,
and `min_samples`, the minimum number of points that
must be within the minimum distance for it to be
considered a cluster. It may also be run in automatic
mode by specifying `n_clusters` alongside
`min_samples`, where eps is decreased until the
desired number of clusters is obtained.
For more information on these algorithms, refer to the
documentation.
Parameters
----------
analytes : str
The analyte(s) that the filter applies to.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
normalise : bool
Whether or not to normalise the data to zero mean and unit
variance. Recommended if clustering based on more than 1 analyte.
Uses `sklearn.preprocessing.scale`.
method : str
Which clustering algorithm to use (see above).
include_time : bool
Whether or not to include the Time variable in the clustering
analysis. Useful if you're looking for spatially continuous
clusters in your data, i.e. this will identify each spot in your
analysis as an individual cluster.
sort : bool, str or array-like
Whether or not to label the resulting clusters according to their
contents. If used, the cluster with the lowest values will be
labelled from 0, in order of increasing cluster mean value.
The sorting rules depend on the value of 'sort', which can be the name
of a single analyte (str), a list of several analyte names (array-like)
or True (bool), to specify all analytes used to calculate the cluster.
min_data : int
The minimum number of data points that should be considered by
the filter. Default = 10.
**kwargs
Parameters passed to the clustering algorithm specified by
`method`.
Meanshift Parameters
--------------------
bandwidth : str or float
The bandwidth (float) or bandwidth method ('scott' or 'silverman')
used to estimate the data bandwidth.
bin_seeding : bool
Modifies the behaviour of the meanshift algorithm. Refer to
sklearn.cluster.meanshift documentation.
K-Means Parameters
------------------
n_clusters : int
The number of clusters expected in the data.
DBSCAN Parameters
-----------------
eps : float
The minimum 'distance' points must be apart for them to be in the
same cluster. Defaults to 0.3. Note: If the data are normalised
(they should be for DBSCAN) this is in terms of total sample
variance. Normalised data have a mean of 0 and a variance of 1.
min_samples : int
The minimum number of samples within distance `eps` required
to be considered as an independent cluster.
n_clusters : int
The number of clusters expected. If specified, `eps` will be
incrementally reduced until the expected number of clusters is
found.
maxiter : int
The maximum number of iterations DBSCAN will run.
Returns
-------
None
"""
params = locals()
del(params['self'])
# convert string to list, if single analyte
if isinstance(analytes, str):
analytes = [analytes]
setn = self.filt.maxset + 1
# generate filter
vals = np.vstack(nominal_values(list(self.focus.values())))
if filt is not None:
ind = (self.filt.grab_filt(filt, analytes) &
np.apply_along_axis(all, 0, ~np.isnan(vals)))
else:
ind = np.apply_along_axis(all, 0, ~np.isnan(vals))
if sum(ind) > min_data:
# get indices for data passed to clustering
sampled = np.arange(self.Time.size)[ind]
# generate data for clustering
if include_time:
extra = self.Time
else:
extra = None
# get data as array
ds = stack_keys(self.focus, analytes, extra)
# apply filter, and get nominal values
ds = nominal_values(ds[ind, :])
if normalise | (len(analytes) > 1):
ds = preprocessing.scale(ds)
method_key = {'kmeans': clustering.cluster_kmeans,
# 'DBSCAN': clustering.cluster_DBSCAN,
'meanshift': clustering.cluster_meanshift}
cfun = method_key[method]
labels, core_samples_mask = cfun(ds, **kwargs)
# return labels, and if DBSCAN core_sample_mask
labels_unique = np.unique(labels)
# label the clusters according to their contents
if (sort is not None) & (sort is not False):
if isinstance(sort, str):
sort = [sort]
sanalytes = analytes
# make boolean filter to select analytes
if sort is True:
sortk = np.array([True] * len(sanalytes))
else:
sortk = np.array([s in sort for s in sanalytes])
# create per-point mean based on selected analytes.
sd = np.apply_along_axis(sum, 1, ds[:, sortk])
# calculate per-cluster means
avs = [np.nanmean(sd[labels == lab]) for lab in labels_unique]
# re-order the cluster labels based on their means
order = [x[0] for x in sorted(enumerate(avs), key=lambda x:x[1])]
sdict = dict(zip(order, labels_unique))
else:
sdict = dict(zip(labels_unique, labels_unique))
filts = {}
for ind, lab in sdict.items():
filts[lab] = labels == ind
# only applies to DBSCAN results.
if not all(np.isnan(core_samples_mask)):
filts['core'] = core_samples_mask
resized = {}
for k, v in filts.items():
resized[k] = np.zeros(self.Time.size, dtype=bool)
resized[k][sampled] = v
namebase = '-'.join(analytes) + '_cluster-' + method
info = '-'.join(analytes) + ' cluster filter.'
if method == 'DBSCAN':
for k, v in resized.items():
if isinstance(k, str):
name = namebase + '_core'
elif k < 0:
name = namebase + '_noise'
else:
name = namebase + '_{:.0f}'.format(k)
self.filt.add(name, v, info=info, params=params, setn=setn)
else:
for k, v in resized.items():
name = namebase + '_{:.0f}'.format(k)
self.filt.add(name, v, info=info, params=params, setn=setn)
else:
# if there are no data
name = '-'.join(analytes) + '_cluster-' + method + '_0'
info = '-'.join(analytes) + ' cluster filter failed.'
self.filt.add(name, np.zeros(self.Time.size, dtype=bool),
info=info, params=params, setn=setn)
return
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/D_obj.py#L705-L912
|
k means clustering
|
python
|
def _kmeans_run(X, n_clusters, max_iter, tol):
""" Run a single trial of k-means clustering
on dataset X, and given number of clusters
"""
membs = np.empty(shape=X.shape[0], dtype=int)
centers = _kmeans_init(X, n_clusters)
sse_last = 9999.9
n_iter = 0
for it in range(1,max_iter):
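# assignment step followed by a centre update; stop once the change in total SSE drops below tol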
membs = _assign_clusters(X, centers)
centers,sse_arr = _update_centers(X, membs, n_clusters)
sse_total = np.sum(sse_arr)
if np.abs(sse_total - sse_last) < tol:
n_iter = it
break
sse_last = sse_total
return(centers, membs, sse_total, sse_arr, n_iter)
|
https://github.com/vmirly/pyclust/blob/bdb12be4649e70c6c90da2605bc5f4b314e2d07e/pyclust/_kmeans.py#L57-L75
|
k means clustering
|
python
|
def show_clusters(sample, clusters, centers, initial_centers = None, **kwargs):
"""!
@brief Display K-Means clustering results.
@param[in] sample (list): Dataset that was used for clustering.
@param[in] clusters (array_like): Clusters that were allocated by the algorithm.
@param[in] centers (array_like): Centers that were allocated by the algorithm.
@param[in] initial_centers (array_like): Initial centers that were used by the algorithm, if 'None' then initial centers are not displayed.
@param[in] **kwargs: Arbitrary keyword arguments (available arguments: 'figure', 'display', 'offset').
<b>Keyword Args:</b><br>
- figure (figure): If 'None' then a new figure is created, otherwise the specified figure is used for visualization.
- display (bool): If 'True' then figure will be shown by the method, otherwise it should be shown manually using matplotlib function 'plt.show()'.
- offset (uint): Specify axes index on the figure where results should be drawn (only if argument 'figure' is specified).
@return (figure) Figure where clusters were drawn.
"""
visualizer = cluster_visualizer()
visualizer.append_clusters(clusters, sample)
offset = kwargs.get('offset', 0)
figure = kwargs.get('figure', None)
display = kwargs.get('display', True)
if figure is None:
figure = visualizer.show(display = False)
else:
visualizer.show(figure = figure, display = False)
kmeans_visualizer.__draw_centers(figure, offset, visualizer, centers, initial_centers)
kmeans_visualizer.__draw_rays(figure, offset, visualizer, sample, clusters, centers)
if display is True:
plt.show()
return figure
|
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/kmeans.py#L144-L181
|
k means clustering
|
python
|
def fit_kmeans(self, data, n_clusters, **kwargs):
"""
Fit KMeans clustering algorithm to data.
Parameters
----------
data : array-like
A dataset formatted by `classifier.fitting_data`.
n_clusters : int
The number of clusters in the data.
**kwargs
passed to `sklearn.cluster.KMeans`.
Returns
-------
Fitted `sklearn.cluster.KMeans` object.
"""
km = cl.KMeans(n_clusters=n_clusters, **kwargs)
km.fit(data)
return km
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/filtering/classifier_obj.py#L89-L108
|
k means clustering
|
python
|
def fit(self, X, y=None, **kwargs):
"""Compute k-modes clustering.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
"""
X = pandas_to_numpy(X)
random_state = check_random_state(self.random_state)
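# k_modes returns the encoded centroids, the category encoding map, labels, cost and iteration count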
self._enc_cluster_centroids, self._enc_map, self.labels_,\
self.cost_, self.n_iter_ = k_modes(X,
self.n_clusters,
self.max_iter,
self.cat_dissim,
self.init,
self.n_init,
self.verbose,
random_state,
self.n_jobs)
return self
|
https://github.com/nicodv/kmodes/blob/cdb19fe5448aba1bf501626694bb52e68eafab45/kmodes/kmodes.py#L381-L401
|
k means clustering
|
python
|
def fit(self, data, method='kmeans', **kwargs):
"""
fit classifiers from large dataset.
Parameters
----------
data : dict
A dict of data for clustering. Must contain
items with the same name as analytes used for
clustering.
method : str
A string defining the clustering method used. Can be:
* 'kmeans' : K-Means clustering algorithm
* 'meanshift' : Meanshift algorithm
n_clusters : int
*K-Means only*. The number of clusters to identify
bandwidth : float
*Meanshift only.*
The bandwidth value used during clustering.
If none, determined automatically. Note:
the data are scaled before clustering, so
this is not in the same units as the data.
bin_seeding : bool
*Meanshift only.*
Whether or not to use 'bin_seeding'. See
documentation for `sklearn.cluster.MeanShift`.
**kwargs :
passed to `sklearn.cluster.MeanShift`.
Returns
-------
list
"""
self.method = method
ds_fit = self.fitting_data(data)
mdict = {'kmeans': self.fit_kmeans,
'meanshift': self.fit_meanshift}
clust = mdict[method]
self.classifier = clust(data=ds_fit, **kwargs)
# sort cluster centers by value of first column, to avoid random variation.
c0 = self.classifier.cluster_centers_.T[self.sort_by]
self.classifier.cluster_centers_ = self.classifier.cluster_centers_[np.argsort(c0)]
# recalculate the labels, so it's consistent with cluster centers
self.classifier.labels_ = self.classifier.predict(ds_fit)
self.classifier.ulabels_ = np.unique(self.classifier.labels_)
return
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/filtering/classifier_obj.py#L139-L190
|
k means clustering
|
python
|
def cluster(x, cluster='KMeans', n_clusters=3, ndims=None, format_data=True):
"""
Performs clustering analysis and returns a list of cluster labels
Parameters
----------
x : A Numpy array, Pandas Dataframe or list of arrays/dfs
The data to be clustered. You can pass a single array/df or a list.
If a list is passed, the arrays will be stacked and the clustering
will be performed across all lists (i.e. not within each list).
cluster : str or dict
Model to use to discover clusters. Supported algorithms are: KMeans,
MiniBatchKMeans, AgglomerativeClustering, Birch, FeatureAgglomeration,
SpectralClustering and HDBSCAN (default: KMeans). Can be passed as a
string, but for finer control of the model parameters, pass as a
dictionary, e.g. reduce={'model' : 'KMeans', 'params' : {'max_iter' : 100}}.
See scikit-learn specific model docs for details on parameters supported for
each model.
n_clusters : int
Number of clusters to discover. Not required for HDBSCAN.
format_data : bool
Whether or not to first call the format_data function (default: True).
ndims : None
Deprecated argument. Please use new analyze function to perform
combinations of transformations
Returns
----------
cluster_labels : list
A list of cluster labels
"""
if cluster == None:
return x
elif (isinstance(cluster, six.string_types) and cluster=='HDBSCAN') or \
(isinstance(cluster, dict) and cluster['model']=='HDBSCAN'):
if not _has_hdbscan:
raise ImportError('HDBSCAN is not installed. Please install hdbscan>=0.8.11')
if ndims != None:
warnings.warn('The ndims argument is now deprecated. Ignoring dimensionality reduction step.')
if format_data:
x = formatter(x, ppca=True)
# if reduce is a string, find the corresponding model
if isinstance(cluster, six.string_types):
model = models[cluster]
if cluster != 'HDBSCAN':
model_params = {
'n_clusters' : n_clusters
}
else:
model_params = {}
# if its a dict, use custom params
elif type(cluster) is dict:
if isinstance(cluster['model'], six.string_types):
model = models[cluster['model']]
model_params = cluster['params']
# initialize model
model = model(**model_params)
# fit the model
model.fit(np.vstack(x))
# return the labels
return list(model.labels_)
|
https://github.com/ContextLab/hypertools/blob/b76c7ac8061998b560e969ff8e4f4c915088e7a0/hypertools/tools/cluster.py#L28-L100
|
k means clustering
|
python
|
def do_clustering(types, max_clust):
"""
Helper method for clustering that takes a list of all of the things being
clustered (which are assumed to be binary numbers represented as strings),
and an int representing the maximum number of clusters that are allowed.
Returns: A dictionary mapping cluster ids to lists of numbers that are part
of that cluster.
"""
# Fill in leading zeros to make all numbers same length.
ls = [list(t[t.find("b")+1:]) for t in types]
prepend_zeros_to_lists(ls)
dist_matrix = pdist(ls, weighted_hamming)
clusters = hierarchicalcluster.complete(dist_matrix)
clusters = hierarchicalcluster.fcluster(clusters, max_clust,
criterion="maxclust")
# Group members of each cluster together
cluster_dict = dict((c, []) for c in set(clusters))
for i in range(len(types)):
cluster_dict[clusters[i]].append(types[i])
return cluster_dict
|
https://github.com/emilydolson/avida-spatial-tools/blob/7beb0166ccefad5fa722215b030ac2a53d62b59e/avidaspatial/transform_data.py#L35-L58
|
k means clustering
|
python
|
def cluster_mini_batch_kmeans(data=None, k=100, max_iter=10, batch_size=0.2, metric='euclidean',
init_strategy='kmeans++', n_jobs=None, chunksize=None, skip=0, clustercenters=None, **kwargs):
r"""k-means clustering with mini-batch strategy
Mini-batch k-means is an approximation to k-means which picks a randomly
selected subset of data points to be updated in each iteration. Usually
much faster than k-means but will likely deliver a less optimal result.
Returns
-------
kmeans_mini : a :class:`MiniBatchKmeansClustering <pyemma.coordinates.clustering.MiniBatchKmeansClustering>` clustering object
Object for mini-batch kmeans clustering.
It holds discrete trajectories and cluster center information.
See also
--------
:func:`kmeans <pyemma.coordinates.kmeans>` : for full k-means clustering
.. autoclass:: pyemma.coordinates.clustering.kmeans.MiniBatchKmeansClustering
:members:
:undoc-members:
.. rubric:: Methods
.. autoautosummary:: pyemma.coordinates.clustering.kmeans.MiniBatchKmeansClustering
:methods:
.. rubric:: Attributes
.. autoautosummary:: pyemma.coordinates.clustering.kmeans.MiniBatchKmeansClustering
:attributes:
References
----------
.. [1] http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf
"""
from pyemma.coordinates.clustering.kmeans import MiniBatchKmeansClustering
res = MiniBatchKmeansClustering(n_clusters=k, max_iter=max_iter, metric=metric, init_strategy=init_strategy,
batch_size=batch_size, n_jobs=n_jobs, skip=skip, clustercenters=clustercenters)
from pyemma.util.reflection import get_default_args
cs = _check_old_chunksize_arg(chunksize, get_default_args(cluster_mini_batch_kmeans)['chunksize'], **kwargs)
if data is not None:
res.estimate(data, chunksize=cs)
else:
res.chunksize = cs
return res
|
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/api.py#L1645-L1692
|
k means clustering
|
python
|
def _k_modes_iter(X, centroids, cl_attr_freq, membship, dissim, random_state):
"""Single iteration of k-modes clustering algorithm"""
moves = 0
for ipoint, curpoint in enumerate(X):
clust = np.argmin(dissim(centroids, curpoint, X=X, membship=membship))
if membship[clust, ipoint]:
# Point is already in its right place.
continue
# Move point, and update old/new cluster frequencies and centroids.
moves += 1
old_clust = np.argwhere(membship[:, ipoint])[0][0]
cl_attr_freq, membship, centroids = move_point_cat(
curpoint, ipoint, clust, old_clust, cl_attr_freq, membship, centroids
)
# In case of an empty cluster, reinitialize with a random point
# from the largest cluster.
if not membship[old_clust, :].any():
from_clust = membship.sum(axis=1).argmax()
choices = [ii for ii, ch in enumerate(membship[from_clust, :]) if ch]
rindx = random_state.choice(choices)
cl_attr_freq, membship, centroids = move_point_cat(
X[rindx], rindx, old_clust, from_clust, cl_attr_freq, membship, centroids
)
return centroids, moves
|
https://github.com/nicodv/kmodes/blob/cdb19fe5448aba1bf501626694bb52e68eafab45/kmodes/kmodes.py#L134-L162
|
k means clustering
|
python
|
def lloyd_cluster(G, seeds, maxiter=10):
"""Perform Lloyd clustering on graph with weighted edges.
Parameters
----------
G : csr_matrix, csc_matrix
A sparse NxN matrix where each nonzero entry G[i,j] is the distance
between nodes i and j.
seeds : int array
If seeds is an integer, then its value determines the number of
clusters. Otherwise, seeds is an array of unique integers between 0
and N-1 that will be used as the initial seeds for clustering.
maxiter : int
The maximum number of iterations to perform.
Returns
-------
distances : array
final distances
clusters : int array
id of each cluster of points
seeds : int array
index of each seed
Notes
-----
If G has complex values, abs(G) is used instead.
"""
G = asgraph(G)
N = G.shape[0]
if G.dtype.kind == 'c':
# complex dtype
G = np.abs(G)
# interpret seeds argument
if np.isscalar(seeds):
seeds = np.random.permutation(N)[:seeds]
seeds = seeds.astype('intc')
else:
seeds = np.array(seeds, dtype='intc')
if len(seeds) < 1:
raise ValueError('at least one seed is required')
if seeds.min() < 0:
raise ValueError('invalid seed index (%d)' % seeds.min())
if seeds.max() >= N:
raise ValueError('invalid seed index (%d)' % seeds.max())
clusters = np.empty(N, dtype='intc')
distances = np.empty(N, dtype=G.dtype)
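# repeat Lloyd updates (in the compiled amg_core routine) until the seed set stops changing or maxiter is reached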
for i in range(maxiter):
last_seeds = seeds.copy()
amg_core.lloyd_cluster(N, G.indptr, G.indices, G.data,
len(seeds), distances, clusters, seeds)
if (seeds == last_seeds).all():
break
return (distances, clusters, seeds)
|
https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/graph.py#L177-L240
|
k means clustering
|
python
|
def clust_coef(self, node):
"""
Computes and returns the local clustering coefficient of node. The
local clustering coefficient is the ratio of the actual number of edges
between neighbours of node to the maximum possible number of edges between those neighbours.
See <http://en.wikipedia.org/wiki/Clustering_coefficient#Local_clustering_coefficient>
for a formal definition.
"""
num = 0
nbr_set = set(self.out_nbrs(node))
if node in nbr_set:
nbr_set.remove(node) # loop defense
for nbr in nbr_set:
sec_set = set(self.out_nbrs(nbr))
if nbr in sec_set:
sec_set.remove(nbr) # loop defense
num += len(nbr_set & sec_set)
nbr_num = len(nbr_set)
if nbr_num:
clust_coef = float(num) / (nbr_num * (nbr_num - 1))
else:
clust_coef = 0.0
return clust_coef
|
https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/lib/altgraph/Graph.py#L619-L645
|
k means clustering
|
python
|
def eeg_microstates_clustering(data, n_microstates=4, clustering_method="kmeans", n_jobs=1, n_init=25, occurence_rejection_treshold=0.05, max_refitting=5, verbose=True):
"""
Fit the clustering algorithm.
"""
# Create training set
training_set = data.copy()
if verbose is True:
print("- Initializing the clustering algorithm...")
if clustering_method == "kmeans":
algorithm = sklearn.cluster.KMeans(init='k-means++', n_clusters=n_microstates, n_init=n_init, n_jobs=n_jobs)
elif clustering_method == "spectral":
algorithm = sklearn.cluster.SpectralClustering(n_clusters=n_microstates, n_init=n_init, n_jobs=n_jobs)
elif clustering_method == "agglom":
algorithm = sklearn.cluster.AgglomerativeClustering(n_clusters=n_microstates, linkage="complete")
elif clustering_method == "dbscan":
algorithm = sklearn.cluster.DBSCAN(min_samples=100)
elif clustering_method == "affinity":
algorithm = sklearn.cluster.AffinityPropagation(damping=0.5)
else:
print("NeuroKit Error: eeg_microstates(): clustering_method must be 'kmeans', 'spectral', 'dbscan', 'affinity' or 'agglom'")
refitting = 0 # Initialize the number of refittings
good_fit_achieved = False
while good_fit_achieved is False:
good_fit_achieved = True
if verbose is True:
print("- Fitting the classifier...")
# Fit the algorithm
algorithm.fit(training_set)
if verbose is True:
print("- Clustering back the initial data...")
# Predict the more likely cluster for each observation
predicted = algorithm.fit_predict(training_set)
if verbose is True:
print("- Check for abnormalities...")
# Check for abnormalities and prune the training set until none found
occurences = dict(collections.Counter(predicted))
masks = [np.array([True]*len(training_set))]
for microstate in occurences:
# is the frequency of one microstate below the rejection threshold
if occurences[microstate] < len(data)*occurence_rejection_treshold:
good_fit_achieved = False
refitting += 1 # Increment the refitting
print("NeuroKit Warning: eeg_microstates(): detected some outliers: refitting the classifier (n=" + str(refitting) + ").")
masks.append(predicted!=microstate)
mask = np.all(masks, axis=0)
training_set = training_set[mask]
return(algorithm)
|
https://github.com/neuropsychology/NeuroKit.py/blob/c9589348fbbde0fa7e986048c48f38e6b488adfe/examples/UnderDev/eeg/eeg_microstates.py#L201-L253
|
k means clustering
|
python
|
def _hclust(self, nclusters, method, noise=False):
"""
:param nclusters: Number of clusters to return
:param method: single, complete, average, ward, weighted, centroid or median
(http://docs.scipy.org/doc/scipy/reference/cluster.hierarchy.html)
:param noise: Add Gaussian noise to the distance matrix prior to clustering (bool, default=False)
:return: Partition object describing clustering
"""
matrix = self.get_dm(noise)
linkmat = fastcluster.linkage(squareform(matrix), method)
self.nclusters = nclusters # Store these in case we want to plot
self.linkmat = linkmat #
return _hclust(linkmat, nclusters)
|
https://github.com/kgori/treeCl/blob/fed624b3db1c19cc07175ca04e3eda6905a8d305/treeCl/clustering.py#L419-L432
|
k means clustering
|
python
|
def fit(self, X, y=None, categorical=None):
"""Compute k-prototypes clustering.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
categorical : Index of columns that contain categorical data
"""
if categorical is not None:
assert isinstance(categorical, (int, list, tuple)), "The 'categorical' \
argument needs to be an integer with the index of the categorical \
column in your data, or a list or tuple of several of them, \
but it is a {}.".format(type(categorical))
X = pandas_to_numpy(X)
random_state = check_random_state(self.random_state)
# If self.gamma is None, gamma will be automatically determined from
# the data. The function below returns its value.
self._enc_cluster_centroids, self._enc_map, self.labels_, self.cost_,\
self.n_iter_, self.gamma = k_prototypes(X,
categorical,
self.n_clusters,
self.max_iter,
self.num_dissim,
self.cat_dissim,
self.gamma,
self.init,
self.n_init,
self.verbose,
random_state,
self.n_jobs)
return self
|
https://github.com/nicodv/kmodes/blob/cdb19fe5448aba1bf501626694bb52e68eafab45/kmodes/kprototypes.py#L431-L463
|
k means clustering
|
python
|
def fit(self, X, y=None, sample_weight=None):
"""Compute k-means clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
y : Ignored
not used, present here for API consistency by convention.
sample_weight : array-like, shape (n_samples,), optional
The weights for each observation in X. If None, all observations
are assigned equal weight (default: None)
"""
if self.normalize:
X = normalize(X)
random_state = check_random_state(self.random_state)
# TODO: add check that all data is unit-normalized
self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = spherical_k_means(
X,
n_clusters=self.n_clusters,
sample_weight=sample_weight,
init=self.init,
n_init=self.n_init,
max_iter=self.max_iter,
verbose=self.verbose,
tol=self.tol,
random_state=random_state,
copy_x=self.copy_x,
n_jobs=self.n_jobs,
return_n_iter=True,
)
return self
|
https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/spherical_kmeans.py#L329-L366
|
k means clustering
|
python
|
def clustering_gmm(data,
n_clusters,
tol=1e-7,
min_covar=None,
scale='logicle'):
"""
Find clusters in an array using a Gaussian Mixture Model.
Before clustering, `data` can be automatically rescaled as specified by
the `scale` argument.
Parameters
----------
data : FCSData or array_like
Data to cluster.
n_clusters : int
Number of clusters to find.
tol : float, optional
Tolerance for convergence. Directly passed to either
``GaussianMixture`` or ``GMM``, depending on ``scikit-learn``'s
version.
min_covar : float, optional
The minimum trace that the initial covariance matrix will have. If
``scikit-learn``'s version is older than 0.18, `min_covar` is also
passed directly to ``GMM``.
scale : str, optional
Rescaling applied to `data` before performing clustering. Can be
either ``linear`` (no rescaling), ``log``, or ``logicle``.
Returns
-------
labels : array
Nx1 array with labels for each element in `data`, assigning
``data[i]`` to cluster ``labels[i]``.
Notes
-----
A Gaussian Mixture Model finds clusters by fitting a linear combination
of `n_clusters` Gaussian probability density functions (pdf) to `data`
using Expectation Maximization (EM).
This method can be fairly sensitive to the initial parameter choice. To
generate a reasonable set of initial conditions, `clustering_gmm`
first divides all points in `data` into `n_clusters` groups of the
same size based on their Euclidean distance to the minimum value. Then,
for each group, the 50% of samples farthest from the mean are
discarded. The mean and covariance are calculated from the remaining
samples of each group, and used as initial conditions for the GMM EM
algorithm.
`clustering_gmm` internally uses a `GaussianMixture` object from the
``scikit-learn`` library (``GMM`` if ``scikit-learn``'s version is
lower than 0.18), with full covariance matrices for each cluster. For
more information, consult ``scikit-learn``'s documentation.
"""
# Initialize min_covar parameter
# Parameter is initialized differently depending on scikit's version
if min_covar is None:
if packaging.version.parse(sklearn.__version__) \
>= packaging.version.parse('0.18'):
min_covar = 1e-3
else:
min_covar = 5e-5
# Copy events before rescaling
data = data.copy()
# Apply rescaling
if scale=='linear':
# No rescaling
pass
elif scale=='log':
# Logarithm of zero and negatives is undefined. Therefore, saturate
# any non-positives to a small positive value.
# The machine epsilon `eps` is the smallest number such that
# `1.0 + eps != 1.0`. For a 64-bit floating point, `eps ~= 1e-15`.
data[data < 1e-15] = 1e-15
# Rescale
data = np.log10(data)
elif scale=='logicle':
# Use the logicle transform class in the plot module, and transform
# data one channel at a time.
for ch in range(data.shape[1]):
# We need a transformation from "data value" to "display scale"
# units. To do so, we use an inverse logicle transformation.
t = FlowCal.plot._LogicleTransform(data=data, channel=ch).inverted()
data[:,ch] = t.transform_non_affine(data[:,ch],
mask_out_of_range=False)
else:
raise ValueError("scale {} not supported".format(scale))
###
# Parameter initialization
###
weights = np.tile(1.0 / n_clusters, n_clusters)
means = []
covars = []
# Calculate distance to minimum value. Then, sort based on this distance.
dist = np.sum((data - np.min(data, axis=0))**2., axis=1)
sorted_idx = np.argsort(dist)
# Expected number of elements per cluster
n_per_cluster = data.shape[0]/float(n_clusters)
# Get means and covariances per cluster
# We will just use a fraction of ``1 - discard_frac`` of the data.
# Data at the edges that actually corresponds to another cluster can
# really mess up the final result.
discard_frac = 0.5
for i in range(n_clusters):
il = int((i + discard_frac/2)*n_per_cluster)
ih = int((i + 1 - discard_frac/2)*n_per_cluster)
sorted_idx_cluster = sorted_idx[il:ih]
data_cluster = data[sorted_idx_cluster]
# Calculate means and covariances
means.append(np.mean(data_cluster, axis=0))
if data.shape[1] == 1:
cov = np.cov(data_cluster.T).reshape(1,1)
else:
cov = np.cov(data_cluster.T)
# Add small number to diagonal to avoid near-singular covariances
cov += np.eye(data.shape[1]) * min_covar
covars.append(cov)
# Means should be an array
means = np.array(means)
###
# Run Gaussian Mixture Model Clustering
###
if packaging.version.parse(sklearn.__version__) \
>= packaging.version.parse('0.18'):
# GaussianMixture uses precisions, the inverse of covariances.
# To get the inverse, we solve the linear equation C*P = I. We also
# use the fact that C is positive definite.
precisions = [scipy.linalg.solve(c,
np.eye(c.shape[0]),
assume_a='pos')
for c in covars]
precisions = np.array(precisions)
# Initialize GaussianMixture object
gmm = GaussianMixture(n_components=n_clusters,
tol=tol,
covariance_type='full',
weights_init=weights,
means_init=means,
precisions_init=precisions,
max_iter=500)
else:
# Initialize GMM object
gmm = GMM(n_components=n_clusters,
tol=tol,
min_covar=min_covar,
covariance_type='full',
params='mc',
init_params='')
# Set initial parameters
        gmm.weights_ = weights
gmm.means_ = means
gmm.covars_ = covars
# Fit
gmm.fit(data)
# Get labels by sampling from the responsibilities
# This avoids the complete elimination of a cluster if two or more
# clusters have very similar means.
resp = gmm.predict_proba(data)
labels = [np.random.choice(range(n_clusters), p=ri) for ri in resp]
return labels
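# Hedged illustration -- not from the linked repository. The docstring above
# describes fitting a full-covariance Gaussian Mixture Model; the core
# scikit-learn step, stripped of FlowCal's rescaling and custom initialization,
# is sketched below on synthetic 2-D data (all values are illustrative).
import numpy as np
from sklearn.mixture import GaussianMixture

_rng = np.random.default_rng(0)
_blobs = np.vstack([_rng.normal([0.0, 0.0], 0.5, size=(200, 2)),
                    _rng.normal([5.0, 5.0], 0.5, size=(200, 2))])
_gmm = GaussianMixture(n_components=2, covariance_type='full', tol=1e-7)
_labels = _gmm.fit_predict(_blobs)   # one label per synthetic event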
|
https://github.com/taborlab/FlowCal/blob/031a7af82acb1d46879a8e384a1a00f27f0bdc7a/FlowCal/mef.py#L36-L213
|
k means clustering
|
python
|
def estimate_k(X, max_k):
"""
Estimate k for K-Means.
Adapted from
<https://datasciencelab.wordpress.com/2014/01/21/selection-of-k-in-k-means-clustering-reloaded/>
"""
ks = range(1, max_k)
fs = np.zeros(len(ks))
# Special case K=1
fs[0], Sk = _fK(1)
# Rest of Ks
for k in ks[1:]:
fs[k-1], Sk = _fK(k, Skm1=Sk)
return np.argmin(fs) + 1
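# Hedged reconstruction -- the `_fK` helper used by `estimate_k` is not shown
# in this snippet. Based on the linked blog post (the f(K) statistic of Pham,
# Dimov & Nguyen, 2005), a plausible stand-alone version is sketched below;
# the name `_fK_sketch` and the explicit `X` argument are assumptions, not the
# repository's actual code.
import numpy as np
from sklearn.cluster import KMeans

def _fK_sketch(X, k, Skm1=0):
    """Return (f(K), S_k) for a K-Means run with k clusters on data X."""
    Nd = X.shape[1]
    Sk = KMeans(n_clusters=k, n_init=10).fit(X).inertia_
    if k == 1:
        return 1.0, Sk
    # Weight factor alpha_k from Pham et al. (2005)
    a = 1.0 - 3.0 / (4.0 * Nd)
    for _ in range(3, k + 1):
        a = a + (1.0 - a) / 6.0
    return (Sk / (a * Skm1) if Skm1 > 0 else 1.0), Sk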
|
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/cluster/parameter.py#L35-L51
|
k means clustering
|
python
|
def fit(self, Z):
"""Compute k-means clustering.
Parameters
----------
Z : ArrayRDD or DictRDD containing array-like or sparse matrix
Train data.
Returns
-------
self
"""
X = Z[:, 'X'] if isinstance(Z, DictRDD) else Z
check_rdd(X, (np.ndarray, sp.spmatrix))
if self.init == 'k-means||':
self._mllib_model = MLlibKMeans.train(
X.unblock(),
self.n_clusters,
maxIterations=self.max_iter,
initializationMode="k-means||")
self.cluster_centers_ = self._mllib_model.centers
else:
models = X.map(lambda X: super(SparkKMeans, self).fit(X))
models = models.map(lambda model: model.cluster_centers_).collect()
return super(SparkKMeans, self).fit(np.concatenate(models))
|
https://github.com/lensacom/sparkit-learn/blob/0498502107c1f7dcf33cda0cdb6f5ba4b42524b7/splearn/cluster/k_means_.py#L74-L98
|
k means clustering
|
python
|
def fit(self, blocks, y=None):
"""
Fit a k-means clustering model using an ordered sequence of blocks.
"""
self.kmeans.fit(make_weninger_features(blocks))
# set the cluster center closest to the origin to exactly (0.0, 0.0)
self.kmeans.cluster_centers_.sort(axis=0)
self.kmeans.cluster_centers_[0, :] = np.zeros(2)
return self
|
https://github.com/dragnet-org/dragnet/blob/532c9d9f28e5b1b57f3cabc708218d3863a16322/dragnet/features/weninger.py#L82-L90
|
k means clustering
|
python
|
def clustering_factory(clf):
"""Embeds scikit-plot plotting methods in an sklearn clusterer instance.
Args:
clf: Scikit-learn clusterer instance
Returns:
The same scikit-learn clusterer instance passed in **clf** with
embedded scikit-plot instance methods.
Raises:
        TypeError: If **clf** does not contain the instance methods necessary
for scikit-plot instance methods.
"""
required_methods = ['fit', 'fit_predict']
for method in required_methods:
if not hasattr(clf, method):
raise TypeError('"{}" is not in clf. Did you '
'pass a clusterer instance?'.format(method))
additional_methods = {
'plot_silhouette': plot_silhouette,
'plot_elbow_curve': plot_elbow_curve
}
for key, fn in six.iteritems(additional_methods):
if hasattr(clf, key):
warnings.warn('"{}" method already in clf. '
'Overriding anyway. This may '
'result in unintended behavior.'.format(key))
setattr(clf, key, types.MethodType(fn, clf))
return clf
|
https://github.com/reiinakano/scikit-plot/blob/2dd3e6a76df77edcbd724c4db25575f70abb57cb/scikitplot/clustering.py#L18-L50
|
k means clustering
|
python
|
def cluster(self, nclusters, linkage_method=linkage.WARD, **kwargs):
"""
Do hierarchical clustering on a distance matrix using one of the methods:
        linkage.SINGLE = single-linkage clustering
        linkage.COMPLETE = complete-linkage clustering
        linkage.AVERAGE = average-linkage clustering
        linkage.WARD = Ward's minimum variance method
"""
if linkage_method == linkage.SINGLE:
return self._hclust(nclusters, 'single', **kwargs)
elif linkage_method == linkage.COMPLETE:
return self._hclust(nclusters, 'complete', **kwargs)
elif linkage_method == linkage.AVERAGE:
return self._hclust(nclusters, 'average', **kwargs)
elif linkage_method == linkage.WARD:
return self._hclust(nclusters, 'ward', **kwargs)
elif linkage_method == linkage.WEIGHTED:
return self._hclust(nclusters, 'weighted', **kwargs)
elif linkage_method == linkage.CENTROID:
return self._hclust(nclusters, 'centroid', **kwargs)
elif linkage_method == linkage.MEDIAN:
return self._hclust(nclusters, 'median', **kwargs)
else:
raise ValueError('Unknown linkage_method: {}'.format(linkage_method))
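    # Hedged illustration -- not from the linked repository. The dispatch above
    # forwards a linkage name to `_hclust`; a minimal stand-alone equivalent
    # with SciPy might look like this (where `condensed` is a condensed
    # distance vector such as the output of scipy.spatial.distance.pdist):
    #
    #     from scipy.cluster.hierarchy import linkage as scipy_linkage, fcluster
    #     Z = scipy_linkage(condensed, method='ward')
    #     labels = fcluster(Z, t=nclusters, criterion='maxclust')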
|
https://github.com/kgori/treeCl/blob/fed624b3db1c19cc07175ca04e3eda6905a8d305/treeCl/clustering.py#L393-L417
|
k means clustering
|
python
|
def cluster(points, radius):
"""
Clustering of points in space.
`radius` is the radius of local search.
Individual subsets can be accessed through ``actor.clusters``.
.. hint:: |clustering| |clustering.py|_
"""
if isinstance(points, vtk.vtkActor):
poly = points.GetMapper().GetInput()
else:
src = vtk.vtkPointSource()
src.SetNumberOfPoints(len(points))
src.Update()
vpts = src.GetOutput().GetPoints()
for i, p in enumerate(points):
vpts.SetPoint(i, p)
poly = src.GetOutput()
cluster = vtk.vtkEuclideanClusterExtraction()
cluster.SetInputData(poly)
cluster.SetExtractionModeToAllClusters()
cluster.SetRadius(radius)
cluster.ColorClustersOn()
cluster.Update()
idsarr = cluster.GetOutput().GetPointData().GetArray("ClusterId")
Nc = cluster.GetNumberOfExtractedClusters()
sets = [[] for i in range(Nc)]
for i, p in enumerate(points):
sets[idsarr.GetValue(i)].append(p)
acts = []
for i, aset in enumerate(sets):
acts.append(vs.Points(aset, c=i))
actor = Assembly(acts)
actor.info["clusters"] = sets
print("Nr. of extracted clusters", Nc)
if Nc > 10:
print("First ten:")
for i in range(Nc):
if i > 9:
print("...")
break
print("Cluster #" + str(i) + ", N =", len(sets[i]))
print("Access individual clusters through attribute: actor.cluster")
return actor
|
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/analysis.py#L1290-L1340
|
k means clustering
|
python
|
def process(self):
"""!
@brief Performs clustering process in line with rules of BANG clustering algorithm.
@return (bang) Returns itself (BANG instance).
@see get_clusters()
@see get_noise()
@see get_directory()
@see get_dendrogram()
"""
self.__directory = bang_directory(self.__data, self.__levels,
density_threshold=self.__density_threshold,
amount_threshold=self.__amount_threshold)
self.__allocate_clusters()
return self
|
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/bang.py#L1039-L1056
|
k means clustering
|
python
|
def new_clustered_sortind(x, k=10, row_key=None, cluster_key=None):
"""
Uses MiniBatch k-means clustering to cluster matrix into groups.
Each cluster of rows is then sorted by `scorefunc` -- by default, the max
peak height when all rows in a cluster are averaged, or
cluster.mean(axis=0).max().
Returns the index that will sort the rows of `x` and a list of "breaks".
`breaks` is essentially a cumulative row count for each cluster boundary.
In other words, after plotting the array you can use axhline on each
"break" to plot the cluster boundary.
If `k` is a list or tuple, iteratively try each one and select the best
with the lowest mean distance from cluster centers.
:param x: Matrix whose rows are to be clustered
:param k: Number of clusters to create or a list of potential clusters; the
optimum will be chosen from the list
:param row_key:
Optional function to act as a sort key for sorting rows within
clusters. Signature should be `scorefunc(a)` where `a` is a 1-D NumPy
array.
:param cluster_key:
Optional function for sorting clusters. Signature is `clusterfunc(a)`
where `a` is a NumPy array containing all rows of `x` for cluster `i`.
It must return a single value.
"""
try:
from sklearn.cluster import MiniBatchKMeans
except ImportError:
raise ImportError('please install scikits.learn for '
'clustering.')
# If integer, do it once and we're done
if isinstance(k, int):
best_k = k
else:
mean_dists = {}
for _k in k:
mbk = MiniBatchKMeans(init='k-means++', n_clusters=_k)
mbk.fit(x)
mean_dists[_k] = mbk.transform(x).mean()
        best_k = sorted(mean_dists.items(), key=lambda x: x[1])[0][0]  # lowest mean distance
mbk = MiniBatchKMeans(init='k-means++', n_clusters=best_k)
mbk.fit(x)
k = best_k
labels = mbk.labels_
scores = np.zeros(labels.shape, dtype=float)
if cluster_key:
# It's easier for calling code to provide something that operates on
# a cluster level, but here it's converted to work on a label level
# that looks in to the array `x`.
def _cluster_key(i):
return cluster_key(x[labels == i, :])
sorted_labels = sorted(range(k), key=_cluster_key)
else:
# Otherwise just use them as-is.
sorted_labels = range(k)
if row_key:
# Again, easier to provide a function to operate on a row. But here we
# need it to accept an index
def _row_key(i):
return row_key(x[i, :])
final_ind = []
breaks = []
pos = 0
for label in sorted_labels:
# which rows in `x` have this label
label_inds = np.nonzero(labels == label)[0]
if row_key:
label_sort_ind = sorted(label_inds, key=_row_key)
else:
label_sort_ind = label_inds
for li in label_sort_ind:
final_ind.append(li)
pos += len(label_inds)
breaks.append(pos)
return np.array(final_ind), np.array(breaks)
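# Hedged usage sketch -- not from the linked repository. The k-selection loop
# above fits MiniBatchKMeans for each candidate k and compares the mean
# distance to the cluster centers; a self-contained version of that pattern on
# synthetic data (illustrative values only):
import numpy as np
from sklearn.cluster import MiniBatchKMeans

_demo = np.random.rand(300, 40)
_mean_dists = {}
for _k in (4, 8, 12):
    _mbk = MiniBatchKMeans(init='k-means++', n_clusters=_k).fit(_demo)
    _mean_dists[_k] = _mbk.transform(_demo).mean()
_best_k = min(_mean_dists, key=_mean_dists.get)   # lowest mean distance wins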
|
https://github.com/daler/metaseq/blob/fa875d1f72317aa7ef95cb128b739956b16eef9f/metaseq/plotutils.py#L663-L747
|
k means clustering
|
python
|
def show_clusters(data, clusters, noise=None):
"""!
@brief Display CLIQUE clustering results.
@param[in] data (list): Data that was used for clustering.
@param[in] clusters (array_like): Clusters that were allocated by the algorithm.
@param[in] noise (array_like): Noise that were allocated by the algorithm.
"""
visualizer = cluster_visualizer()
visualizer.append_clusters(clusters, data)
visualizer.append_cluster(noise or [], data, marker='x')
visualizer.show()
|
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/clique.py#L92-L104
|
k means clustering
|
python
|
def clustering_coef_bu(G):
'''
The clustering coefficient is the fraction of triangles around a node
(equiv. the fraction of nodes neighbors that are neighbors of each other).
Parameters
----------
    G : NxN np.ndarray
binary undirected connection matrix
Returns
-------
C : Nx1 np.ndarray
clustering coefficient vector
'''
n = len(G)
C = np.zeros((n,))
for u in range(n):
V, = np.where(G[u, :])
k = len(V)
if k >= 2: # degree must be at least 2
S = G[np.ix_(V, V)]
C[u] = np.sum(S) / (k * k - k)
return C
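# Hedged worked example -- not from the linked repository. A triangle (nodes
# 0, 1, 2) with a pendant node 3 attached to node 2: nodes 0 and 1 have both
# of their neighbours connected (C = 1), node 2 has one of its three neighbour
# pairs connected (C = 1/3), and node 3 has degree 1, so C = 0.
import numpy as np

_G = np.array([[0, 1, 1, 0],
               [1, 0, 1, 0],
               [1, 1, 0, 1],
               [0, 0, 1, 0]], dtype=float)
print(clustering_coef_bu(_G))   # expected: approximately [1. 1. 0.333 0.]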
|
https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/clustering.py#L130-L155
|
k means clustering
|
python
|
def cluster(self, matrix=None, level=None, sequence=None):
"""
Perform hierarchical clustering.
:param matrix: The 2D list that is currently under processing. The
matrix contains the distances of each item with each other
:param level: The current level of clustering
:param sequence: The sequence number of the clustering
"""
logger.info("Performing cluster()")
if matrix is None:
# create level 0, first iteration (sequence)
level = 0
sequence = 0
matrix = []
# if the matrix only has two rows left, we are done
linkage = partial(self.linkage, distance_function=self.distance)
initial_element_count = len(self._data)
while len(matrix) > 2 or matrix == []:
item_item_matrix = Matrix(self._data,
linkage,
True,
0)
item_item_matrix.genmatrix(self.num_processes)
matrix = item_item_matrix.matrix
smallestpair = None
mindistance = None
rowindex = 0 # keep track of where we are in the matrix
# find the minimum distance
for row in matrix:
cellindex = 0 # keep track of where we are in the matrix
for cell in row:
# if we are not on the diagonal (which is always 0)
# and if this cell represents a new minimum...
cell_lt_mdist = cell < mindistance if mindistance else False
if ((rowindex != cellindex) and
(cell_lt_mdist or smallestpair is None)):
smallestpair = (rowindex, cellindex)
mindistance = cell
cellindex += 1
rowindex += 1
sequence += 1
level = matrix[smallestpair[1]][smallestpair[0]]
cluster = Cluster(level, self._data[smallestpair[0]],
self._data[smallestpair[1]])
            # maintain the data by combining the two most similar items
# in the list we use the min and max functions to ensure the
# integrity of the data. imagine: if we first remove the item
# with the smaller index, all the rest of the items shift down by
# one. So the next index will be wrong. We could simply adjust the
# value of the second "remove" call, but we don't know the order
# in which they come. The max and min approach clarifies that
self._data.remove(self._data[max(smallestpair[0],
smallestpair[1])]) # remove item 1
self._data.remove(self._data[min(smallestpair[0],
smallestpair[1])]) # remove item 2
self._data.append(cluster) # append item 1 and 2 combined
self.publish_progress(initial_element_count, len(self._data))
# all the data is in one single cluster. We return that and stop
self.__cluster_created = True
logger.info("Call to cluster() is complete")
return
|
https://github.com/exhuma/python-cluster/blob/4c0ac14d9beafcd51f0d849151514083c296402f/cluster/method/hierarchical.py#L120-L189
|
k means clustering
|
python
|
def process(self):
"""!
@brief Performs cluster analysis in line with rules of ROCK algorithm.
@remark Results of clustering can be obtained using corresponding get methods.
@see get_clusters()
"""
# TODO: (Not related to specification, just idea) First iteration should be investigated. Euclidean distance should be used for clustering between two
        # points and the ROCK algorithm between clusters, because we consider non-categorical samples. This requires further investigation.
if (self.__ccore is True):
self.__clusters = wrapper.rock(self.__pointer_data, self.__eps, self.__number_clusters, self.__threshold);
else:
self.__clusters = [[index] for index in range(len(self.__pointer_data))];
while (len(self.__clusters) > self.__number_clusters):
indexes = self.__find_pair_clusters(self.__clusters);
if (indexes != [-1, -1]):
self.__clusters[indexes[0]] += self.__clusters[indexes[1]];
self.__clusters.pop(indexes[1]); # remove merged cluster.
else:
break;
|
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/rock.py#L97-L123
|
k means clustering
|
python
|
def process(self):
"""!
@brief Performs cluster analysis in line with rules of X-Means algorithm.
        @remark Results of clustering can be obtained using the corresponding get methods.
@see get_clusters()
@see get_centers()
"""
if (self.__ccore is True):
self.__clusters, self.__centers = wrapper.xmeans(self.__pointer_data, self.__centers, self.__kmax, self.__tolerance, self.__criterion)
else:
self.__clusters = []
while len(self.__centers) <= self.__kmax:
current_cluster_number = len(self.__centers)
self.__clusters, self.__centers = self.__improve_parameters(self.__centers)
allocated_centers = self.__improve_structure(self.__clusters, self.__centers)
if current_cluster_number == len(allocated_centers):
#if ( (current_cluster_number == len(allocated_centers)) or (len(allocated_centers) > self.__kmax) ):
break
else:
self.__centers = allocated_centers
self.__clusters, self.__centers = self.__improve_parameters(self.__centers)
|
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/xmeans.py#L157-L185
|
k means clustering
|
python
|
def cluster(self, algorithm=None):
"""Cluster the codebook. The clusters of the data instances can be
assigned based on the BMUs. The method populates the class variable
Somoclu.clusters. If viewing methods are called after clustering, but
without colors for best matching units, colors will be automatically
assigned based on cluster membership.
:param algorithm: Optional parameter to specify a scikit-learn
clustering algorithm. The default is K-means with
eight clusters.
        :type algorithm: sklearn.base.ClusterMixin.
"""
import sklearn.base
if algorithm is None:
import sklearn.cluster
algorithm = sklearn.cluster.KMeans()
elif not isinstance(algorithm, sklearn.base.ClusterMixin):
raise Exception("Cannot use algorithm of type " + type(algorithm))
original_shape = self.codebook.shape
self.codebook.shape = (self._n_columns * self._n_rows, self.n_dim)
linear_clusters = algorithm.fit_predict(self.codebook)
self.codebook.shape = original_shape
self.clusters = np.zeros((self._n_rows, self._n_columns), dtype=int)
for i, c in enumerate(linear_clusters):
self.clusters[i // self._n_columns, i % self._n_columns] = c
|
https://github.com/peterwittek/somoclu/blob/b31dfbeba6765e64aedddcf8259626d6684f5349/src/Python/somoclu/train.py#L531-L556
|
k means clustering
|
python
|
def xmeans(cls, initial_centers=None, kmax=20, tolerance=0.025, criterion=splitting_type.BAYESIAN_INFORMATION_CRITERION, ccore=False):
"""
        Constructor of the X-Means clustering algorithm
:param initial_centers: Initial coordinates of centers of clusters that are represented by list: [center1, center2, ...]
Note: The dimensions of the initial centers should be same as of the dataset.
:param kmax: Maximum number of clusters that can be allocated.
        :param tolerance: Stop condition for each iteration: if the maximum change of the cluster centers is less than tolerance, the algorithm stops processing
        :param criterion: Type of splitting creation.
        :param ccore: Defines whether CCORE (the C++ pyclustering library) should be used instead of Python code.
        :return: returns the clustering object
"""
model = xmeans(None, initial_centers, kmax, tolerance, criterion, ccore)
return cls(model)
|
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L26-L39
|
k means clustering
|
python
|
def is_pyclustering_instance(model):
"""
    Checks if the clustering algorithm belongs to pyclustering
    :param model: the clustering algorithm model
:return: the truth value (Boolean)
"""
return any(isinstance(model, i) for i in [xmeans, clarans, rock, optics])
|
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L205-L212
|
k means clustering
|
python
|
def cluster(self, dist_type='cosine', run_clustering=True,
dendro=True, views=['N_row_sum', 'N_row_var'],
linkage_type='average', sim_mat=False, filter_sim=0.1,
calc_cat_pval=False, run_enrichr=None, enrichrgram=None):
'''
    The main function performs hierarchical clustering, optionally generates filtered views (e.g. row-filtered views), and generates the ``visualization_json``.
'''
initialize_net.viz(self)
make_clust_fun.make_clust(self, dist_type=dist_type, run_clustering=run_clustering,
dendro=dendro,
requested_views=views,
linkage_type=linkage_type,
sim_mat=sim_mat,
filter_sim=filter_sim,
calc_cat_pval=calc_cat_pval,
run_enrichr=run_enrichr,
enrichrgram=enrichrgram)
|
https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/__init__.py#L89-L106
|
k means clustering
|
python
|
def clustering_coef_wu(W):
'''
The weighted clustering coefficient is the average "intensity" of
triangles around a node.
Parameters
----------
W : NxN np.ndarray
weighted undirected connection matrix
Returns
-------
C : Nx1 np.ndarray
clustering coefficient vector
'''
K = np.array(np.sum(np.logical_not(W == 0), axis=1), dtype=float)
ws = cuberoot(W)
cyc3 = np.diag(np.dot(ws, np.dot(ws, ws)))
K[np.where(cyc3 == 0)] = np.inf # if no 3-cycles exist, set C=0
C = cyc3 / (K * (K - 1))
return C
|
https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/clustering.py#L194-L214
|
k means clustering
|
python
|
def plot_elbow_curve(clf, X, title='Elbow Plot', cluster_ranges=None, n_jobs=1,
show_cluster_time=True, ax=None, figsize=None,
title_fontsize="large", text_fontsize="medium"):
"""Plots elbow curve of different values of K for KMeans clustering.
Args:
clf: Clusterer instance that implements ``fit``,``fit_predict``, and
``score`` methods, and an ``n_clusters`` hyperparameter.
e.g. :class:`sklearn.cluster.KMeans` instance
X (array-like, shape (n_samples, n_features)):
Data to cluster, where n_samples is the number of samples and
n_features is the number of features.
title (string, optional): Title of the generated plot. Defaults to
"Elbow Plot"
cluster_ranges (None or :obj:`list` of int, optional): List of
n_clusters for which to plot the explained variances. Defaults to
``range(1, 12, 2)``.
n_jobs (int, optional): Number of jobs to run in parallel. Defaults to
1.
show_cluster_time (bool, optional): Include plot of time it took to
cluster for a particular K.
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the curve. If None, the plot is drawn on a new set of axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
Returns:
ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
drawn.
Example:
>>> import scikitplot as skplt
>>> kmeans = KMeans(random_state=1)
>>> skplt.cluster.plot_elbow_curve(kmeans, cluster_ranges=range(1, 30))
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_elbow_curve.png
:align: center
:alt: Elbow Curve
"""
if cluster_ranges is None:
cluster_ranges = range(1, 12, 2)
else:
cluster_ranges = sorted(cluster_ranges)
if not hasattr(clf, 'n_clusters'):
raise TypeError('"n_clusters" attribute not in classifier. '
'Cannot plot elbow method.')
tuples = Parallel(n_jobs=n_jobs)(delayed(_clone_and_score_clusterer)
(clf, X, i) for i in cluster_ranges)
clfs, times = zip(*tuples)
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.set_title(title, fontsize=title_fontsize)
ax.plot(cluster_ranges, np.absolute(clfs), 'b*-')
ax.grid(True)
ax.set_xlabel('Number of clusters', fontsize=text_fontsize)
ax.set_ylabel('Sum of Squared Errors', fontsize=text_fontsize)
ax.tick_params(labelsize=text_fontsize)
if show_cluster_time:
ax2_color = 'green'
ax2 = ax.twinx()
ax2.plot(cluster_ranges, times, ':', alpha=0.75, color=ax2_color)
ax2.set_ylabel('Clustering duration (seconds)',
color=ax2_color, alpha=0.75,
fontsize=text_fontsize)
ax2.tick_params(colors=ax2_color, labelsize=text_fontsize)
return ax
|
https://github.com/reiinakano/scikit-plot/blob/2dd3e6a76df77edcbd724c4db25575f70abb57cb/scikitplot/cluster.py#L19-L107
|
k means clustering
|
python
|
def cluster(self, n, embed_dim=None, algo=mds.CLASSICAL, method=methods.KMEANS):
"""
Cluster the embedded coordinates using multidimensional scaling
Parameters
----------
n: int
The number of clusters to return
embed_dim int
The dimensionality of the underlying coordinates
Defaults to same value as n
method: enum value (methods.KMEANS | methods.GMM)
The clustering method to use
Returns
-------
Partition: Partition object describing the data partition
"""
if n == 1:
return Partition([1] * len(self.get_dm(False)))
if embed_dim is None:
embed_dim = n
if algo == mds.CLASSICAL:
self._coords = self.dm.embedding(embed_dim, 'cmds')
elif algo == mds.METRIC:
self._coords = self.dm.embedding(embed_dim, 'mmds')
else:
raise OptionError(algo, list(mds.reverse.values()))
if method == methods.KMEANS:
p = self.kmeans(n, self._coords.values)
elif method == methods.GMM:
p = self.gmm(n, self._coords.values)
elif method == methods.WARD:
linkmat = fastcluster.linkage(self._coords.values, 'ward')
p = _hclust(linkmat, n)
else:
raise OptionError(method, list(methods.reverse.values()))
#if self._verbosity > 0:
# print('Using clustering method: {}'.format(methods.reverse[method]))
return p
|
https://github.com/kgori/treeCl/blob/fed624b3db1c19cc07175ca04e3eda6905a8d305/treeCl/clustering.py#L329-L371
|
k means clustering
|
python
|
def process(self):
"""!
@brief Performs cluster analysis in line with rules of BIRCH algorithm.
        @remark Results of clustering can be obtained using the corresponding get methods.
@see get_clusters()
"""
self.__insert_data();
self.__extract_features();
        # In line with the specification, a hierarchical algorithm should be used for further clustering
current_number_clusters = len(self.__features);
while (current_number_clusters > self.__number_clusters):
indexes = self.__find_nearest_cluster_features();
self.__features[indexes[0]] += self.__features[indexes[1]];
self.__features.pop(indexes[1]);
current_number_clusters = len(self.__features);
# decode data
self.__decode_data();
|
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/birch.py#L99-L124
|
k means clustering
|
python
|
def cluster_ensembles(cluster_runs, hdf5_file_name = None, verbose = False, N_clusters_max = None):
"""Call up to three different functions for heuristic ensemble clustering
(namely CSPA, HGPA and MCLA) then select as the definitive
consensus clustering the one with the highest average mutual information score
between its vector of consensus labels and the vectors of labels associated to each
partition from the ensemble.
Parameters
----------
cluster_runs : array of shape (n_partitions, n_samples)
Each row of this matrix is such that the i-th entry corresponds to the
cluster ID to which the i-th sample of the data-set has been classified
by this particular clustering. Samples not selected for clustering
        in a given round are tagged by NaN.
hdf5_file_name : file object or string, optional (default = None)
The handle or name of an HDF5 file where any array needed
for consensus_clustering and too large to fit into memory
is to be stored. Created if not specified at input.
verbose : Boolean, optional (default = False)
Specifies if messages concerning the status of the many functions
        subsequently called by 'cluster_ensembles' will be displayed
on the standard output.
N_clusters_max : int, optional
The number of clusters in which to partition the samples into
a consensus clustering. This defaults to the highest number of clusters
encountered in the sets of independent clusterings on subsamples
of the data-set (i.e. the maximum of the entries in "cluster_runs").
Returns
-------
cluster_ensemble : array of shape (n_samples,)
For the final ensemble clustering, this vector contains the
cluster IDs of each sample in the whole data-set.
Reference
---------
A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
"""
if hdf5_file_name is None:
hdf5_file_name = './Cluster_Ensembles.h5'
fileh = tables.open_file(hdf5_file_name, 'w')
fileh.create_group(fileh.root, 'consensus_group')
fileh.close()
cluster_ensemble = []
score = np.empty(0)
if cluster_runs.shape[1] > 10000:
consensus_functions = [HGPA, MCLA]
function_names = ['HGPA', 'MCLA']
print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
"due to a rather large number of cells in your data-set, "
"using only 'HyperGraph Partitioning Algorithm' (HGPA) "
"and 'Meta-CLustering Algorithm' (MCLA) "
"as ensemble consensus functions.\n")
else:
consensus_functions = [CSPA, HGPA, MCLA]
function_names = ['CSPA', 'HGPA', 'MCLA']
hypergraph_adjacency = build_hypergraph_adjacency(cluster_runs)
store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name)
for i in range(len(consensus_functions)):
cluster_ensemble.append(consensus_functions[i](hdf5_file_name, cluster_runs, verbose, N_clusters_max))
score = np.append(score, ceEvalMutual(cluster_runs, cluster_ensemble[i], verbose))
print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
"{0} at {1}.".format(function_names[i], score[i]))
print('*****')
return cluster_ensemble[np.argmax(score)]
|
https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L240-L315
|
k means clustering
|
python
|
def lloyd_aggregation(C, ratio=0.03, distance='unit', maxiter=10):
"""Aggregate nodes using Lloyd Clustering.
Parameters
----------
C : csr_matrix
strength of connection matrix
ratio : scalar
Fraction of the nodes which will be seeds.
distance : ['unit','abs','inv',None]
Distance assigned to each edge of the graph G used in Lloyd clustering
For each nonzero value C[i,j]:
======= ===========================
'unit' G[i,j] = 1
'abs' G[i,j] = abs(C[i,j])
'inv' G[i,j] = 1.0/abs(C[i,j])
'same' G[i,j] = C[i,j]
'sub' G[i,j] = C[i,j] - min(C)
======= ===========================
maxiter : int
Maximum number of iterations to perform
Returns
-------
AggOp : csr_matrix
aggregation operator which determines the sparsity pattern
of the tentative prolongator
seeds : array
array of Cpts, i.e., Cpts[i] = root node of aggregate i
See Also
--------
amg_core.standard_aggregation
Examples
--------
>>> from scipy.sparse import csr_matrix
>>> from pyamg.gallery import poisson
>>> from pyamg.aggregation.aggregate import lloyd_aggregation
>>> A = poisson((4,), format='csr') # 1D mesh with 4 vertices
>>> A.todense()
matrix([[ 2., -1., 0., 0.],
[-1., 2., -1., 0.],
[ 0., -1., 2., -1.],
[ 0., 0., -1., 2.]])
>>> lloyd_aggregation(A)[0].todense() # one aggregate
matrix([[1],
[1],
[1],
[1]], dtype=int8)
>>> # more seeding for two aggregates
>>> Agg = lloyd_aggregation(A,ratio=0.5)[0].todense()
"""
if ratio <= 0 or ratio > 1:
raise ValueError('ratio must be > 0.0 and <= 1.0')
if not (isspmatrix_csr(C) or isspmatrix_csc(C)):
raise TypeError('expected csr_matrix or csc_matrix')
if distance == 'unit':
data = np.ones_like(C.data).astype(float)
elif distance == 'abs':
data = abs(C.data)
elif distance == 'inv':
data = 1.0/abs(C.data)
    elif distance == 'same':
        data = C.data
    elif distance == 'min':
        data = C.data - C.data.min()
else:
raise ValueError('unrecognized value distance=%s' % distance)
if C.dtype == complex:
data = np.real(data)
assert(data.min() >= 0)
G = C.__class__((data, C.indices, C.indptr), shape=C.shape)
num_seeds = int(min(max(ratio * G.shape[0], 1), G.shape[0]))
distances, clusters, seeds = lloyd_cluster(G, num_seeds, maxiter=maxiter)
row = (clusters >= 0).nonzero()[0]
col = clusters[row]
data = np.ones(len(row), dtype='int8')
AggOp = coo_matrix((data, (row, col)),
shape=(G.shape[0], num_seeds)).tocsr()
return AggOp, seeds
|
https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/aggregation/aggregate.py#L180-L272
|
k means clustering
|
python
|
def clustered_sortind(x, k=10, scorefunc=None):
"""
Uses MiniBatch k-means clustering to cluster matrix into groups.
Each cluster of rows is then sorted by `scorefunc` -- by default, the max
peak height when all rows in a cluster are averaged, or
cluster.mean(axis=0).max().
Returns the index that will sort the rows of `x` and a list of "breaks".
`breaks` is essentially a cumulative row count for each cluster boundary.
In other words, after plotting the array you can use axhline on each
"break" to plot the cluster boundary.
If `k` is a list or tuple, iteratively try each one and select the best
with the lowest mean distance from cluster centers.
:param x: Matrix whose rows are to be clustered
:param k: Number of clusters to create or a list of potential clusters; the
optimum will be chosen from the list
:param scorefunc: Optional function for sorting rows within clusters. Must
accept a single argument of a NumPy array.
"""
try:
from sklearn.cluster import MiniBatchKMeans
except ImportError:
raise ImportError('please install scikits.learn for '
'clustering.')
# If integer, do it once and we're done
if isinstance(k, int):
best_k = k
else:
mean_dists = {}
for _k in k:
mbk = MiniBatchKMeans(init='k-means++', n_clusters=_k)
mbk.fit(x)
mean_dists[_k] = mbk.transform(x).mean()
        best_k = sorted(mean_dists.items(), key=lambda x: x[1])[0][0]  # lowest mean distance
mbk = MiniBatchKMeans(init='k-means++', n_clusters=best_k)
mbk.fit(x)
k = best_k
labels = mbk.labels_
scores = np.zeros(labels.shape, dtype=float)
if not scorefunc:
def scorefunc(x):
return x.mean(axis=0).max()
for label in range(k):
ind = labels == label
score = scorefunc(x[ind, :])
scores[ind] = score
pos = 0
breaks = []
ind = np.argsort(scores)
for k, g in itertools.groupby(labels[ind]):
pos += len(list(g))
breaks.append(pos)
return ind, breaks
|
https://github.com/daler/metaseq/blob/fa875d1f72317aa7ef95cb128b739956b16eef9f/metaseq/plotutils.py#L598-L660
|
k means clustering
|
python
|
def cluster(seqs, threshold=0.975, out_file=None, temp_dir=None, make_db=True,
quiet=False, threads=0, return_just_seq_ids=False, max_memory=800, debug=False):
'''
Perform sequence clustering with CD-HIT.
Args:
seqs (list): An iterable of sequences, in any format that `abutils.utils.sequence.Sequence()`
can handle
threshold (float): Clustering identity threshold. Default is `0.975`.
out_file (str): Path to the clustering output file. Default is to use
`tempfile.NamedTemporaryFile` to generate an output file name.
temp_dir (str): Path to the temporary directory. If not provided, `'/tmp'` is used.
make_db (bool): Whether to build a SQlite database of sequence information. Required
if you want to calculate consensus/centroid sequences for the resulting
clusters or if you need to access the clustered sequences (not just sequence IDs)
Default is `True`.
        quiet (bool): If `True`, suppresses printing of output/progress info. Default is `False`.
threads (int): Number of threads (CPU cores) to be used for clustering. Default is `0`,
which results in all available cores being used.
return_just_seq_ids (bool): If `True`, will return a 2D list of sequence IDs
(a list containing a list of sequence IDs for each cluster) rather than returning a
list of `Cluster` objects.
max_memory (int): Max memory (in MB) for CD-HIT. Will be passed directly to CD-HIT through
the `-M` runtime option. Default is `800`.
debug (bool): If `True`, print standard output and standard error from CD-HIT. Default is `False`.
Returns:
list: A list of `Cluster` objects (or a 2D list of sequence IDs, if `return_just_seq_ids` is `True`).
'''
if make_db:
ofile, cfile, seq_db, db_path = cdhit(seqs, out_file=out_file, temp_dir=temp_dir,
threshold=threshold, make_db=True, quiet=quiet,
threads=threads, max_memory=max_memory, debug=debug)
return parse_clusters(ofile, cfile, seq_db=seq_db, db_path=db_path, return_just_seq_ids=return_just_seq_ids)
else:
seqs = [Sequence(s) for s in seqs]
seq_dict = {s.id: s for s in seqs}
ofile, cfile, = cdhit(seqs, out_file=out_file, temp_dir=temp_dir, threads=threads,
threshold=threshold, make_db=False, quiet=quiet,
max_memory=max_memory, debug=debug)
return parse_clusters(ofile, cfile, seq_dict=seq_dict, return_just_seq_ids=return_just_seq_ids)
|
https://github.com/briney/abutils/blob/944755fc7d28bfc7d4f1ffad94ca0bf9d74ec54b/abutils/utils/cluster.py#L233-L284
|
k means clustering
|
python
|
def hierarch_cluster(M):
"""Cluster matrix using hierarchical clustering.
Parameters
----------
M : np.ndarray
Matrix, for example, distance matrix.
Returns
-------
Mclus : np.ndarray
Clustered matrix.
indices : np.ndarray
Indices used to cluster the matrix.
"""
import scipy as sp
import scipy.cluster
link = sp.cluster.hierarchy.linkage(M)
indices = sp.cluster.hierarchy.leaves_list(link)
Mclus = np.array(M[:, indices])
Mclus = Mclus[indices, :]
if False:
pl.matshow(Mclus)
pl.colorbar()
return Mclus, indices
|
https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/utils.py#L896-L920
|
k means clustering
|
python
|
def getlevel(self, threshold):
"""
Returns all clusters with a maximum distance of *threshold* in between
each other
:param threshold: the maximum distance between clusters.
See :py:meth:`~cluster.cluster.Cluster.getlevel`
"""
# if it's not worth clustering, just return the data
if len(self._input) <= 1:
return self._input
# initialize the cluster if not yet done
if not self.__cluster_created:
self.cluster()
return self._data[0].getlevel(threshold)
|
https://github.com/exhuma/python-cluster/blob/4c0ac14d9beafcd51f0d849151514083c296402f/cluster/method/hierarchical.py#L191-L209
|
k means clustering
|
python
|
def cluster_lsun():
"Not so applicable for this sample"
start_centers = [[1.0, 3.5], [2.0, 0.5], [3.0, 3.0]]
template_clustering(start_centers, FCPS_SAMPLES.SAMPLE_LSUN, criterion = splitting_type.BAYESIAN_INFORMATION_CRITERION)
template_clustering(start_centers, FCPS_SAMPLES.SAMPLE_LSUN, criterion = splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH)
|
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/examples/xmeans_examples.py#L115-L119
|
k means clustering
|
python
|
def __store_clustering_results(self, amount_clusters, leaf_blocks):
"""!
@brief Stores clustering results in a convenient way.
@param[in] amount_clusters (uint): Amount of cluster that was allocated during processing.
@param[in] leaf_blocks (list): Leaf BANG-blocks (the smallest cells).
"""
self.__clusters = [[] for _ in range(amount_clusters)]
for block in leaf_blocks:
index = block.get_cluster()
if index is not None:
self.__clusters[index] += block.get_points()
else:
self.__noise += block.get_points()
self.__clusters = [ list(set(cluster)) for cluster in self.__clusters ]
self.__noise = list(set(self.__noise))
|
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/bang.py#L1194-L1212
|
k means clustering
|
python
|
def process(self):
"""!
@brief Performs cluster analysis in line with rules of K-Medoids algorithm.
@return (kmedoids) Returns itself (K-Medoids instance).
@remark Results of clustering can be obtained using corresponding get methods.
@see get_clusters()
@see get_medoids()
"""
if self.__ccore is True:
ccore_metric = metric_wrapper.create_instance(self.__metric)
self.__clusters, self.__medoid_indexes = wrapper.kmedoids(self.__pointer_data, self.__medoid_indexes, self.__tolerance, self.__itermax, ccore_metric.get_pointer(), self.__data_type)
else:
changes = float('inf')
iterations = 0
while changes > self.__tolerance and iterations < self.__itermax:
self.__clusters = self.__update_clusters()
update_medoid_indexes = self.__update_medoids()
changes = max([self.__distance_calculator(self.__medoid_indexes[index], update_medoid_indexes[index]) for index in range(len(update_medoid_indexes))])
self.__medoid_indexes = update_medoid_indexes
iterations += 1
return self
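    # Hedged usage sketch -- not part of the linked repository. Typical driver
    # code for pyclustering's K-Medoids (the medoid indexes are illustrative):
    #
    #     from pyclustering.cluster.kmedoids import kmedoids
    #
    #     initial_medoids = [0, 25, 50]            # indexes into `sample`
    #     instance = kmedoids(sample, initial_medoids)
    #     instance.process()
    #     clusters = instance.get_clusters()
    #     medoids = instance.get_medoids()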
|
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/kmedoids.py#L141-L172
|
k means clustering
|
python
|
def _calculate_cluster_distance(end_iter):
"""Compute allowed distance for clustering based on end confidence intervals.
"""
out = []
sizes = []
for x in end_iter:
out.append(x)
sizes.append(x.end1 - x.start1)
sizes.append(x.end2 - x.start2)
distance = sum(sizes) // len(sizes)
return distance, out
|
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hydra_to_vcf.py#L233-L243
|
k means clustering
|
python
|
def cluster(data,inputs,verbose=False):
"""Clusters data
Using the new offset model, this method uses a greedy algorithm to cluster
the data. It starts with all the data points in separate clusters and tests
whether combining them increases the overall log-likelihood (LL). It then
iteratively joins pairs of clusters which cause the greatest increase in
the LL, until no join increases the LL.
arguments:
inputs -- the 'X's in a list, one item per cluster
data -- the 'Y's in a list, one item per cluster
returns a list of the clusters.
"""
N=len(data)
#Define a set of N active cluster
active = []
for p in range(0,N):
active.append([p])
loglikes = np.zeros(len(active))
loglikes[:] = None
pairloglikes = np.zeros([len(active),len(active)])
pairloglikes[:] = None
pairoffset = np.zeros([len(active),len(active)])
it = 0
while True:
if verbose:
it +=1
print("Iteration %d" % it)
#Compute the log-likelihood of each cluster (add them together)
for clusti in range(len(active)):
if verbose:
sys.stdout.write('.')
sys.stdout.flush()
if np.isnan(loglikes[clusti]):
loglikes[clusti], unused_offset = get_log_likelihood_offset(inputs,data,[clusti])
#try combining with each other cluster...
for clustj in range(clusti): #count from 0 to clustj-1
temp = [clusti,clustj]
if np.isnan(pairloglikes[clusti,clustj]):
pairloglikes[clusti,clustj],pairoffset[clusti,clustj] = get_log_likelihood_offset(inputs,data,temp)
seploglikes = np.repeat(loglikes[:,None].T,len(loglikes),0)+np.repeat(loglikes[:,None],len(loglikes),1)
loglikeimprovement = pairloglikes - seploglikes #how much likelihood improves with clustering
top = np.unravel_index(np.nanargmax(pairloglikes-seploglikes), pairloglikes.shape)
#if loglikeimprovement.shape[0]<3:
# #no more clustering to do - this shouldn't happen really unless
# #we've set the threshold to apply clustering to less than 0
# break
#if theres further clustering to be done...
if loglikeimprovement[top[0],top[1]]>0:
active[top[0]].extend(active[top[1]])
offset=pairoffset[top[0],top[1]]
inputs[top[0]] = np.vstack([inputs[top[0]],inputs[top[1]]-offset])
data[top[0]] = np.hstack([data[top[0]],data[top[1]]])
del inputs[top[1]]
del data[top[1]]
del active[top[1]]
#None = message to say we need to recalculate
pairloglikes[:,top[0]] = None
pairloglikes[top[0],:] = None
pairloglikes = np.delete(pairloglikes,top[1],0)
pairloglikes = np.delete(pairloglikes,top[1],1)
loglikes[top[0]] = None
loglikes = np.delete(loglikes,top[1])
else:
break
#if loglikeimprovement[top[0],top[1]]>0:
# print "joined"
# print top
# print offset
# print offsets
# print offsets[top[1]]-offsets[top[0]]
#TODO Add a way to return the offsets applied to all the time series
return active
|
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/util/cluster_with_offset.py#L91-L179
|
k means clustering
|
python
|
def process(self):
"""!
@brief Performs cluster analysis in line with rules of K-Medians algorithm.
@return (kmedians) Returns itself (K-Medians instance).
@remark Results of clustering can be obtained using corresponding get methods.
@see get_clusters()
@see get_medians()
"""
if self.__ccore is True:
ccore_metric = metric_wrapper.create_instance(self.__metric)
self.__clusters, self.__medians = wrapper.kmedians(self.__pointer_data, self.__medians, self.__tolerance, self.__itermax, ccore_metric.get_pointer())
else:
changes = float('inf')
# Check for dimension
if len(self.__pointer_data[0]) != len(self.__medians[0]):
raise NameError('Dimension of the input data and dimension of the initial medians must be equal.')
iterations = 0
while changes > self.__tolerance and iterations < self.__itermax:
self.__clusters = self.__update_clusters()
updated_centers = self.__update_medians()
changes = max([self.__metric(self.__medians[index], updated_centers[index]) for index in range(len(updated_centers))])
self.__medians = updated_centers
iterations += 1
return self
|
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/kmedians.py#L104-L139
|
k means clustering
|
python
|
def fit(self, X):
""" Apply KMeans Clustering
X: dataset with feature vectors
"""
self.centers_, self.labels_, self.sse_arr_, self.n_iter_ = \
_kmeans(X, self.n_clusters, self.max_iter, self.n_trials, self.tol)
|
https://github.com/vmirly/pyclust/blob/bdb12be4649e70c6c90da2605bc5f4b314e2d07e/pyclust/_kmeans.py#L137-L142
|
k means clustering
|
python
|
def show_clusters(data, observer, marker='.', markersize=None):
"""!
@brief Shows allocated clusters by the genetic algorithm.
@param[in] data (list): Input data that was used for clustering process by the algorithm.
@param[in] observer (ga_observer): Observer that was used for collection information about clustering process.
@param[in] marker (char): Type of marker that should be used for object (point) representation.
@param[in] markersize (uint): Size of the marker that is used for object (point) representation.
@note If you have clusters instead of observer then 'cluster_visualizer' can be used for visualization purposes.
@see cluster_visualizer
"""
figure = plt.figure()
ax1 = figure.add_subplot(121)
clusters = ga_math.get_clusters_representation(observer.get_global_best()['chromosome'][-1])
visualizer = cluster_visualizer(1, 2)
visualizer.append_clusters(clusters, data, 0, marker, markersize)
visualizer.show(figure, display=False)
ga_visualizer.show_evolution(observer, 0, None, ax1, True)
|
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/ga.py#L248-L272
|
k means clustering
|
python
|
def filter_clustering(self, analytes, filt=False, normalise=True,
method='kmeans', include_time=False, samples=None,
sort=True, subset=None, level='sample', min_data=10, **kwargs):
"""
        Applies an n-dimensional clustering filter to the data.
Parameters
----------
analytes : str
The analyte(s) that the filter applies to.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
normalise : bool
Whether or not to normalise the data to zero mean and unit
            variance. Recommended if clustering based on more than 1 analyte.
Uses `sklearn.preprocessing.scale`.
method : str
Which clustering algorithm to use:
* 'meanshift': The `sklearn.cluster.MeanShift` algorithm.
Automatically determines number of clusters
in data based on the `bandwidth` of expected
variation.
* 'kmeans': The `sklearn.cluster.KMeans` algorithm. Determines
the characteristics of a known number of clusters
within the data. Must provide `n_clusters` to specify
the expected number of clusters.
level : str
Whether to conduct the clustering analysis at the 'sample' or
'population' level.
include_time : bool
Whether or not to include the Time variable in the clustering
analysis. Useful if you're looking for spatially continuous
clusters in your data, i.e. this will identify each spot in your
analysis as an individual cluster.
samples : optional, array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
sort : bool
Whether or not you want the cluster labels to
be sorted by the mean magnitude of the signals
they are based on (0 = lowest)
min_data : int
The minimum number of data points that should be considered by
the filter. Default = 10.
**kwargs
Parameters passed to the clustering algorithm specified by
`method`.
Meanshift Parameters
bandwidth : str or float
            The bandwidth (float) or bandwidth method ('scott' or 'silverman')
used to estimate the data bandwidth.
bin_seeding : bool
Modifies the behaviour of the meanshift algorithm. Refer to
sklearn.cluster.meanshift documentation.
K-Means Parameters
n_clusters : int
The number of clusters expected in the data.
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
if isinstance(analytes, str):
analytes = [analytes]
self.minimal_analytes.update(analytes)
if level == 'sample':
with self.pbar.set(total=len(samples), desc='Clustering Filter') as prog:
for s in samples:
self.data[s].filter_clustering(analytes=analytes, filt=filt,
normalise=normalise,
method=method,
include_time=include_time,
min_data=min_data,
sort=sort,
**kwargs)
prog.update()
if level == 'population':
if isinstance(sort, bool):
sort_by = 0
else:
sort_by = sort
name = '_'.join(analytes) + '_{}'.format(method)
self.fit_classifier(name=name, analytes=analytes, method=method,
subset=subset, filt=filt, sort_by=sort_by, **kwargs)
self.apply_classifier(name=name, subset=subset)
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L2046-L2143
|
k means clustering
|
python
|
def cluster(self, n, embed_dim=None, algo=spectral.SPECTRAL, method=methods.KMEANS):
"""
Cluster the embedded coordinates using spectral clustering
Parameters
----------
n: int
The number of clusters to return
embed_dim: int
The dimensionality of the underlying coordinates
Defaults to same value as n
algo: enum value (spectral.SPECTRAL | spectral.KPCA | spectral.ZELNIKMANOR)
Type of embedding to use
method: enum value (methods.KMEANS | methods.GMM)
The clustering method to use
Returns
-------
Partition: Partition object describing the data partition
"""
if n == 1:
return Partition([1] * len(self.get_dm(False)))
if embed_dim is None:
embed_dim = n
if algo == spectral.SPECTRAL:
self._coords = self.spectral_embedding(embed_dim)
elif algo == spectral.KPCA:
self._coords = self.kpca_embedding(embed_dim)
elif algo == spectral.ZELNIKMANOR:
self._coords = self.spectral_embedding_(embed_dim)
else:
raise OptionError(algo, list(spectral.reverse.values()))
if method == methods.KMEANS:
p = self.kmeans(n, self._coords.df.values)
elif method == methods.GMM:
p = self.gmm(n, self._coords.df.values)
elif method == methods.WARD:
linkmat = fastcluster.linkage(self._coords.values, 'ward')
p = _hclust(linkmat, n)
else:
raise OptionError(method, list(methods.reverse.values()))
if self._verbosity > 0:
print('Using clustering method: {}'.format(methods.reverse[method]))
return p
|
https://github.com/kgori/treeCl/blob/fed624b3db1c19cc07175ca04e3eda6905a8d305/treeCl/clustering.py#L234-L279
|
k means clustering
|
python
|
def sgraph(N_clusters_max, file_name):
"""Runs METIS or hMETIS and returns the labels found by those
(hyper-)graph partitioning algorithms.
Parameters
----------
N_clusters_max : int
file_name : string
Returns
-------
labels : array of shape (n_samples,)
A vector of labels denoting the cluster to which each sample has been assigned
as a result of any of three approximation algorithms for consensus clustering
(either of CSPA, HGPA or MCLA).
"""
if file_name == 'DO_NOT_PROCESS':
return []
print('\n#')
k = str(N_clusters_max)
out_name = file_name + '.part.' + k
if file_name == 'wgraph_HGPA':
print("INFO: Cluster_Ensembles: sgraph: "
"calling shmetis for hypergraph partitioning.")
if sys.platform.startswith('linux'):
shmetis_path = pkg_resources.resource_filename(__name__,
'Hypergraph_Partitioning/hmetis-1.5-linux/shmetis')
elif sys.platform.startswith('darwin'):
shmetis_path = pkg_resources.resource_filename(__name__,
'Hypergraph_Partitioning/hmetis-1.5-osx-i686/shmetis')
else:
print("ERROR: Cluster_Ensembles: sgraph:\n"
"your platform is not supported. Some code required for graph partition "
"is only available for Linux distributions and OS X.")
sys.exit(1)
args = "{0} ./".format(shmetis_path) + file_name + " " + k + " 15"
subprocess.call(args, shell = True)
elif file_name == 'wgraph_CSPA' or file_name == 'wgraph_MCLA':
print("INFO: Cluster_Ensembles: sgraph: "
"calling gpmetis for graph partitioning.")
args = "gpmetis ./" + file_name + " " + k
subprocess.call(args, shell = True)
else:
raise NameError("ERROR: Cluster_Ensembles: sgraph: {} is not an acceptable "
"file-name.".format(file_name))
labels = np.empty(0, dtype = int)
with open(out_name, 'r') as file:
print("INFO: Cluster_Ensembles: sgraph: (hyper)-graph partitioning completed; "
"loading {}".format(out_name))
labels = np.loadtxt(out_name, dtype = int)
labels = labels.reshape(labels.size)
labels = one_to_max(labels)
subprocess.call(['rm', out_name])
print('#')
return labels
|
https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L1157-L1221
|
k means clustering
|
python
|
def classify_clusters(points, n=10):
"""
Return an array of K-Means cluster classes for an array of `shapely.geometry.Point` objects.
"""
arr = [[p.x, p.y] for p in points.values]
clf = KMeans(n_clusters=n)
clf.fit(arr)
classes = clf.predict(arr)
return classes
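# Hedged usage sketch -- not from the linked repository. `points` is expected
# to expose a `.values` attribute holding shapely Points (e.g. a pandas or
# GeoPandas Series); the coordinates below are purely illustrative:
#
#     import pandas as pd
#     from shapely.geometry import Point
#
#     pts = pd.Series([Point(0, 0), Point(0, 1), Point(10, 10), Point(10, 11)])
#     classes = classify_clusters(pts, n=2)     # two well-separated pairs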
|
https://github.com/ResidentMario/geoplot/blob/942b474878187a87a95a27fbe41285dfdc1d20ca/geoplot/utils.py#L23-L31
|
k means clustering
|
python
|
def cluster_labels_A(hdf5_file, c, lock, I, rows_slice):
"""One of the task to be performed by a pool of subprocesses, as the first
step in identifying the cluster labels and indices of the cluster centers
for Affinity Propagation clustering.
"""
with Worker.hdf5_lock:
with tables.open_file(hdf5_file, 'r+') as fileh:
S = fileh.root.aff_prop_group.similarities
s = S[rows_slice, ...]
s = np.argmax(s[:, I], axis = 1)
with lock:
c[rows_slice] = s[:]
del s
|
https://github.com/GGiecold/Concurrent_AP/blob/d4cebe06268b5d520352a83cadb2f7520650460c/Concurrent_AP.py#L827-L843
|
k means clustering
|
python
|
def cluster_sample5():
"Start with wrong number of clusters."
start_centers = [[0.0, 1.0], [0.0, 0.0]]
template_clustering(start_centers, SIMPLE_SAMPLES.SAMPLE_SIMPLE5, criterion = splitting_type.BAYESIAN_INFORMATION_CRITERION)
template_clustering(start_centers, SIMPLE_SAMPLES.SAMPLE_SIMPLE5, criterion = splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH)
|
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/examples/xmeans_examples.py#L99-L103
|
k means clustering
|
python
|
def cluster_centers_(self):
"""
Searches for or creates cluster centers for the specified clustering
        algorithm. This algorithm ensures that the centers are
        appropriately drawn and scaled so that distances between clusters are
maintained.
"""
# TODO: Handle agglomerative clustering and LDA
for attr in ('cluster_centers_',):
try:
return getattr(self.estimator, attr)
except AttributeError:
continue
raise AttributeError(
"could not find or make cluster_centers_ for {}".format(
self.estimator.__class__.__name__
))
|
https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/cluster/icdm.py#L230-L247
|
k means clustering
|
python
|
def process(self):
"""!
@brief Performs cluster analysis by competition between neurons of SOM.
@remark Results of clustering can be obtained using corresponding get methods.
@see get_clusters()
"""
self.__network = som(1, self.__amount_clusters, type_conn.grid_four, None, self.__ccore);
self.__network.train(self.__data_pointer, self.__epouch, True);
|
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/somsc.py#L87-L98
|
k means clustering
|
python
|
def rock(cls, data, eps, number_clusters, threshold=0.5, ccore=False):
"""
Constructor of the ROCK cluster analysis algorithm
:param eps: Connectivity radius (similarity threshold), points are neighbors if distance between them is less than connectivity radius
:param number_clusters: Defines number of clusters that should be allocated from the input data set
:param threshold: Value that defines degree of normalization that influences on choice of clusters for merging during processing
:param ccore: Defines should be CCORE (C++ pyclustering library) used instead of Python code or not.
:return: The resulting clustering object
"""
data = cls.input_preprocess(data)
model = rock(data, eps, number_clusters, threshold, ccore)
return cls(model)
|
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L55-L67
|
k means clustering
|
python
|
def cluster_regspace(data=None, dmin=-1, max_centers=1000, stride=1, metric='euclidean',
n_jobs=None, chunksize=None, skip=0, **kwargs):
r"""Regular space clustering
If given data, it performs a regular space clustering [1]_ and returns a
:class:`RegularSpaceClustering <pyemma.coordinates.clustering.RegularSpaceClustering>` object that
can be used to extract the discretized data sequences, or to assign other
data points to the same partition. If data is not given, an empty
:class:`RegularSpaceClustering <pyemma.coordinates.clustering.RegularSpaceClustering>` will be created
that still needs to be parametrized, e.g. in a :func:`pipeline`.
Regular space clustering is very similar to Hartigan's leader algorithm [2]_.
It consists of two passes through the data. Initially, the first data point
is added to the list of centers. For every subsequent data point, if it has
a greater distance than dmin from every center, it also becomes a center.
In the second pass, a Voronoi discretization with the computed centers is
used to partition the data.
Parameters
----------
data : ndarray (T, d) or list of ndarray (T_i, d) or a reader created by :func:`source
input data, if available in memory
dmin : float
the minimal distance between cluster centers
max_centers : int (optional), default=1000
If max_centers is reached, the algorithm will stop searching for more centers,
but it is possible that parts of the state space are not properly
discretized. This will generate a warning. If that happens, it is
suggested to increase dmin such that the number of centers stays below
max_centers.
stride : int, optional, default = 1
If set to 1, all input data will be used for estimation. Note that this
could cause this calculation to be very slow for large data sets. Since
molecular dynamics data is usually correlated at short timescales, it is
often sufficient to estimate transformations at a longer stride. Note
that the stride option in the get_output() function of the returned
object is independent, so you can parametrize at a long stride, and
still map all frames through the transformer.
metric : str
metric to use during clustering ('euclidean', 'minRMSD')
n_jobs : int or None, default None
Number of threads to use during assignment of the data.
If None, all available CPUs will be used.
chunksize: int, default=None
Number of data frames to process at once. Choose a higher value here,
to optimize thread usage and gain processing speed. If None is passed,
use the default value of the underlying reader/data source. Choose zero to
disable chunking at all.
Returns
-------
regSpace : a :class:`RegularSpaceClustering <pyemma.coordinates.clustering.RegularSpaceClustering>` clustering object
Object for regular space clustering.
It holds discrete trajectories and cluster center information.
.. autoclass:: pyemma.coordinates.clustering.regspace.RegularSpaceClustering
:members:
:undoc-members:
.. rubric:: Methods
.. autoautosummary:: pyemma.coordinates.clustering.regspace.RegularSpaceClustering
:methods:
.. rubric:: Attributes
.. autoautosummary:: pyemma.coordinates.clustering.regspace.RegularSpaceClustering
:attributes:
References
----------
.. [1] Prinz J-H, Wu H, Sarich M, Keller B, Senne M, Held M, Chodera JD, Schuette Ch and Noe F. 2011.
Markov models of molecular kinetics: Generation and Validation.
J. Chem. Phys. 134, 174105.
.. [2] Hartigan J. Clustering algorithms.
New York: Wiley; 1975.
"""
if dmin == -1:
raise ValueError("provide a minimum distance for clustering, e.g. 2.0")
from pyemma.coordinates.clustering.regspace import RegularSpaceClustering as _RegularSpaceClustering
res = _RegularSpaceClustering(dmin, max_centers=max_centers, metric=metric,
n_jobs=n_jobs, stride=stride, skip=skip)
from pyemma.util.reflection import get_default_args
cs = _check_old_chunksize_arg(chunksize, get_default_args(cluster_regspace)['chunksize'], **kwargs)
if data is not None:
res.estimate(data, chunksize=cs)
else:
res.chunksize = cs
return res
|
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/api.py#L1911-L2009
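A condensed NumPy sketch of the two-pass procedure the docstring above describes (first pass collects centers farther than dmin from all previous centers, second pass does a Voronoi assignment); illustrative only, not the PyEMMA implementation.

import numpy as np

def regspace_sketch(data, dmin, max_centers=1000):
    centers = [data[0]]
    # pass 1: a point becomes a new center if it is farther than dmin from every center
    for x in data[1:]:
        if len(centers) >= max_centers:
            break
        if np.min(np.linalg.norm(np.asarray(centers) - x, axis=1)) > dmin:
            centers.append(x)
    centers = np.asarray(centers)
    # pass 2: Voronoi discretization with the collected centers
    dtraj = np.argmin(np.linalg.norm(data[:, None, :] - centers[None, :, :], axis=2), axis=1)
    return centers, dtraj

data = np.random.RandomState(0).rand(500, 2)
centers, dtraj = regspace_sketch(data, dmin=0.3)
print(centers.shape, dtraj[:10])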
|
k means clustering
|
python
|
def calc_clusters(returns, n=None, plot=False):
"""
Calculates the clusters based on k-means
clustering.
Args:
* returns (pd.DataFrame): DataFrame of returns
* n (int): Specify # of clusters. If None, this
will be automatically determined
* plot (bool): Show plot?
Returns:
* dict with structure: {cluster# : [col names]}
"""
# calculate correlation
corr = returns.corr()
# calculate dissimilarity matrix
diss = 1 - corr
# scale down to 2 dimensions using MDS
# (multi-dimensional scaling) using the
# dissimilarity matrix
mds = sklearn.manifold.MDS(dissimilarity='precomputed')
xy = mds.fit_transform(diss)
def routine(k):
# fit KMeans
km = sklearn.cluster.KMeans(n_clusters=k)
km_fit = km.fit(xy)
labels = km_fit.labels_
centers = km_fit.cluster_centers_
# get {ticker: label} mappings
mappings = dict(zip(returns.columns, labels))
# print % of var explained
totss = 0
withinss = 0
# column average for totss
avg = np.array([np.mean(xy[:, 0]), np.mean(xy[:, 1])])
for idx, lbl in enumerate(labels):
withinss += sum((xy[idx] - centers[lbl]) ** 2)
totss += sum((xy[idx] - avg) ** 2)
pvar_expl = 1.0 - withinss / totss
return mappings, pvar_expl, labels
if n:
result = routine(n)
else:
n = len(returns.columns)
n1 = int(np.ceil(n * 0.6666666666))
for i in range(2, n1 + 1):
result = routine(i)
if result[1] > 0.9:
break
if plot:
fig, ax = plt.subplots()
ax.scatter(xy[:, 0], xy[:, 1], c=result[2], s=90)
for i, txt in enumerate(returns.columns):
ax.annotate(txt, (xy[i, 0], xy[i, 1]), size=14)
# sanitize return value
tmp = result[0]
# map as such {cluster: [list of tickers], cluster2: [...]}
inv_map = {}
for k, v in iteritems(tmp):
inv_map[v] = inv_map.get(v, [])
inv_map[v].append(k)
return inv_map
|
https://github.com/pmorissette/ffn/blob/ef09f28b858b7ffcd2627ce6a4dc618183a6bc8a/ffn/core.py#L1730-L1802
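The percent-of-variance check inside routine() above, in isolation on synthetic 2-D points; a sketch using scikit-learn only, not the ffn code path.

import numpy as np
from sklearn.cluster import KMeans

rng = np.random.RandomState(0)
xy = np.vstack([rng.normal(0, 0.2, (20, 2)), rng.normal(3, 0.2, (20, 2))])
km = KMeans(n_clusters=2, n_init=10).fit(xy)
centers, labels = km.cluster_centers_, km.labels_
# within-cluster and total sums of squares, as in routine()
withinss = sum(((xy[i] - centers[l]) ** 2).sum() for i, l in enumerate(labels))
totss = ((xy - xy.mean(axis=0)) ** 2).sum()
print(1.0 - withinss / totss)  # close to 1 for well-separated clusters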
|
k means clustering
|
python
|
def __calculate_clusters(self, k):
"""!
@brief Performs cluster analysis using specified K value.
@param[in] k (uint): Amount of clusters that should be allocated.
@return (array_like) Allocated clusters.
"""
initial_values = kmeans_plusplus_initializer(self.__data, k).initialize(return_index=self.__return_index)
algorithm_type = self.__algorithm.get_type()
return algorithm_type(self.__data, initial_values).process().get_clusters()
|
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/silhouette.py#L471-L482
|
k means clustering
|
python
|
def cluster_sample2():
"Start with wrong number of clusters."
start_centers = [[3.5, 4.8], [2.6, 2.5]]
template_clustering(start_centers, SIMPLE_SAMPLES.SAMPLE_SIMPLE2, criterion = splitting_type.BAYESIAN_INFORMATION_CRITERION)
template_clustering(start_centers, SIMPLE_SAMPLES.SAMPLE_SIMPLE2, criterion = splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH)
|
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/examples/xmeans_examples.py#L70-L74
|
k means clustering
|
python
|
def cluster_sample1():
"Start with wrong number of clusters."
start_centers = [[3.7, 5.5]]
template_clustering(start_centers, SIMPLE_SAMPLES.SAMPLE_SIMPLE1, criterion = splitting_type.BAYESIAN_INFORMATION_CRITERION)
template_clustering(start_centers, SIMPLE_SAMPLES.SAMPLE_SIMPLE1, criterion = splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH)
|
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/examples/xmeans_examples.py#L60-L64
|
k means clustering
|
python
|
def clustering_coef_bd(A):
'''
The clustering coefficient is the fraction of triangles around a node
(equiv. the fraction of a node's neighbors that are neighbors of each other).
Parameters
----------
A : NxN np.ndarray
binary directed connection matrix
Returns
-------
C : Nx1 np.ndarray
clustering coefficient vector
Notes
-----
Methodological note: In directed graphs, 3 nodes generate up to 8
triangles (2*2*2 edges). The number of existing triangles is the main
diagonal of S^3/2. The number of all (in or out) neighbour pairs is
K(K-1)/2. Each neighbour pair may generate two triangles. "False pairs"
are i<->j edge pairs (these do not generate triangles). The number of
false pairs is the main diagonal of A^2.
Thus the maximum possible number of triangles =
= (2 edges)*([ALL PAIRS] - [FALSE PAIRS])
= 2 * (K(K-1)/2 - diag(A^2))
= K(K-1) - 2(diag(A^2))
'''
S = A + A.T # symmetrized input graph
K = np.sum(S, axis=1) # total degree (in+out)
cyc3 = np.diag(np.dot(S, np.dot(S, S))) / 2 # number of 3-cycles
K[np.where(cyc3 == 0)] = np.inf # if no 3-cycles exist, make C=0
# number of all possible 3 cycles
CYC3 = K * (K - 1) - 2 * np.diag(np.dot(A, A))
C = cyc3 / CYC3
return C
|
https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/clustering.py#L92-L127
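A tiny check of the computation above on a 3-node directed cycle 0->1->2->0 (NumPy only): one of the two possible neighbour-pair triangles around each node exists, so every entry of C should be 0.5.

import numpy as np

A = np.array([[0, 1, 0],
              [0, 0, 1],
              [1, 0, 0]], dtype=float)
S = A + A.T                                  # symmetrized graph
K = S.sum(axis=1)                            # in+out degree: 2 per node
cyc3 = np.diag(S @ S @ S) / 2                # existing 3-cycles: 1 per node
CYC3 = K * (K - 1) - 2 * np.diag(A @ A)      # possible triangles: 2 per node
print(cyc3 / CYC3)                           # [0.5 0.5 0.5]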
|
k means clustering
|
python
|
def show_clusters(sample, clusters, representatives, **kwargs):
"""!
@brief Display BSAS clustering results.
@param[in] sample (list): Dataset that was used for clustering.
@param[in] clusters (array_like): Clusters that were allocated by the algorithm.
@param[in] representatives (array_like): Allocated representatives correspond to clusters.
@param[in] **kwargs: Arbitrary keyword arguments (available arguments: 'figure', 'display', 'offset').
<b>Keyword Args:</b><br>
- figure (figure): If 'None' then a new figure is created, otherwise the specified figure is used for visualization.
- display (bool): If 'True' then figure will be shown by the method, otherwise it should be shown manually using matplotlib function 'plt.show()'.
- offset (uint): Specify axes index on the figure where results should be drawn (only if argument 'figure' is specified).
@return (figure) Figure where clusters were drawn.
"""
figure = kwargs.get('figure', None)
display = kwargs.get('display', True)
offset = kwargs.get('offset', 0)
visualizer = cluster_visualizer()
visualizer.append_clusters(clusters, sample, canvas=offset)
for cluster_index in range(len(clusters)):
visualizer.append_cluster_attribute(offset, cluster_index, [representatives[cluster_index]], '*', 10)
return visualizer.show(figure=figure, display=display)
|
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/bsas.py#L46-L74
|
k means clustering
|
python
|
def MCLA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
"""Meta-CLustering Algorithm for a consensus function.
Parameters
----------
hdf5_file_name : file handle or string
cluster_runs : array of shape (n_partitions, n_samples)
verbose : bool, optional (default = False)
N_clusters_max : int, optional (default = None)
Returns
-------
A vector specifying the cluster label to which each sample has been assigned
by the MCLA approximation algorithm for consensus clustering.
Reference
---------
A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
"""
print('\n*****')
print('INFO: Cluster_Ensembles: MCLA: consensus clustering using MCLA.')
if N_clusters_max == None:
N_clusters_max = int(np.nanmax(cluster_runs)) + 1
N_runs = cluster_runs.shape[0]
N_samples = cluster_runs.shape[1]
print("INFO: Cluster_Ensembles: MCLA: preparing graph for meta-clustering.")
hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
w = hypergraph_adjacency.sum(axis = 1)
N_rows = hypergraph_adjacency.shape[0]
print("INFO: Cluster_Ensembles: MCLA: done filling hypergraph adjacency matrix. "
"Starting computation of Jaccard similarity matrix.")
# Next, obtain a matrix of pairwise Jaccard similarity scores between the rows of the hypergraph adjacency matrix.
with tables.open_file(hdf5_file_name, 'r+') as fileh:
FILTERS = get_compression_filter(4 * (N_rows ** 2))
similarities_MCLA = fileh.create_carray(fileh.root.consensus_group,
'similarities_MCLA', tables.Float32Atom(),
(N_rows, N_rows), "Matrix of pairwise Jaccard "
"similarity scores", filters = FILTERS)
scale_factor = 100.0
print("INFO: Cluster_Ensembles: MCLA: "
"starting computation of Jaccard similarity matrix.")
squared_MCLA = hypergraph_adjacency.dot(hypergraph_adjacency.transpose())
squared_sums = hypergraph_adjacency.sum(axis = 1)
squared_sums = np.squeeze(np.asarray(squared_sums))
chunks_size = get_chunk_size(N_rows, 7)
for i in range(0, N_rows, chunks_size):
n_dim = min(chunks_size, N_rows - i)
temp = squared_MCLA[i:min(i+chunks_size, N_rows), :].todense()
temp = np.squeeze(np.asarray(temp))
x = squared_sums[i:min(i+chunks_size, N_rows)]
x = x.reshape(-1, 1)
x = np.dot(x, np.ones((1, squared_sums.size)))
y = np.dot(np.ones((n_dim, 1)), squared_sums.reshape(1, -1))
temp = np.divide(temp, x + y - temp)
temp *= scale_factor
Jaccard_matrix = np.rint(temp)
similarities_MCLA[i:min(i+chunks_size, N_rows)] = Jaccard_matrix
del Jaccard_matrix, temp, x, y
gc.collect()
# Done computing the matrix of pairwise Jaccard similarity scores.
print("INFO: Cluster_Ensembles: MCLA: done computing the matrix of "
"pairwise Jaccard similarity scores.")
cluster_labels = cmetis(hdf5_file_name, N_clusters_max, w)
cluster_labels = one_to_max(cluster_labels)
# After 'cmetis' returns, we are done with clustering hyper-edges
# We are now ready to start the procedure meant to collapse meta-clusters.
N_consensus = np.amax(cluster_labels) + 1
fileh = tables.open_file(hdf5_file_name, 'r+')
FILTERS = get_compression_filter(4 * N_consensus * N_samples)
clb_cum = fileh.create_carray(fileh.root.consensus_group, 'clb_cum',
tables.Float32Atom(), (N_consensus, N_samples),
'Matrix of mean memberships, forming meta-clusters',
filters = FILTERS)
chunks_size = get_chunk_size(N_samples, 7)
for i in range(0, N_consensus, chunks_size):
x = min(chunks_size, N_consensus - i)
matched_clusters = np.where(cluster_labels == np.reshape(np.arange(i, min(i + chunks_size, N_consensus)), newshape = (x, 1)))
M = np.zeros((x, N_samples))
for j in range(x):
coord = np.where(matched_clusters[0] == j)[0]
M[j] = np.asarray(hypergraph_adjacency[matched_clusters[1][coord], :].mean(axis = 0))
clb_cum[i:min(i+chunks_size, N_consensus)] = M
# Done with collapsing the hyper-edges into a single meta-hyper-edge,
# for each of the (N_consensus - 1) meta-clusters.
del hypergraph_adjacency
gc.collect()
# Each object will now be assigned to its most associated meta-cluster.
chunks_size = get_chunk_size(N_consensus, 4)
N_chunks, remainder = divmod(N_samples, chunks_size)
if N_chunks == 0:
null_columns = np.where(clb_cum[:].sum(axis = 0) == 0)[0]
else:
szumsz = np.zeros(0)
for i in range(N_chunks):
M = clb_cum[:, i*chunks_size:(i+1)*chunks_size]
szumsz = np.append(szumsz, M.sum(axis = 0))
if remainder != 0:
M = clb_cum[:, N_chunks*chunks_size:N_samples]
szumsz = np.append(szumsz, M.sum(axis = 0))
null_columns = np.where(szumsz == 0)[0]
if null_columns.size != 0:
print("INFO: Cluster_Ensembles: MCLA: {} objects with all zero associations "
"in 'clb_cum' matrix of meta-clusters.".format(null_columns.size))
clb_cum[:, null_columns] = np.random.rand(N_consensus, null_columns.size)
random_state = np.random.RandomState()
tmp = fileh.create_carray(fileh.root.consensus_group, 'tmp', tables.Float32Atom(),
(N_consensus, N_samples), "Temporary matrix to help with "
"collapsing to meta-hyper-edges", filters = FILTERS)
chunks_size = get_chunk_size(N_samples, 2)
N_chunks, remainder = divmod(N_consensus, chunks_size)
if N_chunks == 0:
tmp[:] = random_state.rand(N_consensus, N_samples)
else:
for i in range(N_chunks):
tmp[i*chunks_size:(i+1)*chunks_size] = random_state.rand(chunks_size, N_samples)
if remainder !=0:
tmp[N_chunks*chunks_size:N_consensus] = random_state.rand(remainder, N_samples)
expr = tables.Expr("clb_cum + (tmp / 10000)")
expr.set_output(clb_cum)
expr.eval()
expr = tables.Expr("abs(tmp)")
expr.set_output(tmp)
expr.eval()
chunks_size = get_chunk_size(N_consensus, 2)
N_chunks, remainder = divmod(N_samples, chunks_size)
if N_chunks == 0:
sum_diag = tmp[:].sum(axis = 0)
else:
sum_diag = np.empty(0)
for i in range(N_chunks):
M = tmp[:, i*chunks_size:(i+1)*chunks_size]
sum_diag = np.append(sum_diag, M.sum(axis = 0))
if remainder != 0:
M = tmp[:, N_chunks*chunks_size:N_samples]
sum_diag = np.append(sum_diag, M.sum(axis = 0))
fileh.remove_node(fileh.root.consensus_group, "tmp")
# The corresponding disk space will be freed after a call to 'fileh.close()'.
inv_sum_diag = np.reciprocal(sum_diag.astype(float))
if N_chunks == 0:
clb_cum *= inv_sum_diag
max_entries = np.amax(clb_cum, axis = 0)
else:
max_entries = np.zeros(N_samples)
for i in range(N_chunks):
clb_cum[:, i*chunks_size:(i+1)*chunks_size] *= inv_sum_diag[i*chunks_size:(i+1)*chunks_size]
max_entries[i*chunks_size:(i+1)*chunks_size] = np.amax(clb_cum[:, i*chunks_size:(i+1)*chunks_size], axis = 0)
if remainder != 0:
clb_cum[:, N_chunks*chunks_size:N_samples] *= inv_sum_diag[N_chunks*chunks_size:N_samples]
max_entries[N_chunks*chunks_size:N_samples] = np.amax(clb_cum[:, N_chunks*chunks_size:N_samples], axis = 0)
cluster_labels = np.zeros(N_samples, dtype = int)
winner_probabilities = np.zeros(N_samples)
chunks_size = get_chunk_size(N_samples, 2)
for i in reversed(range(0, N_consensus, chunks_size)):
ind = np.where(np.tile(max_entries, (min(chunks_size, N_consensus - i), 1)) == clb_cum[i:min(i+chunks_size, N_consensus)])
cluster_labels[ind[1]] = i + ind[0]
winner_probabilities[ind[1]] = clb_cum[(ind[0] + i, ind[1])]
# Done with competing for objects.
cluster_labels = one_to_max(cluster_labels)
print("INFO: Cluster_Ensembles: MCLA: delivering "
"{} clusters.".format(np.unique(cluster_labels).size))
print("INFO: Cluster_Ensembles: MCLA: average posterior "
"probability is {}".format(np.mean(winner_probabilities)))
if cluster_labels.size <= 7:
print("INFO: Cluster_Ensembles: MCLA: the winning posterior probabilities are:")
print(winner_probabilities)
print("'INFO: Cluster_Ensembles: MCLA: the full posterior probabilities are:")
print(clb_cum)
fileh.remove_node(fileh.root.consensus_group, "clb_cum")
fileh.close()
return cluster_labels
|
https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L660-L881
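The Jaccard step in MCLA above, in miniature: for 0/1 rows, the dot product gives pairwise intersection sizes and the row sums give set sizes, so J = inter / (|A| + |B| - inter). A NumPy-only sketch on a toy hypergraph adjacency matrix.

import numpy as np

H = np.array([[1, 1, 0, 0, 1],
              [1, 1, 1, 0, 0],
              [0, 0, 1, 1, 0]], dtype=float)
inter = H @ H.T                                        # pairwise intersection sizes
sums = H.sum(axis=1)
jaccard = inter / (sums[:, None] + sums[None, :] - inter)
print(np.round(jaccard, 2))  # diagonal 1.0; rows 0 and 1 share 2 of 4 items -> 0.5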
|
k means clustering
|
python
|
def show_blocks(directory):
"""!
@brief Show BANG-blocks (leaves only) in data space.
@details BANG-blocks represent the grid that was used for the clustering process.
@param[in] directory (bang_directory): Directory that was created by BANG algorithm during clustering process.
"""
dimension = len(directory.get_data()[0])
amount_canvases = 1
if dimension > 1:
amount_canvases = int(dimension * (dimension - 1) / 2)
figure = plt.figure()
grid_spec = gridspec.GridSpec(1, amount_canvases)
pairs = list(itertools.combinations(range(dimension), 2))
if len(pairs) == 0: pairs = [(0, 0)]
for index in range(amount_canvases):
ax = figure.add_subplot(grid_spec[index])
bang_visualizer.__draw_blocks(ax, directory.get_leafs(), pairs[index])
bang_visualizer.__draw_two_dimension_data(ax, directory.get_data(), pairs[index])
plt.show()
|
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/bang.py#L59-L85
|
k means clustering
|
python
|
def estimate_K_xmeans(self, th=0.2, maxK = 10):
"""Estimates K running X-means algorithm (Pelleg & Moore, 2000)."""
# Run initial K-means
means, labels = self.run_kmeans(self.X, self.init_K)
# Run X-means algorithm
stop = False
curr_K = self.init_K
while not stop:
stop = True
final_means = []
for k in range(curr_K):
# Find the data that corresponds to the k-th cluster
D = self.get_clustered_data(self.X, labels, k)
if len(D) == 0 or D.shape[0] == 1:
continue
# Whiten and find whitened mean
stdD = np.std(D, axis=0)
#D = vq.whiten(D)
D /= stdD # same effect as vq.whiten(D) on the line above
mean = D.mean(axis=0)
# Cluster this subspace by half (K=2)
half_means, half_labels = self.run_kmeans(D, K=2)
# Compute BICs
bic1 = self.compute_bic(D, [mean], K=1,
labels=np.zeros(D.shape[0]),
R=D.shape[0])
bic2 = self.compute_bic(D, half_means, K=2,
labels=half_labels, R=D.shape[0])
# Split or not
max_bic = np.max([np.abs(bic1), np.abs(bic2)])
norm_bic1 = bic1 / float(max_bic)
norm_bic2 = bic2 / float(max_bic)
diff_bic = np.abs(norm_bic1 - norm_bic2)
# Split!
#print "diff_bic", diff_bic
if diff_bic > th:
final_means.append(half_means[0] * stdD)
final_means.append(half_means[1] * stdD)
curr_K += 1
stop = False
# Don't split
else:
final_means.append(mean * stdD)
final_means = np.asarray(final_means)
#print "Estimated K: ", curr_K
if self.plot:
plt.scatter(self.X[:, 0], self.X[:, 1])
plt.scatter(final_means[:, 0], final_means[:, 1], color="y")
plt.show()
if curr_K >= maxK or self.X.shape[-1] != final_means.shape[-1]:
stop = True
else:
labels, dist = vq.vq(self.X, final_means)
return curr_K
|
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/fmc2d/xmeans.py#L18-L82
|
k means clustering
|
python
|
def clarans(cls, number_clusters, num_local, max_neighbour):
"""
Constructor of the CLARANS clustering algorithm
:param number_clusters: the number of clusters to be allocated
:param num_local: the number of local minima obtained (amount of iterations for solving the problem).
:param max_neighbour: the maximum number of neighbours examined during a local search.
:return: the resulting clustering object
"""
model = clarans(None, number_clusters, num_local, max_neighbour)
return cls(model)
|
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L42-L52
|
k means clustering
|
python
|
def louvain_clustering(self, X=None, res=1, method='modularity'):
"""Runs Louvain clustering using the vtraag implementation. Assumes
that 'louvain' optional dependency is installed.
Parameters
----------
res - float, optional, default 1
The resolution parameter which tunes the number of clusters Louvain
finds.
method - str, optional, default 'modularity'
Can be 'modularity' or 'significance', which are two different
optimizing functions in the Louvain algorithm.
"""
if X is None:
X = self.adata.uns['neighbors']['connectivities']
save = True
else:
if not sp.isspmatrix_csr(X):
X = sp.csr_matrix(X)
save = False
import igraph as ig
import louvain
adjacency = sparse_knn(X.dot(X.T) / self.k, self.k).tocsr()
sources, targets = adjacency.nonzero()
weights = adjacency[sources, targets]
if isinstance(weights, np.matrix):
weights = weights.A1
g = ig.Graph(directed=True)
g.add_vertices(adjacency.shape[0])
g.add_edges(list(zip(sources, targets)))
try:
g.es['weight'] = weights
except BaseException:
pass
if method == 'significance':
cl = louvain.find_partition(g, louvain.SignificanceVertexPartition)
else:
cl = louvain.find_partition(
g,
louvain.RBConfigurationVertexPartition,
resolution_parameter=res)
if save:
self.adata.obs['louvain_clusters'] = pd.Categorical(np.array(cl.membership))
else:
return np.array(cl.membership)
|
https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L1265-L1316
|
k means clustering
|
python
|
def _kmedoids_run(X, n_clusters, distance, max_iter, tol, rng):
""" Run a single trial of k-medoids clustering
on dataset X, and given number of clusters
"""
membs = np.empty(shape=X.shape[0], dtype=int)
centers = kmeans._kmeans_init(X, n_clusters, method='', rng=rng)
sse_last = 9999.9
n_iter = 0
for it in range(1,max_iter):
membs = kmeans._assign_clusters(X, centers)
centers,sse_arr = _update_centers(X, membs, n_clusters, distance)
sse_total = np.sum(sse_arr)
if np.abs(sse_total - sse_last) < tol:
n_iter = it
break
sse_last = sse_total
return(centers, membs, sse_total, sse_arr, n_iter)
|
https://github.com/vmirly/pyclust/blob/bdb12be4649e70c6c90da2605bc5f4b314e2d07e/pyclust/_kmedoids.py#L31-L49
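A self-contained sketch of the same assign-then-update loop, where the medoid of each cluster is the member with the smallest total squared distance to the others; plain NumPy, not the pyclust helpers used above.

import numpy as np

def kmedoids_sketch(X, n_clusters, max_iter=100, seed=0):
    rng = np.random.RandomState(seed)
    centers = X[rng.choice(len(X), n_clusters, replace=False)]
    for _ in range(max_iter):
        # assign every point to its nearest medoid (squared Euclidean distance)
        d = ((X[:, None, :] - centers[None, :, :]) ** 2).sum(-1)
        membs = d.argmin(1)
        new_centers = centers.copy()
        for k in range(n_clusters):
            members = X[membs == k]
            if len(members) == 0:
                continue
            # the new medoid minimises the within-cluster sum of squared distances
            intra = ((members[:, None, :] - members[None, :, :]) ** 2).sum(-1).sum(1)
            new_centers[k] = members[intra.argmin()]
        if np.allclose(new_centers, centers):
            break
        centers = new_centers
    return centers, membs

X = np.vstack([np.random.RandomState(1).normal(0, 0.3, (30, 2)),
               np.random.RandomState(2).normal(4, 0.3, (30, 2))])
print(kmedoids_sketch(X, 2)[0])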
|
k means clustering
|
python
|
def kmeans(phate_op, k=8, random_state=None):
"""KMeans on the PHATE potential
Clustering on the PHATE operator as introduced in Moon et al.
This is similar to spectral clustering.
Parameters
----------
phate_op : phate.PHATE
Fitted PHATE operator
k : int, optional (default: 8)
Number of clusters
random_state : int or None, optional (default: None)
Random seed for k-means
Returns
-------
clusters : np.ndarray
Integer array of cluster assignments
"""
if phate_op.graph is not None:
diff_potential = phate_op.calculate_potential()
if isinstance(phate_op.graph, graphtools.graphs.LandmarkGraph):
diff_potential = phate_op.graph.interpolate(diff_potential)
return cluster.KMeans(k, random_state=random_state).fit_predict(diff_potential)
else:
raise exceptions.NotFittedError(
"This PHATE instance is not fitted yet. Call "
"'fit' with appropriate arguments before "
"using this method.")
|
https://github.com/KrishnaswamyLab/PHATE/blob/346a4597dcfc523f8bef99bce482e677282b6719/Python/phate/cluster.py#L5-L35
|
k means clustering
|
python
|
def process(self):
"""!
@brief Run clustering process of the algorithm.
@details This method should be called before calling 'get_clusters()'.
"""
previous_likelihood = -200000
current_likelihood = -100000
current_iteration = 0
while(self.__stop is False) and (abs(previous_likelihood - current_likelihood) > self.__tolerance) and (current_iteration < self.__iterations):
self.__expectation_step()
self.__maximization_step()
current_iteration += 1
self.__extract_clusters()
self.__notify()
previous_likelihood = current_likelihood
current_likelihood = self.__log_likelihood()
self.__stop = self.__get_stop_condition()
self.__normalize_probabilities()
|
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/ema.py#L521-L545
|
k means clustering
|
python
|
def cluster_elongate():
"Not so applicable for this sample"
start_centers = [[1.0, 4.5], [3.1, 2.7]]
template_clustering(start_centers, SIMPLE_SAMPLES.SAMPLE_ELONGATE, criterion = splitting_type.BAYESIAN_INFORMATION_CRITERION)
template_clustering(start_centers, SIMPLE_SAMPLES.SAMPLE_ELONGATE, criterion = splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH)
|
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/examples/xmeans_examples.py#L109-L113
|
k means clustering
|
python
|
def clustering_fields(self, value):
"""Union[List[str], None]: Fields defining clustering for the table
(Defaults to :data:`None`).
"""
if value is not None:
prop = self._properties.setdefault("clustering", {})
prop["fields"] = value
else:
if "clustering" in self._properties:
del self._properties["clustering"]
|
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/table.py#L664-L674
|
k means clustering
|
python
|
def zip_cluster(data, k, init=None, max_iters=100):
"""
Performs hard EM clustering using the zero-inflated Poisson distribution.
Args:
data (array): A 2d array- genes x cells
k (int): Number of clusters
init (array, optional): Initial centers - genes x k array. Default: None, use kmeans++
max_iters (int, optional): Maximum number of iterations. Default: 100
Returns:
assignments (array): integer assignments of cells to clusters (length cells)
L (array): Poisson parameter (genes x k)
M (array): zero-inflation parameter (genes x k)
"""
genes, cells = data.shape
init, new_assignments = kmeans_pp(data+eps, k, centers=init)
centers = np.copy(init)
M = np.zeros(centers.shape)
assignments = new_assignments
for c in range(k):
centers[:,c], M[:,c] = zip_fit_params_mle(data[:, assignments==c])
for it in range(max_iters):
lls = zip_ll(data, centers, M)
new_assignments = np.argmax(lls, 1)
if np.equal(assignments, new_assignments).all():
return assignments, centers, M
for c in range(k):
centers[:,c], M[:,c] = zip_fit_params_mle(data[:, assignments==c])
assignments = new_assignments
return assignments, centers, M
|
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/zip_clustering.py#L46-L76
|
k means clustering
|
python
|
def cluster(data, noreverse, nthreads):
"""
Calls vsearch for clustering across samples.
"""
## input and output file handles
cathaplos = os.path.join(data.dirs.across, data.name+"_catshuf.tmp")
uhaplos = os.path.join(data.dirs.across, data.name+".utemp")
hhaplos = os.path.join(data.dirs.across, data.name+".htemp")
logfile = os.path.join(data.dirs.across, "s6_cluster_stats.txt")
## parameters that vary by datatype
## (too low of cov values yield too many poor alignments)
strand = "plus"
cov = 0.75 ##0.90
if data.paramsdict["datatype"] in ["gbs", "2brad"]:
strand = "both"
cov = 0.60
elif data.paramsdict["datatype"] == "pairgbs":
strand = "both"
cov = 0.75 ##0.90
## nthreads is calculated in 'call_cluster()'
cmd = [ipyrad.bins.vsearch,
"-cluster_smallmem", cathaplos,
"-strand", strand,
"-query_cov", str(cov),
"-minsl", str(0.5),
"-id", str(data.paramsdict["clust_threshold"]),
"-userout", uhaplos,
"-notmatched", hhaplos,
"-userfields", "query+target+qstrand",
"-maxaccepts", "1",
"-maxrejects", "0",
"-fasta_width", "0",
"-threads", str(nthreads), #"0",
"-fulldp",
"-usersort",
"-log", logfile]
## override reverse clustering option
if noreverse:
strand = "plus" # -leftjust "
try:
## this seems to start vsearch on a different pid than the engine
## and so it's hard to kill...
LOGGER.info(cmd)
(dog, owner) = pty.openpty()
proc = sps.Popen(cmd, stdout=owner, stderr=owner, close_fds=True)
prog = 0
newprog = 0
while 1:
isdat = select.select([dog], [], [], 0)
if isdat[0]:
dat = os.read(dog, 80192)
else:
dat = ""
if "Clustering" in dat:
try:
newprog = int(dat.split()[-1][:-1])
## may raise value error when it gets to the end
except ValueError:
pass
## break if done
## catches end chunk of printing if clustering went really fast
elif "Clusters:" in dat:
LOGGER.info("ended vsearch tracking loop")
break
else:
time.sleep(0.1)
## print progress
if newprog != prog:
print(newprog)
prog = newprog
## another catcher to let vsearch cleanup after clustering is done
proc.wait()
print(100)
except KeyboardInterrupt:
LOGGER.info("interrupted vsearch here: %s", proc.pid)
os.kill(proc.pid, 2)
raise KeyboardInterrupt()
except sps.CalledProcessError as inst:
raise IPyradWarningExit("""
Error in vsearch: \n{}\n{}""".format(inst, sps.STDOUT))
except OSError as inst:
raise IPyradWarningExit("""
Failed to allocate pty: \n{}""".format(inst))
finally:
data.stats_files.s6 = logfile
|
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_across.py#L541-L636
|
k means clustering
|
python
|
def fit_classifier(self, name, analytes, method, samples=None,
subset=None, filt=True, sort_by=0, **kwargs):
"""
Create a clustering classifier based on all samples, or a subset.
Parameters
----------
name : str
The name of the classifier.
analytes : str or iterable
Which analytes the clustering algorithm should consider.
method : str
Which clustering algorithm to use. Can be:
'meanshift'
The `sklearn.cluster.MeanShift` algorithm.
Automatically determines number of clusters
in data based on the `bandwidth` of expected
variation.
'kmeans'
The `sklearn.cluster.KMeans` algorithm. Determines
the characteristics of a known number of clusters
within the data. Must provide `n_clusters` to specify
the expected number of clusters.
samples : iterable
list of samples to consider. Overrides 'subset'.
subset : str
The subset of samples used to fit the classifier. Ignored if
'samples' is specified.
sort_by : int
Which analyte the resulting clusters should be sorted
by - defaults to 0, which is the first analyte.
**kwargs :
method-specific keyword parameters - see below.
Meanshift Parameters
bandwidth : str or float
The bandwidth (float) or bandwidth method ('scott' or 'silverman')
used to estimate the data bandwidth.
bin_seeding : bool
Modifies the behaviour of the meanshift algorithm. Refer to
sklearn.cluster.meanshift documentation.
K - Means Parameters
n_clusters : int
The number of clusters expected in the data.
Returns
-------
name : str
"""
# isolate data
if samples is not None:
subset = self.make_subset(samples)
self.get_focus(subset=subset, filt=filt)
# create classifer
c = classifier(analytes,
sort_by)
# fit classifier
c.fit(data=self.focus,
method=method,
**kwargs)
self.classifiers[name] = c
return name
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L2146-L2211
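The two scikit-learn estimators the docstring above refers to, side by side on toy data; a sketch independent of latools. MeanShift infers the number of clusters from a bandwidth, while KMeans needs n_clusters up front.

import numpy as np
from sklearn.cluster import KMeans, MeanShift

rng = np.random.RandomState(0)
X = np.vstack([rng.normal(0, 0.2, (50, 2)), rng.normal(3, 0.2, (50, 2))])
ms = MeanShift(bandwidth=1.0).fit(X)          # number of clusters discovered from the data
km = KMeans(n_clusters=2, n_init=10).fit(X)   # number of clusters given explicitly
print(len(ms.cluster_centers_), len(km.cluster_centers_))  # 2 2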
|
k means clustering
|
python
|
def hclust_ordering(X, metric="sqeuclidean"):
""" A leaf ordering is under-defined, this picks the ordering that keeps nearby samples similar.
"""
# compute a hierarchical clustering
D = sp.spatial.distance.pdist(X, metric)
cluster_matrix = sp.cluster.hierarchy.complete(D)
# merge clusters, rotating them to make the end points match as best we can
sets = [[i] for i in range(X.shape[0])]
for i in range(cluster_matrix.shape[0]):
s1 = sets[int(cluster_matrix[i,0])]
s2 = sets[int(cluster_matrix[i,1])]
# compute distances between the end points of the lists
d_s1_s2 = pdist(np.vstack([X[s1[-1],:], X[s2[0],:]]), metric)[0]
d_s2_s1 = pdist(np.vstack([X[s1[0],:], X[s2[-1],:]]), metric)[0]
d_s1r_s2 = pdist(np.vstack([X[s1[0],:], X[s2[0],:]]), metric)[0]
d_s1_s2r = pdist(np.vstack([X[s1[-1],:], X[s2[-1],:]]), metric)[0]
# concatenete the lists in the way the minimizes the difference between
# the samples at the junction
best = min(d_s1_s2, d_s2_s1, d_s1r_s2, d_s1_s2r)
if best == d_s1_s2:
sets.append(s1 + s2)
elif best == d_s2_s1:
sets.append(s2 + s1)
elif best == d_s1r_s2:
sets.append(list(reversed(s1)) + s2)
else:
sets.append(s1 + list(reversed(s2)))
return sets[-1]
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/common.py#L215-L247
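A hypothetical call of hclust_ordering on a few 1-D "samples"; the import path matches the commit linked above (later shap releases relocated the helper). The returned list is a permutation of row indices that keeps similar rows adjacent.

import numpy as np
from shap.common import hclust_ordering  # module path as of the commit linked above

X = np.array([[0.0], [10.0], [0.5], [9.5], [5.0]])
print(hclust_ordering(X))  # a permutation of 0..4 with rows 0/2 and 1/3 ending up next to each other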
|
k means clustering
|
python
|
def clustering_coefficient_unweighted(user):
"""
The clustering coefficient of the user in the unweighted, undirected ego
network.
It is defined by counting the number of closed triplets including
the current user:
.. math::
C = \\frac{2 \\cdot \\text{closed triplets}}{\\text{degree} \\, (\\text{degree} - 1)}
where ``degree`` is the degree of the current user in the network.
"""
matrix = matrix_undirected_unweighted(user)
closed_triplets = 0
for a, b in combinations(range(len(matrix)), 2):
a_b, a_c, b_c = matrix[a][b], matrix[a][0], matrix[b][0]
if a_b is None or a_c is None or b_c is None:
continue
if a_b > 0 and a_c > 0 and b_c > 0:
closed_triplets += 1.
d_ego = sum(matrix[0])
return 2 * closed_triplets / (d_ego * (d_ego - 1)) if d_ego > 1 else 0
|
https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/network.py#L174-L200
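A worked check of the formula above: an ego of degree 4 whose neighbours form 3 connected pairs gives C = 2*3 / (4*3) = 0.5. The variable names below mirror the function, the numbers are made up.

closed_triplets, d_ego = 3, 4
print(2 * closed_triplets / (d_ego * (d_ego - 1)) if d_ego > 1 else 0)  # 0.5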
|
k means clustering
|
python
|
def show_grid(cells, data):
"""!
@brief Show CLIQUE blocks as a grid in data space.
@details Each block contains points, and its density determines how it is displayed. The CLIQUE grid helps to visualize
the grid that was used for the clustering process.
@param[in] cells (list): List of cells that is produced by CLIQUE algorithm.
@param[in] data (array_like): Input data that was used for clustering process.
"""
dimension = cells[0].dimensions
amount_canvases = 1
if dimension > 1:
amount_canvases = int(dimension * (dimension - 1) / 2)
figure = plt.figure()
grid_spec = gridspec.GridSpec(1, amount_canvases)
pairs = list(itertools.combinations(range(dimension), 2))
if len(pairs) == 0: pairs = [(0, 0)]
for index in range(amount_canvases):
ax = figure.add_subplot(grid_spec[index])
clique_visualizer.__draw_cells(ax, cells, pairs[index])
clique_visualizer.__draw_two_dimension_data(ax, data, pairs[index])
plt.show()
|
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/clique.py#L61-L88
|
k means clustering
|
python
|
def _restricted_growth_notation(l):
""" The clustering returned by the hcluster module gives group
membership without regard for numerical order. This function preserves
the group membership, but sorts the labelling into numerical order """
list_length = len(l)
d = defaultdict(list)
for (i, element) in enumerate(l):
d[element].append(i)
l2 = [None] * list_length
for (name, index_list) in enumerate(sorted(d.values(), key=min)):
for index in index_list:
l2[index] = name
return tuple(l2)
|
https://github.com/kgori/treeCl/blob/fed624b3db1c19cc07175ca04e3eda6905a8d305/treeCl/partition.py#L113-L130
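A quick check of the relabelling above, with the helper re-implemented inline so the example is self-contained (standard library only): group membership is preserved while labels are renumbered in order of each group's smallest index.

from collections import defaultdict

def restricted_growth_notation(l):
    d = defaultdict(list)
    for i, element in enumerate(l):
        d[element].append(i)
    out = [None] * len(l)
    for name, index_list in enumerate(sorted(d.values(), key=min)):
        for index in index_list:
            out[index] = name
    return tuple(out)

print(restricted_growth_notation([5, 5, 2, 9, 2]))  # (0, 0, 1, 2, 1)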
|
k means clustering
|
python
|
def cluster_sample3():
"Start with wrong number of clusters."
start_centers = [[0.2, 0.1], [4.0, 1.0]]
template_clustering(start_centers, SIMPLE_SAMPLES.SAMPLE_SIMPLE3, criterion = splitting_type.BAYESIAN_INFORMATION_CRITERION)
template_clustering(start_centers, SIMPLE_SAMPLES.SAMPLE_SIMPLE3, criterion = splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH)
|
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/examples/xmeans_examples.py#L80-L84
|
k means clustering
|
python
|
def __improve_parameters(self, centers, available_indexes = None):
"""!
@brief Performs k-means clustering in the specified region.
@param[in] centers (list): Centers of clusters.
@param[in] available_indexes (list): Indexes that defines which points can be used for k-means clustering, if None - then all points are used.
@return (list) List of allocated clusters, each cluster contains indexes of objects in list of data.
"""
if available_indexes and len(available_indexes) == 1:
index_center = available_indexes[0]
return [ available_indexes ], self.__pointer_data[index_center]
local_data = self.__pointer_data
if available_indexes:
local_data = [ self.__pointer_data[i] for i in available_indexes ]
local_centers = centers
if centers is None:
local_centers = kmeans_plusplus_initializer(local_data, 2, kmeans_plusplus_initializer.FARTHEST_CENTER_CANDIDATE).initialize()
kmeans_instance = kmeans(local_data, local_centers, tolerance=self.__tolerance, ccore=False)
kmeans_instance.process()
local_centers = kmeans_instance.get_centers()
clusters = kmeans_instance.get_clusters()
if available_indexes:
clusters = self.__local_to_global_clusters(clusters, available_indexes)
return clusters, local_centers
|
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/xmeans.py#L229-L261
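A hedged stand-alone usage of the same pyclustering pattern used above (k-means++ seeding followed by kmeans refinement with ccore=False), outside the X-means wrapper; the toy data is made up.

from pyclustering.cluster.center_initializer import kmeans_plusplus_initializer
from pyclustering.cluster.kmeans import kmeans

data = [[0.0, 0.0], [0.1, 0.2], [3.0, 3.1], [2.9, 3.2]]
centers = kmeans_plusplus_initializer(data, 2).initialize()
instance = kmeans(data, centers, tolerance=0.001, ccore=False)
instance.process()
print(instance.get_clusters())  # e.g. [[0, 1], [2, 3]]
print(instance.get_centers())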
|
k means clustering
|
python
|
def _clone_and_score_clusterer(clf, X, n_clusters):
"""Clones and scores clusterer instance.
Args:
clf: Clusterer instance that implements ``fit``,``fit_predict``, and
``score`` methods, and an ``n_clusters`` hyperparameter.
e.g. :class:`sklearn.cluster.KMeans` instance
X (array-like, shape (n_samples, n_features)):
Data to cluster, where n_samples is the number of samples and
n_features is the number of features.
n_clusters (int): Number of clusters
Returns:
score: Score of clusters
time: Number of seconds it took to fit cluster
"""
start = time.time()
clf = clone(clf)
setattr(clf, 'n_clusters', n_clusters)
return clf.fit(X).score(X), time.time() - start
|
https://github.com/reiinakano/scikit-plot/blob/2dd3e6a76df77edcbd724c4db25575f70abb57cb/scikitplot/cluster.py#L110-L132
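A minimal sketch of the clone-and-score step above as it would be used for an elbow plot: the same unfitted KMeans template is cloned and refit for each candidate number of clusters (scikit-learn only; the data is synthetic).

import time
import numpy as np
from sklearn.base import clone
from sklearn.cluster import KMeans

X = np.random.RandomState(0).rand(200, 2)
template = KMeans(n_init=10, random_state=0)
for k in (2, 3, 4):
    clf = clone(template)
    setattr(clf, 'n_clusters', k)
    start = time.time()
    score = clf.fit(X).score(X)   # for KMeans this is the negative inertia
    print(k, round(score, 3), round(time.time() - start, 3))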
|
k means clustering
|
python
|
def get_ordering(self):
"""!
@brief Returns clustering ordering information about the input data set.
@details The clustering ordering of the data set contains information about the internal clustering structure in line with the connectivity radius.
@return (ordering_analyser) Analyser of clustering ordering.
@see process()
@see get_clusters()
@see get_noise()
@see get_radius()
@see get_optics_objects()
"""
if self.__ordering is None:
self.__ordering = []
for cluster in self.__clusters:
for index_object in cluster:
optics_object = self.__optics_objects[index_object]
if optics_object.reachability_distance is not None:
self.__ordering.append(optics_object.reachability_distance)
return self.__ordering
|
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/optics.py#L525-L549
|