Commit 4d65ce4

Pushing the docs to dev/ for branch: master, commit 823382995c046d98a4d892fff1f564c0f04ba340
1 parent e08c7c4 commit 4d65ce4

1,093 files changed, +4505 −3576 lines changed
Two binary files changed (6.94 KB and 5.53 KB); contents not shown.

dev/_downloads/plot_agglomerative_clustering.ipynb

Lines changed: 2 additions & 2 deletions
@@ -15,7 +15,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"\nAgglomerative clustering with and without structure\n===================================================\n\nThis example shows the effect of imposing a connectivity graph to capture\nlocal structure in the data. The graph is simply the graph of 20 nearest\nneighbors.\n\nTwo consequences of imposing a connectivity can be seen. First clustering\nwith a connectivity matrix is much faster.\n\nSecond, when using a connectivity matrix, average and complete linkage are\nunstable and tend to create a few clusters that grow very quickly. Indeed,\naverage and complete linkage fight this percolation behavior by considering all\nthe distances between two clusters when merging them. The connectivity\ngraph breaks this mechanism. This effect is more pronounced for very\nsparse graphs (try decreasing the number of neighbors in\nkneighbors_graph) and with complete linkage. In particular, having a very\nsmall number of neighbors in the graph, imposes a geometry that is\nclose to that of single linkage, which is well known to have this\npercolation instability.\n\n"
+"\nAgglomerative clustering with and without structure\n===================================================\n\nThis example shows the effect of imposing a connectivity graph to capture\nlocal structure in the data. The graph is simply the graph of 20 nearest\nneighbors.\n\nTwo consequences of imposing a connectivity can be seen. First clustering\nwith a connectivity matrix is much faster.\n\nSecond, when using a connectivity matrix, single, average and complete\nlinkage are unstable and tend to create a few clusters that grow very\nquickly. Indeed, average and complete linkage fight this percolation behavior\nby considering all the distances between two clusters when merging them (\nwhile single linkage exaggerates the behaviour by considering only the\nshortest distance between clusters). The connectivity graph breaks this\nmechanism for average and complete linkage, making them resemble the more\nbrittle single linkage. This effect is more pronounced for very sparse graphs\n(try decreasing the number of neighbors in kneighbors_graph) and with\ncomplete linkage. In particular, having a very small number of neighbors in\nthe graph, imposes a geometry that is close to that of single linkage,\nwhich is well known to have this percolation instability. \n"
 ]
 },
 {
@@ -26,7 +26,7 @@
 },
 "outputs": [],
 "source": [
-"# Authors: Gael Varoquaux, Nelle Varoquaux\n# License: BSD 3 clause\n\nimport time\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom sklearn.cluster import AgglomerativeClustering\nfrom sklearn.neighbors import kneighbors_graph\n\n# Generate sample data\nn_samples = 1500\nnp.random.seed(0)\nt = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))\nx = t * np.cos(t)\ny = t * np.sin(t)\n\n\nX = np.concatenate((x, y))\nX += .7 * np.random.randn(2, n_samples)\nX = X.T\n\n# Create a graph capturing local connectivity. Larger number of neighbors\n# will give more homogeneous clusters to the cost of computation\n# time. A very large number of neighbors gives more evenly distributed\n# cluster sizes, but may not impose the local manifold structure of\n# the data\nknn_graph = kneighbors_graph(X, 30, include_self=False)\n\nfor connectivity in (None, knn_graph):\n    for n_clusters in (30, 3):\n        plt.figure(figsize=(10, 4))\n        for index, linkage in enumerate(('average', 'complete', 'ward')):\n            plt.subplot(1, 3, index + 1)\n            model = AgglomerativeClustering(linkage=linkage,\n                                            connectivity=connectivity,\n                                            n_clusters=n_clusters)\n            t0 = time.time()\n            model.fit(X)\n            elapsed_time = time.time() - t0\n            plt.scatter(X[:, 0], X[:, 1], c=model.labels_,\n                        cmap=plt.cm.spectral)\n            plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),\n                      fontdict=dict(verticalalignment='top'))\n            plt.axis('equal')\n            plt.axis('off')\n\n        plt.subplots_adjust(bottom=0, top=.89, wspace=0,\n                            left=0, right=1)\n        plt.suptitle('n_cluster=%i, connectivity=%r' %\n                     (n_clusters, connectivity is not None), size=17)\n\n\nplt.show()"
+"# Authors: Gael Varoquaux, Nelle Varoquaux\n# License: BSD 3 clause\n\nimport time\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom sklearn.cluster import AgglomerativeClustering\nfrom sklearn.neighbors import kneighbors_graph\n\n# Generate sample data\nn_samples = 1500\nnp.random.seed(0)\nt = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))\nx = t * np.cos(t)\ny = t * np.sin(t)\n\n\nX = np.concatenate((x, y))\nX += .7 * np.random.randn(2, n_samples)\nX = X.T\n\n# Create a graph capturing local connectivity. Larger number of neighbors\n# will give more homogeneous clusters to the cost of computation\n# time. A very large number of neighbors gives more evenly distributed\n# cluster sizes, but may not impose the local manifold structure of\n# the data\nknn_graph = kneighbors_graph(X, 30, include_self=False)\n\nfor connectivity in (None, knn_graph):\n    for n_clusters in (30, 3):\n        plt.figure(figsize=(10, 4))\n        for index, linkage in enumerate(('average',\n                                         'complete',\n                                         'ward',\n                                         'single')):\n            plt.subplot(1, 4, index + 1)\n            model = AgglomerativeClustering(linkage=linkage,\n                                            connectivity=connectivity,\n                                            n_clusters=n_clusters)\n            t0 = time.time()\n            model.fit(X)\n            elapsed_time = time.time() - t0\n            plt.scatter(X[:, 0], X[:, 1], c=model.labels_,\n                        cmap=plt.cm.spectral)\n            plt.title('linkage=%s\\n(time %.2fs)' % (linkage, elapsed_time),\n                      fontdict=dict(verticalalignment='top'))\n            plt.axis('equal')\n            plt.axis('off')\n\n        plt.subplots_adjust(bottom=0, top=.89, wspace=0,\n                            left=0, right=1)\n        plt.suptitle('n_cluster=%i, connectivity=%r' %\n                     (n_clusters, connectivity is not None), size=17)\n\n\nplt.show()"
 ]
 }
 ],

dev/_downloads/plot_agglomerative_clustering.py

Lines changed: 18 additions & 14 deletions
@@ -9,17 +9,18 @@
 Two consequences of imposing a connectivity can be seen. First clustering
 with a connectivity matrix is much faster.
 
-Second, when using a connectivity matrix, average and complete linkage are
-unstable and tend to create a few clusters that grow very quickly. Indeed,
-average and complete linkage fight this percolation behavior by considering all
-the distances between two clusters when merging them. The connectivity
-graph breaks this mechanism. This effect is more pronounced for very
-sparse graphs (try decreasing the number of neighbors in
-kneighbors_graph) and with complete linkage. In particular, having a very
-small number of neighbors in the graph, imposes a geometry that is
-close to that of single linkage, which is well known to have this
-percolation instability.
-"""
+Second, when using a connectivity matrix, single, average and complete
+linkage are unstable and tend to create a few clusters that grow very
+quickly. Indeed, average and complete linkage fight this percolation behavior
+by considering all the distances between two clusters when merging them (
+while single linkage exaggerates the behaviour by considering only the
+shortest distance between clusters). The connectivity graph breaks this
+mechanism for average and complete linkage, making them resemble the more
+brittle single linkage. This effect is more pronounced for very sparse graphs
+(try decreasing the number of neighbors in kneighbors_graph) and with
+complete linkage. In particular, having a very small number of neighbors in
+the graph, imposes a geometry that is close to that of single linkage,
+which is well known to have this percolation instability. """
 
 # Authors: Gael Varoquaux, Nelle Varoquaux
 # License: BSD 3 clause

@@ -52,8 +53,11 @@
 for connectivity in (None, knn_graph):
     for n_clusters in (30, 3):
         plt.figure(figsize=(10, 4))
-        for index, linkage in enumerate(('average', 'complete', 'ward')):
-            plt.subplot(1, 3, index + 1)
+        for index, linkage in enumerate(('average',
+                                         'complete',
+                                         'ward',
+                                         'single')):
+            plt.subplot(1, 4, index + 1)
             model = AgglomerativeClustering(linkage=linkage,
                                             connectivity=connectivity,
                                             n_clusters=n_clusters)

@@ -62,7 +66,7 @@
             elapsed_time = time.time() - t0
             plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
                         cmap=plt.cm.spectral)
-            plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
+            plt.title('linkage=%s\n(time %.2fs)' % (linkage, elapsed_time),
                       fontdict=dict(verticalalignment='top'))
             plt.axis('equal')
             plt.axis('off')
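
The docstring's suggestion, "try decreasing the number of neighbors in kneighbors_graph", can be checked numerically. Below is a minimal sketch of ours (not part of this commit): it reuses the example's data generation and compares the largest cluster sizes under average linkage for a dense and a sparse connectivity graph; the sparser graph should let a few clusters percolate and swallow most points.

import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph

# Same two-dimensional spiral data as in the example above.
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
X = np.concatenate((t * np.cos(t), t * np.sin(t)))
X += .7 * np.random.randn(2, n_samples)
X = X.T

for n_neighbors in (30, 5):  # 5 gives a much sparser connectivity graph
    # A very sparse graph may be disconnected; scikit-learn completes it
    # (with a warning) before clustering.
    conn = kneighbors_graph(X, n_neighbors, include_self=False)
    labels = AgglomerativeClustering(linkage='average',
                                     connectivity=conn,
                                     n_clusters=30).fit(X).labels_
    sizes = np.sort(np.bincount(labels))[::-1]
    print('n_neighbors=%d, five largest clusters: %s'
          % (n_neighbors, sizes[:5]))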

dev/_downloads/plot_digits_linkage.ipynb

Lines changed: 2 additions & 2 deletions
@@ -15,7 +15,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"\n# Various Agglomerative Clustering on a 2D embedding of digits\n\n\nAn illustration of various linkage option for agglomerative clustering on\na 2D embedding of the digits dataset.\n\nThe goal of this example is to show intuitively how the metrics behave, and\nnot to find good clusters for the digits. This is why the example works on a\n2D embedding.\n\nWhat this example shows us is the behavior \"rich getting richer\" of\nagglomerative clustering that tends to create uneven cluster sizes.\nThis behavior is especially pronounced for the average linkage strategy,\nthat ends up with a couple of singleton clusters.\n\n"
+"\n# Various Agglomerative Clustering on a 2D embedding of digits\n\n\nAn illustration of various linkage option for agglomerative clustering on\na 2D embedding of the digits dataset.\n\nThe goal of this example is to show intuitively how the metrics behave, and\nnot to find good clusters for the digits. This is why the example works on a\n2D embedding.\n\nWhat this example shows us is the behavior \"rich getting richer\" of\nagglomerative clustering that tends to create uneven cluster sizes.\nThis behavior is pronounced for the average linkage strategy,\nthat ends up with a couple of singleton clusters, while in the case\nof single linkage we get a single central cluster with all other clusters\nbeing drawn from noise points around the fringes.\n\n"
 ]
 },
 {
@@ -26,7 +26,7 @@
 },
 "outputs": [],
 "source": [
-"# Authors: Gael Varoquaux\n# License: BSD 3 clause (C) INRIA 2014\n\nprint(__doc__)\nfrom time import time\n\nimport numpy as np\nfrom scipy import ndimage\nfrom matplotlib import pyplot as plt\n\nfrom sklearn import manifold, datasets\n\ndigits = datasets.load_digits(n_class=10)\nX = digits.data\ny = digits.target\nn_samples, n_features = X.shape\n\nnp.random.seed(0)\n\ndef nudge_images(X, y):\n    # Having a larger dataset shows more clearly the behavior of the\n    # methods, but we multiply the size of the dataset only by 2, as the\n    # cost of the hierarchical clustering methods are strongly\n    # super-linear in n_samples\n    shift = lambda x: ndimage.shift(x.reshape((8, 8)),\n                                    .3 * np.random.normal(size=2),\n                                    mode='constant',\n                                    ).ravel()\n    X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])\n    Y = np.concatenate([y, y], axis=0)\n    return X, Y\n\n\nX, y = nudge_images(X, y)\n\n\n#----------------------------------------------------------------------\n# Visualize the clustering\ndef plot_clustering(X_red, X, labels, title=None):\n    x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)\n    X_red = (X_red - x_min) / (x_max - x_min)\n\n    plt.figure(figsize=(6, 4))\n    for i in range(X_red.shape[0]):\n        plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),\n                 color=plt.cm.spectral(labels[i] / 10.),\n                 fontdict={'weight': 'bold', 'size': 9})\n\n    plt.xticks([])\n    plt.yticks([])\n    if title is not None:\n        plt.title(title, size=17)\n    plt.axis('off')\n    plt.tight_layout()\n\n#----------------------------------------------------------------------\n# 2D embedding of the digits dataset\nprint(\"Computing embedding\")\nX_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)\nprint(\"Done.\")\n\nfrom sklearn.cluster import AgglomerativeClustering\n\nfor linkage in ('ward', 'average', 'complete'):\n    clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)\n    t0 = time()\n    clustering.fit(X_red)\n    print(\"%s : %.2fs\" % (linkage, time() - t0))\n\n    plot_clustering(X_red, X, clustering.labels_, \"%s linkage\" % linkage)\n\n\nplt.show()"
+"# Authors: Gael Varoquaux\n# License: BSD 3 clause (C) INRIA 2014\n\nprint(__doc__)\nfrom time import time\n\nimport numpy as np\nfrom scipy import ndimage\nfrom matplotlib import pyplot as plt\n\nfrom sklearn import manifold, datasets\n\ndigits = datasets.load_digits(n_class=10)\nX = digits.data\ny = digits.target\nn_samples, n_features = X.shape\n\nnp.random.seed(0)\n\ndef nudge_images(X, y):\n    # Having a larger dataset shows more clearly the behavior of the\n    # methods, but we multiply the size of the dataset only by 2, as the\n    # cost of the hierarchical clustering methods are strongly\n    # super-linear in n_samples\n    shift = lambda x: ndimage.shift(x.reshape((8, 8)),\n                                    .3 * np.random.normal(size=2),\n                                    mode='constant',\n                                    ).ravel()\n    X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])\n    Y = np.concatenate([y, y], axis=0)\n    return X, Y\n\n\nX, y = nudge_images(X, y)\n\n\n#----------------------------------------------------------------------\n# Visualize the clustering\ndef plot_clustering(X_red, X, labels, title=None):\n    x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)\n    X_red = (X_red - x_min) / (x_max - x_min)\n\n    plt.figure(figsize=(6, 4))\n    for i in range(X_red.shape[0]):\n        plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),\n                 color=plt.cm.spectral(labels[i] / 10.),\n                 fontdict={'weight': 'bold', 'size': 9})\n\n    plt.xticks([])\n    plt.yticks([])\n    if title is not None:\n        plt.title(title, size=17)\n    plt.axis('off')\n    plt.tight_layout(rect=[0, 0.03, 1, 0.95])\n\n#----------------------------------------------------------------------\n# 2D embedding of the digits dataset\nprint(\"Computing embedding\")\nX_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)\nprint(\"Done.\")\n\nfrom sklearn.cluster import AgglomerativeClustering\n\nfor linkage in ('ward', 'average', 'complete', 'single'):\n    clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)\n    t0 = time()\n    clustering.fit(X_red)\n    print(\"%s :\\t%.2fs\" % (linkage, time() - t0))\n\n    plot_clustering(X_red, X, clustering.labels_, \"%s linkage\" % linkage)\n\n\nplt.show()"
 ]
 }
 ],
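
The switch from plt.tight_layout() to plt.tight_layout(rect=[0, 0.03, 1, 0.95]) in both digits files is not explained in the commit; a plausible reading is that rect=[left, bottom, right, top] (in figure fractions) reserves headroom so the size-17 title no longer collides with the axes. A standalone sketch of the matplotlib behaviour:

import matplotlib.pyplot as plt

plt.figure(figsize=(6, 4))
plt.plot([0, 1], [1, 0])
plt.title('single linkage', size=17)
# Leave 3% free at the bottom and 5% at the top for the large title.
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.show()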

dev/_downloads/plot_digits_linkage.py

Lines changed: 7 additions & 5 deletions
@@ -12,8 +12,10 @@
 
 What this example shows us is the behavior "rich getting richer" of
 agglomerative clustering that tends to create uneven cluster sizes.
-This behavior is especially pronounced for the average linkage strategy,
-that ends up with a couple of singleton clusters.
+This behavior is pronounced for the average linkage strategy,
+that ends up with a couple of singleton clusters, while in the case
+of single linkage we get a single central cluster with all other clusters
+being drawn from noise points around the fringes.
 """
 
 # Authors: Gael Varoquaux

@@ -69,7 +71,7 @@ def plot_clustering(X_red, X, labels, title=None):
     if title is not None:
         plt.title(title, size=17)
     plt.axis('off')
-    plt.tight_layout()
+    plt.tight_layout(rect=[0, 0.03, 1, 0.95])
 
 #----------------------------------------------------------------------
 # 2D embedding of the digits dataset

@@ -79,11 +81,11 @@ def plot_clustering(X_red, X, labels, title=None):
 
 from sklearn.cluster import AgglomerativeClustering
 
-for linkage in ('ward', 'average', 'complete'):
+for linkage in ('ward', 'average', 'complete', 'single'):
     clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
     t0 = time()
     clustering.fit(X_red)
-    print("%s : %.2fs" % (linkage, time() - t0))
+    print("%s :\t%.2fs" % (linkage, time() - t0))
 
     plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
 
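The "rich getting richer" behaviour described in the digits docstring can also be read off the cluster sizes directly. A minimal sketch of ours (not in the commit; it assumes scikit-learn >= 0.20, where linkage='single' is available): ward should stay roughly balanced, while single linkage collapses almost everything into one central cluster.

import numpy as np
from sklearn import datasets, manifold
from sklearn.cluster import AgglomerativeClustering

# Same 2D spectral embedding of the digits as in the example above.
X, y = datasets.load_digits(n_class=10, return_X_y=True)
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)

for linkage in ('ward', 'average', 'complete', 'single'):
    labels = AgglomerativeClustering(linkage=linkage,
                                     n_clusters=10).fit(X_red).labels_
    # Sorted cluster sizes: one huge entry signals the percolation effect.
    print('%-8s %s' % (linkage, np.sort(np.bincount(labels))[::-1]))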