
Commit f6bd689

Pushing the docs to dev/ for branch: master, commit 21eb82dea262f4354e50903c150316887c994101
1 parent 2cf7445 commit f6bd689


1,053 files changed (+3185, -3185 lines)
Two binary files changed (-6 Bytes each); file names not captured, binary content not shown.

dev/_downloads/plot_digits_linkage.ipynb

Lines changed: 1 addition & 1 deletion
@@ -26,7 +26,7 @@
 },
 "outputs": [],
 "source": [
- line 29 (removed):
"# Authors: Gael Varoquaux\n# License: BSD 3 clause (C) INRIA 2014\n\nprint(__doc__)\nfrom time import time\n\nimport numpy as np\nfrom scipy import ndimage\nfrom matplotlib import pyplot as plt\n\nfrom sklearn import manifold, datasets\n\ndigits = datasets.load_digits(n_class=10)\nX = digits.data\ny = digits.target\nn_samples, n_features = X.shape\n\nnp.random.seed(0)\n\ndef nudge_images(X, y):\n # Having a larger dataset shows more clearly the behavior of the\n # methods, but we multiply the size of the dataset only by 2, as the\n # cost of the hierarchical clustering methods are strongly\n # super-linear in n_samples\n shift = lambda x: ndimage.shift(x.reshape((8, 8)),\n .3 * np.random.normal(size=2),\n mode='constant',\n ).ravel()\n X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])\n Y = np.concatenate([y, y], axis=0)\n return X, Y\n\n\nX, y = nudge_images(X, y)\n\n\n#----------------------------------------------------------------------\n# Visualize the clustering\ndef plot_clustering(X_red, X, labels, title=None):\n x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)\n X_red = (X_red - x_min) / (x_max - x_min)\n\n plt.figure(figsize=(6, 4))\n for i in range(X_red.shape[0]):\n plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),\n color=plt.cm.nipy_spectral(labels[i] / 10.),\n fontdict={'weight': 'bold', 'size': 9})\n\n plt.xticks([])\n plt.yticks([])\n if title is not None:\n plt.title(title, size=17)\n plt.axis('off')\n plt.tight_layout(rect=[0, 0.03, 1, 0.95])\n\n#----------------------------------------------------------------------\n# 2D embedding of the digits dataset\nprint(\"Computing embedding\")\nX_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)\nprint(\"Done.\")\n\nfrom sklearn.cluster import AgglomerativeClustering\n\nfor linkage in ('ward', 'average', 'complete', 'single'):\n clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)\n t0 = time()\n clustering.fit(X_red)\n print(\"%s :\\t%.2fs\" % (linkage, time() - t0))\n\n plot_clustering(X_red, X, clustering.labels_, \"%s linkage\" % linkage)\n\n\nplt.show()"
+ line 29 (added):
"# Authors: Gael Varoquaux\n# License: BSD 3 clause (C) INRIA 2014\n\nprint(__doc__)\nfrom time import time\n\nimport numpy as np\nfrom scipy import ndimage\nfrom matplotlib import pyplot as plt\n\nfrom sklearn import manifold, datasets\n\ndigits = datasets.load_digits(n_class=10)\nX = digits.data\ny = digits.target\nn_samples, n_features = X.shape\n\nnp.random.seed(0)\n\ndef nudge_images(X, y):\n # Having a larger dataset shows more clearly the behavior of the\n # methods, but we multiply the size of the dataset only by 2, as the\n # cost of the hierarchical clustering methods are strongly\n # super-linear in n_samples\n shift = lambda x: ndimage.shift(x.reshape((8, 8)),\n .3 * np.random.normal(size=2),\n mode='constant',\n ).ravel()\n X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])\n Y = np.concatenate([y, y], axis=0)\n return X, Y\n\n\nX, y = nudge_images(X, y)\n\n\n#----------------------------------------------------------------------\n# Visualize the clustering\ndef plot_clustering(X_red, labels, title=None):\n x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)\n X_red = (X_red - x_min) / (x_max - x_min)\n\n plt.figure(figsize=(6, 4))\n for i in range(X_red.shape[0]):\n plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),\n color=plt.cm.nipy_spectral(labels[i] / 10.),\n fontdict={'weight': 'bold', 'size': 9})\n\n plt.xticks([])\n plt.yticks([])\n if title is not None:\n plt.title(title, size=17)\n plt.axis('off')\n plt.tight_layout(rect=[0, 0.03, 1, 0.95])\n\n#----------------------------------------------------------------------\n# 2D embedding of the digits dataset\nprint(\"Computing embedding\")\nX_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)\nprint(\"Done.\")\n\nfrom sklearn.cluster import AgglomerativeClustering\n\nfor linkage in ('ward', 'average', 'complete', 'single'):\n clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)\n t0 = time()\n clustering.fit(X_red)\n print(\"%s :\\t%.2fs\" % (linkage, time() - t0))\n\n plot_clustering(X_red, clustering.labels_, \"%s linkage\" % linkage)\n\n\nplt.show()"
 ]
 }
 ],

dev/_downloads/plot_digits_linkage.py

Lines changed: 2 additions & 2 deletions
@@ -56,7 +56,7 @@ def nudge_images(X, y):
 
 #----------------------------------------------------------------------
 # Visualize the clustering
-def plot_clustering(X_red, X, labels, title=None):
+def plot_clustering(X_red, labels, title=None):
     x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
     X_red = (X_red - x_min) / (x_max - x_min)
 
@@ -87,7 +87,7 @@ def plot_clustering(X_red, X, labels, title=None):
     clustering.fit(X_red)
     print("%s :\t%.2fs" % (linkage, time() - t0))
 
-    plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
+    plot_clustering(X_red, clustering.labels_, "%s linkage" % linkage)
 
 
 plt.show()
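
For context, the change above drops the unused raw feature matrix X from plot_clustering, so the helper now depends only on what it actually draws: the 2D embedding, the cluster labels, and (via the enclosing scope, as in the original script) the digit targets y. A minimal, self-contained sketch of the updated call pattern follows; it is not part of the commit, it mirrors the example script, and the single 'ward' linkage here is only illustrative (the script loops over several linkages).

# Minimal sketch (not part of the commit): exercises the updated
# plot_clustering(X_red, labels, title=None) signature, which no longer
# receives the raw feature matrix.
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets, manifold
from sklearn.cluster import AgglomerativeClustering

digits = datasets.load_digits(n_class=10)
X, y = digits.data, digits.target

# 2D spectral embedding of the digits, as in the example script
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)

def plot_clustering(X_red, labels, title=None):
    # Rescale the embedding to [0, 1] and draw each digit target (y is
    # read from the enclosing scope, as in the original example),
    # coloured by its cluster assignment.
    x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
    X_red = (X_red - x_min) / (x_max - x_min)
    plt.figure(figsize=(6, 4))
    for i in range(X_red.shape[0]):
        plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
                 color=plt.cm.nipy_spectral(labels[i] / 10.),
                 fontdict={'weight': 'bold', 'size': 9})
    plt.axis('off')
    if title is not None:
        plt.title(title, size=17)

clustering = AgglomerativeClustering(linkage="ward", n_clusters=10)
clustering.fit(X_red)

# New call pattern: no raw feature matrix argument
plot_clustering(X_red, clustering.labels_, "ward linkage")
plt.show()

The notebook version of the example (plot_digits_linkage.ipynb above) receives the same signature change.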

dev/_downloads/scikit-learn-docs.pdf

4.8 KB
Binary file not shown.
