Commit 1c5e4ca
Pushing the docs to dev/ for branch: master, commit 269afa3a77972e883aa1d64081b8f25d1819d5ac
1 parent 660f543 commit 1c5e4ca

1,208 files changed (+3715 / -3676 lines)


dev/_downloads/64866fb2a9398ff657578febcb91d430/plot_mnist_filters.ipynb

Lines changed: 2 additions & 2 deletions
@@ -15,7 +15,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-   "\n# Visualization of MLP weights on MNIST\n\n\nSometimes looking at the learned coefficients of a neural network can provide\ninsight into the learning behavior. For example, if weights look unstructured,\nmaybe some were not used at all, or if very large coefficients exist, maybe\nregularization was too low or the learning rate too high.\n\nThis example shows how to plot some of the first layer weights in an\nMLPClassifier trained on the MNIST dataset.\n\nThe input data consists of 28x28 pixel handwritten digits, leading to 784\nfeatures in the dataset. Therefore the first layer weight matrix has the shape\n(784, hidden_layer_sizes[0]). We can therefore visualize a single column of\nthe weight matrix as a 28x28 pixel image.\n\nTo make the example run faster, we use very few hidden units, and train only\nfor a very short time. Training longer would result in weights with a much\nsmoother spatial appearance.\n"
+   "\n# Visualization of MLP weights on MNIST\n\n\nSometimes looking at the learned coefficients of a neural network can provide\ninsight into the learning behavior. For example, if weights look unstructured,\nmaybe some were not used at all, or if very large coefficients exist, maybe\nregularization was too low or the learning rate too high.\n\nThis example shows how to plot some of the first layer weights in an\nMLPClassifier trained on the MNIST dataset.\n\nThe input data consists of 28x28 pixel handwritten digits, leading to 784\nfeatures in the dataset. Therefore the first layer weight matrix has the shape\n(784, hidden_layer_sizes[0]). We can therefore visualize a single column of\nthe weight matrix as a 28x28 pixel image.\n\nTo make the example run faster, we use very few hidden units, and train only\nfor a very short time. Training longer would result in weights with a much\nsmoother spatial appearance. The example will throw a warning because it\ndoesn't converge; in this case, that is what we want because of CI's time\nconstraints.\n"
    ]
   },
   {
@@ -26,7 +26,7 @@
   },
   "outputs": [],
   "source": [
-   "import matplotlib.pyplot as plt\nfrom sklearn.datasets import fetch_openml\nfrom sklearn.neural_network import MLPClassifier\n\nprint(__doc__)\n\n# Load data from https://www.openml.org/d/554\nX, y = fetch_openml('mnist_784', version=1, return_X_y=True)\nX = X / 255.\n\n# rescale the data, use the traditional train/test split\nX_train, X_test = X[:60000], X[60000:]\ny_train, y_test = y[:60000], y[60000:]\n\nmlp = MLPClassifier(hidden_layer_sizes=(50,), max_iter=10, alpha=1e-4,\n                    solver='sgd', verbose=10, random_state=1,\n                    learning_rate_init=.1)\n\nmlp.fit(X_train, y_train)\nprint(\"Training set score: %f\" % mlp.score(X_train, y_train))\nprint(\"Test set score: %f\" % mlp.score(X_test, y_test))\n\nfig, axes = plt.subplots(4, 4)\n# use global min / max to ensure all weights are shown on the same scale\nvmin, vmax = mlp.coefs_[0].min(), mlp.coefs_[0].max()\nfor coef, ax in zip(mlp.coefs_[0].T, axes.ravel()):\n    ax.matshow(coef.reshape(28, 28), cmap=plt.cm.gray, vmin=.5 * vmin,\n               vmax=.5 * vmax)\n    ax.set_xticks(())\n    ax.set_yticks(())\n\nplt.show()"
+   "import warnings\n\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import fetch_openml\nfrom sklearn.exceptions import ConvergenceWarning\nfrom sklearn.neural_network import MLPClassifier\n\nprint(__doc__)\n\n# Load data from https://www.openml.org/d/554\nX, y = fetch_openml('mnist_784', version=1, return_X_y=True)\nX = X / 255.\n\n# rescale the data, use the traditional train/test split\nX_train, X_test = X[:60000], X[60000:]\ny_train, y_test = y[:60000], y[60000:]\n\nmlp = MLPClassifier(hidden_layer_sizes=(50,), max_iter=10, alpha=1e-4,\n                    solver='sgd', verbose=10, random_state=1,\n                    learning_rate_init=.1)\n\n# this example won't converge because of CI's time constraints, so we catch the\n# warning and ignore it here\nwith warnings.catch_warnings():\n    warnings.filterwarnings(\"ignore\", category=ConvergenceWarning,\n                            module=\"sklearn\")\n    mlp.fit(X_train, y_train)\n\nprint(\"Training set score: %f\" % mlp.score(X_train, y_train))\nprint(\"Test set score: %f\" % mlp.score(X_test, y_test))\n\nfig, axes = plt.subplots(4, 4)\n# use global min / max to ensure all weights are shown on the same scale\nvmin, vmax = mlp.coefs_[0].min(), mlp.coefs_[0].max()\nfor coef, ax in zip(mlp.coefs_[0].T, axes.ravel()):\n    ax.matshow(coef.reshape(28, 28), cmap=plt.cm.gray, vmin=.5 * vmin,\n               vmax=.5 * vmax)\n    ax.set_xticks(())\n    ax.set_yticks(())\n\nplt.show()"
    ]
   }
  ],
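The change in both cells is the same warning-suppression pattern wrapped around mlp.fit. As a minimal, self-contained sketch of that pattern, substituting a small make_classification problem for MNIST so it runs in seconds (the tiny dataset and hyperparameters here are illustrative choices, not part of the commit):

import warnings

from sklearn.datasets import make_classification
from sklearn.exceptions import ConvergenceWarning
from sklearn.neural_network import MLPClassifier

# small synthetic stand-in for MNIST so the sketch runs quickly
X, y = make_classification(n_samples=200, n_features=20, random_state=0)

# max_iter=2 stops the solver long before convergence, reproducing the
# situation the commit handles for the real example
clf = MLPClassifier(hidden_layer_sizes=(10,), max_iter=2, random_state=0)

# the pattern from the commit: the filter is active only inside the
# with-block, so ConvergenceWarning still surfaces everywhere else
with warnings.catch_warnings():
    warnings.filterwarnings("ignore", category=ConvergenceWarning,
                            module="sklearn")
    clf.fit(X, y)

print("training score: %f" % clf.score(X, y))

Because warnings.catch_warnings() restores the previous filter state on exit, this is safer in documentation examples than a module-level warnings.simplefilter("ignore").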

dev/_downloads/9b3be64651591413a73d3848e0317ffd/plot_mnist_filters.py

Lines changed: 14 additions & 2 deletions
@@ -18,10 +18,16 @@
 
 To make the example run faster, we use very few hidden units, and train only
 for a very short time. Training longer would result in weights with a much
-smoother spatial appearance.
+smoother spatial appearance. The example will throw a warning because it
+doesn't converge; in this case, that is what we want because of CI's time
+constraints.
 """
+
+import warnings
+
 import matplotlib.pyplot as plt
 from sklearn.datasets import fetch_openml
+from sklearn.exceptions import ConvergenceWarning
 from sklearn.neural_network import MLPClassifier
 
 print(__doc__)
@@ -38,7 +44,13 @@
                     solver='sgd', verbose=10, random_state=1,
                     learning_rate_init=.1)
 
-mlp.fit(X_train, y_train)
+# this example won't converge because of CI's time constraints, so we catch the
+# warning and ignore it here
+with warnings.catch_warnings():
+    warnings.filterwarnings("ignore", category=ConvergenceWarning,
+                            module="sklearn")
+    mlp.fit(X_train, y_train)
+
 print("Training set score: %f" % mlp.score(X_train, y_train))
 print("Test set score: %f" % mlp.score(X_test, y_test))
 
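The docstring's shape claim is easy to check: with 784 input features and hidden_layer_sizes=(50,), mlp.coefs_[0] has shape (784, 50), and each column reshapes back to a 28x28 image. A minimal sketch of that reshaping step, using random stand-in weights rather than the trained mlp from the example so it runs without downloading MNIST:

import matplotlib.pyplot as plt
import numpy as np

# random stand-in for mlp.coefs_[0]; after fitting, the first-layer
# weight matrix has shape (n_features, n_hidden_units) = (784, 50)
rng = np.random.RandomState(1)
first_layer_weights = rng.randn(784, 50)

# each column holds one hidden unit's incoming weights, so it maps back
# onto the 28x28 pixel grid of the input digits
column = first_layer_weights[:, 0]
assert column.shape == (784,)

plt.matshow(column.reshape(28, 28), cmap=plt.cm.gray)
plt.xticks(())
plt.yticks(())
plt.show()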
dev/_downloads/scikit-learn-docs.pdf

-22.5 KB (binary file, diff not shown)

dev/_images/iris.png

(updated binary image, diff not shown; reported size deltas: -908 bytes, -711 bytes)
