
Commit 2646c1c

Pushing the docs to 0.18/ for branch: 0.18.X, commit 4c65d8e615c9331d37cbb6225c5b67c445a5c959
1 parent 11c837d commit 2646c1c


1,125 files changed: +8,967 / -8,803 lines


0.18/.buildinfo

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
 # Sphinx build info version 1
 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: daca7b4e886bb848243b580a93354fb1
+config: 0ac8918fccdb8852d193e9b60f7a6338
 tags: 645f666f9bcd5a90fca523b33c5a78b7

0.18/_downloads/plot_compare_cross_decomposition.ipynb

Lines changed: 1 addition & 1 deletion
@@ -42,7 +42,7 @@
   "execution_count": null,
   "cell_type": "code",
   "source": [
-   "n = 500\n# 2 latents vars:\nl1 = np.random.normal(size=n)\nl2 = np.random.normal(size=n)\n\nlatents = np.array([l1, l1, l2, l2]).T\nX = latents + np.random.normal(size=4 * n).reshape((n, 4))\nY = latents + np.random.normal(size=4 * n).reshape((n, 4))\n\nX_train = X[:n / 2]\nY_train = Y[:n / 2]\nX_test = X[n / 2:]\nY_test = Y[n / 2:]\n\nprint(\"Corr(X)\")\nprint(np.round(np.corrcoef(X.T), 2))\nprint(\"Corr(Y)\")\nprint(np.round(np.corrcoef(Y.T), 2))"
+   "n = 500\n# 2 latents vars:\nl1 = np.random.normal(size=n)\nl2 = np.random.normal(size=n)\n\nlatents = np.array([l1, l1, l2, l2]).T\nX = latents + np.random.normal(size=4 * n).reshape((n, 4))\nY = latents + np.random.normal(size=4 * n).reshape((n, 4))\n\nX_train = X[:n // 2]\nY_train = Y[:n // 2]\nX_test = X[n // 2:]\nY_test = Y[n // 2:]\n\nprint(\"Corr(X)\")\nprint(np.round(np.corrcoef(X.T), 2))\nprint(\"Corr(Y)\")\nprint(np.round(np.corrcoef(Y.T), 2))"
   ],
   "outputs": [],
   "metadata": {

0.18/_downloads/plot_compare_cross_decomposition.py

Lines changed: 4 additions & 4 deletions
@@ -36,10 +36,10 @@
 X = latents + np.random.normal(size=4 * n).reshape((n, 4))
 Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
 
-X_train = X[:n / 2]
-Y_train = Y[:n / 2]
-X_test = X[n / 2:]
-Y_test = Y[n / 2:]
+X_train = X[:n // 2]
+Y_train = Y[:n // 2]
+X_test = X[n // 2:]
+Y_test = Y[n // 2:]
 
 print("Corr(X)")
 print(np.round(np.corrcoef(X.T), 2))
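Note on this hunk (and the matching edits in plot_digits_classification below): these are Python 3 compatibility fixes. In Python 3, / is true division, so n / 2 evaluates to a float, and floats are not valid slice indices. Floor division // keeps the bound an int. A minimal sketch of the failure and the fix, using an illustrative array:

import numpy as np

X = np.arange(10)
n = len(X)

# In Python 3, n / 2 evaluates to 5.0; using it as a slice index
# raises a TypeError ("slice indices must be integers ...").
# Floor division keeps the bound an int, so the split works:
X_train = X[:n // 2]   # first half
X_test = X[n // 2:]    # second half
print(len(X_train), len(X_test))  # 5 5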

0.18/_downloads/plot_dbscan.ipynb

Lines changed: 1 addition & 1 deletion
@@ -78,7 +78,7 @@
   "execution_count": null,
   "cell_type": "code",
   "source": [
-   "import matplotlib.pyplot as plt\n\n# Black removed and is used for noise instead.\nunique_labels = set(labels)\ncolors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))\nfor k, col in zip(unique_labels, colors):\n    if k == -1:\n        # Black used for noise.\n        col = 'k'\n\n    class_member_mask = (labels == k)\n\n    xy = X[class_member_mask & core_samples_mask]\n    plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,\n             markeredgecolor='k', markersize=14)\n\n    xy = X[class_member_mask & ~core_samples_mask]\n    plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,\n             markeredgecolor='k', markersize=6)\n\nplt.title('Estimated number of clusters: %d' % n_clusters_)\nplt.show()"
+   "import matplotlib.pyplot as plt\n\n# Black removed and is used for noise instead.\nunique_labels = set(labels)\ncolors = [plt.cm.Spectral(each)\n          for each in np.linspace(0, 1, len(unique_labels))]\nfor k, col in zip(unique_labels, colors):\n    if k == -1:\n        # Black used for noise.\n        col = [0, 0, 0, 1]\n\n    class_member_mask = (labels == k)\n\n    xy = X[class_member_mask & core_samples_mask]\n    plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),\n             markeredgecolor='k', markersize=14)\n\n    xy = X[class_member_mask & ~core_samples_mask]\n    plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),\n             markeredgecolor='k', markersize=6)\n\nplt.title('Estimated number of clusters: %d' % n_clusters_)\nplt.show()"
   ],
   "outputs": [],
   "metadata": {

0.18/_downloads/plot_dbscan.py

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -52,20 +52,21 @@
5252

5353
# Black removed and is used for noise instead.
5454
unique_labels = set(labels)
55-
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
55+
colors = [plt.cm.Spectral(each)
56+
for each in np.linspace(0, 1, len(unique_labels))]
5657
for k, col in zip(unique_labels, colors):
5758
if k == -1:
5859
# Black used for noise.
59-
col = 'k'
60+
col = [0, 0, 0, 1]
6061

6162
class_member_mask = (labels == k)
6263

6364
xy = X[class_member_mask & core_samples_mask]
64-
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
65+
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
6566
markeredgecolor='k', markersize=14)
6667

6768
xy = X[class_member_mask & ~core_samples_mask]
68-
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
69+
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
6970
markeredgecolor='k', markersize=6)
7071

7172
plt.title('Estimated number of clusters: %d' % n_clusters_)
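This plotting change appears to be a matplotlib compatibility fix: calling the colormap on each scalar yields one plain RGBA 4-tuple per cluster label, which plt.plot accepts unambiguously as markerfacecolor, instead of handing it a row of an Nx4 color array; noise points get opaque black as [0, 0, 0, 1]. A minimal sketch of the new pattern, with a made-up label set:

import numpy as np
import matplotlib.pyplot as plt

unique_labels = {0, 1, 2, -1}  # illustrative DBSCAN labels; -1 marks noise

# Evaluating the colormap at a scalar in [0, 1] returns a single
# (r, g, b, a) tuple of floats rather than an array of colors.
colors = [plt.cm.Spectral(each)
          for each in np.linspace(0, 1, len(unique_labels))]
print(colors[0])  # one RGBA 4-tuple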

0.18/_downloads/plot_digits_classification.ipynb

Lines changed: 1 addition & 1 deletion
@@ -24,7 +24,7 @@
   "execution_count": null,
   "cell_type": "code",
   "source": [
-   "print(__doc__)\n\n# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>\n# License: BSD 3 clause\n\n# Standard scientific Python imports\nimport matplotlib.pyplot as plt\n\n# Import datasets, classifiers and performance metrics\nfrom sklearn import datasets, svm, metrics\n\n# The digits dataset\ndigits = datasets.load_digits()\n\n# The data that we are interested in is made of 8x8 images of digits, let's\n# have a look at the first 4 images, stored in the `images` attribute of the\n# dataset. If we were working from image files, we could load them using\n# matplotlib.pyplot.imread. Note that each image must have the same size. For these\n# images, we know which digit they represent: it is given in the 'target' of\n# the dataset.\nimages_and_labels = list(zip(digits.images, digits.target))\nfor index, (image, label) in enumerate(images_and_labels[:4]):\n    plt.subplot(2, 4, index + 1)\n    plt.axis('off')\n    plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')\n    plt.title('Training: %i' % label)\n\n# To apply a classifier on this data, we need to flatten the image, to\n# turn the data in a (samples, feature) matrix:\nn_samples = len(digits.images)\ndata = digits.images.reshape((n_samples, -1))\n\n# Create a classifier: a support vector classifier\nclassifier = svm.SVC(gamma=0.001)\n\n# We learn the digits on the first half of the digits\nclassifier.fit(data[:n_samples / 2], digits.target[:n_samples / 2])\n\n# Now predict the value of the digit on the second half:\nexpected = digits.target[n_samples / 2:]\npredicted = classifier.predict(data[n_samples / 2:])\n\nprint(\"Classification report for classifier %s:\\n%s\\n\"\n      % (classifier, metrics.classification_report(expected, predicted)))\nprint(\"Confusion matrix:\\n%s\" % metrics.confusion_matrix(expected, predicted))\n\nimages_and_predictions = list(zip(digits.images[n_samples / 2:], predicted))\nfor index, (image, prediction) in enumerate(images_and_predictions[:4]):\n    plt.subplot(2, 4, index + 5)\n    plt.axis('off')\n    plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')\n    plt.title('Prediction: %i' % prediction)\n\nplt.show()"
+   "print(__doc__)\n\n# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>\n# License: BSD 3 clause\n\n# Standard scientific Python imports\nimport matplotlib.pyplot as plt\n\n# Import datasets, classifiers and performance metrics\nfrom sklearn import datasets, svm, metrics\n\n# The digits dataset\ndigits = datasets.load_digits()\n\n# The data that we are interested in is made of 8x8 images of digits, let's\n# have a look at the first 4 images, stored in the `images` attribute of the\n# dataset. If we were working from image files, we could load them using\n# matplotlib.pyplot.imread. Note that each image must have the same size. For these\n# images, we know which digit they represent: it is given in the 'target' of\n# the dataset.\nimages_and_labels = list(zip(digits.images, digits.target))\nfor index, (image, label) in enumerate(images_and_labels[:4]):\n    plt.subplot(2, 4, index + 1)\n    plt.axis('off')\n    plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')\n    plt.title('Training: %i' % label)\n\n# To apply a classifier on this data, we need to flatten the image, to\n# turn the data in a (samples, feature) matrix:\nn_samples = len(digits.images)\ndata = digits.images.reshape((n_samples, -1))\n\n# Create a classifier: a support vector classifier\nclassifier = svm.SVC(gamma=0.001)\n\n# We learn the digits on the first half of the digits\nclassifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])\n\n# Now predict the value of the digit on the second half:\nexpected = digits.target[n_samples // 2:]\npredicted = classifier.predict(data[n_samples // 2:])\n\nprint(\"Classification report for classifier %s:\\n%s\\n\"\n      % (classifier, metrics.classification_report(expected, predicted)))\nprint(\"Confusion matrix:\\n%s\" % metrics.confusion_matrix(expected, predicted))\n\nimages_and_predictions = list(zip(digits.images[n_samples // 2:], predicted))\nfor index, (image, prediction) in enumerate(images_and_predictions[:4]):\n    plt.subplot(2, 4, index + 5)\n    plt.axis('off')\n    plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')\n    plt.title('Prediction: %i' % prediction)\n\nplt.show()"
   ],
   "outputs": [],
   "metadata": {

0.18/_downloads/plot_digits_classification.py

Lines changed: 4 additions & 4 deletions
@@ -46,17 +46,17 @@
 classifier = svm.SVC(gamma=0.001)
 
 # We learn the digits on the first half of the digits
-classifier.fit(data[:n_samples / 2], digits.target[:n_samples / 2])
+classifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])
 
 # Now predict the value of the digit on the second half:
-expected = digits.target[n_samples / 2:]
-predicted = classifier.predict(data[n_samples / 2:])
+expected = digits.target[n_samples // 2:]
+predicted = classifier.predict(data[n_samples // 2:])
 
 print("Classification report for classifier %s:\n%s\n"
       % (classifier, metrics.classification_report(expected, predicted)))
 print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
 
-images_and_predictions = list(zip(digits.images[n_samples / 2:], predicted))
+images_and_predictions = list(zip(digits.images[n_samples // 2:], predicted))
 for index, (image, prediction) in enumerate(images_and_predictions[:4]):
     plt.subplot(2, 4, index + 5)
     plt.axis('off')

0.18/_downloads/plot_iris_exercise.ipynb

Lines changed: 1 addition & 1 deletion
@@ -24,7 +24,7 @@
   "execution_count": null,
   "cell_type": "code",
   "source": [
-   "print(__doc__)\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets, svm\n\niris = datasets.load_iris()\nX = iris.data\ny = iris.target\n\nX = X[y != 0, :2]\ny = y[y != 0]\n\nn_sample = len(X)\n\nnp.random.seed(0)\norder = np.random.permutation(n_sample)\nX = X[order]\ny = y[order].astype(np.float)\n\nX_train = X[:.9 * n_sample]\ny_train = y[:.9 * n_sample]\nX_test = X[.9 * n_sample:]\ny_test = y[.9 * n_sample:]\n\n# fit the model\nfor fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):\n    clf = svm.SVC(kernel=kernel, gamma=10)\n    clf.fit(X_train, y_train)\n\n    plt.figure(fig_num)\n    plt.clf()\n    plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)\n\n    # Circle out the test data\n    plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)\n\n    plt.axis('tight')\n    x_min = X[:, 0].min()\n    x_max = X[:, 0].max()\n    y_min = X[:, 1].min()\n    y_max = X[:, 1].max()\n\n    XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]\n    Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])\n\n    # Put the result into a color plot\n    Z = Z.reshape(XX.shape)\n    plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)\n    plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],\n                levels=[-.5, 0, .5])\n\n    plt.title(kernel)\nplt.show()"
+   "print(__doc__)\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets, svm\n\niris = datasets.load_iris()\nX = iris.data\ny = iris.target\n\nX = X[y != 0, :2]\ny = y[y != 0]\n\nn_sample = len(X)\n\nnp.random.seed(0)\norder = np.random.permutation(n_sample)\nX = X[order]\ny = y[order].astype(np.float)\n\nX_train = X[:int(.9 * n_sample)]\ny_train = y[:int(.9 * n_sample)]\nX_test = X[int(.9 * n_sample):]\ny_test = y[int(.9 * n_sample):]\n\n# fit the model\nfor fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):\n    clf = svm.SVC(kernel=kernel, gamma=10)\n    clf.fit(X_train, y_train)\n\n    plt.figure(fig_num)\n    plt.clf()\n    plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)\n\n    # Circle out the test data\n    plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)\n\n    plt.axis('tight')\n    x_min = X[:, 0].min()\n    x_max = X[:, 0].max()\n    y_min = X[:, 1].min()\n    y_max = X[:, 1].max()\n\n    XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]\n    Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])\n\n    # Put the result into a color plot\n    Z = Z.reshape(XX.shape)\n    plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)\n    plt.contour(XX, YY, Z, colors=['k', 'k', 'k'],\n                linestyles=['--', '-', '--'], levels=[-.5, 0, .5])\n\n    plt.title(kernel)\nplt.show()"
   ],
   "outputs": [],
   "metadata": {

0.18/_downloads/plot_iris_exercise.py

Lines changed: 6 additions & 6 deletions
@@ -29,10 +29,10 @@
 X = X[order]
 y = y[order].astype(np.float)
 
-X_train = X[:.9 * n_sample]
-y_train = y[:.9 * n_sample]
-X_test = X[.9 * n_sample:]
-y_test = y[.9 * n_sample:]
+X_train = X[:int(.9 * n_sample)]
+y_train = y[:int(.9 * n_sample)]
+X_test = X[int(.9 * n_sample):]
+y_test = y[int(.9 * n_sample):]
 
 # fit the model
 for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
@@ -58,8 +58,8 @@
     # Put the result into a color plot
     Z = Z.reshape(XX.shape)
     plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
-    plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
-                levels=[-.5, 0, .5])
+    plt.contour(XX, YY, Z, colors=['k', 'k', 'k'],
+                linestyles=['--', '-', '--'], levels=[-.5, 0, .5])
 
     plt.title(kernel)
 plt.show()
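The int(...) wrappers in the first hunk are the same class of fix as the // edits above: .9 * n_sample is a float, and newer NumPy (like Python 3 itself) rejects float slice bounds where older versions only warned. A minimal sketch, assuming a 100-row array:

import numpy as np

X = np.arange(100)
n_sample = len(X)

# .9 * n_sample is 90.0, a float; newer NumPy rejects float slice
# bounds, so the split point is truncated to an int explicitly.
split = int(.9 * n_sample)            # 90
X_train, X_test = X[:split], X[split:]
print(len(X_train), len(X_test))      # 90 10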
