
Commit 0327801

Pushing the docs to 1.0/ for branch: 1.0.X, commit 0d378913be6d7e485b792ea36e9268be31ed52d0
1 parent c8b12ad commit 0327801

2,705 files changed: +37,111 / −29,420 lines


1.0/.buildinfo

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
 # Sphinx build info version 1
 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: 61eee5c1734c1de5af47281cdb7e94fd
+config: 2eb390e0f69446f70670632283d7ce9d
 tags: 645f666f9bcd5a90fca523b33c5a78b7

1.0/_downloads/006fc185672e58b056a5c134db26935c/plot_coin_segmentation.ipynb

Lines changed: 2 additions & 2 deletions
@@ -26,7 +26,7 @@
 },
 "outputs": [],
 "source": [
-"print(__doc__)\n\n# Author: Gael Varoquaux <[email protected]>, Brian Cheung\n# License: BSD 3 clause\n\nimport time\n\nimport numpy as np\nfrom scipy.ndimage.filters import gaussian_filter\nimport matplotlib.pyplot as plt\nimport skimage\nfrom skimage.data import coins\nfrom skimage.transform import rescale\n\nfrom sklearn.feature_extraction import image\nfrom sklearn.cluster import spectral_clustering\nfrom sklearn.utils.fixes import parse_version\n\n# these were introduced in skimage-0.14\nif parse_version(skimage.__version__) >= parse_version('0.14'):\n    rescale_params = {'anti_aliasing': False, 'multichannel': False}\nelse:\n    rescale_params = {}\n\n# load the coins as a numpy array\norig_coins = coins()\n\n# Resize it to 20% of the original size to speed up the processing\n# Applying a Gaussian filter for smoothing prior to down-scaling\n# reduces aliasing artifacts.\nsmoothened_coins = gaussian_filter(orig_coins, sigma=2)\nrescaled_coins = rescale(smoothened_coins, 0.2, mode=\"reflect\",\n                         **rescale_params)\n\n# Convert the image into a graph with the value of the gradient on the\n# edges.\ngraph = image.img_to_graph(rescaled_coins)\n\n# Take a decreasing function of the gradient: an exponential\n# The smaller beta is, the more independent the segmentation is of the\n# actual image. For beta=1, the segmentation is close to a voronoi\nbeta = 10\neps = 1e-6\ngraph.data = np.exp(-beta * graph.data / graph.data.std()) + eps\n\n# Apply spectral clustering (this step goes much faster if you have pyamg\n# installed)\nN_REGIONS = 25"
+"# Author: Gael Varoquaux <[email protected]>, Brian Cheung\n# License: BSD 3 clause\n\nimport time\n\nimport numpy as np\nfrom scipy.ndimage.filters import gaussian_filter\nimport matplotlib.pyplot as plt\nimport skimage\nfrom skimage.data import coins\nfrom skimage.transform import rescale\n\nfrom sklearn.feature_extraction import image\nfrom sklearn.cluster import spectral_clustering\nfrom sklearn.utils.fixes import parse_version\n\n# these were introduced in skimage-0.14\nif parse_version(skimage.__version__) >= parse_version(\"0.14\"):\n    rescale_params = {\"anti_aliasing\": False, \"multichannel\": False}\nelse:\n    rescale_params = {}\n\n# load the coins as a numpy array\norig_coins = coins()\n\n# Resize it to 20% of the original size to speed up the processing\n# Applying a Gaussian filter for smoothing prior to down-scaling\n# reduces aliasing artifacts.\nsmoothened_coins = gaussian_filter(orig_coins, sigma=2)\nrescaled_coins = rescale(smoothened_coins, 0.2, mode=\"reflect\", **rescale_params)\n\n# Convert the image into a graph with the value of the gradient on the\n# edges.\ngraph = image.img_to_graph(rescaled_coins)\n\n# Take a decreasing function of the gradient: an exponential\n# The smaller beta is, the more independent the segmentation is of the\n# actual image. For beta=1, the segmentation is close to a voronoi\nbeta = 10\neps = 1e-6\ngraph.data = np.exp(-beta * graph.data / graph.data.std()) + eps\n\n# Apply spectral clustering (this step goes much faster if you have pyamg\n# installed)\nN_REGIONS = 25"
 ]
 },
 {
@@ -44,7 +44,7 @@
 },
 "outputs": [],
 "source": [
-"for assign_labels in ('kmeans', 'discretize'):\n    t0 = time.time()\n    labels = spectral_clustering(graph, n_clusters=N_REGIONS,\n                                 assign_labels=assign_labels, random_state=42)\n    t1 = time.time()\n    labels = labels.reshape(rescaled_coins.shape)\n\n    plt.figure(figsize=(5, 5))\n    plt.imshow(rescaled_coins, cmap=plt.cm.gray)\n    for l in range(N_REGIONS):\n        plt.contour(labels == l,\n                    colors=[plt.cm.nipy_spectral(l / float(N_REGIONS))])\n    plt.xticks(())\n    plt.yticks(())\n    title = 'Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0))\n    print(title)\n    plt.title(title)\nplt.show()"
+"for assign_labels in (\"kmeans\", \"discretize\"):\n    t0 = time.time()\n    labels = spectral_clustering(\n        graph, n_clusters=N_REGIONS, assign_labels=assign_labels, random_state=42\n    )\n    t1 = time.time()\n    labels = labels.reshape(rescaled_coins.shape)\n\n    plt.figure(figsize=(5, 5))\n    plt.imshow(rescaled_coins, cmap=plt.cm.gray)\n    for l in range(N_REGIONS):\n        plt.contour(labels == l, colors=[plt.cm.nipy_spectral(l / float(N_REGIONS))])\n    plt.xticks(())\n    plt.yticks(())\n    title = \"Spectral clustering: %s, %.2fs\" % (assign_labels, (t1 - t0))\n    print(title)\n    plt.title(title)\nplt.show()"
 ]
 }
 ],
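Both hunks above are formatting-only (double quotes, black-style wrapping) apart from dropping the leading print(__doc__) call. As a quick sanity check of the API the notebook exercises, here is a minimal standalone sketch of spectral_clustering on a small synthetic image; the synthetic squares and n_clusters=4 are illustrative assumptions, not part of the commit:

import numpy as np
from sklearn.cluster import spectral_clustering
from sklearn.feature_extraction import image

# Tiny synthetic "image": four bright squares on a noisy dark background
# (stand-in for the coins photo, purely illustrative).
rng = np.random.RandomState(0)
img = np.zeros((40, 40))
for r, c in [(5, 5), (5, 25), (25, 5), (25, 25)]:
    img[r:r + 10, c:c + 10] = 1.0
img += 0.1 * rng.randn(*img.shape)

# Same construction as the notebook: gradient values on the graph edges,
# passed through a decreasing exponential.
graph = image.img_to_graph(img)
beta, eps = 10, 1e-6
graph.data = np.exp(-beta * graph.data / graph.data.std()) + eps

labels = spectral_clustering(graph, n_clusters=4, random_state=42)
print(labels.reshape(img.shape)[::10, ::10])  # coarse view of the four regions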

1.0/_downloads/00ae629d652473137a3905a5e08ea815/plot_iris_dtc.py

Lines changed: 14 additions & 8 deletions
@@ -14,7 +14,6 @@
 
 We also show the tree structure of a model built on all of the features.
 """
-print(__doc__)
 
 import numpy as np
 import matplotlib.pyplot as plt
@@ -30,8 +29,7 @@
 # Load data
 iris = load_iris()
 
-for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
-                                [1, 2], [1, 3], [2, 3]]):
+for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 3]]):
     # We only take the two corresponding features
     X = iris.data[:, pair]
     y = iris.target
@@ -44,8 +42,9 @@
 
     x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
-    xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
-                         np.arange(y_min, y_max, plot_step))
+    xx, yy = np.meshgrid(
+        np.arange(x_min, x_max, plot_step), np.arange(y_min, y_max, plot_step)
+    )
     plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)
 
     Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
@@ -58,11 +57,18 @@
     # Plot the training points
     for i, color in zip(range(n_classes), plot_colors):
         idx = np.where(y == i)
-        plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
-                    cmap=plt.cm.RdYlBu, edgecolor='black', s=15)
+        plt.scatter(
+            X[idx, 0],
+            X[idx, 1],
+            c=color,
+            label=iris.target_names[i],
+            cmap=plt.cm.RdYlBu,
+            edgecolor="black",
+            s=15,
+        )
 
 plt.suptitle("Decision surface of a decision tree using paired features")
-plt.legend(loc='lower right', borderpad=0, handletextpad=0)
+plt.legend(loc="lower right", borderpad=0, handletextpad=0)
 plt.axis("tight")
 
 plt.figure()
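The substantive change here is again the removal of print(__doc__); the rest is black-style reflow. A minimal sketch of the step this file repeats per feature pair, fitting a DecisionTreeClassifier on one iris pair and predicting over a mesh grid (the pair [0, 1] and the coarse 0.5 step are illustrative choices):

import numpy as np
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

iris = load_iris()
X, y = iris.data[:, [0, 1]], iris.target  # one feature pair: sepal length/width

clf = DecisionTreeClassifier().fit(X, y)

# Predict over a coarse mesh, as the plotting code does for its surface.
xx, yy = np.meshgrid(np.arange(4.0, 8.5, 0.5), np.arange(1.5, 4.5, 0.5))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
print(Z)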

1.0/_downloads/010337852815f8103ac6cca38a812b3c/plot_roc_crossval.py

Lines changed: 0 additions & 1 deletion
@@ -29,7 +29,6 @@
 :ref:`sphx_glr_auto_examples_model_selection_plot_roc.py`,
 
 """
-print(__doc__)
 
 import numpy as np
 import matplotlib.pyplot as plt

1.0/_downloads/01fdc7c95204e4a420de7cd297711693/plot_feature_union.py

Lines changed: 6 additions & 3 deletions
@@ -13,6 +13,7 @@
 
 The combination used in this example is not particularly helpful on this
 dataset and is only used to illustrate the usage of FeatureUnion.
+
 """
 
 # Author: Andreas Mueller <[email protected]>
@@ -50,9 +51,11 @@
 
 pipeline = Pipeline([("features", combined_features), ("svm", svm)])
 
-param_grid = dict(features__pca__n_components=[1, 2, 3],
-                  features__univ_select__k=[1, 2],
-                  svm__C=[0.1, 1, 10])
+param_grid = dict(
+    features__pca__n_components=[1, 2, 3],
+    features__univ_select__k=[1, 2],
+    svm__C=[0.1, 1, 10],
+)
 
 grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)
 grid_search.fit(X, y)
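The param_grid is reflowed to one keyword per line; the double-underscore keys route each grid entry to the matching sub-step of the pipeline. A self-contained sketch of the surrounding code, reconstructed from the names visible in this hunk (the iris data and the PCA/SelectKBest/linear-SVC settings are assumptions based on the example; the grid keys are exactly as diffed above):

from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import FeatureUnion, Pipeline
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)

# PCA components and univariate-selected features, concatenated side by side.
combined_features = FeatureUnion(
    [("pca", PCA(n_components=2)), ("univ_select", SelectKBest(k=1))]
)
svm = SVC(kernel="linear")
pipeline = Pipeline([("features", combined_features), ("svm", svm)])

# "features__pca__n_components" reaches into pipeline -> features -> pca.
param_grid = dict(
    features__pca__n_components=[1, 2, 3],
    features__univ_select__k=[1, 2],
    svm__C=[0.1, 1, 10],
)
grid_search = GridSearchCV(pipeline, param_grid=param_grid)
grid_search.fit(X, y)
print(grid_search.best_params_)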

1.0/_downloads/023324c27491610e7c0ccff87c59abf9/plot_kernel_pca.py

Lines changed: 15 additions & 23 deletions
@@ -5,8 +5,8 @@
 
 This example shows that Kernel PCA is able to find a projection of the data
 that makes data linearly separable.
+
 """
-print(__doc__)
 
 # Authors: Mathieu Blondel
 #          Andreas Mueller
@@ -20,7 +20,7 @@
 
 np.random.seed(0)
 
-X, y = make_circles(n_samples=400, factor=.3, noise=.05)
+X, y = make_circles(n_samples=400, factor=0.3, noise=0.05)
 
 kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
 X_kpca = kpca.fit_transform(X)
@@ -31,47 +31,39 @@
 # Plot results
 
 plt.figure()
-plt.subplot(2, 2, 1, aspect='equal')
+plt.subplot(2, 2, 1, aspect="equal")
 plt.title("Original space")
 reds = y == 0
 blues = y == 1
 
-plt.scatter(X[reds, 0], X[reds, 1], c="red",
-            s=20, edgecolor='k')
-plt.scatter(X[blues, 0], X[blues, 1], c="blue",
-            s=20, edgecolor='k')
+plt.scatter(X[reds, 0], X[reds, 1], c="red", s=20, edgecolor="k")
+plt.scatter(X[blues, 0], X[blues, 1], c="blue", s=20, edgecolor="k")
 plt.xlabel("$x_1$")
 plt.ylabel("$x_2$")
 
 X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
 X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
 # projection on the first principal component (in the phi space)
 Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
-plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
+plt.contour(X1, X2, Z_grid, colors="grey", linewidths=1, origin="lower")
 
-plt.subplot(2, 2, 2, aspect='equal')
-plt.scatter(X_pca[reds, 0], X_pca[reds, 1], c="red",
-            s=20, edgecolor='k')
-plt.scatter(X_pca[blues, 0], X_pca[blues, 1], c="blue",
-            s=20, edgecolor='k')
+plt.subplot(2, 2, 2, aspect="equal")
+plt.scatter(X_pca[reds, 0], X_pca[reds, 1], c="red", s=20, edgecolor="k")
+plt.scatter(X_pca[blues, 0], X_pca[blues, 1], c="blue", s=20, edgecolor="k")
 plt.title("Projection by PCA")
 plt.xlabel("1st principal component")
 plt.ylabel("2nd component")
 
-plt.subplot(2, 2, 3, aspect='equal')
-plt.scatter(X_kpca[reds, 0], X_kpca[reds, 1], c="red",
-            s=20, edgecolor='k')
-plt.scatter(X_kpca[blues, 0], X_kpca[blues, 1], c="blue",
-            s=20, edgecolor='k')
+plt.subplot(2, 2, 3, aspect="equal")
+plt.scatter(X_kpca[reds, 0], X_kpca[reds, 1], c="red", s=20, edgecolor="k")
+plt.scatter(X_kpca[blues, 0], X_kpca[blues, 1], c="blue", s=20, edgecolor="k")
 plt.title("Projection by KPCA")
 plt.xlabel(r"1st principal component in space induced by $\phi$")
 plt.ylabel("2nd component")
 
-plt.subplot(2, 2, 4, aspect='equal')
-plt.scatter(X_back[reds, 0], X_back[reds, 1], c="red",
-            s=20, edgecolor='k')
-plt.scatter(X_back[blues, 0], X_back[blues, 1], c="blue",
-            s=20, edgecolor='k')
+plt.subplot(2, 2, 4, aspect="equal")
+plt.scatter(X_back[reds, 0], X_back[reds, 1], c="red", s=20, edgecolor="k")
+plt.scatter(X_back[blues, 0], X_back[blues, 1], c="blue", s=20, edgecolor="k")
 plt.title("Original space after inverse transform")
 plt.xlabel("$x_1$")
 plt.ylabel("$x_2$")

1.0/_downloads/02a1306a494b46cc56c930ceec6e8c4a/plot_species_kde.py

Lines changed: 25 additions & 16 deletions
@@ -35,6 +35,7 @@
 S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
 190:231-259, 2006.
 """  # noqa: E501
+
 # Author: Jake Vanderplas <[email protected]>
 #
 # License: BSD 3 clause
@@ -48,6 +49,7 @@
 # otherwise, we'll improvise later...
 try:
     from mpl_toolkits.basemap import Basemap
+
     basemap = True
 except ImportError:
     basemap = False
@@ -82,13 +84,14 @@ def construct_grids(batch):
 
 # Get matrices/arrays of species IDs and locations
 data = fetch_species_distributions()
-species_names = ['Bradypus Variegatus', 'Microryzomys Minutus']
+species_names = ["Bradypus Variegatus", "Microryzomys Minutus"]
 
-Xtrain = np.vstack([data['train']['dd lat'],
-                    data['train']['dd long']]).T
-ytrain = np.array([d.decode('ascii').startswith('micro')
-                   for d in data['train']['species']], dtype='int')
-Xtrain *= np.pi / 180.  # Convert lat/long to radians
+Xtrain = np.vstack([data["train"]["dd lat"], data["train"]["dd long"]]).T
+ytrain = np.array(
+    [d.decode("ascii").startswith("micro") for d in data["train"]["species"]],
+    dtype="int",
+)
+Xtrain *= np.pi / 180.0  # Convert lat/long to radians
 
 # Set up the data grid for the contour plot
 xgrid, ygrid = construct_grids(data)
@@ -98,7 +101,7 @@ def construct_grids(batch):
 
 xy = np.vstack([Y.ravel(), X.ravel()]).T
 xy = xy[land_mask]
-xy *= np.pi / 180.
+xy *= np.pi / 180.0
 
 # Plot map of South America with distributions of each species
 fig = plt.figure()
@@ -109,12 +112,13 @@ def construct_grids(batch):
 
     # construct a kernel density estimate of the distribution
     print(" - computing KDE in spherical coordinates")
-    kde = KernelDensity(bandwidth=0.04, metric='haversine',
-                        kernel='gaussian', algorithm='ball_tree')
+    kde = KernelDensity(
+        bandwidth=0.04, metric="haversine", kernel="gaussian", algorithm="ball_tree"
+    )
     kde.fit(Xtrain[ytrain == i])
 
     # evaluate only on the land: -9999 indicates ocean
-    Z = np.full(land_mask.shape[0], -9999, dtype='int')
+    Z = np.full(land_mask.shape[0], -9999, dtype="int")
     Z[land_mask] = np.exp(kde.score_samples(xy))
     Z = Z.reshape(X.shape)
 
@@ -124,16 +128,21 @@ def construct_grids(batch):
 
     if basemap:
         print(" - plot coastlines using basemap")
-        m = Basemap(projection='cyl', llcrnrlat=Y.min(),
-                    urcrnrlat=Y.max(), llcrnrlon=X.min(),
-                    urcrnrlon=X.max(), resolution='c')
+        m = Basemap(
+            projection="cyl",
+            llcrnrlat=Y.min(),
+            urcrnrlat=Y.max(),
+            llcrnrlon=X.min(),
+            urcrnrlon=X.max(),
+            resolution="c",
+        )
         m.drawcoastlines()
         m.drawcountries()
     else:
         print(" - plot coastlines from coverage")
-        plt.contour(X, Y, land_reference,
-                    levels=[-9998], colors="k",
-                    linestyles="solid")
+        plt.contour(
+            X, Y, land_reference, levels=[-9998], colors="k", linestyles="solid"
+        )
     plt.xticks([])
     plt.yticks([])
 
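Apart from blank-line and quoting changes, the KernelDensity construction is only reflowed. A minimal sketch of that estimator in isolation; the three (lat, long) points are made-up illustrative values, while the argument combination is taken from the diff (the haversine metric expects radians and requires the ball-tree backend):

import numpy as np
from sklearn.neighbors import KernelDensity

# Made-up (lat, long) observations in degrees, converted to radians.
latlon_deg = np.array([[-10.0, -60.0], [-12.0, -62.0], [-11.0, -61.0]])
latlon_rad = latlon_deg * np.pi / 180.0

kde = KernelDensity(
    bandwidth=0.04, metric="haversine", kernel="gaussian", algorithm="ball_tree"
)
kde.fit(latlon_rad)

# score_samples returns log-density; exponentiate, as the example does.
query = np.array([[-11.0, -61.0]]) * np.pi / 180.0
print(np.exp(kde.score_samples(query)))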

1.0/_downloads/02a7bbce3c39c70d62d80e875968e5c6/plot_digits_kde_sampling.py

Lines changed: 9 additions & 6 deletions
@@ -8,6 +8,7 @@
 a generative model for a dataset. With this generative model in place,
 new samples can be drawn. These new samples reflect the underlying model
 of the data.
+
 """
 
 import numpy as np
@@ -26,7 +27,7 @@
 data = pca.fit_transform(digits.data)
 
 # use grid search cross-validation to optimize the bandwidth
-params = {'bandwidth': np.logspace(-1, 1, 20)}
+params = {"bandwidth": np.logspace(-1, 1, 20)}
 grid = GridSearchCV(KernelDensity(), params)
 grid.fit(data)
 
@@ -48,14 +49,16 @@
 for j in range(11):
     ax[4, j].set_visible(False)
     for i in range(4):
-        im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
-                             cmap=plt.cm.binary, interpolation='nearest')
+        im = ax[i, j].imshow(
+            real_data[i, j].reshape((8, 8)), cmap=plt.cm.binary, interpolation="nearest"
+        )
         im.set_clim(0, 16)
-        im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
-                                 cmap=plt.cm.binary, interpolation='nearest')
+        im = ax[i + 5, j].imshow(
+            new_data[i, j].reshape((8, 8)), cmap=plt.cm.binary, interpolation="nearest"
+        )
         im.set_clim(0, 16)
 
-ax[0, 5].set_title('Selection from the input data')
+ax[0, 5].set_title("Selection from the input data")
 ax[5, 5].set_title('"New" digits drawn from the kernel density model')
 
 plt.show()
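Again a formatting-only change. The pipeline this example builds, reduced to its non-plotting core; the PCA settings and bandwidth grid are taken from the visible code, while sampling 4 digits instead of the example's full panel is an illustrative choice:

import numpy as np
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KernelDensity

digits = load_digits()

# Project to 15 dimensions before estimating the density, as in the example.
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)

# Grid-search the bandwidth, then sample and map back to pixel space.
grid = GridSearchCV(KernelDensity(), {"bandwidth": np.logspace(-1, 1, 20)})
grid.fit(data)
kde = grid.best_estimator_

new_data = pca.inverse_transform(kde.sample(4, random_state=0))
print(new_data.shape)  # (4, 64): four new 8x8 "digit" images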
