
Commit ae22730

Pushing the docs to dev/ for branch: main, commit 5763e5adf6c769c134647dc95e0760c128e9cc8f
1 parent: fafe673

File tree: 1,319 files changed, +5779 -5767 lines


dev/.buildinfo

Lines changed: 1 addition & 1 deletion

@@ -1,4 +1,4 @@
 # Sphinx build info version 1
 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: 6c4d83364794080b68900dc8ca3ed494
+config: e77cfb196fa5da77d099af55c5fec29d
 tags: 645f666f9bcd5a90fca523b33c5a78b7

dev/_downloads/2338f6e7d44c2931a41926d4f9726d9b/plot_linkage_comparison.py

Lines changed: 9 additions & 9 deletions

@@ -33,28 +33,28 @@
 from sklearn import cluster, datasets
 from sklearn.preprocessing import StandardScaler
 
-np.random.seed(0)
-
 # %%
 # Generate datasets. We choose the size big enough to see the scalability
 # of the algorithms, but not too big to avoid too long running times
 
 n_samples = 1500
-noisy_circles = datasets.make_circles(n_samples=n_samples, factor=0.5, noise=0.05)
-noisy_moons = datasets.make_moons(n_samples=n_samples, noise=0.05)
-blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
-no_structure = np.random.rand(n_samples, 2), None
+noisy_circles = datasets.make_circles(
+    n_samples=n_samples, factor=0.5, noise=0.05, random_state=170
+)
+noisy_moons = datasets.make_moons(n_samples=n_samples, noise=0.05, random_state=170)
+blobs = datasets.make_blobs(n_samples=n_samples, random_state=170)
+rng = np.random.RandomState(170)
+no_structure = rng.rand(n_samples, 2), None
 
 # Anisotropicly distributed data
-random_state = 170
-X, y = datasets.make_blobs(n_samples=n_samples, random_state=random_state)
+X, y = datasets.make_blobs(n_samples=n_samples, random_state=170)
 transformation = [[0.6, -0.6], [-0.4, 0.8]]
 X_aniso = np.dot(X, transformation)
 aniso = (X_aniso, y)
 
 # blobs with varied variances
 varied = datasets.make_blobs(
-    n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=random_state
+    n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=170
 )
 
 # %%
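
The change above drops the module-level np.random.seed(0) in favor of explicit random_state arguments and a local RandomState instance, so each toy dataset is generated reproducibly without reading or mutating NumPy's global RNG. A minimal sketch of that pattern, reusing the seed 170 from the diff purely for illustration:

    import numpy as np
    from sklearn import datasets

    # Explicit seeds: the generator returns identical data on every call,
    # independent of whatever the global RNG state happens to be.
    X_a, _ = datasets.make_circles(n_samples=100, factor=0.5, noise=0.05, random_state=170)
    X_b, _ = datasets.make_circles(n_samples=100, factor=0.5, noise=0.05, random_state=170)
    assert np.allclose(X_a, X_b)

    # A local RandomState replaces draws that previously relied on np.random.seed(0).
    rng = np.random.RandomState(170)
    no_structure = rng.rand(100, 2)  # reproducible "unstructured" points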

dev/_downloads/24475810034a0d0d190a9de0f87d72b5/plot_all_scaling.py

Lines changed: 6 additions & 2 deletions

@@ -102,11 +102,15 @@
     ),
     (
         "Data after quantile transformation (uniform pdf)",
-        QuantileTransformer(output_distribution="uniform").fit_transform(X),
+        QuantileTransformer(
+            output_distribution="uniform", random_state=42
+        ).fit_transform(X),
     ),
     (
         "Data after quantile transformation (gaussian pdf)",
-        QuantileTransformer(output_distribution="normal").fit_transform(X),
+        QuantileTransformer(
+            output_distribution="normal", random_state=42
+        ).fit_transform(X),
     ),
     ("Data after sample-wise L2 normalizing", Normalizer().fit_transform(X)),
 ]
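
QuantileTransformer estimates its quantile grid from a random subsample whenever the input has more rows than its subsample limit, so pinning random_state (42 here, as in the diff) keeps the transformed values identical from one documentation build to the next. A minimal sketch of the effect; the data and the explicit subsample value below are illustrative assumptions, not part of the example:

    import numpy as np
    from sklearn.preprocessing import QuantileTransformer

    rng = np.random.RandomState(0)
    X = rng.lognormal(size=(20_000, 1))  # skewed data with more rows than the subsample below

    def transform(seed):
        # subsample=5_000 forces the quantiles to be estimated from a random subset,
        # which is exactly the step that random_state makes deterministic.
        qt = QuantileTransformer(
            output_distribution="uniform", subsample=5_000, random_state=seed
        )
        return qt.fit_transform(X)

    assert np.allclose(transform(42), transform(42))  # same seed, same output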

dev/_downloads/74caedf3eb449b80f3f00e66c1c576bd/plot_discretization_classification.py

Lines changed: 2 additions & 2 deletions

@@ -74,7 +74,7 @@ def get_name(estimator):
     (
         make_pipeline(
             StandardScaler(),
-            KBinsDiscretizer(encode="onehot"),
+            KBinsDiscretizer(encode="onehot", random_state=0),
             LogisticRegression(random_state=0),
         ),
         {
@@ -85,7 +85,7 @@ def get_name(estimator):
     (
         make_pipeline(
             StandardScaler(),
-            KBinsDiscretizer(encode="onehot"),
+            KBinsDiscretizer(encode="onehot", random_state=0),
             LinearSVC(random_state=0, dual="auto"),
         ),
         {
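
The same convention is applied inside the pipelines here: KBinsDiscretizer now receives an explicit random_state alongside the already-seeded LogisticRegression and LinearSVC, presumably so that repeated documentation builds fit identical models. A small self-contained sketch of one such pipeline; the toy data is an illustrative assumption, and in the real example n_bins and C are tuned with GridSearchCV:

    from sklearn.datasets import make_moons
    from sklearn.linear_model import LogisticRegression
    from sklearn.pipeline import make_pipeline
    from sklearn.preprocessing import KBinsDiscretizer, StandardScaler

    X, y = make_moons(n_samples=100, noise=0.2, random_state=0)

    # Every randomized step carries a fixed seed, so two fits of this pipeline agree exactly.
    clf = make_pipeline(
        StandardScaler(),
        KBinsDiscretizer(encode="onehot", random_state=0),
        LogisticRegression(random_state=0),
    )
    clf.fit(X, y)
    print(clf.score(X, y))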

dev/_downloads/aa8e07ce1b796a15ada1d9f0edce48b5/plot_discretization_classification.ipynb

Lines changed: 1 addition & 1 deletion

@@ -15,7 +15,7 @@
 },
 "outputs": [],
 "source": [
-
"# Code source: Tom Dupr\u00e9 la Tour\n# Adapted from plot_classifier_comparison by Ga\u00ebl Varoquaux and Andreas M\u00fcller\n#\n# License: BSD 3 clause\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.colors import ListedColormap\n\nfrom sklearn.datasets import make_circles, make_classification, make_moons\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.exceptions import ConvergenceWarning\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import GridSearchCV, train_test_split\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import KBinsDiscretizer, StandardScaler\nfrom sklearn.svm import SVC, LinearSVC\nfrom sklearn.utils._testing import ignore_warnings\n\nh = 0.02 # step size in the mesh\n\n\ndef get_name(estimator):\n name = estimator.__class__.__name__\n if name == \"Pipeline\":\n name = [get_name(est[1]) for est in estimator.steps]\n name = \" + \".join(name)\n return name\n\n\n# list of (estimator, param_grid), where param_grid is used in GridSearchCV\n# The parameter spaces in this example are limited to a narrow band to reduce\n# its runtime. In a real use case, a broader search space for the algorithms\n# should be used.\nclassifiers = [\n (\n make_pipeline(StandardScaler(), LogisticRegression(random_state=0)),\n {\"logisticregression__C\": np.logspace(-1, 1, 3)},\n ),\n (\n make_pipeline(StandardScaler(), LinearSVC(random_state=0, dual=\"auto\")),\n {\"linearsvc__C\": np.logspace(-1, 1, 3)},\n ),\n (\n make_pipeline(\n StandardScaler(),\n KBinsDiscretizer(encode=\"onehot\"),\n LogisticRegression(random_state=0),\n ),\n {\n \"kbinsdiscretizer__n_bins\": np.arange(5, 8),\n \"logisticregression__C\": np.logspace(-1, 1, 3),\n },\n ),\n (\n make_pipeline(\n StandardScaler(),\n KBinsDiscretizer(encode=\"onehot\"),\n LinearSVC(random_state=0, dual=\"auto\"),\n ),\n {\n \"kbinsdiscretizer__n_bins\": np.arange(5, 8),\n \"linearsvc__C\": np.logspace(-1, 1, 3),\n },\n ),\n (\n make_pipeline(\n StandardScaler(), GradientBoostingClassifier(n_estimators=5, random_state=0)\n ),\n {\"gradientboostingclassifier__learning_rate\": np.logspace(-2, 0, 5)},\n ),\n (\n make_pipeline(StandardScaler(), SVC(random_state=0)),\n {\"svc__C\": np.logspace(-1, 1, 3)},\n ),\n]\n\nnames = [get_name(e).replace(\"StandardScaler + \", \"\") for e, _ in classifiers]\n\nn_samples = 100\ndatasets = [\n make_moons(n_samples=n_samples, noise=0.2, random_state=0),\n make_circles(n_samples=n_samples, noise=0.2, factor=0.5, random_state=1),\n make_classification(\n n_samples=n_samples,\n n_features=2,\n n_redundant=0,\n n_informative=2,\n random_state=2,\n n_clusters_per_class=1,\n ),\n]\n\nfig, axes = plt.subplots(\n nrows=len(datasets), ncols=len(classifiers) + 1, figsize=(21, 9)\n)\n\ncm_piyg = plt.cm.PiYG\ncm_bright = ListedColormap([\"#b30065\", \"#178000\"])\n\n# iterate over datasets\nfor ds_cnt, (X, y) in enumerate(datasets):\n print(f\"\\ndataset {ds_cnt}\\n---------\")\n\n # split into training and test part\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.5, random_state=42\n )\n\n # create the grid for background colors\n x_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5\n y_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n\n # plot the dataset first\n ax = axes[ds_cnt, 0]\n if ds_cnt == 0:\n ax.set_title(\"Input data\")\n # plot the training points\n ax.scatter(X_train[:, 0], X_train[:, 1], 
c=y_train, cmap=cm_bright, edgecolors=\"k\")\n # and testing points\n ax.scatter(\n X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6, edgecolors=\"k\"\n )\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xticks(())\n ax.set_yticks(())\n\n # iterate over classifiers\n for est_idx, (name, (estimator, param_grid)) in enumerate(zip(names, classifiers)):\n ax = axes[ds_cnt, est_idx + 1]\n\n clf = GridSearchCV(estimator=estimator, param_grid=param_grid)\n with ignore_warnings(category=ConvergenceWarning):\n clf.fit(X_train, y_train)\n score = clf.score(X_test, y_test)\n print(f\"{name}: {score:.2f}\")\n\n # plot the decision boundary. For that, we will assign a color to each\n # point in the mesh [x_min, x_max]*[y_min, y_max].\n if hasattr(clf, \"decision_function\"):\n Z = clf.decision_function(np.column_stack([xx.ravel(), yy.ravel()]))\n else:\n Z = clf.predict_proba(np.column_stack([xx.ravel(), yy.ravel()]))[:, 1]\n\n # put the result into a color plot\n Z = Z.reshape(xx.shape)\n ax.contourf(xx, yy, Z, cmap=cm_piyg, alpha=0.8)\n\n # plot the training points\n ax.scatter(\n X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright, edgecolors=\"k\"\n )\n # and testing points\n ax.scatter(\n X_test[:, 0],\n X_test[:, 1],\n c=y_test,\n cmap=cm_bright,\n edgecolors=\"k\",\n alpha=0.6,\n )\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xticks(())\n ax.set_yticks(())\n\n if ds_cnt == 0:\n ax.set_title(name.replace(\" + \", \"\\n\"))\n ax.text(\n 0.95,\n 0.06,\n (f\"{score:.2f}\").lstrip(\"0\"),\n size=15,\n bbox=dict(boxstyle=\"round\", alpha=0.8, facecolor=\"white\"),\n transform=ax.transAxes,\n horizontalalignment=\"right\",\n )\n\n\nplt.tight_layout()\n\n# Add suptitles above the figure\nplt.subplots_adjust(top=0.90)\nsuptitles = [\n \"Linear classifiers\",\n \"Feature discretization and linear classifiers\",\n \"Non-linear classifiers\",\n]\nfor i, suptitle in zip([1, 3, 5], suptitles):\n ax = axes[0, i]\n ax.text(\n 1.05,\n 1.25,\n suptitle,\n transform=ax.transAxes,\n horizontalalignment=\"center\",\n size=\"x-large\",\n )\nplt.show()"
+
"# Code source: Tom Dupr\u00e9 la Tour\n# Adapted from plot_classifier_comparison by Ga\u00ebl Varoquaux and Andreas M\u00fcller\n#\n# License: BSD 3 clause\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.colors import ListedColormap\n\nfrom sklearn.datasets import make_circles, make_classification, make_moons\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.exceptions import ConvergenceWarning\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import GridSearchCV, train_test_split\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import KBinsDiscretizer, StandardScaler\nfrom sklearn.svm import SVC, LinearSVC\nfrom sklearn.utils._testing import ignore_warnings\n\nh = 0.02 # step size in the mesh\n\n\ndef get_name(estimator):\n name = estimator.__class__.__name__\n if name == \"Pipeline\":\n name = [get_name(est[1]) for est in estimator.steps]\n name = \" + \".join(name)\n return name\n\n\n# list of (estimator, param_grid), where param_grid is used in GridSearchCV\n# The parameter spaces in this example are limited to a narrow band to reduce\n# its runtime. In a real use case, a broader search space for the algorithms\n# should be used.\nclassifiers = [\n (\n make_pipeline(StandardScaler(), LogisticRegression(random_state=0)),\n {\"logisticregression__C\": np.logspace(-1, 1, 3)},\n ),\n (\n make_pipeline(StandardScaler(), LinearSVC(random_state=0, dual=\"auto\")),\n {\"linearsvc__C\": np.logspace(-1, 1, 3)},\n ),\n (\n make_pipeline(\n StandardScaler(),\n KBinsDiscretizer(encode=\"onehot\", random_state=0),\n LogisticRegression(random_state=0),\n ),\n {\n \"kbinsdiscretizer__n_bins\": np.arange(5, 8),\n \"logisticregression__C\": np.logspace(-1, 1, 3),\n },\n ),\n (\n make_pipeline(\n StandardScaler(),\n KBinsDiscretizer(encode=\"onehot\", random_state=0),\n LinearSVC(random_state=0, dual=\"auto\"),\n ),\n {\n \"kbinsdiscretizer__n_bins\": np.arange(5, 8),\n \"linearsvc__C\": np.logspace(-1, 1, 3),\n },\n ),\n (\n make_pipeline(\n StandardScaler(), GradientBoostingClassifier(n_estimators=5, random_state=0)\n ),\n {\"gradientboostingclassifier__learning_rate\": np.logspace(-2, 0, 5)},\n ),\n (\n make_pipeline(StandardScaler(), SVC(random_state=0)),\n {\"svc__C\": np.logspace(-1, 1, 3)},\n ),\n]\n\nnames = [get_name(e).replace(\"StandardScaler + \", \"\") for e, _ in classifiers]\n\nn_samples = 100\ndatasets = [\n make_moons(n_samples=n_samples, noise=0.2, random_state=0),\n make_circles(n_samples=n_samples, noise=0.2, factor=0.5, random_state=1),\n make_classification(\n n_samples=n_samples,\n n_features=2,\n n_redundant=0,\n n_informative=2,\n random_state=2,\n n_clusters_per_class=1,\n ),\n]\n\nfig, axes = plt.subplots(\n nrows=len(datasets), ncols=len(classifiers) + 1, figsize=(21, 9)\n)\n\ncm_piyg = plt.cm.PiYG\ncm_bright = ListedColormap([\"#b30065\", \"#178000\"])\n\n# iterate over datasets\nfor ds_cnt, (X, y) in enumerate(datasets):\n print(f\"\\ndataset {ds_cnt}\\n---------\")\n\n # split into training and test part\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.5, random_state=42\n )\n\n # create the grid for background colors\n x_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5\n y_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n\n # plot the dataset first\n ax = axes[ds_cnt, 0]\n if ds_cnt == 0:\n ax.set_title(\"Input data\")\n # plot the training points\n 
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright, edgecolors=\"k\")\n # and testing points\n ax.scatter(\n X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6, edgecolors=\"k\"\n )\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xticks(())\n ax.set_yticks(())\n\n # iterate over classifiers\n for est_idx, (name, (estimator, param_grid)) in enumerate(zip(names, classifiers)):\n ax = axes[ds_cnt, est_idx + 1]\n\n clf = GridSearchCV(estimator=estimator, param_grid=param_grid)\n with ignore_warnings(category=ConvergenceWarning):\n clf.fit(X_train, y_train)\n score = clf.score(X_test, y_test)\n print(f\"{name}: {score:.2f}\")\n\n # plot the decision boundary. For that, we will assign a color to each\n # point in the mesh [x_min, x_max]*[y_min, y_max].\n if hasattr(clf, \"decision_function\"):\n Z = clf.decision_function(np.column_stack([xx.ravel(), yy.ravel()]))\n else:\n Z = clf.predict_proba(np.column_stack([xx.ravel(), yy.ravel()]))[:, 1]\n\n # put the result into a color plot\n Z = Z.reshape(xx.shape)\n ax.contourf(xx, yy, Z, cmap=cm_piyg, alpha=0.8)\n\n # plot the training points\n ax.scatter(\n X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright, edgecolors=\"k\"\n )\n # and testing points\n ax.scatter(\n X_test[:, 0],\n X_test[:, 1],\n c=y_test,\n cmap=cm_bright,\n edgecolors=\"k\",\n alpha=0.6,\n )\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xticks(())\n ax.set_yticks(())\n\n if ds_cnt == 0:\n ax.set_title(name.replace(\" + \", \"\\n\"))\n ax.text(\n 0.95,\n 0.06,\n (f\"{score:.2f}\").lstrip(\"0\"),\n size=15,\n bbox=dict(boxstyle=\"round\", alpha=0.8, facecolor=\"white\"),\n transform=ax.transAxes,\n horizontalalignment=\"right\",\n )\n\n\nplt.tight_layout()\n\n# Add suptitles above the figure\nplt.subplots_adjust(top=0.90)\nsuptitles = [\n \"Linear classifiers\",\n \"Feature discretization and linear classifiers\",\n \"Non-linear classifiers\",\n]\nfor i, suptitle in zip([1, 3, 5], suptitles):\n ax = axes[0, i]\n ax.text(\n 1.05,\n 1.25,\n suptitle,\n transform=ax.transAxes,\n horizontalalignment=\"center\",\n size=\"x-large\",\n )\nplt.show()"
 ]
 }
 ],

dev/_downloads/be7e9c5a81790b318c3a8028ced647ff/plot_linkage_comparison.ipynb

Lines changed: 2 additions & 2 deletions

@@ -15,7 +15,7 @@
 },
 "outputs": [],
 "source": [
-"import time\nimport warnings\nfrom itertools import cycle, islice\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom sklearn import cluster, datasets\nfrom sklearn.preprocessing import StandardScaler\n\nnp.random.seed(0)"
+"import time\nimport warnings\nfrom itertools import cycle, islice\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom sklearn import cluster, datasets\nfrom sklearn.preprocessing import StandardScaler"
 ]
 },
 {
@@ -33,7 +33,7 @@
 },
 "outputs": [],
 "source": [
-"n_samples = 1500\nnoisy_circles = datasets.make_circles(n_samples=n_samples, factor=0.5, noise=0.05)\nnoisy_moons = datasets.make_moons(n_samples=n_samples, noise=0.05)\nblobs = datasets.make_blobs(n_samples=n_samples, random_state=8)\nno_structure = np.random.rand(n_samples, 2), None\n\n# Anisotropicly distributed data\nrandom_state = 170\nX, y = datasets.make_blobs(n_samples=n_samples, random_state=random_state)\ntransformation = [[0.6, -0.6], [-0.4, 0.8]]\nX_aniso = np.dot(X, transformation)\naniso = (X_aniso, y)\n\n# blobs with varied variances\nvaried = datasets.make_blobs(\n    n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=random_state\n)"
+"n_samples = 1500\nnoisy_circles = datasets.make_circles(\n    n_samples=n_samples, factor=0.5, noise=0.05, random_state=170\n)\nnoisy_moons = datasets.make_moons(n_samples=n_samples, noise=0.05, random_state=170)\nblobs = datasets.make_blobs(n_samples=n_samples, random_state=170)\nrng = np.random.RandomState(170)\nno_structure = rng.rand(n_samples, 2), None\n\n# Anisotropicly distributed data\nX, y = datasets.make_blobs(n_samples=n_samples, random_state=170)\ntransformation = [[0.6, -0.6], [-0.4, 0.8]]\nX_aniso = np.dot(X, transformation)\naniso = (X_aniso, y)\n\n# blobs with varied variances\nvaried = datasets.make_blobs(\n    n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=170\n)"
 ]
 },
 {
