
Commit 0f2488b

Pushing the docs to 0.24/ for branch: 0.24.X, commit c6512929fbee7232949c0f18cfb28cf3b5959df9
1 parent 52a4d8c commit 0f2488b


2,238 files changed: +6025 / -5989 lines changed


0.24/.buildinfo

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
 # Sphinx build info version 1
 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: e8d5f9a59377c09b45e80124a56b44a8
+config: fff5355dfe346dfbc28352cd7dcc4660
 tags: 645f666f9bcd5a90fca523b33c5a78b7

0.24/_downloads/1a55101a8e49ab5d3213dadb31332045/plot_digits_classification.py

Lines changed: 2 additions & 1 deletion
@@ -78,8 +78,9 @@
 # digit value in the title.
 
 _, axes = plt.subplots(nrows=1, ncols=4, figsize=(10, 3))
-for ax, image, prediction in zip(axes, digits.images, predicted):
+for ax, image, prediction in zip(axes, X_test, predicted):
     ax.set_axis_off()
+    image = image.reshape(8, 8)
     ax.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
     ax.set_title(f'Prediction: {prediction}')
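
The fix iterates over X_test rather than digits.images: the example flattens each 8x8 image into a 64-element row before splitting, so the predicted samples no longer carry an image shape and must be reshaped before display. A minimal sketch of that round trip, with split parameters following the example:

from sklearn import datasets
from sklearn.model_selection import train_test_split

digits = datasets.load_digits()
n_samples = len(digits.images)
# flatten each 8x8 image into a 64-dimensional feature vector
data = digits.images.reshape((n_samples, -1))  # (n_samples, 64)
X_train, X_test, y_train, y_test = train_test_split(
    data, digits.target, test_size=0.5, shuffle=False)

# rows of X_test are flat 64-vectors; reshape back to 8x8 for imshow
image = X_test[0].reshape(8, 8)
print(image.shape)  # (8, 8)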

0.24/_downloads/521b554adefca348463adbbe047d7e99/plot_linear_model_coefficient_interpretation.py

Lines changed: 3 additions & 3 deletions
@@ -325,7 +325,7 @@
     columns=feature_names
 )
 plt.figure(figsize=(9, 7))
-sns.swarmplot(data=coefs, orient='h', color='k', alpha=0.5)
+sns.stripplot(data=coefs, orient='h', color='k', alpha=0.5)
 sns.boxplot(data=coefs, orient='h', color='cyan', saturation=0.5)
 plt.axvline(x=0, color='.5')
 plt.xlabel('Coefficient importance')
@@ -376,7 +376,7 @@
     columns=feature_names[:-1]
 )
 plt.figure(figsize=(9, 7))
-sns.swarmplot(data=coefs, orient='h', color='k', alpha=0.5)
+sns.stripplot(data=coefs, orient='h', color='k', alpha=0.5)
 sns.boxplot(data=coefs, orient='h', color='cyan', saturation=0.5)
 plt.axvline(x=0, color='.5')
 plt.title('Coefficient importance and its variability')
@@ -469,7 +469,7 @@
     columns=feature_names
 )
 plt.figure(figsize=(9, 7))
-sns.swarmplot(data=coefs, orient='h', color='k', alpha=0.5)
+sns.stripplot(data=coefs, orient='h', color='k', alpha=0.5)
 sns.boxplot(data=coefs, orient='h', color='cyan', saturation=0.5)
 plt.axvline(x=0, color='.5')
 plt.title('Coefficient variability')
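
All three hunks make the same substitution: sns.stripplot in place of sns.swarmplot. The commit does not state a reason, but a plausible one is that swarmplot computes a collision-free layout that becomes slow and cluttered with many cross-validation estimates, whereas stripplot simply jitters overlapping points. A self-contained sketch of the stripplot + boxplot overlay on synthetic data (the feature names and values here are hypothetical):

import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

rng = np.random.RandomState(0)
# 25 hypothetical cross-validation estimates for 3 features
coefs = pd.DataFrame(rng.normal(scale=0.3, size=(25, 3)) + [0.5, -1.0, 0.0],
                     columns=['feat_a', 'feat_b', 'feat_c'])

plt.figure(figsize=(9, 7))
# stripplot jitters overlapping points instead of computing a
# collision-free "swarm" layout, so it stays fast for many points
sns.stripplot(data=coefs, orient='h', color='k', alpha=0.5)
sns.boxplot(data=coefs, orient='h', color='cyan', saturation=0.5)
plt.axvline(x=0, color='.5')
plt.xlabel('Coefficient importance')
plt.show()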

0.24/_downloads/57163227aeb4c19ca4c69b87a8d1949c/plot_learning_curve.py

Lines changed: 1 addition & 1 deletion
@@ -77,7 +77,7 @@ def plot_learning_curve(estimator, title, X, y, axes=None, ylim=None, cv=None,
         ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
         for more details.
 
-    train_sizes : array-like of shape (n_ticks,), dtype={int, float}
+    train_sizes : array-like of shape (n_ticks,)
         Relative or absolute numbers of training examples that will be used to
         generate the learning curve. If the ``dtype`` is float, it is regarded
         as a fraction of the maximum size of the training set (that is
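
The dtype annotation is dropped from the docstring, but the behavior it described still holds for sklearn.model_selection.learning_curve: float entries in train_sizes are fractions of the maximal training-set size, integer entries are absolute sample counts. A minimal sketch of both forms (printed values are indicative and depend on the CV split):

import numpy as np
from sklearn.datasets import load_digits
from sklearn.model_selection import learning_curve
from sklearn.naive_bayes import GaussianNB

X, y = load_digits(return_X_y=True)

# float entries: fractions in (0, 1] of the maximum training-set size
sizes_frac, _, _ = learning_curve(
    GaussianNB(), X, y, cv=5, train_sizes=np.linspace(0.1, 1.0, 5))

# int entries: absolute numbers of training samples
sizes_abs, _, _ = learning_curve(
    GaussianNB(), X, y, cv=5, train_sizes=[100, 500, 1000])

print(sizes_frac)  # e.g. [ 143  467  790 1113 1437] for 1797 samples, 5-fold CV
print(sizes_abs)   # [ 100  500 1000]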

0.24/_downloads/ca0bfe2435d9b3fffe21c713e63d3a6f/plot_learning_curve.ipynb

Lines changed: 1 addition & 1 deletion
@@ -26,7 +26,7 @@
 },
 "outputs": [],
 "source": [
-"print(__doc__)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.svm import SVC\nfrom sklearn.datasets import load_digits\nfrom sklearn.model_selection import learning_curve\nfrom sklearn.model_selection import ShuffleSplit\n\n\ndef plot_learning_curve(estimator, title, X, y, axes=None, ylim=None, cv=None,\n n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)):\n \"\"\"\n Generate 3 plots: the test and training learning curve, the training\n samples vs fit times curve, the fit times vs score curve.\n\n Parameters\n ----------\n estimator : estimator instance\n An estimator instance implementing `fit` and `predict` methods which\n will be cloned for each validation.\n\n title : str\n Title for the chart.\n\n X : array-like of shape (n_samples, n_features)\n Training vector, where ``n_samples`` is the number of samples and\n ``n_features`` is the number of features.\n\n y : array-like of shape (n_samples) or (n_samples, n_features)\n Target relative to ``X`` for classification or regression;\n None for unsupervised learning.\n\n axes : array-like of shape (3,), default=None\n Axes to use for plotting the curves.\n\n ylim : tuple of shape (2,), default=None\n Defines minimum and maximum y-values plotted, e.g. (ymin, ymax).\n\n cv : int, cross-validation generator or an iterable, default=None\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 5-fold cross-validation,\n - integer, to specify the number of folds.\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if ``y`` is binary or multiclass,\n :class:`StratifiedKFold` used. If the estimator is not a classifier\n or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.\n\n Refer :ref:`User Guide <cross_validation>` for the various\n cross-validators that can be used here.\n\n n_jobs : int or None, default=None\n Number of jobs to run in parallel.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n train_sizes : array-like of shape (n_ticks,), dtype={int, float}\n Relative or absolute numbers of training examples that will be used to\n generate the learning curve. If the ``dtype`` is float, it is regarded\n as a fraction of the maximum size of the training set (that is\n determined by the selected validation method), i.e. it has to be within\n (0, 1]. Otherwise it is interpreted as absolute sizes of the training\n sets. Note that for classification the number of samples usually have\n to be big enough to contain at least one sample from each class.\n (default: np.linspace(0.1, 1.0, 5))\n \"\"\"\n if axes is None:\n _, axes = plt.subplots(1, 3, figsize=(20, 5))\n\n axes[0].set_title(title)\n if ylim is not None:\n axes[0].set_ylim(*ylim)\n axes[0].set_xlabel(\"Training examples\")\n axes[0].set_ylabel(\"Score\")\n\n train_sizes, train_scores, test_scores, fit_times, _ = \\\n learning_curve(estimator, X, y, cv=cv, n_jobs=n_jobs,\n train_sizes=train_sizes,\n return_times=True)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n fit_times_mean = np.mean(fit_times, axis=1)\n fit_times_std = np.std(fit_times, axis=1)\n\n # Plot learning curve\n axes[0].grid()\n axes[0].fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n axes[0].fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1,\n color=\"g\")\n axes[0].plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n axes[0].plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n axes[0].legend(loc=\"best\")\n\n # Plot n_samples vs fit_times\n axes[1].grid()\n axes[1].plot(train_sizes, fit_times_mean, 'o-')\n axes[1].fill_between(train_sizes, fit_times_mean - fit_times_std,\n fit_times_mean + fit_times_std, alpha=0.1)\n axes[1].set_xlabel(\"Training examples\")\n axes[1].set_ylabel(\"fit_times\")\n axes[1].set_title(\"Scalability of the model\")\n\n # Plot fit_time vs score\n axes[2].grid()\n axes[2].plot(fit_times_mean, test_scores_mean, 'o-')\n axes[2].fill_between(fit_times_mean, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1)\n axes[2].set_xlabel(\"fit_times\")\n axes[2].set_ylabel(\"Score\")\n axes[2].set_title(\"Performance of the model\")\n\n return plt\n\n\nfig, axes = plt.subplots(3, 2, figsize=(10, 15))\n\nX, y = load_digits(return_X_y=True)\n\ntitle = \"Learning Curves (Naive Bayes)\"\n# Cross validation with 100 iterations to get smoother mean test and train\n# score curves, each time with 20% data randomly selected as a validation set.\ncv = ShuffleSplit(n_splits=100, test_size=0.2, random_state=0)\n\nestimator = GaussianNB()\nplot_learning_curve(estimator, title, X, y, axes=axes[:, 0], ylim=(0.7, 1.01),\n cv=cv, n_jobs=4)\n\ntitle = r\"Learning Curves (SVM, RBF kernel, $\\gamma=0.001$)\"\n# SVC is more expensive so we do a lower number of CV iterations:\ncv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)\nestimator = SVC(gamma=0.001)\nplot_learning_curve(estimator, title, X, y, axes=axes[:, 1], ylim=(0.7, 1.01),\n cv=cv, n_jobs=4)\n\nplt.show()"
+"print(__doc__)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.svm import SVC\nfrom sklearn.datasets import load_digits\nfrom sklearn.model_selection import learning_curve\nfrom sklearn.model_selection import ShuffleSplit\n\n\ndef plot_learning_curve(estimator, title, X, y, axes=None, ylim=None, cv=None,\n n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)):\n \"\"\"\n Generate 3 plots: the test and training learning curve, the training\n samples vs fit times curve, the fit times vs score curve.\n\n Parameters\n ----------\n estimator : estimator instance\n An estimator instance implementing `fit` and `predict` methods which\n will be cloned for each validation.\n\n title : str\n Title for the chart.\n\n X : array-like of shape (n_samples, n_features)\n Training vector, where ``n_samples`` is the number of samples and\n ``n_features`` is the number of features.\n\n y : array-like of shape (n_samples) or (n_samples, n_features)\n Target relative to ``X`` for classification or regression;\n None for unsupervised learning.\n\n axes : array-like of shape (3,), default=None\n Axes to use for plotting the curves.\n\n ylim : tuple of shape (2,), default=None\n Defines minimum and maximum y-values plotted, e.g. (ymin, ymax).\n\n cv : int, cross-validation generator or an iterable, default=None\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 5-fold cross-validation,\n - integer, to specify the number of folds.\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if ``y`` is binary or multiclass,\n :class:`StratifiedKFold` used. If the estimator is not a classifier\n or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.\n\n Refer :ref:`User Guide <cross_validation>` for the various\n cross-validators that can be used here.\n\n n_jobs : int or None, default=None\n Number of jobs to run in parallel.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n train_sizes : array-like of shape (n_ticks,)\n Relative or absolute numbers of training examples that will be used to\n generate the learning curve. If the ``dtype`` is float, it is regarded\n as a fraction of the maximum size of the training set (that is\n determined by the selected validation method), i.e. it has to be within\n (0, 1]. Otherwise it is interpreted as absolute sizes of the training\n sets. Note that for classification the number of samples usually have\n to be big enough to contain at least one sample from each class.\n (default: np.linspace(0.1, 1.0, 5))\n \"\"\"\n if axes is None:\n _, axes = plt.subplots(1, 3, figsize=(20, 5))\n\n axes[0].set_title(title)\n if ylim is not None:\n axes[0].set_ylim(*ylim)\n axes[0].set_xlabel(\"Training examples\")\n axes[0].set_ylabel(\"Score\")\n\n train_sizes, train_scores, test_scores, fit_times, _ = \\\n learning_curve(estimator, X, y, cv=cv, n_jobs=n_jobs,\n train_sizes=train_sizes,\n return_times=True)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n fit_times_mean = np.mean(fit_times, axis=1)\n fit_times_std = np.std(fit_times, axis=1)\n\n # Plot learning curve\n axes[0].grid()\n axes[0].fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n axes[0].fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1,\n color=\"g\")\n axes[0].plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n axes[0].plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n axes[0].legend(loc=\"best\")\n\n # Plot n_samples vs fit_times\n axes[1].grid()\n axes[1].plot(train_sizes, fit_times_mean, 'o-')\n axes[1].fill_between(train_sizes, fit_times_mean - fit_times_std,\n fit_times_mean + fit_times_std, alpha=0.1)\n axes[1].set_xlabel(\"Training examples\")\n axes[1].set_ylabel(\"fit_times\")\n axes[1].set_title(\"Scalability of the model\")\n\n # Plot fit_time vs score\n axes[2].grid()\n axes[2].plot(fit_times_mean, test_scores_mean, 'o-')\n axes[2].fill_between(fit_times_mean, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1)\n axes[2].set_xlabel(\"fit_times\")\n axes[2].set_ylabel(\"Score\")\n axes[2].set_title(\"Performance of the model\")\n\n return plt\n\n\nfig, axes = plt.subplots(3, 2, figsize=(10, 15))\n\nX, y = load_digits(return_X_y=True)\n\ntitle = \"Learning Curves (Naive Bayes)\"\n# Cross validation with 100 iterations to get smoother mean test and train\n# score curves, each time with 20% data randomly selected as a validation set.\ncv = ShuffleSplit(n_splits=100, test_size=0.2, random_state=0)\n\nestimator = GaussianNB()\nplot_learning_curve(estimator, title, X, y, axes=axes[:, 0], ylim=(0.7, 1.01),\n cv=cv, n_jobs=4)\n\ntitle = r\"Learning Curves (SVM, RBF kernel, $\\gamma=0.001$)\"\n# SVC is more expensive so we do a lower number of CV iterations:\ncv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)\nestimator = SVC(gamma=0.001)\nplot_learning_curve(estimator, title, X, y, axes=axes[:, 1], ylim=(0.7, 1.01),\n cv=cv, n_jobs=4)\n\nplt.show()"
 ]
 }
 ],

0.24/_downloads/cf0f90f46eb559facf7f63f124f61e04/plot_linear_model_coefficient_interpretation.ipynb

Lines changed: 3 additions & 3 deletions
@@ -314,7 +314,7 @@
 },
 "outputs": [],
 "source": [
-"from sklearn.model_selection import cross_validate\nfrom sklearn.model_selection import RepeatedKFold\n\ncv_model = cross_validate(\n model, X, y, cv=RepeatedKFold(n_splits=5, n_repeats=5),\n return_estimator=True, n_jobs=-1\n)\ncoefs = pd.DataFrame(\n [est.named_steps['transformedtargetregressor'].regressor_.coef_ *\n X_train_preprocessed.std(axis=0)\n for est in cv_model['estimator']],\n columns=feature_names\n)\nplt.figure(figsize=(9, 7))\nsns.swarmplot(data=coefs, orient='h', color='k', alpha=0.5)\nsns.boxplot(data=coefs, orient='h', color='cyan', saturation=0.5)\nplt.axvline(x=0, color='.5')\nplt.xlabel('Coefficient importance')\nplt.title('Coefficient importance and its variability')\nplt.subplots_adjust(left=.3)"
+"from sklearn.model_selection import cross_validate\nfrom sklearn.model_selection import RepeatedKFold\n\ncv_model = cross_validate(\n model, X, y, cv=RepeatedKFold(n_splits=5, n_repeats=5),\n return_estimator=True, n_jobs=-1\n)\ncoefs = pd.DataFrame(\n [est.named_steps['transformedtargetregressor'].regressor_.coef_ *\n X_train_preprocessed.std(axis=0)\n for est in cv_model['estimator']],\n columns=feature_names\n)\nplt.figure(figsize=(9, 7))\nsns.stripplot(data=coefs, orient='h', color='k', alpha=0.5)\nsns.boxplot(data=coefs, orient='h', color='cyan', saturation=0.5)\nplt.axvline(x=0, color='.5')\nplt.xlabel('Coefficient importance')\nplt.title('Coefficient importance and its variability')\nplt.subplots_adjust(left=.3)"
 ]
 },
 {
@@ -350,7 +350,7 @@
 },
 "outputs": [],
 "source": [
-"column_to_drop = ['AGE']\n\ncv_model = cross_validate(\n model, X.drop(columns=column_to_drop), y,\n cv=RepeatedKFold(n_splits=5, n_repeats=5),\n return_estimator=True, n_jobs=-1\n)\ncoefs = pd.DataFrame(\n [est.named_steps['transformedtargetregressor'].regressor_.coef_ *\n X_train_preprocessed.drop(columns=column_to_drop).std(axis=0)\n for est in cv_model['estimator']],\n columns=feature_names[:-1]\n)\nplt.figure(figsize=(9, 7))\nsns.swarmplot(data=coefs, orient='h', color='k', alpha=0.5)\nsns.boxplot(data=coefs, orient='h', color='cyan', saturation=0.5)\nplt.axvline(x=0, color='.5')\nplt.title('Coefficient importance and its variability')\nplt.xlabel('Coefficient importance')\nplt.subplots_adjust(left=.3)"
+"column_to_drop = ['AGE']\n\ncv_model = cross_validate(\n model, X.drop(columns=column_to_drop), y,\n cv=RepeatedKFold(n_splits=5, n_repeats=5),\n return_estimator=True, n_jobs=-1\n)\ncoefs = pd.DataFrame(\n [est.named_steps['transformedtargetregressor'].regressor_.coef_ *\n X_train_preprocessed.drop(columns=column_to_drop).std(axis=0)\n for est in cv_model['estimator']],\n columns=feature_names[:-1]\n)\nplt.figure(figsize=(9, 7))\nsns.stripplot(data=coefs, orient='h', color='k', alpha=0.5)\nsns.boxplot(data=coefs, orient='h', color='cyan', saturation=0.5)\nplt.axvline(x=0, color='.5')\nplt.title('Coefficient importance and its variability')\nplt.xlabel('Coefficient importance')\nplt.subplots_adjust(left=.3)"
 ]
 },
 {
@@ -440,7 +440,7 @@
 },
 "outputs": [],
 "source": [
-"cv_model = cross_validate(\n model, X, y, cv=RepeatedKFold(n_splits=5, n_repeats=5),\n return_estimator=True, n_jobs=-1\n)\ncoefs = pd.DataFrame(\n [est.named_steps['transformedtargetregressor'].regressor_.coef_\n for est in cv_model['estimator']],\n columns=feature_names\n)\nplt.figure(figsize=(9, 7))\nsns.swarmplot(data=coefs, orient='h', color='k', alpha=0.5)\nsns.boxplot(data=coefs, orient='h', color='cyan', saturation=0.5)\nplt.axvline(x=0, color='.5')\nplt.title('Coefficient variability')\nplt.subplots_adjust(left=.3)"
+"cv_model = cross_validate(\n model, X, y, cv=RepeatedKFold(n_splits=5, n_repeats=5),\n return_estimator=True, n_jobs=-1\n)\ncoefs = pd.DataFrame(\n [est.named_steps['transformedtargetregressor'].regressor_.coef_\n for est in cv_model['estimator']],\n columns=feature_names\n)\nplt.figure(figsize=(9, 7))\nsns.stripplot(data=coefs, orient='h', color='k', alpha=0.5)\nsns.boxplot(data=coefs, orient='h', color='cyan', saturation=0.5)\nplt.axvline(x=0, color='.5')\nplt.title('Coefficient variability')\nplt.subplots_adjust(left=.3)"
 ]
 },
 {

0.24/_downloads/eb87d6211b2c0a7c2dc460a9e28b1f6a/plot_digits_classification.ipynb

Lines changed: 1 addition & 1 deletion
@@ -80,7 +80,7 @@
 },
 "outputs": [],
 "source": [
-"_, axes = plt.subplots(nrows=1, ncols=4, figsize=(10, 3))\nfor ax, image, prediction in zip(axes, digits.images, predicted):\n ax.set_axis_off()\n ax.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')\n ax.set_title(f'Prediction: {prediction}')"
+"_, axes = plt.subplots(nrows=1, ncols=4, figsize=(10, 3))\nfor ax, image, prediction in zip(axes, X_test, predicted):\n ax.set_axis_off()\n image = image.reshape(8, 8)\n ax.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')\n ax.set_title(f'Prediction: {prediction}')"
 ]
 },
 {
