
Commit ff5a144

Pushing the docs to 0.20/ for branch: 0.20.X, commit 8d8939bdc3ed91ef5a84aa73628cb776af0d986d
1 parent 0f4c21f commit ff5a144

File tree

1,262 files changed, +13,569 / -15,791 lines


0.20/.buildinfo

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
 # Sphinx build info version 1
 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: 7ec53ea0763238f487c61d94c6d77d05
+config: 8fb17fab1fef49a19957d9b86d458a0c
 tags: 645f666f9bcd5a90fca523b33c5a78b7
Two binary files changed (2.76 KB and 2.73 KB); binary content not shown.
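
The note inside 0.20/.buildinfo describes the mechanism behind this change: Sphinx records a hash of the build configuration, and when the file is missing or the hash no longer matches, a full rebuild replaces the incremental one. The sketch below only illustrates that idea with standard-library code; it is not Sphinx's actual implementation, and the build_config dictionary and file path are hypothetical.

import hashlib
import json
from pathlib import Path


def config_digest(config):
    # Hash a stable serialization of the configuration values.
    serialized = json.dumps(config, sort_keys=True).encode("utf-8")
    return hashlib.md5(serialized).hexdigest()


def needs_full_rebuild(buildinfo_path, current_config):
    # A missing build-info file or a changed digest means the cached
    # output can no longer be trusted, so everything is rebuilt.
    if not buildinfo_path.exists():
        return True
    return config_digest(current_config) not in buildinfo_path.read_text()


# Hypothetical usage with a made-up configuration dictionary.
build_config = {"project": "scikit-learn", "version": "0.20", "html_theme": "scikit-learn"}
print(needs_full_rebuild(Path("0.20/.buildinfo"), build_config))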

0.20/_downloads/plot_adaboost_hastie_10_2.ipynb

Lines changed: 1 addition & 1 deletion
@@ -26,7 +26,7 @@
 },
 "outputs": [],
 "source": [
- removed:
"print(__doc__)\n\n# Author: Peter Prettenhofer <[email protected]>,\n# Noel Dawe <[email protected]>\n#\n# License: BSD 3 clause\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn import datasets\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import zero_one_loss\nfrom sklearn.ensemble import AdaBoostClassifier\n\n\nn_estimators = 400\n# A learning rate of 1. may not be optimal for both SAMME and SAMME.R\nlearning_rate = 1.\n\nX, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)\n\nX_test, y_test = X[2000:], y[2000:]\nX_train, y_train = X[:2000], y[:2000]\n\ndt_stump = DecisionTreeClassifier(max_depth=1)\ndt_stump.fit(X_train, y_train)\ndt_stump_err = 1.0 - dt_stump.score(X_test, y_test)\n\ndt = DecisionTreeClassifier(max_depth=9)\ndt.fit(X_train, y_train)\ndt_err = 1.0 - dt.score(X_test, y_test)\n\nada_discrete = AdaBoostClassifier(\n base_estimator=dt_stump,\n learning_rate=learning_rate,\n n_estimators=n_estimators,\n algorithm=\"SAMME\")\nada_discrete.fit(X_train, y_train)\n\nada_real = AdaBoostClassifier(\n base_estimator=dt_stump,\n learning_rate=learning_rate,\n n_estimators=n_estimators,\n algorithm=\"SAMME.R\")\nada_real.fit(X_train, y_train)\n\nfig = plt.figure()\nax = fig.add_subplot(111)\n\nax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',\n label='Decision Stump Error')\nax.plot([1, n_estimators], [dt_err] * 2, 'k--',\n label='Decision Tree Error')\n\nada_discrete_err = np.zeros((n_estimators,))\nfor i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):\n ada_discrete_err[i] = zero_one_loss(y_pred, y_test)\n\nada_discrete_err_train = np.zeros((n_estimators,))\nfor i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):\n ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)\n\nada_real_err = np.zeros((n_estimators,))\nfor i, y_pred in enumerate(ada_real.staged_predict(X_test)):\n ada_real_err[i] = zero_one_loss(y_pred, y_test)\n\nada_real_err_train = np.zeros((n_estimators,))\nfor i, y_pred in enumerate(ada_real.staged_predict(X_train)):\n ada_real_err_train[i] = zero_one_loss(y_pred, y_train)\n\nax.plot(np.arange(n_estimators) + 1, ada_discrete_err,\n label='Discrete AdaBoost Test Error',\n color='red')\nax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,\n label='Discrete AdaBoost Train Error',\n color='blue')\nax.plot(np.arange(n_estimators) + 1, ada_real_err,\n label='Real AdaBoost Test Error',\n color='orange')\nax.plot(np.arange(n_estimators) + 1, ada_real_err_train,\n label='Real AdaBoost Train Error',\n color='green')\n\nax.set_ylim((0.0, 0.5))\nax.set_xlabel('n_estimators')\nax.set_ylabel('error rate')\n\nleg = ax.legend(loc='upper right', fancybox=True)\nleg.get_frame().set_alpha(0.7)\n\nplt.show()"
+ added:
"print(__doc__)\n\n# Author: Peter Prettenhofer <[email protected]>,\n# Noel Dawe <[email protected]>\n#\n# License: BSD 3 clause\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn import datasets\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import zero_one_loss\nfrom sklearn.ensemble import AdaBoostClassifier\n\n\nn_estimators = 400\n# A learning rate of 1. may not be optimal for both SAMME and SAMME.R\nlearning_rate = 1.\n\nX, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)\n\nX_test, y_test = X[2000:], y[2000:]\nX_train, y_train = X[:2000], y[:2000]\n\ndt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)\ndt_stump.fit(X_train, y_train)\ndt_stump_err = 1.0 - dt_stump.score(X_test, y_test)\n\ndt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)\ndt.fit(X_train, y_train)\ndt_err = 1.0 - dt.score(X_test, y_test)\n\nada_discrete = AdaBoostClassifier(\n base_estimator=dt_stump,\n learning_rate=learning_rate,\n n_estimators=n_estimators,\n algorithm=\"SAMME\")\nada_discrete.fit(X_train, y_train)\n\nada_real = AdaBoostClassifier(\n base_estimator=dt_stump,\n learning_rate=learning_rate,\n n_estimators=n_estimators,\n algorithm=\"SAMME.R\")\nada_real.fit(X_train, y_train)\n\nfig = plt.figure()\nax = fig.add_subplot(111)\n\nax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',\n label='Decision Stump Error')\nax.plot([1, n_estimators], [dt_err] * 2, 'k--',\n label='Decision Tree Error')\n\nada_discrete_err = np.zeros((n_estimators,))\nfor i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):\n ada_discrete_err[i] = zero_one_loss(y_pred, y_test)\n\nada_discrete_err_train = np.zeros((n_estimators,))\nfor i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):\n ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)\n\nada_real_err = np.zeros((n_estimators,))\nfor i, y_pred in enumerate(ada_real.staged_predict(X_test)):\n ada_real_err[i] = zero_one_loss(y_pred, y_test)\n\nada_real_err_train = np.zeros((n_estimators,))\nfor i, y_pred in enumerate(ada_real.staged_predict(X_train)):\n ada_real_err_train[i] = zero_one_loss(y_pred, y_train)\n\nax.plot(np.arange(n_estimators) + 1, ada_discrete_err,\n label='Discrete AdaBoost Test Error',\n color='red')\nax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,\n label='Discrete AdaBoost Train Error',\n color='blue')\nax.plot(np.arange(n_estimators) + 1, ada_real_err,\n label='Real AdaBoost Test Error',\n color='orange')\nax.plot(np.arange(n_estimators) + 1, ada_real_err_train,\n label='Real AdaBoost Train Error',\n color='green')\n\nax.set_ylim((0.0, 0.5))\nax.set_xlabel('n_estimators')\nax.set_ylabel('error rate')\n\nleg = ax.legend(loc='upper right', fancybox=True)\nleg.get_frame().set_alpha(0.7)\n\nplt.show()"
 ]
 }
 ],
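
The only change to the notebook cell is the addition of min_samples_leaf=1 to both DecisionTreeClassifier calls, mirroring the script below. The heart of the example is the staged evaluation: staged_predict yields predictions after every boosting iteration and zero_one_loss turns each into an error rate, which is what produces the train/test error curves. A condensed sketch of that pattern, reusing the estimators and data from the diff (only the SAMME.R variant is shown, and the 0.20-era base_estimator argument is kept because that is what the example uses):

import numpy as np
from sklearn import datasets
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import zero_one_loss
from sklearn.tree import DecisionTreeClassifier

# Same synthetic ten-dimensional Hastie problem as the example.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, y_train = X[:2000], y[:2000]
X_test, y_test = X[2000:], y[2000:]

stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
ada_real = AdaBoostClassifier(base_estimator=stump, n_estimators=400,
                              learning_rate=1., algorithm="SAMME.R")
ada_real.fit(X_train, y_train)

# One error value per boosting stage; plotting these against the stage
# index gives the "Real AdaBoost Test Error" curve in the example.
test_err = np.array([zero_one_loss(y_test, y_pred)
                     for y_pred in ada_real.staged_predict(X_test)])
print(test_err[0], test_err[-1])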

0.20/_downloads/plot_adaboost_hastie_10_2.py

Lines changed: 2 additions & 2 deletions
@@ -43,11 +43,11 @@
 X_test, y_test = X[2000:], y[2000:]
 X_train, y_train = X[:2000], y[:2000]

-dt_stump = DecisionTreeClassifier(max_depth=1)
+dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
 dt_stump.fit(X_train, y_train)
 dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)

-dt = DecisionTreeClassifier(max_depth=9)
+dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
 dt.fit(X_train, y_train)
 dt_err = 1.0 - dt.score(X_test, y_test)
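
The script change is the same as in the notebook: both trees now pass min_samples_leaf=1 explicitly. Since 1 is the scikit-learn default for that parameter, the edit pins the default rather than altering behaviour. A quick check of that reading (the smaller sample size and the fixed random_state are only there to keep the comparison fast and deterministic; they are not part of the example):

from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier

X, y = datasets.make_hastie_10_2(n_samples=2000, random_state=1)

# min_samples_leaf defaults to 1, so the explicit setting should produce
# the same stump: same root split feature and same training score.
implicit = DecisionTreeClassifier(max_depth=1, random_state=0).fit(X, y)
explicit = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1,
                                  random_state=0).fit(X, y)

print(implicit.tree_.feature[0] == explicit.tree_.feature[0])
print(implicit.score(X, y) == explicit.score(X, y))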

0.20/_downloads/plot_classification_probability.ipynb

Lines changed: 2 additions & 2 deletions
@@ -15,7 +15,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
- removed:
"\n# Plot classification probability\n\n\nPlot the classification probability for different classifiers. We use a 3\nclass dataset, and we classify it with a Support Vector classifier, L1\nand L2 penalized logistic regression with either a One-Vs-Rest or multinomial\nsetting, and Gaussian process classification.\n\nThe logistic regression is not a multiclass classifier out of the box. As\na result it can identify only the first class.\n\n"
+ added:
"\n# Plot classification probability\n\n\nPlot the classification probability for different classifiers. We use a 3 class\ndataset, and we classify it with a Support Vector classifier, L1 and L2\npenalized logistic regression with either a One-Vs-Rest or multinomial setting,\nand Gaussian process classification.\n\nLinear SVC is not a probabilistic classifier by default but it has a built-in\ncalibration option enabled in this example (`probability=True`).\n\nThe logistic regression with One-Vs-Rest is not a multiclass classifier out of\nthe box. As a result it has more trouble in separating class 2 and 3 than the\nother estimators.\n\n"
 ]
 },
 {
@@ -26,7 +26,7 @@
 },
 "outputs": [],
 "source": [
- removed:
"print(__doc__)\n\n# Author: Alexandre Gramfort <[email protected]>\n# License: BSD 3 clause\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.gaussian_process import GaussianProcessClassifier\nfrom sklearn.gaussian_process.kernels import RBF\nfrom sklearn import datasets\n\niris = datasets.load_iris()\nX = iris.data[:, 0:2] # we only take the first two features for visualization\ny = iris.target\n\nn_features = X.shape[1]\n\nC = 1.0\nkernel = 1.0 * RBF([1.0, 1.0]) # for GPC\n\n# Create different classifiers. The logistic regression cannot do\n# multiclass out of the box.\nclassifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),\n 'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),\n 'Linear SVC': SVC(kernel='linear', C=C, probability=True,\n random_state=0),\n 'L2 logistic (Multinomial)': LogisticRegression(\n C=C, solver='lbfgs', multi_class='multinomial'),\n 'GPC': GaussianProcessClassifier(kernel)\n }\n\nn_classifiers = len(classifiers)\n\nplt.figure(figsize=(3 * 2, n_classifiers * 2))\nplt.subplots_adjust(bottom=.2, top=.95)\n\nxx = np.linspace(3, 9, 100)\nyy = np.linspace(1, 5, 100).T\nxx, yy = np.meshgrid(xx, yy)\nXfull = np.c_[xx.ravel(), yy.ravel()]\n\nfor index, (name, classifier) in enumerate(classifiers.items()):\n classifier.fit(X, y)\n\n y_pred = classifier.predict(X)\n classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100\n print(\"classif_rate for %s : %f \" % (name, classif_rate))\n\n # View probabilities=\n probas = classifier.predict_proba(Xfull)\n n_classes = np.unique(y_pred).size\n for k in range(n_classes):\n plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)\n plt.title(\"Class %d\" % k)\n if k == 0:\n plt.ylabel(name)\n imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),\n extent=(3, 9, 1, 5), origin='lower')\n plt.xticks(())\n plt.yticks(())\n idx = (y_pred == k)\n if idx.any():\n plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='w', edgecolor='k')\n\nax = plt.axes([0.15, 0.04, 0.7, 0.05])\nplt.title(\"Probability\")\nplt.colorbar(imshow_handle, cax=ax, orientation='horizontal')\n\nplt.show()"
+ added:
"print(__doc__)\n\n# Author: Alexandre Gramfort <[email protected]>\n# License: BSD 3 clause\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.gaussian_process import GaussianProcessClassifier\nfrom sklearn.gaussian_process.kernels import RBF\nfrom sklearn import datasets\n\niris = datasets.load_iris()\nX = iris.data[:, 0:2] # we only take the first two features for visualization\ny = iris.target\n\nn_features = X.shape[1]\n\nC = 10\nkernel = 1.0 * RBF([1.0, 1.0]) # for GPC\n\n# Create different classifiers.\nclassifiers = {\n 'L1 logistic': LogisticRegression(C=C, penalty='l1',\n solver='saga',\n multi_class='multinomial',\n max_iter=10000),\n 'L2 logistic (Multinomial)': LogisticRegression(C=C, penalty='l2',\n solver='saga',\n multi_class='multinomial',\n max_iter=10000),\n 'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2',\n solver='saga',\n multi_class='ovr',\n max_iter=10000),\n 'Linear SVC': SVC(kernel='linear', C=C, probability=True,\n random_state=0),\n 'GPC': GaussianProcessClassifier(kernel)\n}\n\nn_classifiers = len(classifiers)\n\nplt.figure(figsize=(3 * 2, n_classifiers * 2))\nplt.subplots_adjust(bottom=.2, top=.95)\n\nxx = np.linspace(3, 9, 100)\nyy = np.linspace(1, 5, 100).T\nxx, yy = np.meshgrid(xx, yy)\nXfull = np.c_[xx.ravel(), yy.ravel()]\n\nfor index, (name, classifier) in enumerate(classifiers.items()):\n classifier.fit(X, y)\n\n y_pred = classifier.predict(X)\n accuracy = accuracy_score(y, y_pred)\n print(\"Accuracy (train) for %s: %0.1f%% \" % (name, accuracy * 100))\n\n # View probabilities:\n probas = classifier.predict_proba(Xfull)\n n_classes = np.unique(y_pred).size\n for k in range(n_classes):\n plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)\n plt.title(\"Class %d\" % k)\n if k == 0:\n plt.ylabel(name)\n imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),\n extent=(3, 9, 1, 5), origin='lower')\n plt.xticks(())\n plt.yticks(())\n idx = (y_pred == k)\n if idx.any():\n plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='w', edgecolor='k')\n\nax = plt.axes([0.15, 0.04, 0.7, 0.05])\nplt.title(\"Probability\")\nplt.colorbar(imshow_handle, cax=ax, orientation='horizontal')\n\nplt.show()"
 ]
 }
 ],
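
The notebook's markdown and code cells are updated together, mirroring the script diff shown next: the text now explains the Linear SVC entry, and the code switches to C=10, the saga solver, and accuracy_score. The point about Linear SVC is that SVC exposes predict_proba only when probability=True, which fits an internal cross-validated (Platt-style) calibration on top of the decision function. A minimal sketch on the same two iris features (parameter values taken from the example, shown purely as an illustration):

from sklearn import datasets
from sklearn.svm import SVC

iris = datasets.load_iris()
X, y = iris.data[:, 0:2], iris.target  # first two features, three classes

# probability=True fits a cross-validated calibration on top of the SVC
# decision function; without it, predict_proba is not available.
clf = SVC(kernel='linear', C=10, probability=True, random_state=0)
clf.fit(X, y)

# One row per sample, one column per class, each row summing to 1.
print(clf.predict_proba(X[:3]).round(3))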

0.20/_downloads/plot_classification_probability.py

Lines changed: 33 additions & 20 deletions
@@ -3,13 +3,17 @@
 Plot classification probability
 ===============================

-Plot the classification probability for different classifiers. We use a 3
-class dataset, and we classify it with a Support Vector classifier, L1
-and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
-setting, and Gaussian process classification.
+Plot the classification probability for different classifiers. We use a 3 class
+dataset, and we classify it with a Support Vector classifier, L1 and L2
+penalized logistic regression with either a One-Vs-Rest or multinomial setting,
+and Gaussian process classification.

-The logistic regression is not a multiclass classifier out of the box. As
-a result it can identify only the first class.
+Linear SVC is not a probabilistic classifier by default but it has a built-in
+calibration option enabled in this example (`probability=True`).
+
+The logistic regression with One-Vs-Rest is not a multiclass classifier out of
+the box. As a result it has more trouble in separating class 2 and 3 than the
+other estimators.
 """
 print(__doc__)

@@ -19,6 +23,7 @@ class dataset, and we classify it with a Support Vector classifier, L1
 import matplotlib.pyplot as plt
 import numpy as np

+from sklearn.metrics import accuracy_score
 from sklearn.linear_model import LogisticRegression
 from sklearn.svm import SVC
 from sklearn.gaussian_process import GaussianProcessClassifier
@@ -31,19 +36,27 @@ class dataset, and we classify it with a Support Vector classifier, L1

 n_features = X.shape[1]

-C = 1.0
+C = 10
 kernel = 1.0 * RBF([1.0, 1.0])  # for GPC

-# Create different classifiers. The logistic regression cannot do
-# multiclass out of the box.
-classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
-               'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
-               'Linear SVC': SVC(kernel='linear', C=C, probability=True,
-                                 random_state=0),
-               'L2 logistic (Multinomial)': LogisticRegression(
-                   C=C, solver='lbfgs', multi_class='multinomial'),
-               'GPC': GaussianProcessClassifier(kernel)
-               }
+# Create different classifiers.
+classifiers = {
+    'L1 logistic': LogisticRegression(C=C, penalty='l1',
+                                      solver='saga',
+                                      multi_class='multinomial',
+                                      max_iter=10000),
+    'L2 logistic (Multinomial)': LogisticRegression(C=C, penalty='l2',
+                                                    solver='saga',
+                                                    multi_class='multinomial',
+                                                    max_iter=10000),
+    'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2',
+                                            solver='saga',
+                                            multi_class='ovr',
+                                            max_iter=10000),
+    'Linear SVC': SVC(kernel='linear', C=C, probability=True,
+                      random_state=0),
+    'GPC': GaussianProcessClassifier(kernel)
+}

 n_classifiers = len(classifiers)

@@ -59,10 +72,10 @@ class dataset, and we classify it with a Support Vector classifier, L1
     classifier.fit(X, y)

     y_pred = classifier.predict(X)
-    classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
-    print("classif_rate for %s : %f " % (name, classif_rate))
+    accuracy = accuracy_score(y, y_pred)
+    print("Accuracy (train) for %s: %0.1f%% " % (name, accuracy * 100))

-    # View probabilities=
+    # View probabilities:
     probas = classifier.predict_proba(Xfull)
     n_classes = np.unique(y_pred).size
     for k in range(n_classes):
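
The switch to the saga solver is what lets a single configuration cover every logistic-regression variant here: unlike lbfgs, saga supports both the L1 and L2 penalties, and multi_class selects multinomial versus one-vs-rest fitting; training accuracy is then reported through accuracy_score rather than a hand-rolled mean. A condensed sketch of that evaluation loop, keeping only the logistic-regression entries and the parameter values from the diff (0.20-era API):

from sklearn import datasets
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

iris = datasets.load_iris()
X, y = iris.data[:, 0:2], iris.target  # two features, three classes

C = 10
classifiers = {
    'L1 logistic': LogisticRegression(C=C, penalty='l1', solver='saga',
                                      multi_class='multinomial',
                                      max_iter=10000),
    'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2', solver='saga',
                                            multi_class='ovr',
                                            max_iter=10000),
}

for name, clf in classifiers.items():
    clf.fit(X, y)
    # Accuracy on the training data, as printed in the example.
    accuracy = accuracy_score(y, clf.predict(X))
    print("Accuracy (train) for %s: %0.1f%%" % (name, accuracy * 100))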
