Skip to content

Commit cca7e2d

Browse files
committed
Pushing the docs to dev/ for branch: master, commit 923b13ceda1d1d26a1013d0e326734a1dc58bd46
1 parent 28f6a15 commit cca7e2d

File tree

726 files changed

+3417
-2269
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

726 files changed

+3417
-2269
lines changed

dev/.buildinfo

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
# Sphinx build info version 1
22
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
3-
config: 26d4fa1243e0e76d5cd0208238c65303
3+
config: 39d2e4864eb47d9365787dcc7c68a004
44
tags: 645f666f9bcd5a90fca523b33c5a78b7
Binary file not shown.

dev/_downloads/51a82a09a4aa0f703f69fb5d4f15104f/plot_partial_dependence_visualization_api.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -116,7 +116,7 @@
116116
},
117117
"outputs": [],
118118
"source": [
119-
"# Sets this image as the thumbnail for sphinx gallery\n# sphinx_gallery_thumbnail_number = 4\nfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 6))\ntree_disp.plot(ax=[ax1, ax2], line_kw={\"label\": \"Decision Tree\"})\nmlp_disp.plot(ax=[ax1, ax2], line_kw={\"label\": \"Multi-layer Perceptron\",\n \"c\": \"red\"})\nax1.legend()\nax2.legend()"
119+
"# Sets this image as the thumbnail for sphinx gallery\nfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 6))\ntree_disp.plot(ax=[ax1, ax2], line_kw={\"label\": \"Decision Tree\"})\nmlp_disp.plot(ax=[ax1, ax2], line_kw={\"label\": \"Multi-layer Perceptron\",\n \"c\": \"red\"})\nax1.legend()\nax2.legend()"
120120
]
121121
},
122122
{
Lines changed: 92 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,92 @@
1+
"""
2+
===================================
3+
Visualizations with Display Objects
4+
===================================
5+
6+
.. currentmodule:: sklearn.metrics
7+
8+
In this example, we will construct display objects,
9+
:class:`ConfusionMatrixDisplay`, :class:`RocCurveDisplay`, and
10+
:class:`PrecisionRecallDisplay` directly from their respective metrics. This
11+
is an alternative to using their corresponding plot functions when
12+
a model's predictions are already computed or expensive to compute. Note that
13+
this is advanced usage, and in general we recommend using their respective
14+
plot functions.
15+
"""
16+
print(__doc__)
17+
18+
##############################################################################
19+
# Load Data and train model
20+
# -------------------------
21+
# For this example, we load a blood transfusion service center data set from
22+
# `OpenML <https://www.openml.org/d/1464>`_. This is a binary classification
23+
# problem where the target is whether an individual donated blood. Then the
24+
# data is split into a train and test dataset and a logistic regression is
25+
# fitted with the train dataset.
26+
from sklearn.datasets import fetch_openml
27+
from sklearn.preprocessing import StandardScaler
28+
from sklearn.pipeline import make_pipeline
29+
from sklearn.linear_model import LogisticRegression
30+
from sklearn.model_selection import train_test_split
31+
32+
X, y = fetch_openml(data_id=1464, return_X_y=True)
33+
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y)
34+
35+
clf = make_pipeline(StandardScaler(), LogisticRegression(random_state=0))
36+
clf.fit(X_train, y_train)
37+
38+
##############################################################################
39+
# Create :class:`ConfusionMatrixDisplay`
40+
##############################################################################
41+
# With the fitted model, we compute the predictions of the model on the test
42+
# dataset. These predictions are used to compute the confusion matrix which
43+
# is plotted with the :class:`ConfusionMatrixDisplay`
44+
from sklearn.metrics import confusion_matrix
45+
from sklearn.metrics import ConfusionMatrixDisplay
46+
47+
y_pred = clf.predict(X_test)
48+
cm = confusion_matrix(y_test, y_pred)
49+
50+
cm_display = ConfusionMatrixDisplay(cm).plot()
51+
52+
53+
##############################################################################
54+
# Create :class:`RocCurveDisplay`
55+
##############################################################################
56+
# The roc curve requires either the probabilities or the non-thresholded
57+
# decision values from the estimator. Since the logistic regression provides
58+
# a decision function, we will use it to plot the roc curve:
59+
from sklearn.metrics import roc_curve
60+
from sklearn.metrics import RocCurveDisplay
61+
y_score = clf.decision_function(X_test)
62+
63+
fpr, tpr, _ = roc_curve(y_test, y_score, pos_label=clf.classes_[1])
64+
roc_display = RocCurveDisplay(fpr=fpr, tpr=tpr).plot()
65+
66+
##############################################################################
67+
# Create :class:`PrecisionRecallDisplay`
68+
##############################################################################
69+
# Similarly, the precision recall curve can be plotted using `y_score` from
70+
# the previous sections.
71+
from sklearn.metrics import precision_recall_curve
72+
from sklearn.metrics import PrecisionRecallDisplay
73+
74+
prec, recall, _ = precision_recall_curve(y_test, y_score,
75+
pos_label=clf.classes_[1])
76+
pr_display = PrecisionRecallDisplay(precision=prec, recall=recall).plot()
77+
78+
##############################################################################
79+
# Combining the display objects into a single plot
80+
##############################################################################
81+
# The display objects store the computed values that were passed as arguments.
82+
# This allows for the visualizations to be easily combined using matplotlib's
83+
# API. In the following example, we place the displays next to each other in a
84+
# row.
85+
86+
# sphinx_gallery_thumbnail_number = 4
87+
import matplotlib.pyplot as plt
88+
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 8))
89+
90+
roc_display.plot(ax=ax1)
91+
pr_display.plot(ax=ax2)
92+
plt.show()
Lines changed: 144 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,144 @@
1+
{
2+
"cells": [
3+
{
4+
"cell_type": "code",
5+
"execution_count": null,
6+
"metadata": {
7+
"collapsed": false
8+
},
9+
"outputs": [],
10+
"source": [
11+
"%matplotlib inline"
12+
]
13+
},
14+
{
15+
"cell_type": "markdown",
16+
"metadata": {},
17+
"source": [
18+
"\n# Visualizations with Display Objects\n\n\n.. currentmodule:: sklearn.metrics\n\nIn this example, we will construct display objects,\n:class:`ConfusionMatrixDisplay`, :class:`RocCurveDisplay`, and\n:class:`PrecisionRecallDisplay` directly from their respective metrics. This\nis an alternative to using their corresponding plot functions when\na model's predictions are already computed or expensive to compute. Note that\nthis is advanced usage, and in general we recommend using their respective\nplot functions.\n"
19+
]
20+
},
21+
{
22+
"cell_type": "code",
23+
"execution_count": null,
24+
"metadata": {
25+
"collapsed": false
26+
},
27+
"outputs": [],
28+
"source": [
29+
"print(__doc__)"
30+
]
31+
},
32+
{
33+
"cell_type": "markdown",
34+
"metadata": {},
35+
"source": [
36+
"Load Data and train model\n-------------------------\nFor this example, we load a blood transfusion service center data set from\n`OpenML <https://www.openml.org/d/1464>`_. This is a binary classification\nproblem where the target is whether an individual donated blood. Then the\ndata is split into a train and test dataset and a logistic regression is\nfitted with the train dataset.\n\n"
37+
]
38+
},
39+
{
40+
"cell_type": "code",
41+
"execution_count": null,
42+
"metadata": {
43+
"collapsed": false
44+
},
45+
"outputs": [],
46+
"source": [
47+
"from sklearn.datasets import fetch_openml\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\n\nX, y = fetch_openml(data_id=1464, return_X_y=True)\nX_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y)\n\nclf = make_pipeline(StandardScaler(), LogisticRegression(random_state=0))\nclf.fit(X_train, y_train)"
48+
]
49+
},
50+
{
51+
"cell_type": "markdown",
52+
"metadata": {},
53+
"source": [
54+
"Create :class:`ConfusionMatrixDisplay`\n#############################################################################\n With the fitted model, we compute the predictions of the model on the test\n dataset. These predictions are used to compute the confusion matrix which\n is plotted with the :class:`ConfusionMatrixDisplay`\n\n"
55+
]
56+
},
57+
{
58+
"cell_type": "code",
59+
"execution_count": null,
60+
"metadata": {
61+
"collapsed": false
62+
},
63+
"outputs": [],
64+
"source": [
65+
"from sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import ConfusionMatrixDisplay\n\ny_pred = clf.predict(X_test)\ncm = confusion_matrix(y_test, y_pred)\n\ncm_display = ConfusionMatrixDisplay(cm).plot()"
66+
]
67+
},
68+
{
69+
"cell_type": "markdown",
70+
"metadata": {},
71+
"source": [
72+
"Create :class:`RocCurveDisplay`\n#############################################################################\n The roc curve requires either the probabilities or the non-thresholded\n decision values from the estimator. Since the logistic regression provides\n a decision function, we will use it to plot the roc curve:\n\n"
73+
]
74+
},
75+
{
76+
"cell_type": "code",
77+
"execution_count": null,
78+
"metadata": {
79+
"collapsed": false
80+
},
81+
"outputs": [],
82+
"source": [
83+
"from sklearn.metrics import roc_curve\nfrom sklearn.metrics import RocCurveDisplay\ny_score = clf.decision_function(X_test)\n\nfpr, tpr, _ = roc_curve(y_test, y_score, pos_label=clf.classes_[1])\nroc_display = RocCurveDisplay(fpr=fpr, tpr=tpr).plot()"
84+
]
85+
},
86+
{
87+
"cell_type": "markdown",
88+
"metadata": {},
89+
"source": [
90+
"Create :class:`PrecisionRecallDisplay`\n#############################################################################\n Similarly, the precision recall curve can be plotted using `y_score` from\n the previous sections.\n\n"
91+
]
92+
},
93+
{
94+
"cell_type": "code",
95+
"execution_count": null,
96+
"metadata": {
97+
"collapsed": false
98+
},
99+
"outputs": [],
100+
"source": [
101+
"from sklearn.metrics import precision_recall_curve\nfrom sklearn.metrics import PrecisionRecallDisplay\n\nprec, recall, _ = precision_recall_curve(y_test, y_score,\n pos_label=clf.classes_[1])\npr_display = PrecisionRecallDisplay(precision=prec, recall=recall).plot()"
102+
]
103+
},
104+
{
105+
"cell_type": "markdown",
106+
"metadata": {},
107+
"source": [
108+
"Combining the display objects into a single plot\n#############################################################################\n The display objects store the computed values that were passed as arguments.\n This allows for the visualizations to be easily combined using matplotlib's\n API. In the following example, we place the displays next to each other in a\n row.\n\n"
109+
]
110+
},
111+
{
112+
"cell_type": "code",
113+
"execution_count": null,
114+
"metadata": {
115+
"collapsed": false
116+
},
117+
"outputs": [],
118+
"source": [
119+
"import matplotlib.pyplot as plt\nfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 8))\n\nroc_display.plot(ax=ax1)\npr_display.plot(ax=ax2)\nplt.show()"
120+
]
121+
}
122+
],
123+
"metadata": {
124+
"kernelspec": {
125+
"display_name": "Python 3",
126+
"language": "python",
127+
"name": "python3"
128+
},
129+
"language_info": {
130+
"codemirror_mode": {
131+
"name": "ipython",
132+
"version": 3
133+
},
134+
"file_extension": ".py",
135+
"mimetype": "text/x-python",
136+
"name": "python",
137+
"nbconvert_exporter": "python",
138+
"pygments_lexer": "ipython3",
139+
"version": "3.8.2"
140+
}
141+
},
142+
"nbformat": 4,
143+
"nbformat_minor": 0
144+
}
Binary file not shown.

dev/_downloads/scikit-learn-docs.pdf

80.5 KB
Binary file not shown.

dev/_images/iris.png

0 Bytes

0 commit comments

Comments
 (0)