Commit efa10d4

Pushing the docs to dev/ for branch: main, commit 5fbd02e9022d4b4329fe93218999b03d394ecb8f
1 parent d3c9831

1,249 files changed: +4,508 additions, -4,508 deletions

dev/_downloads/6d4f620ec6653356eb970c2a6ed62081/plot_calibration_curve.ipynb

Lines changed: 2 additions & 2 deletions
@@ -91,7 +91,7 @@
 },
 "outputs": [],
 "source": [
-"from collections import defaultdict\n\nimport pandas as pd\n\nfrom sklearn.metrics import (\n    precision_score,\n    recall_score,\n    f1_score,\n    brier_score_loss,\n    log_loss,\n    roc_auc_score,\n)\n\nscores = defaultdict(list)\nfor i, (clf, name) in enumerate(clf_list):\n    clf.fit(X_train, y_train)\n    y_prob = clf.predict_proba(X_test)\n    y_pred = clf.predict(X_test)\n    scores[\"Classifier\"].append(name)\n\n    for metric in [brier_score_loss, log_loss]:\n        score_name = metric.__name__.replace(\"_\", \" \").replace(\"score\", \"\").capitalize()\n        scores[score_name].append(metric(y_test, y_prob[:, 1]))\n\n    for metric in [precision_score, recall_score, f1_score, roc_auc_score]:\n        score_name = metric.__name__.replace(\"_\", \" \").replace(\"score\", \"\").capitalize()\n        scores[score_name].append(metric(y_test, y_pred))\n\n    score_df = pd.DataFrame(scores).set_index(\"Classifier\")\n    score_df.round(decimals=3)\n\nscore_df"
+"from collections import defaultdict\n\nimport pandas as pd\n\nfrom sklearn.metrics import (\n    precision_score,\n    recall_score,\n    f1_score,\n    brier_score_loss,\n    log_loss,\n    roc_auc_score,\n)\n\nscores = defaultdict(list)\nfor i, (clf, name) in enumerate(clf_list):\n    clf.fit(X_train, y_train)\n    y_prob = clf.predict_proba(X_test)\n    y_pred = clf.predict(X_test)\n    scores[\"Classifier\"].append(name)\n\n    for metric in [brier_score_loss, log_loss, roc_auc_score]:\n        score_name = metric.__name__.replace(\"_\", \" \").replace(\"score\", \"\").capitalize()\n        scores[score_name].append(metric(y_test, y_prob[:, 1]))\n\n    for metric in [precision_score, recall_score, f1_score]:\n        score_name = metric.__name__.replace(\"_\", \" \").replace(\"score\", \"\").capitalize()\n        scores[score_name].append(metric(y_test, y_pred))\n\n    score_df = pd.DataFrame(scores).set_index(\"Classifier\")\n    score_df.round(decimals=3)\n\nscore_df"
 ]
 },
 {
@@ -149,7 +149,7 @@
 },
 "outputs": [],
 "source": [
-"scores = defaultdict(list)\nfor i, (clf, name) in enumerate(clf_list):\n    clf.fit(X_train, y_train)\n    y_prob = clf.predict_proba(X_test)\n    y_pred = clf.predict(X_test)\n    scores[\"Classifier\"].append(name)\n\n    for metric in [brier_score_loss, log_loss]:\n        score_name = metric.__name__.replace(\"_\", \" \").replace(\"score\", \"\").capitalize()\n        scores[score_name].append(metric(y_test, y_prob[:, 1]))\n\n    for metric in [precision_score, recall_score, f1_score, roc_auc_score]:\n        score_name = metric.__name__.replace(\"_\", \" \").replace(\"score\", \"\").capitalize()\n        scores[score_name].append(metric(y_test, y_pred))\n\n    score_df = pd.DataFrame(scores).set_index(\"Classifier\")\n    score_df.round(decimals=3)\n\nscore_df"
+"scores = defaultdict(list)\nfor i, (clf, name) in enumerate(clf_list):\n    clf.fit(X_train, y_train)\n    y_prob = clf.predict_proba(X_test)\n    y_pred = clf.predict(X_test)\n    scores[\"Classifier\"].append(name)\n\n    for metric in [brier_score_loss, log_loss, roc_auc_score]:\n        score_name = metric.__name__.replace(\"_\", \" \").replace(\"score\", \"\").capitalize()\n        scores[score_name].append(metric(y_test, y_prob[:, 1]))\n\n    for metric in [precision_score, recall_score, f1_score]:\n        score_name = metric.__name__.replace(\"_\", \" \").replace(\"score\", \"\").capitalize()\n        scores[score_name].append(metric(y_test, y_pred))\n\n    score_df = pd.DataFrame(scores).set_index(\"Classifier\")\n    score_df.round(decimals=3)\n\nscore_df"
 ]
 },
 {
dev/_downloads/85db957603c93bd3e0a4265ea6565b13/plot_calibration_curve.py

Lines changed: 4 additions & 4 deletions
@@ -155,11 +155,11 @@
     y_pred = clf.predict(X_test)
     scores["Classifier"].append(name)
 
-    for metric in [brier_score_loss, log_loss]:
+    for metric in [brier_score_loss, log_loss, roc_auc_score]:
         score_name = metric.__name__.replace("_", " ").replace("score", "").capitalize()
         scores[score_name].append(metric(y_test, y_prob[:, 1]))
 
-    for metric in [precision_score, recall_score, f1_score, roc_auc_score]:
+    for metric in [precision_score, recall_score, f1_score]:
         score_name = metric.__name__.replace("_", " ").replace("score", "").capitalize()
         scores[score_name].append(metric(y_test, y_pred))
 
@@ -300,11 +300,11 @@ def predict_proba(self, X):
     y_pred = clf.predict(X_test)
     scores["Classifier"].append(name)
 
-    for metric in [brier_score_loss, log_loss]:
+    for metric in [brier_score_loss, log_loss, roc_auc_score]:
         score_name = metric.__name__.replace("_", " ").replace("score", "").capitalize()
         scores[score_name].append(metric(y_test, y_prob[:, 1]))
 
-    for metric in [precision_score, recall_score, f1_score, roc_auc_score]:
+    for metric in [precision_score, recall_score, f1_score]:
         score_name = metric.__name__.replace("_", " ").replace("score", "").capitalize()
         scores[score_name].append(metric(y_test, y_pred))
 
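Editorial context for the change above (not part of the commit itself): roc_auc_score is a ranking metric that expects continuous scores, so it belongs with brier_score_loss and log_loss, computed from y_prob[:, 1], while precision_score, recall_score, and f1_score are defined on hard labels and stay on y_pred. A minimal, self-contained sketch of the difference follows; the make_classification data and LogisticRegression model are illustrative assumptions, not taken from the example.

# Illustrative sketch (assumed toy data and model, not from this commit):
# ROC AUC scored on probabilities uses the full ranking of the test samples;
# scored on hard 0/1 predictions it keeps only one operating point.
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=2000, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

clf = LogisticRegression().fit(X_train, y_train)
y_prob = clf.predict_proba(X_test)[:, 1]  # positive-class scores
y_pred = clf.predict(X_test)              # hard labels at the default 0.5 threshold

print(roc_auc_score(y_test, y_prob))  # evaluates every threshold along the ranking
print(roc_auc_score(y_test, y_pred))  # collapses the ROC curve to a single threshold

With hard labels the reported AUC typically drops, since everything between the extreme thresholds is discarded; that is what the diff above corrects in the example's score table.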

dev/_downloads/scikit-learn-docs.zip

Binary file not shown (2.37 KB).
