
Commit 4e63402

Pushing the docs to dev/ for branch: main, commit 17345f9d97828f977e5039db455828cfab10836a
1 parent 691bda7 commit 4e63402

File tree

1,225 files changed (+4386 −4386 lines)


dev/_downloads/3c9b7bcd0b16f172ac12ffad61f3b5f0/plot_stack_predictors.ipynb

Lines changed: 1 addition & 1 deletion
@@ -178,7 +178,7 @@
 },
 "outputs": [],
 "source": [
-"import time\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import cross_validate, cross_val_predict\n\n\ndef plot_regression_results(ax, y_true, y_pred, title, scores, elapsed_time):\n \"\"\"Scatter plot of the predicted vs true targets.\"\"\"\n ax.plot(\n [y_true.min(), y_true.max()], [y_true.min(), y_true.max()], \"--r\", linewidth=2\n )\n ax.scatter(y_true, y_pred, alpha=0.2)\n\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"right\"].set_visible(False)\n ax.get_xaxis().tick_bottom()\n ax.get_yaxis().tick_left()\n ax.spines[\"left\"].set_position((\"outward\", 10))\n ax.spines[\"bottom\"].set_position((\"outward\", 10))\n ax.set_xlim([y_true.min(), y_true.max()])\n ax.set_ylim([y_true.min(), y_true.max()])\n ax.set_xlabel(\"Measured\")\n ax.set_ylabel(\"Predicted\")\n extra = plt.Rectangle(\n (0, 0), 0, 0, fc=\"w\", fill=False, edgecolor=\"none\", linewidth=0\n )\n ax.legend([extra], [scores], loc=\"upper left\")\n title = title + \"\\n Evaluation in {:.2f} seconds\".format(elapsed_time)\n ax.set_title(title)\n\n\nfig, axs = plt.subplots(2, 2, figsize=(9, 7))\naxs = np.ravel(axs)\n\nfor ax, (name, est) in zip(\n axs, estimators + [(\"Stacking Regressor\", stacking_regressor)]\n):\n start_time = time.time()\n score = cross_validate(\n est, X, y, scoring=[\"r2\", \"neg_mean_absolute_error\"], n_jobs=-1, verbose=0\n )\n elapsed_time = time.time() - start_time\n\n y_pred = cross_val_predict(est, X, y, n_jobs=-1, verbose=0)\n\n plot_regression_results(\n ax,\n y,\n y_pred,\n name,\n (r\"$R^2={:.2f} \\pm {:.2f}$\" + \"\\n\" + r\"$MAE={:.2f} \\pm {:.2f}$\").format(\n np.mean(score[\"test_r2\"]),\n np.std(score[\"test_r2\"]),\n -np.mean(score[\"test_neg_mean_absolute_error\"]),\n np.std(score[\"test_neg_mean_absolute_error\"]),\n ),\n elapsed_time,\n )\n\nplt.suptitle(\"Single predictors versus stacked predictors\")\nplt.tight_layout()\nplt.subplots_adjust(top=0.9)\nplt.show()"
+"import time\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import cross_validate, cross_val_predict\n\n\ndef plot_regression_results(ax, y_true, y_pred, title, scores, elapsed_time):\n \"\"\"Scatter plot of the predicted vs true targets.\"\"\"\n ax.plot(\n [y_true.min(), y_true.max()], [y_true.min(), y_true.max()], \"--r\", linewidth=2\n )\n ax.scatter(y_true, y_pred, alpha=0.2)\n\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"right\"].set_visible(False)\n ax.get_xaxis().tick_bottom()\n ax.get_yaxis().tick_left()\n ax.spines[\"left\"].set_position((\"outward\", 10))\n ax.spines[\"bottom\"].set_position((\"outward\", 10))\n ax.set_xlim([y_true.min(), y_true.max()])\n ax.set_ylim([y_true.min(), y_true.max()])\n ax.set_xlabel(\"Measured\")\n ax.set_ylabel(\"Predicted\")\n extra = plt.Rectangle(\n (0, 0), 0, 0, fc=\"w\", fill=False, edgecolor=\"none\", linewidth=0\n )\n ax.legend([extra], [scores], loc=\"upper left\")\n title = title + \"\\n Evaluation in {:.2f} seconds\".format(elapsed_time)\n ax.set_title(title)\n\n\nfig, axs = plt.subplots(2, 2, figsize=(9, 7))\naxs = np.ravel(axs)\n\nfor ax, (name, est) in zip(\n axs, estimators + [(\"Stacking Regressor\", stacking_regressor)]\n):\n start_time = time.time()\n score = cross_validate(\n est, X, y, scoring=[\"r2\", \"neg_mean_absolute_error\"], n_jobs=2, verbose=0\n )\n elapsed_time = time.time() - start_time\n\n y_pred = cross_val_predict(est, X, y, n_jobs=2, verbose=0)\n\n plot_regression_results(\n ax,\n y,\n y_pred,\n name,\n (r\"$R^2={:.2f} \\pm {:.2f}$\" + \"\\n\" + r\"$MAE={:.2f} \\pm {:.2f}$\").format(\n np.mean(score[\"test_r2\"]),\n np.std(score[\"test_r2\"]),\n -np.mean(score[\"test_neg_mean_absolute_error\"]),\n np.std(score[\"test_neg_mean_absolute_error\"]),\n ),\n elapsed_time,\n )\n\nplt.suptitle(\"Single predictors versus stacked predictors\")\nplt.tight_layout()\nplt.subplots_adjust(top=0.9)\nplt.show()"
 ]
 },
 {

dev/_downloads/50040ae12dd16e7d2e79135d7793c17e/plot_release_highlights_0_22_0.py

Lines changed: 1 addition & 1 deletion
@@ -106,7 +106,7 @@
 feature_names = np.array([f"x_{i}" for i in range(X.shape[1])])
 
 rf = RandomForestClassifier(random_state=0).fit(X, y)
-result = permutation_importance(rf, X, y, n_repeats=10, random_state=0, n_jobs=-1)
+result = permutation_importance(rf, X, y, n_repeats=10, random_state=0, n_jobs=2)
 
 fig, ax = plt.subplots()
 sorted_idx = result.importances_mean.argsort()
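
Note: n_jobs=-1 asks joblib for one worker per available CPU core, while n_jobs=2 caps the pool at two workers. A minimal, self-contained sketch of the same permutation_importance call, reusing the example's own toy setup:

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.inspection import permutation_importance

# Same setup as the hunk above; n_jobs=2 bounds the worker pool.
X, y = make_classification(random_state=0, n_features=5, n_informative=3)
rf = RandomForestClassifier(random_state=0).fit(X, y)
result = permutation_importance(rf, X, y, n_repeats=10, random_state=0, n_jobs=2)
print(result.importances_mean)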

dev/_downloads/521b554adefca348463adbbe047d7e99/plot_linear_model_coefficient_interpretation.py

Lines changed: 4 additions & 4 deletions
@@ -311,7 +311,7 @@
     y,
     cv=RepeatedKFold(n_splits=5, n_repeats=5),
     return_estimator=True,
-    n_jobs=-1,
+    n_jobs=2,
 )
 coefs = pd.DataFrame(
     [
@@ -366,7 +366,7 @@
     y,
     cv=RepeatedKFold(n_splits=5, n_repeats=5),
     return_estimator=True,
-    n_jobs=-1,
+    n_jobs=2,
 )
 coefs = pd.DataFrame(
     [
@@ -465,7 +465,7 @@
     y,
     cv=RepeatedKFold(n_splits=5, n_repeats=5),
     return_estimator=True,
-    n_jobs=-1,
+    n_jobs=2,
 )
 coefs = pd.DataFrame(
     [
@@ -571,7 +571,7 @@
     y,
     cv=RepeatedKFold(n_splits=5, n_repeats=5),
     return_estimator=True,
-    n_jobs=-1,
+    n_jobs=2,
 )
 coefs = pd.DataFrame(
     [
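
Note: all four hunks touch the same cross_validate call, whose repeated folds are independent and therefore parallelized by joblib. A minimal sketch of the pattern, with a toy regressor and synthetic data standing in for the example's own model and dataset (Ridge and make_regression are stand-ins, not from this commit):

from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.model_selection import RepeatedKFold, cross_validate

# Stand-in data/model; the example itself uses its own pipeline here.
X, y = make_regression(n_samples=150, n_features=4, random_state=0)
cv_model = cross_validate(
    Ridge(),
    X,
    y,
    cv=RepeatedKFold(n_splits=5, n_repeats=5),
    return_estimator=True,
    n_jobs=2,  # two joblib workers; -1 would claim every core
)
print(len(cv_model["estimator"]))  # 25 fitted estimators, one per split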

dev/_downloads/898b30acf62919d918478efbe526195f/plot_digits_pipe.ipynb

Lines changed: 1 addition & 1 deletion
@@ -26,7 +26,7 @@
 },
 "outputs": [],
 "source": [
-"# Code source: Ga\u00ebl Varoquaux\n# Modified for documentation by Jaques Grobler\n# License: BSD 3 clause\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nfrom sklearn import datasets\nfrom sklearn.decomposition import PCA\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import GridSearchCV\n\n\n# Define a pipeline to search for the best combination of PCA truncation\n# and classifier regularization.\npca = PCA()\n# set the tolerance to a large value to make the example faster\nlogistic = LogisticRegression(max_iter=10000, tol=0.1)\npipe = Pipeline(steps=[(\"pca\", pca), (\"logistic\", logistic)])\n\nX_digits, y_digits = datasets.load_digits(return_X_y=True)\n\n# Parameters of pipelines can be set using \u2018__\u2019 separated parameter names:\nparam_grid = {\n \"pca__n_components\": [5, 15, 30, 45, 64],\n \"logistic__C\": np.logspace(-4, 4, 4),\n}\nsearch = GridSearchCV(pipe, param_grid, n_jobs=-1)\nsearch.fit(X_digits, y_digits)\nprint(\"Best parameter (CV score=%0.3f):\" % search.best_score_)\nprint(search.best_params_)\n\n# Plot the PCA spectrum\npca.fit(X_digits)\n\nfig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True, figsize=(6, 6))\nax0.plot(\n np.arange(1, pca.n_components_ + 1), pca.explained_variance_ratio_, \"+\", linewidth=2\n)\nax0.set_ylabel(\"PCA explained variance ratio\")\n\nax0.axvline(\n search.best_estimator_.named_steps[\"pca\"].n_components,\n linestyle=\":\",\n label=\"n_components chosen\",\n)\nax0.legend(prop=dict(size=12))\n\n# For each number of components, find the best classifier results\nresults = pd.DataFrame(search.cv_results_)\ncomponents_col = \"param_pca__n_components\"\nbest_clfs = results.groupby(components_col).apply(\n lambda g: g.nlargest(1, \"mean_test_score\")\n)\n\nbest_clfs.plot(\n x=components_col, y=\"mean_test_score\", yerr=\"std_test_score\", legend=False, ax=ax1\n)\nax1.set_ylabel(\"Classification accuracy (val)\")\nax1.set_xlabel(\"n_components\")\n\nplt.xlim(-1, 70)\n\nplt.tight_layout()\nplt.show()"
+"# Code source: Ga\u00ebl Varoquaux\n# Modified for documentation by Jaques Grobler\n# License: BSD 3 clause\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nfrom sklearn import datasets\nfrom sklearn.decomposition import PCA\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import GridSearchCV\n\n\n# Define a pipeline to search for the best combination of PCA truncation\n# and classifier regularization.\npca = PCA()\n# set the tolerance to a large value to make the example faster\nlogistic = LogisticRegression(max_iter=10000, tol=0.1)\npipe = Pipeline(steps=[(\"pca\", pca), (\"logistic\", logistic)])\n\nX_digits, y_digits = datasets.load_digits(return_X_y=True)\n\n# Parameters of pipelines can be set using \u2018__\u2019 separated parameter names:\nparam_grid = {\n \"pca__n_components\": [5, 15, 30, 45, 64],\n \"logistic__C\": np.logspace(-4, 4, 4),\n}\nsearch = GridSearchCV(pipe, param_grid, n_jobs=2)\nsearch.fit(X_digits, y_digits)\nprint(\"Best parameter (CV score=%0.3f):\" % search.best_score_)\nprint(search.best_params_)\n\n# Plot the PCA spectrum\npca.fit(X_digits)\n\nfig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True, figsize=(6, 6))\nax0.plot(\n np.arange(1, pca.n_components_ + 1), pca.explained_variance_ratio_, \"+\", linewidth=2\n)\nax0.set_ylabel(\"PCA explained variance ratio\")\n\nax0.axvline(\n search.best_estimator_.named_steps[\"pca\"].n_components,\n linestyle=\":\",\n label=\"n_components chosen\",\n)\nax0.legend(prop=dict(size=12))\n\n# For each number of components, find the best classifier results\nresults = pd.DataFrame(search.cv_results_)\ncomponents_col = \"param_pca__n_components\"\nbest_clfs = results.groupby(components_col).apply(\n lambda g: g.nlargest(1, \"mean_test_score\")\n)\n\nbest_clfs.plot(\n x=components_col, y=\"mean_test_score\", yerr=\"std_test_score\", legend=False, ax=ax1\n)\nax1.set_ylabel(\"Classification accuracy (val)\")\nax1.set_xlabel(\"n_components\")\n\nplt.xlim(-1, 70)\n\nplt.tight_layout()\nplt.show()"
 ]
 }
 ],

dev/_downloads/ba89a400c6902f85c10199ff86947d23/plot_digits_pipe.py

Lines changed: 1 addition & 1 deletion
@@ -40,7 +40,7 @@
     "pca__n_components": [5, 15, 30, 45, 64],
     "logistic__C": np.logspace(-4, 4, 4),
 }
-search = GridSearchCV(pipe, param_grid, n_jobs=-1)
+search = GridSearchCV(pipe, param_grid, n_jobs=2)
 search.fit(X_digits, y_digits)
 print("Best parameter (CV score=%0.3f):" % search.best_score_)
 print(search.best_params_)
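
Note: GridSearchCV fans its candidate fits (parameter setting × CV fold) out over joblib workers, so n_jobs=2 simply bounds that fan-out regardless of machine size. A runnable reduction of the example above (the PCA pipeline step is dropped here for brevity):

import numpy as np
from sklearn import datasets
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV

# Same digits data and classifier as the example; grid over C only.
X_digits, y_digits = datasets.load_digits(return_X_y=True)
param_grid = {"C": np.logspace(-4, 4, 4)}
search = GridSearchCV(
    LogisticRegression(max_iter=10000, tol=0.1), param_grid, n_jobs=2
)
search.fit(X_digits, y_digits)
print("Best parameter (CV score=%0.3f):" % search.best_score_)
print(search.best_params_)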

dev/_downloads/c6ccb1a9c5f82321f082e9767a2706f3/plot_stack_predictors.py

Lines changed: 2 additions & 2 deletions
@@ -249,11 +249,11 @@ def plot_regression_results(ax, y_true, y_pred, title, scores, elapsed_time):
 ):
     start_time = time.time()
     score = cross_validate(
-        est, X, y, scoring=["r2", "neg_mean_absolute_error"], n_jobs=-1, verbose=0
+        est, X, y, scoring=["r2", "neg_mean_absolute_error"], n_jobs=2, verbose=0
     )
     elapsed_time = time.time() - start_time
 
-    y_pred = cross_val_predict(est, X, y, n_jobs=-1, verbose=0)
+    y_pred = cross_val_predict(est, X, y, n_jobs=2, verbose=0)
 
     plot_regression_results(
         ax,
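
Note: both parallel calls in this loop get the same two-worker cap. A minimal sketch of the pair, with a toy estimator and synthetic data in place of the example's stacking setup (Ridge and make_regression are assumed stand-ins):

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_predict, cross_validate

# Stand-ins for the example's estimators and dataset.
X, y = make_regression(n_samples=200, n_features=5, random_state=0)
est = Ridge()

# Scores across folds, then out-of-fold predictions, both on 2 workers.
score = cross_validate(
    est, X, y, scoring=["r2", "neg_mean_absolute_error"], n_jobs=2, verbose=0
)
y_pred = cross_val_predict(est, X, y, n_jobs=2, verbose=0)
print(np.mean(score["test_r2"]), -np.mean(score["test_neg_mean_absolute_error"]))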

dev/_downloads/cf0f90f46eb559facf7f63f124f61e04/plot_linear_model_coefficient_interpretation.ipynb

Lines changed: 4 additions & 4 deletions
@@ -314,7 +314,7 @@
 },
 "outputs": [],
 "source": [
-"from sklearn.model_selection import cross_validate\nfrom sklearn.model_selection import RepeatedKFold\n\ncv_model = cross_validate(\n model,\n X,\n y,\n cv=RepeatedKFold(n_splits=5, n_repeats=5),\n return_estimator=True,\n n_jobs=-1,\n)\ncoefs = pd.DataFrame(\n [\n est.named_steps[\"transformedtargetregressor\"].regressor_.coef_\n * X_train_preprocessed.std(axis=0)\n for est in cv_model[\"estimator\"]\n ],\n columns=feature_names,\n)\nplt.figure(figsize=(9, 7))\nsns.stripplot(data=coefs, orient=\"h\", color=\"k\", alpha=0.5)\nsns.boxplot(data=coefs, orient=\"h\", color=\"cyan\", saturation=0.5)\nplt.axvline(x=0, color=\".5\")\nplt.xlabel(\"Coefficient importance\")\nplt.title(\"Coefficient importance and its variability\")\nplt.subplots_adjust(left=0.3)"
+"from sklearn.model_selection import cross_validate\nfrom sklearn.model_selection import RepeatedKFold\n\ncv_model = cross_validate(\n model,\n X,\n y,\n cv=RepeatedKFold(n_splits=5, n_repeats=5),\n return_estimator=True,\n n_jobs=2,\n)\ncoefs = pd.DataFrame(\n [\n est.named_steps[\"transformedtargetregressor\"].regressor_.coef_\n * X_train_preprocessed.std(axis=0)\n for est in cv_model[\"estimator\"]\n ],\n columns=feature_names,\n)\nplt.figure(figsize=(9, 7))\nsns.stripplot(data=coefs, orient=\"h\", color=\"k\", alpha=0.5)\nsns.boxplot(data=coefs, orient=\"h\", color=\"cyan\", saturation=0.5)\nplt.axvline(x=0, color=\".5\")\nplt.xlabel(\"Coefficient importance\")\nplt.title(\"Coefficient importance and its variability\")\nplt.subplots_adjust(left=0.3)"
 ]
 },
 {
@@ -350,7 +350,7 @@
 },
 "outputs": [],
 "source": [
-"column_to_drop = [\"AGE\"]\n\ncv_model = cross_validate(\n model,\n X.drop(columns=column_to_drop),\n y,\n cv=RepeatedKFold(n_splits=5, n_repeats=5),\n return_estimator=True,\n n_jobs=-1,\n)\ncoefs = pd.DataFrame(\n [\n est.named_steps[\"transformedtargetregressor\"].regressor_.coef_\n * X_train_preprocessed.drop(columns=column_to_drop).std(axis=0)\n for est in cv_model[\"estimator\"]\n ],\n columns=feature_names[:-1],\n)\nplt.figure(figsize=(9, 7))\nsns.stripplot(data=coefs, orient=\"h\", color=\"k\", alpha=0.5)\nsns.boxplot(data=coefs, orient=\"h\", color=\"cyan\", saturation=0.5)\nplt.axvline(x=0, color=\".5\")\nplt.title(\"Coefficient importance and its variability\")\nplt.xlabel(\"Coefficient importance\")\nplt.subplots_adjust(left=0.3)"
+"column_to_drop = [\"AGE\"]\n\ncv_model = cross_validate(\n model,\n X.drop(columns=column_to_drop),\n y,\n cv=RepeatedKFold(n_splits=5, n_repeats=5),\n return_estimator=True,\n n_jobs=2,\n)\ncoefs = pd.DataFrame(\n [\n est.named_steps[\"transformedtargetregressor\"].regressor_.coef_\n * X_train_preprocessed.drop(columns=column_to_drop).std(axis=0)\n for est in cv_model[\"estimator\"]\n ],\n columns=feature_names[:-1],\n)\nplt.figure(figsize=(9, 7))\nsns.stripplot(data=coefs, orient=\"h\", color=\"k\", alpha=0.5)\nsns.boxplot(data=coefs, orient=\"h\", color=\"cyan\", saturation=0.5)\nplt.axvline(x=0, color=\".5\")\nplt.title(\"Coefficient importance and its variability\")\nplt.xlabel(\"Coefficient importance\")\nplt.subplots_adjust(left=0.3)"
 ]
 },
 {
@@ -440,7 +440,7 @@
 },
 "outputs": [],
 "source": [
-"cv_model = cross_validate(\n model,\n X,\n y,\n cv=RepeatedKFold(n_splits=5, n_repeats=5),\n return_estimator=True,\n n_jobs=-1,\n)\ncoefs = pd.DataFrame(\n [\n est.named_steps[\"transformedtargetregressor\"].regressor_.coef_\n for est in cv_model[\"estimator\"]\n ],\n columns=feature_names,\n)\nplt.figure(figsize=(9, 7))\nsns.stripplot(data=coefs, orient=\"h\", color=\"k\", alpha=0.5)\nsns.boxplot(data=coefs, orient=\"h\", color=\"cyan\", saturation=0.5)\nplt.axvline(x=0, color=\".5\")\nplt.title(\"Coefficient variability\")\nplt.subplots_adjust(left=0.3)"
+"cv_model = cross_validate(\n model,\n X,\n y,\n cv=RepeatedKFold(n_splits=5, n_repeats=5),\n return_estimator=True,\n n_jobs=2,\n)\ncoefs = pd.DataFrame(\n [\n est.named_steps[\"transformedtargetregressor\"].regressor_.coef_\n for est in cv_model[\"estimator\"]\n ],\n columns=feature_names,\n)\nplt.figure(figsize=(9, 7))\nsns.stripplot(data=coefs, orient=\"h\", color=\"k\", alpha=0.5)\nsns.boxplot(data=coefs, orient=\"h\", color=\"cyan\", saturation=0.5)\nplt.axvline(x=0, color=\".5\")\nplt.title(\"Coefficient variability\")\nplt.subplots_adjust(left=0.3)"
 ]
 },
 {
@@ -530,7 +530,7 @@
 },
 "outputs": [],
 "source": [
-"cv_model = cross_validate(\n model,\n X,\n y,\n cv=RepeatedKFold(n_splits=5, n_repeats=5),\n return_estimator=True,\n n_jobs=-1,\n)\ncoefs = pd.DataFrame(\n [\n est.named_steps[\"transformedtargetregressor\"].regressor_.coef_\n * X_train_preprocessed.std(axis=0)\n for est in cv_model[\"estimator\"]\n ],\n columns=feature_names,\n)\n\nplt.ylabel(\"Age coefficient\")\nplt.xlabel(\"Experience coefficient\")\nplt.grid(True)\nplt.xlim(-0.4, 0.5)\nplt.ylim(-0.4, 0.5)\nplt.scatter(coefs[\"AGE\"], coefs[\"EXPERIENCE\"])\n_ = plt.title(\"Co-variations of coefficients for AGE and EXPERIENCE across folds\")"
+"cv_model = cross_validate(\n model,\n X,\n y,\n cv=RepeatedKFold(n_splits=5, n_repeats=5),\n return_estimator=True,\n n_jobs=2,\n)\ncoefs = pd.DataFrame(\n [\n est.named_steps[\"transformedtargetregressor\"].regressor_.coef_\n * X_train_preprocessed.std(axis=0)\n for est in cv_model[\"estimator\"]\n ],\n columns=feature_names,\n)\n\nplt.ylabel(\"Age coefficient\")\nplt.xlabel(\"Experience coefficient\")\nplt.grid(True)\nplt.xlim(-0.4, 0.5)\nplt.ylim(-0.4, 0.5)\nplt.scatter(coefs[\"AGE\"], coefs[\"EXPERIENCE\"])\n_ = plt.title(\"Co-variations of coefficients for AGE and EXPERIENCE across folds\")"
 ]
 },
 {

dev/_downloads/df790541d4c6bdebcc75018a2459467a/plot_release_highlights_0_22_0.ipynb

Lines changed: 1 addition & 1 deletion
@@ -69,7 +69,7 @@
 },
 "outputs": [],
 "source": [
-"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import make_classification\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.inspection import permutation_importance\n\nX, y = make_classification(random_state=0, n_features=5, n_informative=3)\nfeature_names = np.array([f\"x_{i}\" for i in range(X.shape[1])])\n\nrf = RandomForestClassifier(random_state=0).fit(X, y)\nresult = permutation_importance(rf, X, y, n_repeats=10, random_state=0, n_jobs=-1)\n\nfig, ax = plt.subplots()\nsorted_idx = result.importances_mean.argsort()\nax.boxplot(\n result.importances[sorted_idx].T, vert=False, labels=feature_names[sorted_idx]\n)\nax.set_title(\"Permutation Importance of each feature\")\nax.set_ylabel(\"Features\")\nfig.tight_layout()\nplt.show()"
+"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import make_classification\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.inspection import permutation_importance\n\nX, y = make_classification(random_state=0, n_features=5, n_informative=3)\nfeature_names = np.array([f\"x_{i}\" for i in range(X.shape[1])])\n\nrf = RandomForestClassifier(random_state=0).fit(X, y)\nresult = permutation_importance(rf, X, y, n_repeats=10, random_state=0, n_jobs=2)\n\nfig, ax = plt.subplots()\nsorted_idx = result.importances_mean.argsort()\nax.boxplot(\n result.importances[sorted_idx].T, vert=False, labels=feature_names[sorted_idx]\n)\nax.set_title(\"Permutation Importance of each feature\")\nax.set_ylabel(\"Features\")\nfig.tight_layout()\nplt.show()"
 ]
 },
 {
