
Commit d99e620

Pushing the docs to dev/ for branch: main, commit a530a176ad66725c482feaf1c19e5afe22817139
1 parent ed610e3 commit d99e620

File tree

1,319 files changed: +7200 −7221 lines


dev/.buildinfo

Lines changed: 1 addition & 1 deletion

@@ -1,4 +1,4 @@
 # Sphinx build info version 1
 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: 0416b8a1e7c7526b0abe1bceab0e2cc7
+config: 1205457fdb4f17847be1c3a0cd5bb518
 tags: 645f666f9bcd5a90fca523b33c5a78b7
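
The config: value above is Sphinx's fingerprint of the build configuration; when the stored hash no longer matches the current configuration, Sphinx falls back to a full rebuild, which is why a docs rebuild touches this file. A minimal sketch of how such a fingerprint can be computed — illustrative only, the exact set of values Sphinx hashes is internal to it, and the config_values dict here is made up:

import hashlib

# Hypothetical configuration values; Sphinx hashes its own internal set.
config_values = {"html_theme": "pydata_sphinx_theme", "language": "en"}

# Serialize deterministically, then take an MD5 hex digest. The result is
# a 32-character hex string, the same shape as the config hash above.
serialized = str(sorted(config_values.items())).encode("utf-8")
fingerprint = hashlib.md5(serialized).hexdigest()
print(fingerprint)

Any change to a hashed value produces a different digest, so a simple string comparison against the stored hash is enough to detect a stale build.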

dev/_downloads/aec731a57fcba7fde8f5e3d94ffc7c69/plot_logistic_l1_l2_sparsity.ipynb

Lines changed: 1 addition & 1 deletion

@@ -15,7 +15,7 @@
 },
 "outputs": [],
 "source": [
-"# Authors: Alexandre Gramfort <[email protected]>\n# Mathieu Blondel <[email protected]>\n# Andreas Mueller <[email protected]>\n# License: BSD 3 clause\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom sklearn import datasets\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import StandardScaler\n\nX, y = datasets.load_digits(return_X_y=True)\n\nX = StandardScaler().fit_transform(X)\n\n# classify small against large digits\ny = (y > 4).astype(int)\n\nl1_ratio = 0.5  # L1 weight in the Elastic-Net regularization\n\nfig, axes = plt.subplots(3, 3)\n\n# Set regularization parameter\nfor i, (C, axes_row) in enumerate(zip((1, 0.1, 0.01), axes)):\n    # Increase tolerance for short training time\n    clf_l1_LR = LogisticRegression(C=C, penalty=\"l1\", tol=0.01, solver=\"saga\")\n    clf_l2_LR = LogisticRegression(C=C, penalty=\"l2\", tol=0.01, solver=\"saga\")\n    clf_en_LR = LogisticRegression(\n        C=C, penalty=\"elasticnet\", solver=\"saga\", l1_ratio=l1_ratio, tol=0.01\n    )\n    clf_l1_LR.fit(X, y)\n    clf_l2_LR.fit(X, y)\n    clf_en_LR.fit(X, y)\n\n    coef_l1_LR = clf_l1_LR.coef_.ravel()\n    coef_l2_LR = clf_l2_LR.coef_.ravel()\n    coef_en_LR = clf_en_LR.coef_.ravel()\n\n    # coef_l1_LR contains zeros due to the\n    # L1 sparsity inducing norm\n\n    sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100\n    sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100\n    sparsity_en_LR = np.mean(coef_en_LR == 0) * 100\n\n    print(\"C=%.2f\" % C)\n    print(\"{:<40} {:.2f}%\".format(\"Sparsity with L1 penalty:\", sparsity_l1_LR))\n    print(\"{:<40} {:.2f}%\".format(\"Sparsity with Elastic-Net penalty:\", sparsity_en_LR))\n    print(\"{:<40} {:.2f}%\".format(\"Sparsity with L2 penalty:\", sparsity_l2_LR))\n    print(\"{:<40} {:.2f}\".format(\"Score with L1 penalty:\", clf_l1_LR.score(X, y)))\n    print(\n        \"{:<40} {:.2f}\".format(\"Score with Elastic-Net penalty:\", clf_en_LR.score(X, y))\n    )\n    print(\"{:<40} {:.2f}\".format(\"Score with L2 penalty:\", clf_l2_LR.score(X, y)))\n\n    if i == 0:\n        axes_row[0].set_title(\"L1 penalty\")\n        axes_row[1].set_title(\"Elastic-Net\\nl1_ratio = %s\" % l1_ratio)\n        axes_row[2].set_title(\"L2 penalty\")\n\n    for ax, coefs in zip(axes_row, [coef_l1_LR, coef_en_LR, coef_l2_LR]):\n        ax.imshow(\n            np.abs(coefs.reshape(8, 8)),\n            interpolation=\"nearest\",\n            cmap=\"binary\",\n            vmax=1,\n            vmin=0,\n        )\n        ax.set_xticks(())\n        ax.set_yticks(())\n\n    axes_row[0].set_ylabel(\"C = %s\" % C)\n\nplt.show()"
+"# Authors: Alexandre Gramfort <[email protected]>\n# Mathieu Blondel <[email protected]>\n# Andreas Mueller <[email protected]>\n# License: BSD 3 clause\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom sklearn import datasets\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import StandardScaler\n\nX, y = datasets.load_digits(return_X_y=True)\n\nX = StandardScaler().fit_transform(X)\n\n# classify small against large digits\ny = (y > 4).astype(int)\n\nl1_ratio = 0.5  # L1 weight in the Elastic-Net regularization\n\nfig, axes = plt.subplots(3, 3)\n\n# Set regularization parameter\nfor i, (C, axes_row) in enumerate(zip((1, 0.1, 0.01), axes)):\n    # Increase tolerance for short training time\n    clf_l1_LR = LogisticRegression(C=C, penalty=\"l1\", tol=0.01, solver=\"saga\")\n    clf_l2_LR = LogisticRegression(C=C, penalty=\"l2\", tol=0.01, solver=\"saga\")\n    clf_en_LR = LogisticRegression(\n        C=C, penalty=\"elasticnet\", solver=\"saga\", l1_ratio=l1_ratio, tol=0.01\n    )\n    clf_l1_LR.fit(X, y)\n    clf_l2_LR.fit(X, y)\n    clf_en_LR.fit(X, y)\n\n    coef_l1_LR = clf_l1_LR.coef_.ravel()\n    coef_l2_LR = clf_l2_LR.coef_.ravel()\n    coef_en_LR = clf_en_LR.coef_.ravel()\n\n    # coef_l1_LR contains zeros due to the\n    # L1 sparsity inducing norm\n\n    sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100\n    sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100\n    sparsity_en_LR = np.mean(coef_en_LR == 0) * 100\n\n    print(f\"C={C:.2f}\")\n    print(f\"{'Sparsity with L1 penalty:':<40} {sparsity_l1_LR:.2f}%\")\n    print(f\"{'Sparsity with Elastic-Net penalty:':<40} {sparsity_en_LR:.2f}%\")\n    print(f\"{'Sparsity with L2 penalty:':<40} {sparsity_l2_LR:.2f}%\")\n    print(f\"{'Score with L1 penalty:':<40} {clf_l1_LR.score(X, y):.2f}\")\n    print(f\"{'Score with Elastic-Net penalty:':<40} {clf_en_LR.score(X, y):.2f}\")\n    print(f\"{'Score with L2 penalty:':<40} {clf_l2_LR.score(X, y):.2f}\")\n\n    if i == 0:\n        axes_row[0].set_title(\"L1 penalty\")\n        axes_row[1].set_title(\"Elastic-Net\\nl1_ratio = %s\" % l1_ratio)\n        axes_row[2].set_title(\"L2 penalty\")\n\n    for ax, coefs in zip(axes_row, [coef_l1_LR, coef_en_LR, coef_l2_LR]):\n        ax.imshow(\n            np.abs(coefs.reshape(8, 8)),\n            interpolation=\"nearest\",\n            cmap=\"binary\",\n            vmax=1,\n            vmin=0,\n        )\n        ax.set_xticks(())\n        ax.set_yticks(())\n\n    axes_row[0].set_ylabel(f\"C = {C}\")\n\nplt.show()"
 ]
 }
],
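
The only change in this notebook cell is mechanical: the `%`-style and `str.format()` calls are rewritten as f-strings. A small standalone sketch of the formatting idiom involved — left-alignment in a fixed-width field with `:<40` and two-decimal precision with `:.2f` — using a made-up value:

sparsity_l1 = 12.5  # made-up value for illustration

# Old styles, as in the removed line:
print("C=%.2f" % 0.1)
print("{:<40} {:.2f}%".format("Sparsity with L1 penalty:", sparsity_l1))

# Equivalent f-strings, as in the added line: the string literal is
# left-aligned in a 40-character field, the number gets 2 decimals.
print(f"C={0.1:.2f}")
print(f"{'Sparsity with L1 penalty:':<40} {sparsity_l1:.2f}%")

Both variants print identical output; the f-string form keeps each label next to its value and removes the multi-line print() wrapping, which is what shrinks the hunk from 10 lines to 8.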

dev/_downloads/fb191883ea7e76c5eb13dad28d2b0a72/plot_logistic_l1_l2_sparsity.py

Lines changed: 8 additions & 10 deletions

@@ -61,15 +61,13 @@
     sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
     sparsity_en_LR = np.mean(coef_en_LR == 0) * 100

-    print("C=%.2f" % C)
-    print("{:<40} {:.2f}%".format("Sparsity with L1 penalty:", sparsity_l1_LR))
-    print("{:<40} {:.2f}%".format("Sparsity with Elastic-Net penalty:", sparsity_en_LR))
-    print("{:<40} {:.2f}%".format("Sparsity with L2 penalty:", sparsity_l2_LR))
-    print("{:<40} {:.2f}".format("Score with L1 penalty:", clf_l1_LR.score(X, y)))
-    print(
-        "{:<40} {:.2f}".format("Score with Elastic-Net penalty:", clf_en_LR.score(X, y))
-    )
-    print("{:<40} {:.2f}".format("Score with L2 penalty:", clf_l2_LR.score(X, y)))
+    print(f"C={C:.2f}")
+    print(f"{'Sparsity with L1 penalty:':<40} {sparsity_l1_LR:.2f}%")
+    print(f"{'Sparsity with Elastic-Net penalty:':<40} {sparsity_en_LR:.2f}%")
+    print(f"{'Sparsity with L2 penalty:':<40} {sparsity_l2_LR:.2f}%")
+    print(f"{'Score with L1 penalty:':<40} {clf_l1_LR.score(X, y):.2f}")
+    print(f"{'Score with Elastic-Net penalty:':<40} {clf_en_LR.score(X, y):.2f}")
+    print(f"{'Score with L2 penalty:':<40} {clf_l2_LR.score(X, y):.2f}")

     if i == 0:
         axes_row[0].set_title("L1 penalty")
@@ -87,6 +85,6 @@
         ax.set_xticks(())
         ax.set_yticks(())

-    axes_row[0].set_ylabel("C = %s" % C)
+    axes_row[0].set_ylabel(f"C = {C}")

 plt.show()
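
For context on what these prints measure: np.mean(coef == 0) * 100 is the percentage of exactly-zero coefficients, which is how the example quantifies the sparsity induced by the L1 and Elastic-Net penalties. A minimal self-contained sketch of that computation on a toy coefficient vector (values made up for illustration):

import numpy as np

# Toy coefficient vector, shaped like LogisticRegression.coef_.ravel();
# the zeros stand in for weights driven to exactly 0 by an L1 penalty.
coef = np.array([0.0, 1.3, 0.0, -0.7, 0.0, 0.2])

# Fraction of exactly-zero entries, expressed as a percentage.
sparsity = np.mean(coef == 0) * 100
print(f"{'Sparsity:':<40} {sparsity:.2f}%")  # prints: Sparsity: 50.00%

This works because coef == 0 is a boolean array and its mean is the fraction of True entries; an L2 penalty shrinks weights toward zero without zeroing them, so its sparsity figure stays near 0%.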

dev/_downloads/scikit-learn-docs.zip

18.8 KB — Binary file not shown.
