Commit a07cc42

Pushing the docs to dev/ for branch: main, commit b9bd2d524db520a8e8740025ce7aca2342d37f38
1 parent c301b19 commit a07cc42

1,237 files changed: +4975 −4635 lines

dev/_downloads/149ff4a0ff65a845f675cc7a0fcb86ea/plot_image_denoising.py

Lines changed: 48 additions & 36 deletions
@@ -32,16 +32,12 @@
 
 """
 
-from time import time
-
-import matplotlib.pyplot as plt
+# %%
+# Generate distorted image
+# ------------------------
 import numpy as np
 import scipy as sp
 
-from sklearn.decomposition import MiniBatchDictionaryLearning
-from sklearn.feature_extraction.image import extract_patches_2d
-from sklearn.feature_extraction.image import reconstruct_from_patches_2d
-
 
 try:  # SciPy >= 0.16 have face in misc
     from scipy.misc import face
@@ -64,6 +60,44 @@
 distorted = face.copy()
 distorted[:, width // 2 :] += 0.075 * np.random.randn(height, width // 2)
 
+
+# %%
+# Display the distorted image
+# ---------------------------
+import matplotlib.pyplot as plt
+
+
+def show_with_diff(image, reference, title):
+    """Helper function to display denoising"""
+    plt.figure(figsize=(5, 3.3))
+    plt.subplot(1, 2, 1)
+    plt.title("Image")
+    plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray, interpolation="nearest")
+    plt.xticks(())
+    plt.yticks(())
+    plt.subplot(1, 2, 2)
+    difference = image - reference
+
+    plt.title("Difference (norm: %.2f)" % np.sqrt(np.sum(difference**2)))
+    plt.imshow(
+        difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr, interpolation="nearest"
+    )
+    plt.xticks(())
+    plt.yticks(())
+    plt.suptitle(title, size=16)
+    plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
+
+
+show_with_diff(distorted, face, "Distorted image")
+
+
+# %%
+# Extract reference patches
+# ----------------------------
+from time import time
+
+from sklearn.feature_extraction.image import extract_patches_2d
+
 # Extract all reference patches from the left half of the image
 print("Extracting reference patches...")
 t0 = time()
@@ -74,8 +108,11 @@
 data /= np.std(data, axis=0)
 print("done in %.2fs." % (time() - t0))
 
-# #############################################################################
+
+# %%
 # Learn the dictionary from reference patches
+# -------------------------------------------
+from sklearn.decomposition import MiniBatchDictionaryLearning
 
 print("Learning the dictionary...")
 t0 = time()
@@ -98,35 +135,10 @@
 plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
 
 
-# #############################################################################
-# Display the distorted image
-
-
-def show_with_diff(image, reference, title):
-    """Helper function to display denoising"""
-    plt.figure(figsize=(5, 3.3))
-    plt.subplot(1, 2, 1)
-    plt.title("Image")
-    plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray, interpolation="nearest")
-    plt.xticks(())
-    plt.yticks(())
-    plt.subplot(1, 2, 2)
-    difference = image - reference
-
-    plt.title("Difference (norm: %.2f)" % np.sqrt(np.sum(difference**2)))
-    plt.imshow(
-        difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr, interpolation="nearest"
-    )
-    plt.xticks(())
-    plt.yticks(())
-    plt.suptitle(title, size=16)
-    plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
-
-
-show_with_diff(distorted, face, "Distorted image")
-
-# #############################################################################
+# %%
 # Extract noisy patches and reconstruct them using the dictionary
+# ---------------------------------------------------------------
+from sklearn.feature_extraction.image import reconstruct_from_patches_2d
 
 print("Extracting noisy patches... ")
 t0 = time()
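The rewrite above moves the example from one long script with `# ###...` banner comments to the notebook-style layout that sphinx-gallery renders as separate cells: each section opens with a `# %%` marker and a commented reST title, and each import is pulled down into the first section that uses it. A minimal sketch of that pattern, with hypothetical section names that are not part of this commit:

"""
Example title
=============

Module-level docstring, rendered as the example's introduction.
"""

# %%
# Load the data
# -------------
# Imports live in the first cell that needs them.
import numpy as np

rng = np.random.default_rng(0)
data = rng.normal(size=(10, 10))

# %%
# Inspect the data
# ----------------
# Each ``# %%`` block becomes one section of the rendered HTML page and one
# cell pair (markdown + code) in the generated notebook.
print(data.mean())

The generated notebook for this example (next file) shows how each of these blocks turns into a markdown cell plus a code cell.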

dev/_downloads/f726f6c50f1cc13e1afb7561fa005d16/plot_image_denoising.ipynb

Lines changed: 80 additions & 1 deletion
@@ -18,6 +18,85 @@
 "\n# Image denoising using dictionary learning\n\nAn example comparing the effect of reconstructing noisy fragments\nof a raccoon face image using firstly online `DictionaryLearning` and\nvarious transform methods.\n\nThe dictionary is fitted on the distorted left half of the image, and\nsubsequently used to reconstruct the right half. Note that even better\nperformance could be achieved by fitting to an undistorted (i.e.\nnoiseless) image, but here we start from the assumption that it is not\navailable.\n\nA common practice for evaluating the results of image denoising is by looking\nat the difference between the reconstruction and the original image. If the\nreconstruction is perfect this will look like Gaussian noise.\n\nIt can be seen from the plots that the results of `omp` with two\nnon-zero coefficients is a bit less biased than when keeping only one\n(the edges look less prominent). It is in addition closer from the ground\ntruth in Frobenius norm.\n\nThe result of `least_angle_regression` is much more strongly biased: the\ndifference is reminiscent of the local intensity value of the original image.\n\nThresholding is clearly not useful for denoising, but it is here to show that\nit can produce a suggestive output with very high speed, and thus be useful\nfor other tasks such as object classification, where performance is not\nnecessarily related to visualisation.\n"
 ]
 },
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"## Generate distorted image\n\n"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {
+"collapsed": false
+},
+"outputs": [],
+"source": [
+"import numpy as np\nimport scipy as sp\n\n\ntry: # SciPy >= 0.16 have face in misc\n from scipy.misc import face\n\n face = face(gray=True)\nexcept ImportError:\n face = sp.face(gray=True)\n\n# Convert from uint8 representation with values between 0 and 255 to\n# a floating point representation with values between 0 and 1.\nface = face / 255.0\n\n# downsample for higher speed\nface = face[::4, ::4] + face[1::4, ::4] + face[::4, 1::4] + face[1::4, 1::4]\nface /= 4.0\nheight, width = face.shape\n\n# Distort the right half of the image\nprint(\"Distorting image...\")\ndistorted = face.copy()\ndistorted[:, width // 2 :] += 0.075 * np.random.randn(height, width // 2)"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"## Display the distorted image\n\n"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {
+"collapsed": false
+},
+"outputs": [],
+"source": [
+"import matplotlib.pyplot as plt\n\n\ndef show_with_diff(image, reference, title):\n \"\"\"Helper function to display denoising\"\"\"\n plt.figure(figsize=(5, 3.3))\n plt.subplot(1, 2, 1)\n plt.title(\"Image\")\n plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray, interpolation=\"nearest\")\n plt.xticks(())\n plt.yticks(())\n plt.subplot(1, 2, 2)\n difference = image - reference\n\n plt.title(\"Difference (norm: %.2f)\" % np.sqrt(np.sum(difference**2)))\n plt.imshow(\n difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr, interpolation=\"nearest\"\n )\n plt.xticks(())\n plt.yticks(())\n plt.suptitle(title, size=16)\n plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)\n\n\nshow_with_diff(distorted, face, \"Distorted image\")"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"## Extract reference patches\n\n"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {
+"collapsed": false
+},
+"outputs": [],
+"source": [
+"from time import time\n\nfrom sklearn.feature_extraction.image import extract_patches_2d\n\n# Extract all reference patches from the left half of the image\nprint(\"Extracting reference patches...\")\nt0 = time()\npatch_size = (7, 7)\ndata = extract_patches_2d(distorted[:, : width // 2], patch_size)\ndata = data.reshape(data.shape[0], -1)\ndata -= np.mean(data, axis=0)\ndata /= np.std(data, axis=0)\nprint(\"done in %.2fs.\" % (time() - t0))"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"## Learn the dictionary from reference patches\n\n"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {
+"collapsed": false
+},
+"outputs": [],
+"source": [
+"from sklearn.decomposition import MiniBatchDictionaryLearning\n\nprint(\"Learning the dictionary...\")\nt0 = time()\ndico = MiniBatchDictionaryLearning(n_components=50, alpha=1, n_iter=250)\nV = dico.fit(data).components_\ndt = time() - t0\nprint(\"done in %.2fs.\" % dt)\n\nplt.figure(figsize=(4.2, 4))\nfor i, comp in enumerate(V[:100]):\n plt.subplot(10, 10, i + 1)\n plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r, interpolation=\"nearest\")\n plt.xticks(())\n plt.yticks(())\nplt.suptitle(\n \"Dictionary learned from face patches\\n\"\n + \"Train time %.1fs on %d patches\" % (dt, len(data)),\n fontsize=16,\n)\nplt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"## Extract noisy patches and reconstruct them using the dictionary\n\n"
+]
+},
 {
 "cell_type": "code",
 "execution_count": null,
@@ -26,7 +105,7 @@
 },
 "outputs": [],
 "source": [
-"from time import time\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy as sp\n\nfrom sklearn.decomposition import MiniBatchDictionaryLearning\nfrom sklearn.feature_extraction.image import extract_patches_2d\nfrom sklearn.feature_extraction.image import reconstruct_from_patches_2d\n\n\ntry: # SciPy >= 0.16 have face in misc\n from scipy.misc import face\n\n face = face(gray=True)\nexcept ImportError:\n face = sp.face(gray=True)\n\n# Convert from uint8 representation with values between 0 and 255 to\n# a floating point representation with values between 0 and 1.\nface = face / 255.0\n\n# downsample for higher speed\nface = face[::4, ::4] + face[1::4, ::4] + face[::4, 1::4] + face[1::4, 1::4]\nface /= 4.0\nheight, width = face.shape\n\n# Distort the right half of the image\nprint(\"Distorting image...\")\ndistorted = face.copy()\ndistorted[:, width // 2 :] += 0.075 * np.random.randn(height, width // 2)\n\n# Extract all reference patches from the left half of the image\nprint(\"Extracting reference patches...\")\nt0 = time()\npatch_size = (7, 7)\ndata = extract_patches_2d(distorted[:, : width // 2], patch_size)\ndata = data.reshape(data.shape[0], -1)\ndata -= np.mean(data, axis=0)\ndata /= np.std(data, axis=0)\nprint(\"done in %.2fs.\" % (time() - t0))\n\n# #############################################################################\n# Learn the dictionary from reference patches\n\nprint(\"Learning the dictionary...\")\nt0 = time()\ndico = MiniBatchDictionaryLearning(n_components=50, alpha=1, n_iter=250)\nV = dico.fit(data).components_\ndt = time() - t0\nprint(\"done in %.2fs.\" % dt)\n\nplt.figure(figsize=(4.2, 4))\nfor i, comp in enumerate(V[:100]):\n plt.subplot(10, 10, i + 1)\n plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r, interpolation=\"nearest\")\n plt.xticks(())\n plt.yticks(())\nplt.suptitle(\n \"Dictionary learned from face patches\\n\"\n + \"Train time %.1fs on %d patches\" % (dt, len(data)),\n fontsize=16,\n)\nplt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)\n\n\n# #############################################################################\n# Display the distorted image\n\n\ndef show_with_diff(image, reference, title):\n \"\"\"Helper function to display denoising\"\"\"\n plt.figure(figsize=(5, 3.3))\n plt.subplot(1, 2, 1)\n plt.title(\"Image\")\n plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray, interpolation=\"nearest\")\n plt.xticks(())\n plt.yticks(())\n plt.subplot(1, 2, 2)\n difference = image - reference\n\n plt.title(\"Difference (norm: %.2f)\" % np.sqrt(np.sum(difference**2)))\n plt.imshow(\n difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr, interpolation=\"nearest\"\n )\n plt.xticks(())\n plt.yticks(())\n plt.suptitle(title, size=16)\n plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)\n\n\nshow_with_diff(distorted, face, \"Distorted image\")\n\n# #############################################################################\n# Extract noisy patches and reconstruct them using the dictionary\n\nprint(\"Extracting noisy patches... \")\nt0 = time()\ndata = extract_patches_2d(distorted[:, width // 2 :], patch_size)\ndata = data.reshape(data.shape[0], -1)\nintercept = np.mean(data, axis=0)\ndata -= intercept\nprint(\"done in %.2fs.\" % (time() - t0))\n\ntransform_algorithms = [\n (\"Orthogonal Matching Pursuit\\n1 atom\", \"omp\", {\"transform_n_nonzero_coefs\": 1}),\n (\"Orthogonal Matching Pursuit\\n2 atoms\", \"omp\", {\"transform_n_nonzero_coefs\": 2}),\n (\"Least-angle regression\\n4 atoms\", \"lars\", {\"transform_n_nonzero_coefs\": 4}),\n (\"Thresholding\\n alpha=0.1\", \"threshold\", {\"transform_alpha\": 0.1}),\n]\n\nreconstructions = {}\nfor title, transform_algorithm, kwargs in transform_algorithms:\n print(title + \"...\")\n reconstructions[title] = face.copy()\n t0 = time()\n dico.set_params(transform_algorithm=transform_algorithm, **kwargs)\n code = dico.transform(data)\n patches = np.dot(code, V)\n\n patches += intercept\n patches = patches.reshape(len(data), *patch_size)\n if transform_algorithm == \"threshold\":\n patches -= patches.min()\n patches /= patches.max()\n reconstructions[title][:, width // 2 :] = reconstruct_from_patches_2d(\n patches, (height, width // 2)\n )\n dt = time() - t0\n print(\"done in %.2fs.\" % dt)\n show_with_diff(reconstructions[title], face, title + \" (time: %.1fs)\" % dt)\n\nplt.show()"
+"from sklearn.feature_extraction.image import reconstruct_from_patches_2d\n\nprint(\"Extracting noisy patches... \")\nt0 = time()\ndata = extract_patches_2d(distorted[:, width // 2 :], patch_size)\ndata = data.reshape(data.shape[0], -1)\nintercept = np.mean(data, axis=0)\ndata -= intercept\nprint(\"done in %.2fs.\" % (time() - t0))\n\ntransform_algorithms = [\n (\"Orthogonal Matching Pursuit\\n1 atom\", \"omp\", {\"transform_n_nonzero_coefs\": 1}),\n (\"Orthogonal Matching Pursuit\\n2 atoms\", \"omp\", {\"transform_n_nonzero_coefs\": 2}),\n (\"Least-angle regression\\n4 atoms\", \"lars\", {\"transform_n_nonzero_coefs\": 4}),\n (\"Thresholding\\n alpha=0.1\", \"threshold\", {\"transform_alpha\": 0.1}),\n]\n\nreconstructions = {}\nfor title, transform_algorithm, kwargs in transform_algorithms:\n print(title + \"...\")\n reconstructions[title] = face.copy()\n t0 = time()\n dico.set_params(transform_algorithm=transform_algorithm, **kwargs)\n code = dico.transform(data)\n patches = np.dot(code, V)\n\n patches += intercept\n patches = patches.reshape(len(data), *patch_size)\n if transform_algorithm == \"threshold\":\n patches -= patches.min()\n patches /= patches.max()\n reconstructions[title][:, width // 2 :] = reconstruct_from_patches_2d(\n patches, (height, width // 2)\n )\n dt = time() - t0\n print(\"done in %.2fs.\" % dt)\n show_with_diff(reconstructions[title], face, title + \" (time: %.1fs)\" % dt)\n\nplt.show()"
 ]
 }
 ],
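The notebook diff mirrors the script diff above: every `# %%` section added to plot_image_denoising.py appears here as a markdown cell carrying the section title followed by a code cell carrying that section's code, while the old monolithic code cell shrinks to just the final reconstruction step. A rough sketch of that cell pairing, using plain dictionaries; the cells_for_section helper is hypothetical and not part of sphinx-gallery or this commit:

def cells_for_section(title, code):
    """Build the markdown + code cell pair that one '# %%' section becomes."""
    markdown_cell = {
        "cell_type": "markdown",
        "metadata": {},
        "source": ["## %s\n\n" % title],
    }
    code_cell = {
        "cell_type": "code",
        "execution_count": None,
        "metadata": {"collapsed": False},
        "outputs": [],
        "source": [code],
    }
    return [markdown_cell, code_cell]


# One of the sections added in this commit, expressed through the sketch above.
cells = cells_for_section(
    "Generate distorted image",
    "import numpy as np\nimport scipy as sp\n",
)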

dev/_downloads/scikit-learn-docs.zip

Binary file not shown (47.3 KB).

Additional binary files changed by −208 bytes, −48 bytes, 343 bytes, 427 bytes, and −71 bytes; contents not shown.