
Commit 11a4595

Pushing the docs to dev/ for branch: main, commit 0248dabaa75dd82741ca2ee0c0521df576e0d6c6

1 parent: 2b55c00

File tree: 1,251 files changed (+4346 / -4420 lines)


dev/_downloads/22c1b876aa7bf8b912208cbfed5299c7/plot_gmm_covariances.py (1 addition, 1 deletion)
@@ -59,7 +59,7 @@ def make_ellipses(gmm, ax):
         angle = 180 * angle / np.pi  # convert to degrees
         v = 2.0 * np.sqrt(2.0) * np.sqrt(v)
         ell = mpl.patches.Ellipse(
-            gmm.means_[n, :2], v[0], v[1], 180 + angle, color=color
+            gmm.means_[n, :2], v[0], v[1], angle=180 + angle, color=color
         )
         ell.set_clip_box(ax.bbox)
         ell.set_alpha(0.5)
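
The one-line fix replaces the positional rotation argument with the angle= keyword. This presumably tracks Matplotlib's deprecation of passing angle positionally to mpl.patches.Ellipse (it is keyword-only in recent releases; the commit itself does not say so). A minimal, self-contained sketch of the updated call, with illustrative center and size values:

import matplotlib as mpl
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
# Old, positional form (deprecated in recent Matplotlib):
#     mpl.patches.Ellipse((0.5, 0.5), 0.4, 0.2, 30, color="navy")
# Updated form, passing the rotation via the angle keyword:
ell = mpl.patches.Ellipse((0.5, 0.5), 0.4, 0.2, angle=30, color="navy")
ell.set_alpha(0.5)
ax.add_artist(ell)
ax.set_aspect("equal", "datalim")
plt.show()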

dev/_downloads/471829dadf19abf3dd2b87b08c9ffc92/plot_gmm_covariances.ipynb (1 addition, 1 deletion)
@@ -26,7 +26,7 @@
The changed notebook cell's "source" entry embeds the full plot_gmm_covariances.py script as a single string; the diff is the same one-line substitution as above, inside the embedded make_ellipses function:

-            gmm.means_[n, :2], v[0], v[1], 180 + angle, color=color
+            gmm.means_[n, :2], v[0], v[1], angle=180 + angle, color=color
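
For reference, make_ellipses in the embedded script derives the Ellipse geometry from each mixture component's 2x2 covariance via an eigendecomposition. A standalone sketch of that computation, using an illustrative covariance rather than a fitted model:

import numpy as np

cov = np.array([[2.0, 0.6],
                [0.6, 1.0]])  # illustrative 2x2 covariance
v, w = np.linalg.eigh(cov)  # eigenvalues (ascending) and eigenvector matrix
u = w[0] / np.linalg.norm(w[0])  # normalize the first row of w, as the example does
angle = 180 * np.arctan2(u[1], u[0]) / np.pi  # orientation in degrees
v = 2.0 * np.sqrt(2.0) * np.sqrt(v)  # eigenvalues -> full axis lengths, 2*sqrt(2*lambda)
print("width=%.3f height=%.3f angle=%.1f" % (v[0], v[1], angle))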
