
Commit 1bc75cb

Pushing the docs to dev/ for branch: master, commit 9fda73654361521b1f7e5a3d72d347be82e3075a
1 parent 8ae1753 commit 1bc75cb

File tree

1,208 files changed: +3679 −3679 lines changed


dev/_downloads/01fdc7c95204e4a420de7cd297711693/plot_feature_union.py

Lines changed: 1 addition & 1 deletion
@@ -33,7 +33,7 @@
# This dataset is way too high-dimensional. Better do PCA:
pca = PCA(n_components=2)

-# Maybe some original features where good, too?
+# Maybe some original features were good, too?
selection = SelectKBest(k=1)

# Build estimator from PCA and Univariate selection:

dev/_downloads/1273a3baa87138f2b817bfc78fe7ecb4/plot_feature_union.ipynb

Lines changed: 1 addition & 1 deletion
@@ -26,7 +26,7 @@
},
"outputs": [],
"source": [
-"# Author: Andreas Mueller <[email protected]>\n#\n# License: BSD 3 clause\n\nfrom sklearn.pipeline import Pipeline, FeatureUnion\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.svm import SVC\nfrom sklearn.datasets import load_iris\nfrom sklearn.decomposition import PCA\nfrom sklearn.feature_selection import SelectKBest\n\niris = load_iris()\n\nX, y = iris.data, iris.target\n\n# This dataset is way too high-dimensional. Better do PCA:\npca = PCA(n_components=2)\n\n# Maybe some original features where good, too?\nselection = SelectKBest(k=1)\n\n# Build estimator from PCA and Univariate selection:\n\ncombined_features = FeatureUnion([(\"pca\", pca), (\"univ_select\", selection)])\n\n# Use combined features to transform dataset:\nX_features = combined_features.fit(X, y).transform(X)\nprint(\"Combined space has\", X_features.shape[1], \"features\")\n\nsvm = SVC(kernel=\"linear\")\n\n# Do grid search over k, n_components and C:\n\npipeline = Pipeline([(\"features\", combined_features), (\"svm\", svm)])\n\nparam_grid = dict(features__pca__n_components=[1, 2, 3],\n features__univ_select__k=[1, 2],\n svm__C=[0.1, 1, 10])\n\ngrid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)\ngrid_search.fit(X, y)\nprint(grid_search.best_estimator_)"
+"# Author: Andreas Mueller <[email protected]>\n#\n# License: BSD 3 clause\n\nfrom sklearn.pipeline import Pipeline, FeatureUnion\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.svm import SVC\nfrom sklearn.datasets import load_iris\nfrom sklearn.decomposition import PCA\nfrom sklearn.feature_selection import SelectKBest\n\niris = load_iris()\n\nX, y = iris.data, iris.target\n\n# This dataset is way too high-dimensional. Better do PCA:\npca = PCA(n_components=2)\n\n# Maybe some original features were good, too?\nselection = SelectKBest(k=1)\n\n# Build estimator from PCA and Univariate selection:\n\ncombined_features = FeatureUnion([(\"pca\", pca), (\"univ_select\", selection)])\n\n# Use combined features to transform dataset:\nX_features = combined_features.fit(X, y).transform(X)\nprint(\"Combined space has\", X_features.shape[1], \"features\")\n\nsvm = SVC(kernel=\"linear\")\n\n# Do grid search over k, n_components and C:\n\npipeline = Pipeline([(\"features\", combined_features), (\"svm\", svm)])\n\nparam_grid = dict(features__pca__n_components=[1, 2, 3],\n features__univ_select__k=[1, 2],\n svm__C=[0.1, 1, 10])\n\ngrid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)\ngrid_search.fit(X, y)\nprint(grid_search.best_estimator_)"
]
}
],
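For readability, the corrected notebook cell above decodes to the following script. This is the same scikit-learn feature-union example, reproduced here only so the one-word comment fix can be read in context; nothing else is changed.

# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause

from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest

iris = load_iris()

X, y = iris.data, iris.target

# This dataset is way too high-dimensional. Better do PCA:
pca = PCA(n_components=2)

# Maybe some original features were good, too?
selection = SelectKBest(k=1)

# Build estimator from PCA and Univariate selection:

combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)])

# Use combined features to transform dataset:
X_features = combined_features.fit(X, y).transform(X)
print("Combined space has", X_features.shape[1], "features")

svm = SVC(kernel="linear")

# Do grid search over k, n_components and C:

pipeline = Pipeline([("features", combined_features), ("svm", svm)])

param_grid = dict(features__pca__n_components=[1, 2, 3],
                  features__univ_select__k=[1, 2],
                  svm__C=[0.1, 1, 10])

grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)
grid_search.fit(X, y)
print(grid_search.best_estimator_)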

dev/_downloads/scikit-learn-docs.pdf

Binary file changed (−3.33 KB); contents not shown.

dev/_images/binder_badge_logo.png

Binary image changed.

dev/_images/iris.png

Binary image changed.
