
Commit dfaea59 (parent ae919c6)

Pushing the docs to dev/ for branch: master, commit 726fa36f2556e0d604d85a1de48ba56a8b6550db
File tree: 608 files changed, +1843 / -1205 lines

dev/_downloads/plot_missing_values.ipynb

Lines changed: 2 additions & 2 deletions
@@ -15,7 +15,7 @@
   "cell_type": "markdown",
   "metadata": {},
   "source": [
-  "\n# Imputing missing values before building an estimator\n\n\nMissing values can be replaced by the mean, the median or the most frequent\nvalue using the basic ``SimpleImputer``.\nThe median is a more robust estimator for data with high magnitude variables\nwhich could dominate results (otherwise known as a 'long tail').\n\nAnother option is the ``ChainedImputer``. This uses round-robin linear\nregression, treating every variable as an output in turn. The version\nimplemented assumes Gaussian (output) variables. If your features are obviously\nnon-Normal, consider transforming them to look more Normal so as to improve\nperformance.\n\n"
+  "\n# Imputing missing values before building an estimator\n\n\nMissing values can be replaced by the mean, the median or the most frequent\nvalue using the basic :func:`sklearn.impute.SimpleImputer`.\nThe median is a more robust estimator for data with high magnitude variables\nwhich could dominate results (otherwise known as a 'long tail').\n\nAnother option is the :func:`sklearn.impute.ChainedImputer`. This uses\nround-robin linear regression, treating every variable as an output in\nturn. The version implemented assumes Gaussian (output) variables. If your\nfeatures are obviously non-Normal, consider transforming them to look more\nNormal so as to improve performance.\n\nIn addition to using an imputing method, we can also keep an indication of\nwhich values were missing using :func:`sklearn.impute.MissingIndicator`, which\nmight carry some information.\n\n"
   ]
  },
  {
@@ -26,7 +26,7 @@
   },
   "outputs": [],
   "source": [
-  "import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import load_diabetes\nfrom sklearn.datasets import load_boston\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.impute import SimpleImputer, ChainedImputer\nfrom sklearn.model_selection import cross_val_score\n\nrng = np.random.RandomState(0)\n\n\ndef get_results(dataset):\n X_full, y_full = dataset.data, dataset.target\n n_samples = X_full.shape[0]\n n_features = X_full.shape[1]\n\n # Estimate the score on the entire dataset, with no missing values\n estimator = RandomForestRegressor(random_state=0, n_estimators=100)\n full_scores = cross_val_score(estimator, X_full, y_full,\n scoring='neg_mean_squared_error')\n\n # Add missing values in 75% of the lines\n missing_rate = 0.75\n n_missing_samples = int(np.floor(n_samples * missing_rate))\n missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,\n dtype=np.bool),\n np.ones(n_missing_samples,\n dtype=np.bool)))\n rng.shuffle(missing_samples)\n missing_features = rng.randint(0, n_features, n_missing_samples)\n\n # Estimate the score after replacing missing values by 0\n X_missing = X_full.copy()\n X_missing[np.where(missing_samples)[0], missing_features] = 0\n y_missing = y_full.copy()\n estimator = RandomForestRegressor(random_state=0, n_estimators=100)\n zero_impute_scores = cross_val_score(estimator, X_missing, y_missing,\n scoring='neg_mean_squared_error')\n\n # Estimate the score after imputation (mean strategy) of the missing values\n X_missing = X_full.copy()\n X_missing[np.where(missing_samples)[0], missing_features] = 0\n y_missing = y_full.copy()\n estimator = Pipeline([(\"imputer\", SimpleImputer(missing_values=0,\n strategy=\"mean\")),\n (\"forest\", RandomForestRegressor(random_state=0,\n n_estimators=100))])\n mean_impute_scores = cross_val_score(estimator, X_missing, y_missing,\n scoring='neg_mean_squared_error')\n\n # Estimate the score after chained imputation of the missing values\n estimator = Pipeline([(\"imputer\", ChainedImputer(missing_values=0,\n random_state=0)),\n (\"forest\", RandomForestRegressor(random_state=0,\n n_estimators=100))])\n chained_impute_scores = cross_val_score(estimator, X_missing, y_missing,\n scoring='neg_mean_squared_error')\n\n return ((full_scores.mean(), full_scores.std()),\n (zero_impute_scores.mean(), zero_impute_scores.std()),\n (mean_impute_scores.mean(), mean_impute_scores.std()),\n (chained_impute_scores.mean(), chained_impute_scores.std()))\n\n\nresults_diabetes = np.array(get_results(load_diabetes()))\nmses_diabetes = results_diabetes[:, 0] * -1\nstds_diabetes = results_diabetes[:, 1]\n\nresults_boston = np.array(get_results(load_boston()))\nmses_boston = results_boston[:, 0] * -1\nstds_boston = results_boston[:, 1]\n\nn_bars = len(mses_diabetes)\nxval = np.arange(n_bars)\n\nx_labels = ['Full data',\n 'Zero imputation',\n 'Mean Imputation',\n 'Chained Imputation']\ncolors = ['r', 'g', 'b', 'orange']\n\n# plot diabetes results\nplt.figure(figsize=(12, 6))\nax1 = plt.subplot(121)\nfor j in xval:\n ax1.barh(j, mses_diabetes[j], xerr=stds_diabetes[j],\n color=colors[j], alpha=0.6, align='center')\n\nax1.set_title('Imputation Techniques with Diabetes Data')\nax1.set_xlim(left=np.min(mses_diabetes) * 0.9,\n right=np.max(mses_diabetes) * 1.1)\nax1.set_yticks(xval)\nax1.set_xlabel('MSE')\nax1.invert_yaxis()\nax1.set_yticklabels(x_labels)\n\n# plot boston results\nax2 = plt.subplot(122)\nfor j in xval:\n ax2.barh(j, mses_boston[j], xerr=stds_boston[j],\n color=colors[j], alpha=0.6, align='center')\n\nax2.set_title('Imputation Techniques with Boston Data')\nax2.set_yticks(xval)\nax2.set_xlabel('MSE')\nax2.invert_yaxis()\nax2.set_yticklabels([''] * n_bars)\n\nplt.show()"
+  "import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import load_diabetes\nfrom sklearn.datasets import load_boston\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.pipeline import make_pipeline, make_union\nfrom sklearn.impute import SimpleImputer, ChainedImputer, MissingIndicator\nfrom sklearn.model_selection import cross_val_score\n\nrng = np.random.RandomState(0)\n\n\ndef get_results(dataset):\n X_full, y_full = dataset.data, dataset.target\n n_samples = X_full.shape[0]\n n_features = X_full.shape[1]\n\n # Estimate the score on the entire dataset, with no missing values\n estimator = RandomForestRegressor(random_state=0, n_estimators=100)\n full_scores = cross_val_score(estimator, X_full, y_full,\n scoring='neg_mean_squared_error')\n\n # Add missing values in 75% of the lines\n missing_rate = 0.75\n n_missing_samples = int(np.floor(n_samples * missing_rate))\n missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,\n dtype=np.bool),\n np.ones(n_missing_samples,\n dtype=np.bool)))\n rng.shuffle(missing_samples)\n missing_features = rng.randint(0, n_features, n_missing_samples)\n\n # Estimate the score after replacing missing values by 0\n X_missing = X_full.copy()\n X_missing[np.where(missing_samples)[0], missing_features] = 0\n y_missing = y_full.copy()\n estimator = RandomForestRegressor(random_state=0, n_estimators=100)\n zero_impute_scores = cross_val_score(estimator, X_missing, y_missing,\n scoring='neg_mean_squared_error')\n\n # Estimate the score after imputation (mean strategy) of the missing values\n X_missing = X_full.copy()\n X_missing[np.where(missing_samples)[0], missing_features] = 0\n y_missing = y_full.copy()\n estimator = make_pipeline(\n make_union(SimpleImputer(missing_values=0, strategy=\"mean\"),\n MissingIndicator(missing_values=0)),\n RandomForestRegressor(random_state=0, n_estimators=100))\n mean_impute_scores = cross_val_score(estimator, X_missing, y_missing,\n scoring='neg_mean_squared_error')\n\n # Estimate the score after chained imputation of the missing values\n estimator = make_pipeline(\n make_union(ChainedImputer(missing_values=0, random_state=0),\n MissingIndicator(missing_values=0)),\n RandomForestRegressor(random_state=0, n_estimators=100))\n chained_impute_scores = cross_val_score(estimator, X_missing, y_missing,\n scoring='neg_mean_squared_error')\n\n return ((full_scores.mean(), full_scores.std()),\n (zero_impute_scores.mean(), zero_impute_scores.std()),\n (mean_impute_scores.mean(), mean_impute_scores.std()),\n (chained_impute_scores.mean(), chained_impute_scores.std()))\n\n\nresults_diabetes = np.array(get_results(load_diabetes()))\nmses_diabetes = results_diabetes[:, 0] * -1\nstds_diabetes = results_diabetes[:, 1]\n\nresults_boston = np.array(get_results(load_boston()))\nmses_boston = results_boston[:, 0] * -1\nstds_boston = results_boston[:, 1]\n\nn_bars = len(mses_diabetes)\nxval = np.arange(n_bars)\n\nx_labels = ['Full data',\n 'Zero imputation',\n 'Mean Imputation',\n 'Chained Imputation']\ncolors = ['r', 'g', 'b', 'orange']\n\n# plot diabetes results\nplt.figure(figsize=(12, 6))\nax1 = plt.subplot(121)\nfor j in xval:\n ax1.barh(j, mses_diabetes[j], xerr=stds_diabetes[j],\n color=colors[j], alpha=0.6, align='center')\n\nax1.set_title('Imputation Techniques with Diabetes Data')\nax1.set_xlim(left=np.min(mses_diabetes) * 0.9,\n right=np.max(mses_diabetes) * 1.1)\nax1.set_yticks(xval)\nax1.set_xlabel('MSE')\nax1.invert_yaxis()\nax1.set_yticklabels(x_labels)\n\n# plot boston results\nax2 = plt.subplot(122)\nfor j in xval:\n ax2.barh(j, mses_boston[j], xerr=stds_boston[j],\n color=colors[j], alpha=0.6, align='center')\n\nax2.set_title('Imputation Techniques with Boston Data')\nax2.set_yticks(xval)\nax2.set_xlabel('MSE')\nax2.invert_yaxis()\nax2.set_yticklabels([''] * n_bars)\n\nplt.show()"
   ]
  }
 ],
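For context on the MissingIndicator referenced in the new notebook text, here is a minimal sketch of what the indicator actually produces. It is not part of this commit; it assumes the 0.20-era sklearn.impute API, with 0 encoding a missing entry as in the example, and uses a hypothetical toy matrix:

import numpy as np
from sklearn.impute import MissingIndicator

# Toy matrix where 0 encodes a missing entry, mirroring the example's
# missing_values=0 convention.
X = np.array([[7, 1, 0],
              [4, 0, 2],
              [0, 5, 6]])

# By default only features that contain missing values are kept; every
# column here has a 0, so the boolean mask covers all three columns.
indicator = MissingIndicator(missing_values=0)
mask = indicator.fit_transform(X)
print(mask)  # True exactly where X was 0

Concatenating this mask to the imputed features is what lets the downstream forest see which values were originally missing.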

dev/_downloads/plot_missing_values.py

Lines changed: 20 additions & 16 deletions
@@ -4,15 +4,19 @@
 ====================================================
 
 Missing values can be replaced by the mean, the median or the most frequent
-value using the basic ``SimpleImputer``.
+value using the basic :func:`sklearn.impute.SimpleImputer`.
 The median is a more robust estimator for data with high magnitude variables
 which could dominate results (otherwise known as a 'long tail').
 
-Another option is the ``ChainedImputer``. This uses round-robin linear
-regression, treating every variable as an output in turn. The version
-implemented assumes Gaussian (output) variables. If your features are obviously
-non-Normal, consider transforming them to look more Normal so as to improve
-performance.
+Another option is the :func:`sklearn.impute.ChainedImputer`. This uses
+round-robin linear regression, treating every variable as an output in
+turn. The version implemented assumes Gaussian (output) variables. If your
+features are obviously non-Normal, consider transforming them to look more
+Normal so as to improve performance.
+
+In addition to using an imputing method, we can also keep an indication of
+which values were missing using :func:`sklearn.impute.MissingIndicator`,
+which might carry some information.
 """
 
 import numpy as np
@@ -21,8 +25,8 @@
 from sklearn.datasets import load_diabetes
 from sklearn.datasets import load_boston
 from sklearn.ensemble import RandomForestRegressor
-from sklearn.pipeline import Pipeline
-from sklearn.impute import SimpleImputer, ChainedImputer
+from sklearn.pipeline import make_pipeline, make_union
+from sklearn.impute import SimpleImputer, ChainedImputer, MissingIndicator
 from sklearn.model_selection import cross_val_score
 
 rng = np.random.RandomState(0)
@@ -60,18 +64,18 @@ def get_results(dataset):
     X_missing = X_full.copy()
     X_missing[np.where(missing_samples)[0], missing_features] = 0
     y_missing = y_full.copy()
-    estimator = Pipeline([("imputer", SimpleImputer(missing_values=0,
-                                                    strategy="mean")),
-                          ("forest", RandomForestRegressor(random_state=0,
-                                                           n_estimators=100))])
+    estimator = make_pipeline(
+        make_union(SimpleImputer(missing_values=0, strategy="mean"),
+                   MissingIndicator(missing_values=0)),
+        RandomForestRegressor(random_state=0, n_estimators=100))
     mean_impute_scores = cross_val_score(estimator, X_missing, y_missing,
                                          scoring='neg_mean_squared_error')
 
     # Estimate the score after chained imputation of the missing values
-    estimator = Pipeline([("imputer", ChainedImputer(missing_values=0,
-                                                     random_state=0)),
-                          ("forest", RandomForestRegressor(random_state=0,
-                                                           n_estimators=100))])
+    estimator = make_pipeline(
+        make_union(ChainedImputer(missing_values=0, random_state=0),
+                   MissingIndicator(missing_values=0)),
+        RandomForestRegressor(random_state=0, n_estimators=100))
     chained_impute_scores = cross_val_score(estimator, X_missing, y_missing,
                                             scoring='neg_mean_squared_error')
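The make_pipeline/make_union pattern introduced by this hunk can be exercised on its own. Below is a minimal, self-contained sketch using synthetic data with 0 as the missing marker; the data and the small n_estimators are assumptions made only to keep the sketch fast, not settings from the example. ChainedImputer is left out because it only existed in the development version this commit targets:

import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.impute import MissingIndicator, SimpleImputer
from sklearn.pipeline import make_pipeline, make_union

rng = np.random.RandomState(0)
X = rng.rand(100, 4)
X[rng.rand(100, 4) < 0.25] = 0  # encode "missing" as 0, as in the example
y = rng.rand(100)

# make_union wraps both transformers in a FeatureUnion: each is fit on
# the same input and their outputs are stacked side by side, so the
# forest sees the imputed values and the boolean missing-value mask.
estimator = make_pipeline(
    make_union(SimpleImputer(missing_values=0, strategy="mean"),
               MissingIndicator(missing_values=0)),
    RandomForestRegressor(random_state=0, n_estimators=10))
estimator.fit(X, y)
print(estimator.predict(X[:3]))

The equivalent named-steps form would build FeatureUnion and Pipeline directly; the helpers just generate the step names automatically.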

dev/_downloads/scikit-learn-docs.pdf

19.9 KB (binary file not shown)

dev/_images/iris.png

Binary image files changed (not shown)
