
Commit dfab85d

Pushing the docs to dev/ for branch: master, commit 1675e77e3e41ca287a451bc65e37a2eba5ca1e39
1 parent e01f7d0 commit dfab85d

File tree

1,097 files changed: +3,635 -4,562 lines

Two binary files changed (284 Bytes, 280 Bytes); binary files not shown.

dev/_downloads/plot_iterative_imputer_variants_comparison.ipynb

Lines changed: 1 addition & 1 deletion
@@ -26,7 +26,7 @@
    },
    "outputs": [],
    "source": [
-    "print(__doc__)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nfrom sklearn.datasets import fetch_california_housing\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.impute import IterativeImputer\nfrom sklearn.linear_model import BayesianRidge\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import ExtraTreesRegressor\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.model_selection import cross_val_score\n\nN_SPLITS = 5\n\nrng = np.random.RandomState(0)\n\nX_full, y_full = fetch_california_housing(return_X_y=True)\n# ~2k samples is enough for the purpose of the example.\n# Remove the following two lines for a slower run with different error bars.\nX_full = X_full[::10]\ny_full = y_full[::10]\nn_samples, n_features = X_full.shape\n\n# Estimate the score on the entire dataset, with no missing values\nbr_estimator = BayesianRidge()\nscore_full_data = pd.DataFrame(\n cross_val_score(\n br_estimator, X_full, y_full, scoring='neg_mean_squared_error',\n cv=N_SPLITS\n ),\n columns=['Full Data']\n)\n\n# Add a single missing value to each row\nX_missing = X_full.copy()\ny_missing = y_full\nmissing_samples = np.arange(n_samples)\nmissing_features = rng.choice(n_features, n_samples, replace=True)\nX_missing[missing_samples, missing_features] = np.nan\n\n# Estimate the score after imputation (mean and median strategies)\nscore_simple_imputer = pd.DataFrame()\nfor strategy in ('mean', 'median'):\n estimator = make_pipeline(\n SimpleImputer(missing_values=np.nan, strategy=strategy),\n br_estimator\n )\n score_simple_imputer[strategy] = cross_val_score(\n estimator, X_missing, y_missing, scoring='neg_mean_squared_error',\n cv=N_SPLITS\n )\n\n# Estimate the score after iterative imputation of the missing values\n# with different estimators\nestimators = [\n BayesianRidge(),\n DecisionTreeRegressor(max_features='sqrt', random_state=0),\n ExtraTreesRegressor(n_estimators=10, random_state=0),\n KNeighborsRegressor(n_neighbors=15)\n]\nscore_iterative_imputer = pd.DataFrame()\nfor impute_estimator in estimators:\n estimator = make_pipeline(\n IterativeImputer(random_state=0, estimator=impute_estimator),\n br_estimator\n )\n score_iterative_imputer[impute_estimator.__class__.__name__] = \\\n cross_val_score(\n estimator, X_missing, y_missing, scoring='neg_mean_squared_error',\n cv=N_SPLITS\n )\n\nscores = pd.concat(\n [score_full_data, score_simple_imputer, score_iterative_imputer],\n keys=['Original', 'SimpleImputer', 'IterativeImputer'], axis=1\n)\n\n# plot boston results\nfig, ax = plt.subplots(figsize=(13, 6))\nmeans = -scores.mean()\nerrors = scores.std()\nmeans.plot.barh(xerr=errors, ax=ax)\nax.set_title('California Housing Regression with Different Imputation Methods')\nax.set_xlabel('MSE (smaller is better)')\nax.set_yticks(np.arange(means.shape[0]))\nax.set_yticklabels([\" w/ \".join(label) for label in means.index.get_values()])\nplt.tight_layout(pad=1)\nplt.show()"
+    "print(__doc__)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# To use this experimental feature, we need to explicitly ask for it:\nfrom sklearn.experimental import enable_iterative_imputer # noqa\nfrom sklearn.datasets import fetch_california_housing\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.impute import IterativeImputer\nfrom sklearn.linear_model import BayesianRidge\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import ExtraTreesRegressor\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.model_selection import cross_val_score\n\nN_SPLITS = 5\n\nrng = np.random.RandomState(0)\n\nX_full, y_full = fetch_california_housing(return_X_y=True)\n# ~2k samples is enough for the purpose of the example.\n# Remove the following two lines for a slower run with different error bars.\nX_full = X_full[::10]\ny_full = y_full[::10]\nn_samples, n_features = X_full.shape\n\n# Estimate the score on the entire dataset, with no missing values\nbr_estimator = BayesianRidge()\nscore_full_data = pd.DataFrame(\n cross_val_score(\n br_estimator, X_full, y_full, scoring='neg_mean_squared_error',\n cv=N_SPLITS\n ),\n columns=['Full Data']\n)\n\n# Add a single missing value to each row\nX_missing = X_full.copy()\ny_missing = y_full\nmissing_samples = np.arange(n_samples)\nmissing_features = rng.choice(n_features, n_samples, replace=True)\nX_missing[missing_samples, missing_features] = np.nan\n\n# Estimate the score after imputation (mean and median strategies)\nscore_simple_imputer = pd.DataFrame()\nfor strategy in ('mean', 'median'):\n estimator = make_pipeline(\n SimpleImputer(missing_values=np.nan, strategy=strategy),\n br_estimator\n )\n score_simple_imputer[strategy] = cross_val_score(\n estimator, X_missing, y_missing, scoring='neg_mean_squared_error',\n cv=N_SPLITS\n )\n\n# Estimate the score after iterative imputation of the missing values\n# with different estimators\nestimators = [\n BayesianRidge(),\n DecisionTreeRegressor(max_features='sqrt', random_state=0),\n ExtraTreesRegressor(n_estimators=10, random_state=0),\n KNeighborsRegressor(n_neighbors=15)\n]\nscore_iterative_imputer = pd.DataFrame()\nfor impute_estimator in estimators:\n estimator = make_pipeline(\n IterativeImputer(random_state=0, estimator=impute_estimator),\n br_estimator\n )\n score_iterative_imputer[impute_estimator.__class__.__name__] = \\\n cross_val_score(\n estimator, X_missing, y_missing, scoring='neg_mean_squared_error',\n cv=N_SPLITS\n )\n\nscores = pd.concat(\n [score_full_data, score_simple_imputer, score_iterative_imputer],\n keys=['Original', 'SimpleImputer', 'IterativeImputer'], axis=1\n)\n\n# plot boston results\nfig, ax = plt.subplots(figsize=(13, 6))\nmeans = -scores.mean()\nerrors = scores.std()\nmeans.plot.barh(xerr=errors, ax=ax)\nax.set_title('California Housing Regression with Different Imputation Methods')\nax.set_xlabel('MSE (smaller is better)')\nax.set_yticks(np.arange(means.shape[0]))\nax.set_yticklabels([\" w/ \".join(label) for label in means.index.get_values()])\nplt.tight_layout(pad=1)\nplt.show()"
    ]
   }
  ],
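
The escaped JSON string above is hard to scan. The pattern this notebook demonstrates is an IterativeImputer whose per-feature models are user-chosen regressors, chained with a downstream BayesianRidge; a minimal sketch of that pattern, condensed from the source string above:

    from sklearn.experimental import enable_iterative_imputer  # noqa
    from sklearn.impute import IterativeImputer
    from sklearn.linear_model import BayesianRidge
    from sklearn.ensemble import ExtraTreesRegressor
    from sklearn.pipeline import make_pipeline

    # Impute each feature with an ExtraTreesRegressor, then fit BayesianRidge
    # on the completed data (one of the four variants compared in the example).
    estimator = make_pipeline(
        IterativeImputer(random_state=0,
                         estimator=ExtraTreesRegressor(n_estimators=10,
                                                       random_state=0)),
        BayesianRidge())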

dev/_downloads/plot_iterative_imputer_variants_comparison.py

Lines changed: 2 additions & 0 deletions
@@ -42,6 +42,8 @@
 import matplotlib.pyplot as plt
 import pandas as pd
 
+# To use this experimental feature, we need to explicitly ask for it:
+from sklearn.experimental import enable_iterative_imputer # noqa
 from sklearn.datasets import fetch_california_housing
 from sklearn.impute import SimpleImputer
 from sklearn.impute import IterativeImputer
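
The two added lines are load-bearing: IterativeImputer is experimental at this point, and importing it from sklearn.impute without first importing enable_iterative_imputer raises an ImportError. A minimal sketch with toy data (the array below is illustrative, not from the example):

    import numpy as np

    # Without this import, the IterativeImputer import below fails with
    # ImportError in releases where the estimator is experimental.
    from sklearn.experimental import enable_iterative_imputer  # noqa
    from sklearn.impute import IterativeImputer

    X = np.array([[1.0, 2.0], [3.0, np.nan], [np.nan, 6.0], [8.0, 9.0]])
    # Each np.nan entry is replaced by a round-robin regression estimate.
    print(IterativeImputer(random_state=0).fit_transform(X))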

dev/_downloads/plot_missing_values.ipynb

Lines changed: 1 addition & 1 deletion
@@ -26,7 +26,7 @@
    },
    "outputs": [],
    "source": [
-    "print(__doc__)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import load_diabetes\nfrom sklearn.datasets import load_boston\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.pipeline import make_pipeline, make_union\nfrom sklearn.impute import SimpleImputer, IterativeImputer, MissingIndicator\nfrom sklearn.model_selection import cross_val_score\n\nrng = np.random.RandomState(0)\n\nN_SPLITS = 5\nREGRESSOR = RandomForestRegressor(random_state=0, n_estimators=100)\n\n\ndef get_scores_for_imputer(imputer, X_missing, y_missing):\n estimator = make_pipeline(\n make_union(imputer, MissingIndicator(missing_values=0)),\n REGRESSOR)\n impute_scores = cross_val_score(estimator, X_missing, y_missing,\n scoring='neg_mean_squared_error',\n cv=N_SPLITS)\n return impute_scores\n\n\ndef get_results(dataset):\n X_full, y_full = dataset.data, dataset.target\n n_samples = X_full.shape[0]\n n_features = X_full.shape[1]\n\n # Estimate the score on the entire dataset, with no missing values\n full_scores = cross_val_score(REGRESSOR, X_full, y_full,\n scoring='neg_mean_squared_error',\n cv=N_SPLITS)\n\n # Add missing values in 75% of the lines\n missing_rate = 0.75\n n_missing_samples = int(np.floor(n_samples * missing_rate))\n missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,\n dtype=np.bool),\n np.ones(n_missing_samples,\n dtype=np.bool)))\n rng.shuffle(missing_samples)\n missing_features = rng.randint(0, n_features, n_missing_samples)\n X_missing = X_full.copy()\n X_missing[np.where(missing_samples)[0], missing_features] = 0\n y_missing = y_full.copy()\n\n # Estimate the score after replacing missing values by 0\n imputer = SimpleImputer(missing_values=0,\n strategy='constant',\n fill_value=0)\n zero_impute_scores = get_scores_for_imputer(imputer, X_missing, y_missing)\n\n # Estimate the score after imputation (mean strategy) of the missing values\n imputer = SimpleImputer(missing_values=0, strategy=\"mean\")\n mean_impute_scores = get_scores_for_imputer(imputer, X_missing, y_missing)\n\n # Estimate the score after iterative imputation of the missing values\n imputer = IterativeImputer(missing_values=0,\n random_state=0,\n n_nearest_features=5)\n iterative_impute_scores = get_scores_for_imputer(imputer,\n X_missing,\n y_missing)\n\n return ((full_scores.mean(), full_scores.std()),\n (zero_impute_scores.mean(), zero_impute_scores.std()),\n (mean_impute_scores.mean(), mean_impute_scores.std()),\n (iterative_impute_scores.mean(), iterative_impute_scores.std()))\n\n\nresults_diabetes = np.array(get_results(load_diabetes()))\nmses_diabetes = results_diabetes[:, 0] * -1\nstds_diabetes = results_diabetes[:, 1]\n\nresults_boston = np.array(get_results(load_boston()))\nmses_boston = results_boston[:, 0] * -1\nstds_boston = results_boston[:, 1]\n\nn_bars = len(mses_diabetes)\nxval = np.arange(n_bars)\n\nx_labels = ['Full data',\n 'Zero imputation',\n 'Mean Imputation',\n 'Multivariate Imputation']\ncolors = ['r', 'g', 'b', 'orange']\n\n# plot diabetes results\nplt.figure(figsize=(12, 6))\nax1 = plt.subplot(121)\nfor j in xval:\n ax1.barh(j, mses_diabetes[j], xerr=stds_diabetes[j],\n color=colors[j], alpha=0.6, align='center')\n\nax1.set_title('Imputation Techniques with Diabetes Data')\nax1.set_xlim(left=np.min(mses_diabetes) * 0.9,\n right=np.max(mses_diabetes) * 1.1)\nax1.set_yticks(xval)\nax1.set_xlabel('MSE')\nax1.invert_yaxis()\nax1.set_yticklabels(x_labels)\n\n# plot boston results\nax2 = plt.subplot(122)\nfor j in xval:\n ax2.barh(j, mses_boston[j], xerr=stds_boston[j],\n color=colors[j], alpha=0.6, align='center')\n\nax2.set_title('Imputation Techniques with Boston Data')\nax2.set_yticks(xval)\nax2.set_xlabel('MSE')\nax2.invert_yaxis()\nax2.set_yticklabels([''] * n_bars)\n\nplt.show()"
+    "print(__doc__)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# To use the experimental IterativeImputer, we need to explicitly ask for it:\nfrom sklearn.experimental import enable_iterative_imputer # noqa\nfrom sklearn.datasets import load_diabetes\nfrom sklearn.datasets import load_boston\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.pipeline import make_pipeline, make_union\nfrom sklearn.impute import SimpleImputer, IterativeImputer, MissingIndicator\nfrom sklearn.model_selection import cross_val_score\n\nrng = np.random.RandomState(0)\n\nN_SPLITS = 5\nREGRESSOR = RandomForestRegressor(random_state=0, n_estimators=100)\n\n\ndef get_scores_for_imputer(imputer, X_missing, y_missing):\n estimator = make_pipeline(\n make_union(imputer, MissingIndicator(missing_values=0)),\n REGRESSOR)\n impute_scores = cross_val_score(estimator, X_missing, y_missing,\n scoring='neg_mean_squared_error',\n cv=N_SPLITS)\n return impute_scores\n\n\ndef get_results(dataset):\n X_full, y_full = dataset.data, dataset.target\n n_samples = X_full.shape[0]\n n_features = X_full.shape[1]\n\n # Estimate the score on the entire dataset, with no missing values\n full_scores = cross_val_score(REGRESSOR, X_full, y_full,\n scoring='neg_mean_squared_error',\n cv=N_SPLITS)\n\n # Add missing values in 75% of the lines\n missing_rate = 0.75\n n_missing_samples = int(np.floor(n_samples * missing_rate))\n missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,\n dtype=np.bool),\n np.ones(n_missing_samples,\n dtype=np.bool)))\n rng.shuffle(missing_samples)\n missing_features = rng.randint(0, n_features, n_missing_samples)\n X_missing = X_full.copy()\n X_missing[np.where(missing_samples)[0], missing_features] = 0\n y_missing = y_full.copy()\n\n # Estimate the score after replacing missing values by 0\n imputer = SimpleImputer(missing_values=0,\n strategy='constant',\n fill_value=0)\n zero_impute_scores = get_scores_for_imputer(imputer, X_missing, y_missing)\n\n # Estimate the score after imputation (mean strategy) of the missing values\n imputer = SimpleImputer(missing_values=0, strategy=\"mean\")\n mean_impute_scores = get_scores_for_imputer(imputer, X_missing, y_missing)\n\n # Estimate the score after iterative imputation of the missing values\n imputer = IterativeImputer(missing_values=0,\n random_state=0,\n n_nearest_features=5)\n iterative_impute_scores = get_scores_for_imputer(imputer,\n X_missing,\n y_missing)\n\n return ((full_scores.mean(), full_scores.std()),\n (zero_impute_scores.mean(), zero_impute_scores.std()),\n (mean_impute_scores.mean(), mean_impute_scores.std()),\n (iterative_impute_scores.mean(), iterative_impute_scores.std()))\n\n\nresults_diabetes = np.array(get_results(load_diabetes()))\nmses_diabetes = results_diabetes[:, 0] * -1\nstds_diabetes = results_diabetes[:, 1]\n\nresults_boston = np.array(get_results(load_boston()))\nmses_boston = results_boston[:, 0] * -1\nstds_boston = results_boston[:, 1]\n\nn_bars = len(mses_diabetes)\nxval = np.arange(n_bars)\n\nx_labels = ['Full data',\n 'Zero imputation',\n 'Mean Imputation',\n 'Multivariate Imputation']\ncolors = ['r', 'g', 'b', 'orange']\n\n# plot diabetes results\nplt.figure(figsize=(12, 6))\nax1 = plt.subplot(121)\nfor j in xval:\n ax1.barh(j, mses_diabetes[j], xerr=stds_diabetes[j],\n color=colors[j], alpha=0.6, align='center')\n\nax1.set_title('Imputation Techniques with Diabetes Data')\nax1.set_xlim(left=np.min(mses_diabetes) * 0.9,\n right=np.max(mses_diabetes) * 1.1)\nax1.set_yticks(xval)\nax1.set_xlabel('MSE')\nax1.invert_yaxis()\nax1.set_yticklabels(x_labels)\n\n# plot boston results\nax2 = plt.subplot(122)\nfor j in xval:\n ax2.barh(j, mses_boston[j], xerr=stds_boston[j],\n color=colors[j], alpha=0.6, align='center')\n\nax2.set_title('Imputation Techniques with Boston Data')\nax2.set_yticks(xval)\nax2.set_xlabel('MSE')\nax2.invert_yaxis()\nax2.set_yticklabels([''] * n_bars)\n\nplt.show()"
    ]
   }
  ],
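
Inside the source string above, the get_scores_for_imputer helper unions the imputer's output with a MissingIndicator mask before the regressor, so the model sees both the filled-in values and where they were missing. A condensed sketch of that pipeline, taken from the helper:

    from sklearn.ensemble import RandomForestRegressor
    from sklearn.experimental import enable_iterative_imputer  # noqa
    from sklearn.impute import IterativeImputer, MissingIndicator
    from sklearn.pipeline import make_pipeline, make_union

    # 0 marks a missing entry in this example; the union feeds the regressor
    # both the imputed feature values and a boolean missingness mask.
    estimator = make_pipeline(
        make_union(IterativeImputer(missing_values=0, random_state=0),
                   MissingIndicator(missing_values=0)),
        RandomForestRegressor(random_state=0, n_estimators=100))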

dev/_downloads/plot_missing_values.py

Lines changed: 2 additions & 0 deletions
@@ -23,6 +23,8 @@
 import numpy as np
 import matplotlib.pyplot as plt
 
+# To use the experimental IterativeImputer, we need to explicitly ask for it:
+from sklearn.experimental import enable_iterative_imputer # noqa
 from sklearn.datasets import load_diabetes
 from sklearn.datasets import load_boston
 from sklearn.ensemble import RandomForestRegressor
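
As a quick check that the enabled import works with this example's missing_values=0 convention, a sketch with toy data (the matrix below is illustrative, not from the example):

    import numpy as np
    from sklearn.experimental import enable_iterative_imputer  # noqa
    from sklearn.impute import IterativeImputer, SimpleImputer

    # Toy matrix where 0 marks a missing entry, matching the convention in
    # plot_missing_values.py. True values are all >= 1, so 0 is unambiguous.
    rng = np.random.RandomState(0)
    X = rng.rand(50, 8) + 1.0
    X[rng.rand(50, 8) < 0.1] = 0  # hide roughly 10% of the entries

    X_mean = SimpleImputer(missing_values=0, strategy='mean').fit_transform(X)
    X_iter = IterativeImputer(missing_values=0, random_state=0,
                              n_nearest_features=5).fit_transform(X)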

dev/_downloads/scikit-learn-docs.pdf

3.36 KB; binary file not shown.

dev/_images/iris.png

-163 Bytes; binary file not shown.
