Skip to content

Commit a3cc71b

Browse files
committed
Pushing the docs to dev/ for branch: main, commit bb8776874410d65f510717c97b8027331bb0f3ff
1 parent 2101642 commit a3cc71b

File tree

1,488 files changed

+8505
-8449
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

1,488 files changed

+8505
-8449
lines changed

dev/.buildinfo

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
# Sphinx build info version 1
22
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
3-
config: 96897f112db88d2310dacd90b7b48d81
3+
config: 1408e0609485347c9900f4d7f3e01442
44
tags: 645f666f9bcd5a90fca523b33c5a78b7
Binary file not shown.

dev/_downloads/1b8827af01c9a70017a4739bcf2e21a8/plot_gpr_co2.py

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -66,7 +66,15 @@
6666
# We will preprocess the dataset by taking a monthly average and drop months
6767
# for which no measurements were collected. Such processing will have a
6868
# smoothing effect on the data.
69-
co2_data = co2_data.resample("M").mean().dropna(axis="index", how="any")
69+
70+
try:
71+
co2_data_resampled_monthly = co2_data.resample("ME")
72+
except ValueError:
73+
# pandas < 2.2 uses M instead of ME
74+
co2_data_resampled_monthly = co2_data.resample("M")
75+
76+
77+
co2_data = co2_data_resampled_monthly.mean().dropna(axis="index", how="any")
7078
co2_data.plot()
7179
plt.ylabel("Monthly average of CO$_2$ concentration (ppm)")
7280
_ = plt.title(

dev/_downloads/21b82d82985712b5de6347f382c77c86/plot_partial_dependence.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -58,7 +58,7 @@
5858
},
5959
"outputs": [],
6060
"source": [
61-
"X[\"weather\"].replace(to_replace=\"heavy_rain\", value=\"rain\", inplace=True)"
61+
"X[\"weather\"] = (\n X[\"weather\"]\n .astype(object)\n .replace(to_replace=\"heavy_rain\", value=\"rain\")\n .astype(\"category\")\n)"
6262
]
6363
},
6464
{
Binary file not shown.

dev/_downloads/7012baed63b9a27f121bae611b8285c2/plot_cyclical_feature_engineering.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -141,7 +141,7 @@
141141
},
142142
"outputs": [],
143143
"source": [
144-
"X[\"weather\"].replace(to_replace=\"heavy_rain\", value=\"rain\", inplace=True)"
144+
"X[\"weather\"] = (\n X[\"weather\"]\n .astype(object)\n .replace(to_replace=\"heavy_rain\", value=\"rain\")\n .astype(\"category\")\n)"
145145
]
146146
},
147147
{

dev/_downloads/86c888008757148890daaf43d664fa71/plot_tweedie_regression_insurance_claims.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -79,7 +79,7 @@ def load_mtpl2(n_samples=None):
7979
df_sev = df_sev.groupby("IDpol").sum()
8080

8181
df = df_freq.join(df_sev, how="left")
82-
df["ClaimAmount"].fillna(0, inplace=True)
82+
df["ClaimAmount"] = df["ClaimAmount"].fillna(0)
8383

8484
# unquote string fields
8585
for column_name in df.columns[df.dtypes.values == object]:

dev/_downloads/898b30acf62919d918478efbe526195f/plot_digits_pipe.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@
1515
},
1616
"outputs": [],
1717
"source": [
18-
"# Code source: Ga\u00ebl Varoquaux\n# Modified for documentation by Jaques Grobler\n# License: BSD 3 clause\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn import datasets\nfrom sklearn.decomposition import PCA\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\n\n# Define a pipeline to search for the best combination of PCA truncation\n# and classifier regularization.\npca = PCA()\n# Define a Standard Scaler to normalize inputs\nscaler = StandardScaler()\n\n# set the tolerance to a large value to make the example faster\nlogistic = LogisticRegression(max_iter=10000, tol=0.1)\npipe = Pipeline(steps=[(\"scaler\", scaler), (\"pca\", pca), (\"logistic\", logistic)])\n\nX_digits, y_digits = datasets.load_digits(return_X_y=True)\n# Parameters of pipelines can be set using '__' separated parameter names:\nparam_grid = {\n \"pca__n_components\": [5, 15, 30, 45, 60],\n \"logistic__C\": np.logspace(-4, 4, 4),\n}\nsearch = GridSearchCV(pipe, param_grid, n_jobs=2)\nsearch.fit(X_digits, y_digits)\nprint(\"Best parameter (CV score=%0.3f):\" % search.best_score_)\nprint(search.best_params_)\n\n# Plot the PCA spectrum\npca.fit(X_digits)\n\nfig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True, figsize=(6, 6))\nax0.plot(\n np.arange(1, pca.n_components_ + 1), pca.explained_variance_ratio_, \"+\", linewidth=2\n)\nax0.set_ylabel(\"PCA explained variance ratio\")\n\nax0.axvline(\n search.best_estimator_.named_steps[\"pca\"].n_components,\n linestyle=\":\",\n label=\"n_components chosen\",\n)\nax0.legend(prop=dict(size=12))\n\n# For each number of components, find the best classifier results\nresults = pd.DataFrame(search.cv_results_)\ncomponents_col = \"param_pca__n_components\"\nbest_clfs = results.groupby(components_col).apply(\n lambda g: g.nlargest(1, \"mean_test_score\")\n)\n\nbest_clfs.plot(\n 
x=components_col, y=\"mean_test_score\", yerr=\"std_test_score\", legend=False, ax=ax1\n)\nax1.set_ylabel(\"Classification accuracy (val)\")\nax1.set_xlabel(\"n_components\")\n\nplt.xlim(-1, 70)\n\nplt.tight_layout()\nplt.show()"
18+
"# Code source: Ga\u00ebl Varoquaux\n# Modified for documentation by Jaques Grobler\n# License: BSD 3 clause\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn import datasets\nfrom sklearn.decomposition import PCA\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\n\n# Define a pipeline to search for the best combination of PCA truncation\n# and classifier regularization.\npca = PCA()\n# Define a Standard Scaler to normalize inputs\nscaler = StandardScaler()\n\n# set the tolerance to a large value to make the example faster\nlogistic = LogisticRegression(max_iter=10000, tol=0.1)\npipe = Pipeline(steps=[(\"scaler\", scaler), (\"pca\", pca), (\"logistic\", logistic)])\n\nX_digits, y_digits = datasets.load_digits(return_X_y=True)\n# Parameters of pipelines can be set using '__' separated parameter names:\nparam_grid = {\n \"pca__n_components\": [5, 15, 30, 45, 60],\n \"logistic__C\": np.logspace(-4, 4, 4),\n}\nsearch = GridSearchCV(pipe, param_grid, n_jobs=2)\nsearch.fit(X_digits, y_digits)\nprint(\"Best parameter (CV score=%0.3f):\" % search.best_score_)\nprint(search.best_params_)\n\n# Plot the PCA spectrum\npca.fit(X_digits)\n\nfig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True, figsize=(6, 6))\nax0.plot(\n np.arange(1, pca.n_components_ + 1), pca.explained_variance_ratio_, \"+\", linewidth=2\n)\nax0.set_ylabel(\"PCA explained variance ratio\")\n\nax0.axvline(\n search.best_estimator_.named_steps[\"pca\"].n_components,\n linestyle=\":\",\n label=\"n_components chosen\",\n)\nax0.legend(prop=dict(size=12))\n\n# For each number of components, find the best classifier results\nresults = pd.DataFrame(search.cv_results_)\ncomponents_col = \"param_pca__n_components\"\nbest_clfs = results.groupby(components_col)[\n [components_col, \"mean_test_score\", \"std_test_score\"]\n].apply(lambda g: 
g.nlargest(1, \"mean_test_score\"))\n\nbest_clfs.plot(\n x=components_col, y=\"mean_test_score\", yerr=\"std_test_score\", legend=False, ax=ax1\n)\nax1.set_ylabel(\"Classification accuracy (val)\")\nax1.set_xlabel(\"n_components\")\n\nplt.xlim(-1, 70)\n\nplt.tight_layout()\nplt.show()"
1919
]
2020
}
2121
],

dev/_downloads/91a0c94f9f7c19d59a0ad06e77512326/plot_gpr_co2.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -98,7 +98,7 @@
9898
},
9999
"outputs": [],
100100
"source": [
101-
"co2_data = co2_data.resample(\"M\").mean().dropna(axis=\"index\", how=\"any\")\nco2_data.plot()\nplt.ylabel(\"Monthly average of CO$_2$ concentration (ppm)\")\n_ = plt.title(\n \"Monthly average of air samples measurements\\nfrom the Mauna Loa Observatory\"\n)"
101+
"try:\n co2_data_resampled_monthly = co2_data.resample(\"ME\")\nexcept ValueError:\n # pandas < 2.2 uses M instead of ME\n co2_data_resampled_monthly = co2_data.resample(\"M\")\n\n\nco2_data = co2_data_resampled_monthly.mean().dropna(axis=\"index\", how=\"any\")\nco2_data.plot()\nplt.ylabel(\"Monthly average of CO$_2$ concentration (ppm)\")\n_ = plt.title(\n \"Monthly average of air samples measurements\\nfrom the Mauna Loa Observatory\"\n)"
102102
]
103103
},
104104
{

dev/_downloads/9fcbbc59ab27a20d07e209a711ac4f50/plot_cyclical_feature_engineering.py

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -104,7 +104,13 @@
104104
# train machine learning models with cross validation. Instead, we simplify the
105105
# representation by collapsing those into the `"rain"` category.
106106
#
107-
X["weather"].replace(to_replace="heavy_rain", value="rain", inplace=True)
107+
X["weather"] = (
108+
X["weather"]
109+
.astype(object)
110+
.replace(to_replace="heavy_rain", value="rain")
111+
.astype("category")
112+
)
113+
108114
# %%
109115
X["weather"].value_counts()
110116

0 commit comments

Comments
 (0)