
Commit 00bfa4e

Pushing the docs to dev/ for branch: master, commit 13cc121b3047997ecc00fdc1a90f14b0a9de1e7d
1 parent: f8f6fce

928 files changed (+2694, -2685 lines)

2 binary files changed (125 Bytes and 122 Bytes; contents not shown).

dev/_downloads/plot_compare_cross_decomposition.ipynb

Lines changed: 1 addition & 1 deletion
@@ -42,7 +42,7 @@
 "execution_count": null,
 "cell_type": "code",
 "source": [
-"n = 500\n# 2 latents vars:\nl1 = np.random.normal(size=n)\nl2 = np.random.normal(size=n)\n\nlatents = np.array([l1, l1, l2, l2]).T\nX = latents + np.random.normal(size=4 * n).reshape((n, 4))\nY = latents + np.random.normal(size=4 * n).reshape((n, 4))\n\nX_train = X[:n / 2]\nY_train = Y[:n / 2]\nX_test = X[n / 2:]\nY_test = Y[n / 2:]\n\nprint(\"Corr(X)\")\nprint(np.round(np.corrcoef(X.T), 2))\nprint(\"Corr(Y)\")\nprint(np.round(np.corrcoef(Y.T), 2))"
+"n = 500\n# 2 latents vars:\nl1 = np.random.normal(size=n)\nl2 = np.random.normal(size=n)\n\nlatents = np.array([l1, l1, l2, l2]).T\nX = latents + np.random.normal(size=4 * n).reshape((n, 4))\nY = latents + np.random.normal(size=4 * n).reshape((n, 4))\n\nX_train = X[:n // 2]\nY_train = Y[:n // 2]\nX_test = X[n // 2:]\nY_test = Y[n // 2:]\n\nprint(\"Corr(X)\")\nprint(np.round(np.corrcoef(X.T), 2))\nprint(\"Corr(Y)\")\nprint(np.round(np.corrcoef(Y.T), 2))"
 ],
 "outputs": [],
 "metadata": {

dev/_downloads/plot_compare_cross_decomposition.py

Lines changed: 4 additions & 4 deletions
@@ -36,10 +36,10 @@
 X = latents + np.random.normal(size=4 * n).reshape((n, 4))
 Y = latents + np.random.normal(size=4 * n).reshape((n, 4))

-X_train = X[:n / 2]
-Y_train = Y[:n / 2]
-X_test = X[n / 2:]
-Y_test = Y[n / 2:]
+X_train = X[:n // 2]
+Y_train = Y[:n // 2]
+X_test = X[n // 2:]
+Y_test = Y[n // 2:]

 print("Corr(X)")
 print(np.round(np.corrcoef(X.T), 2))
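This is the pattern the whole commit applies: under Python 3, / is true division and returns a float even for whole-number results, and a float is not accepted as a slice bound, while // is floor division and keeps the bound an int (matching what n / 2 did for ints under Python 2). A minimal sketch of the difference; the names below are illustrative, not taken from the example:

n = 500
print(n / 2)    # 250.0 under Python 3: true division always yields a float
print(n // 2)   # 250: floor division keeps an int

data = list(range(n))
# A float bound here would raise TypeError; the int from // is a valid index.
X_train, X_test = data[:n // 2], data[n // 2:]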

dev/_downloads/plot_digits_classification.ipynb

Lines changed: 1 addition & 1 deletion
@@ -24,7 +24,7 @@
 "execution_count": null,
 "cell_type": "code",
 "source": [
-"print(__doc__)\n\n# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>\n# License: BSD 3 clause\n\n# Standard scientific Python imports\nimport matplotlib.pyplot as plt\n\n# Import datasets, classifiers and performance metrics\nfrom sklearn import datasets, svm, metrics\n\n# The digits dataset\ndigits = datasets.load_digits()\n\n# The data that we are interested in is made of 8x8 images of digits, let's\n# have a look at the first 4 images, stored in the `images` attribute of the\n# dataset. If we were working from image files, we could load them using\n# matplotlib.pyplot.imread. Note that each image must have the same size. For these\n# images, we know which digit they represent: it is given in the 'target' of\n# the dataset.\nimages_and_labels = list(zip(digits.images, digits.target))\nfor index, (image, label) in enumerate(images_and_labels[:4]):\n plt.subplot(2, 4, index + 1)\n plt.axis('off')\n plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')\n plt.title('Training: %i' % label)\n\n# To apply a classifier on this data, we need to flatten the image, to\n# turn the data in a (samples, feature) matrix:\nn_samples = len(digits.images)\ndata = digits.images.reshape((n_samples, -1))\n\n# Create a classifier: a support vector classifier\nclassifier = svm.SVC(gamma=0.001)\n\n# We learn the digits on the first half of the digits\nclassifier.fit(data[:n_samples / 2], digits.target[:n_samples / 2])\n\n# Now predict the value of the digit on the second half:\nexpected = digits.target[n_samples / 2:]\npredicted = classifier.predict(data[n_samples / 2:])\n\nprint(\"Classification report for classifier %s:\\n%s\\n\"\n % (classifier, metrics.classification_report(expected, predicted)))\nprint(\"Confusion matrix:\\n%s\" % metrics.confusion_matrix(expected, predicted))\n\nimages_and_predictions = list(zip(digits.images[n_samples / 2:], predicted))\nfor index, (image, prediction) in enumerate(images_and_predictions[:4]):\n plt.subplot(2, 4, index + 5)\n plt.axis('off')\n plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')\n plt.title('Prediction: %i' % prediction)\n\nplt.show()"
+"print(__doc__)\n\n# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>\n# License: BSD 3 clause\n\n# Standard scientific Python imports\nimport matplotlib.pyplot as plt\n\n# Import datasets, classifiers and performance metrics\nfrom sklearn import datasets, svm, metrics\n\n# The digits dataset\ndigits = datasets.load_digits()\n\n# The data that we are interested in is made of 8x8 images of digits, let's\n# have a look at the first 4 images, stored in the `images` attribute of the\n# dataset. If we were working from image files, we could load them using\n# matplotlib.pyplot.imread. Note that each image must have the same size. For these\n# images, we know which digit they represent: it is given in the 'target' of\n# the dataset.\nimages_and_labels = list(zip(digits.images, digits.target))\nfor index, (image, label) in enumerate(images_and_labels[:4]):\n plt.subplot(2, 4, index + 1)\n plt.axis('off')\n plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')\n plt.title('Training: %i' % label)\n\n# To apply a classifier on this data, we need to flatten the image, to\n# turn the data in a (samples, feature) matrix:\nn_samples = len(digits.images)\ndata = digits.images.reshape((n_samples, -1))\n\n# Create a classifier: a support vector classifier\nclassifier = svm.SVC(gamma=0.001)\n\n# We learn the digits on the first half of the digits\nclassifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])\n\n# Now predict the value of the digit on the second half:\nexpected = digits.target[n_samples // 2:]\npredicted = classifier.predict(data[n_samples // 2:])\n\nprint(\"Classification report for classifier %s:\\n%s\\n\"\n % (classifier, metrics.classification_report(expected, predicted)))\nprint(\"Confusion matrix:\\n%s\" % metrics.confusion_matrix(expected, predicted))\n\nimages_and_predictions = list(zip(digits.images[n_samples // 2:], predicted))\nfor index, (image, prediction) in enumerate(images_and_predictions[:4]):\n plt.subplot(2, 4, index + 5)\n plt.axis('off')\n plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')\n plt.title('Prediction: %i' % prediction)\n\nplt.show()"
 ],
 "outputs": [],
 "metadata": {

dev/_downloads/plot_digits_classification.py

Lines changed: 4 additions & 4 deletions
@@ -46,17 +46,17 @@
 classifier = svm.SVC(gamma=0.001)

 # We learn the digits on the first half of the digits
-classifier.fit(data[:n_samples / 2], digits.target[:n_samples / 2])
+classifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])

 # Now predict the value of the digit on the second half:
-expected = digits.target[n_samples / 2:]
-predicted = classifier.predict(data[n_samples / 2:])
+expected = digits.target[n_samples // 2:]
+predicted = classifier.predict(data[n_samples // 2:])

 print("Classification report for classifier %s:\n%s\n"
       % (classifier, metrics.classification_report(expected, predicted)))
 print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))

-images_and_predictions = list(zip(digits.images[n_samples / 2:], predicted))
+images_and_predictions = list(zip(digits.images[n_samples // 2:], predicted))
 for index, (image, prediction) in enumerate(images_and_predictions[:4]):
     plt.subplot(2, 4, index + 5)
     plt.axis('off')
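Condensed, the digits example reads roughly as follows after this change: the 8x8 images are flattened into a (samples, features) matrix and the integer midpoint from // splits it into training and test halves. A trimmed sketch; the split variable is introduced here for brevity, and the confusion matrix and plotting from the full example are omitted:

from sklearn import datasets, svm, metrics

digits = datasets.load_digits()
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))   # flatten 8x8 images to 64 features

split = n_samples // 2                          # integer index, valid under Python 3
classifier = svm.SVC(gamma=0.001)
classifier.fit(data[:split], digits.target[:split])
predicted = classifier.predict(data[split:])
print(metrics.classification_report(digits.target[split:], predicted))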

dev/_downloads/plot_iris_exercise.ipynb

Lines changed: 1 addition & 1 deletion
@@ -24,7 +24,7 @@
 "execution_count": null,
 "cell_type": "code",
 "source": [
-"print(__doc__)\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets, svm\n\niris = datasets.load_iris()\nX = iris.data\ny = iris.target\n\nX = X[y != 0, :2]\ny = y[y != 0]\n\nn_sample = len(X)\n\nnp.random.seed(0)\norder = np.random.permutation(n_sample)\nX = X[order]\ny = y[order].astype(np.float)\n\nX_train = X[:.9 * n_sample]\ny_train = y[:.9 * n_sample]\nX_test = X[.9 * n_sample:]\ny_test = y[.9 * n_sample:]\n\n# fit the model\nfor fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):\n clf = svm.SVC(kernel=kernel, gamma=10)\n clf.fit(X_train, y_train)\n\n plt.figure(fig_num)\n plt.clf()\n plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)\n\n # Circle out the test data\n plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)\n\n plt.axis('tight')\n x_min = X[:, 0].min()\n x_max = X[:, 0].max()\n y_min = X[:, 1].min()\n y_max = X[:, 1].max()\n\n XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]\n Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])\n\n # Put the result into a color plot\n Z = Z.reshape(XX.shape)\n plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)\n plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],\n levels=[-.5, 0, .5])\n\n plt.title(kernel)\nplt.show()"
+"print(__doc__)\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets, svm\n\niris = datasets.load_iris()\nX = iris.data\ny = iris.target\n\nX = X[y != 0, :2]\ny = y[y != 0]\n\nn_sample = len(X)\n\nnp.random.seed(0)\norder = np.random.permutation(n_sample)\nX = X[order]\ny = y[order].astype(np.float)\n\nX_train = X[:int(.9 * n_sample)]\ny_train = y[:int(.9 * n_sample)]\nX_test = X[int(.9 * n_sample):]\ny_test = y[int(.9 * n_sample):]\n\n# fit the model\nfor fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):\n clf = svm.SVC(kernel=kernel, gamma=10)\n clf.fit(X_train, y_train)\n\n plt.figure(fig_num)\n plt.clf()\n plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)\n\n # Circle out the test data\n plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)\n\n plt.axis('tight')\n x_min = X[:, 0].min()\n x_max = X[:, 0].max()\n y_min = X[:, 1].min()\n y_max = X[:, 1].max()\n\n XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]\n Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])\n\n # Put the result into a color plot\n Z = Z.reshape(XX.shape)\n plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)\n plt.contour(XX, YY, Z, colors=['k', 'k', 'k'],\n linestyles=['--', '-', '--'], levels=[-.5, 0, .5])\n\n plt.title(kernel)\nplt.show()"
 ],
 "outputs": [],
 "metadata": {

dev/_downloads/plot_iris_exercise.py

Lines changed: 6 additions & 6 deletions
@@ -29,10 +29,10 @@
 X = X[order]
 y = y[order].astype(np.float)

-X_train = X[:.9 * n_sample]
-y_train = y[:.9 * n_sample]
-X_test = X[.9 * n_sample:]
-y_test = y[.9 * n_sample:]
+X_train = X[:int(.9 * n_sample)]
+y_train = y[:int(.9 * n_sample)]
+X_test = X[int(.9 * n_sample):]
+y_test = y[int(.9 * n_sample):]

 # fit the model
 for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
@@ -58,8 +58,8 @@
     # Put the result into a color plot
     Z = Z.reshape(XX.shape)
     plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
-    plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
-                levels=[-.5, 0, .5])
+    plt.contour(XX, YY, Z, colors=['k', 'k', 'k'],
+                linestyles=['--', '-', '--'], levels=[-.5, 0, .5])

     plt.title(kernel)
 plt.show()
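Here the slice bound is .9 * n_sample, a float times an int, so floor division would still leave a float; the commit wraps the product in int() instead. NumPy used to truncate float indices silently but newer releases reject them. A small sketch of the resulting 90/10 split; the array contents and names below are placeholders, not the iris data:

import numpy as np

n_sample = 100
order = np.random.RandomState(0).permutation(n_sample)
X = np.arange(n_sample)[order]

# .9 * n_sample is a float; slice bounds must be integers,
# so the fractional split point is cast explicitly.
split = int(.9 * n_sample)
X_train, X_test = X[:split], X[split:]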

dev/_downloads/plot_kde_1d.ipynb

Lines changed: 1 addition & 1 deletion
@@ -24,7 +24,7 @@
 "execution_count": null,
 "cell_type": "code",
 "source": [
-"# Author: Jake Vanderplas <[email protected]>\n#\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import norm\nfrom sklearn.neighbors import KernelDensity\n\n\n#----------------------------------------------------------------------\n# Plot the progression of histograms to kernels\nnp.random.seed(1)\nN = 20\nX = np.concatenate((np.random.normal(0, 1, 0.3 * N),\n np.random.normal(5, 1, 0.7 * N)))[:, np.newaxis]\nX_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]\nbins = np.linspace(-5, 10, 10)\n\nfig, ax = plt.subplots(2, 2, sharex=True, sharey=True)\nfig.subplots_adjust(hspace=0.05, wspace=0.05)\n\n# histogram 1\nax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', normed=True)\nax[0, 0].text(-3.5, 0.31, \"Histogram\")\n\n# histogram 2\nax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', normed=True)\nax[0, 1].text(-3.5, 0.31, \"Histogram, bins shifted\")\n\n# tophat KDE\nkde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)\nlog_dens = kde.score_samples(X_plot)\nax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')\nax[1, 0].text(-3.5, 0.31, \"Tophat Kernel Density\")\n\n# Gaussian KDE\nkde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)\nlog_dens = kde.score_samples(X_plot)\nax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')\nax[1, 1].text(-3.5, 0.31, \"Gaussian Kernel Density\")\n\nfor axi in ax.ravel():\n axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')\n axi.set_xlim(-4, 9)\n axi.set_ylim(-0.02, 0.34)\n\nfor axi in ax[:, 0]:\n axi.set_ylabel('Normalized Density')\n\nfor axi in ax[1, :]:\n axi.set_xlabel('x')\n\n#----------------------------------------------------------------------\n# Plot all available kernels\nX_plot = np.linspace(-6, 6, 1000)[:, None]\nX_src = np.zeros((1, 1))\n\nfig, ax = plt.subplots(2, 3, sharex=True, sharey=True)\nfig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)\n\n\ndef format_func(x, loc):\n if x == 0:\n return '0'\n elif x == 1:\n return 'h'\n elif x == -1:\n return '-h'\n else:\n return '%ih' % x\n\nfor i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',\n 'exponential', 'linear', 'cosine']):\n axi = ax.ravel()[i]\n log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)\n axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')\n axi.text(-2.6, 0.95, kernel)\n\n axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))\n axi.xaxis.set_major_locator(plt.MultipleLocator(1))\n axi.yaxis.set_major_locator(plt.NullLocator())\n\n axi.set_ylim(0, 1.05)\n axi.set_xlim(-2.9, 2.9)\n\nax[0, 1].set_title('Available Kernels')\n\n#----------------------------------------------------------------------\n# Plot a 1D density example\nN = 100\nnp.random.seed(1)\nX = np.concatenate((np.random.normal(0, 1, 0.3 * N),\n np.random.normal(5, 1, 0.7 * N)))[:, np.newaxis]\n\nX_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]\n\ntrue_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])\n + 0.7 * norm(5, 1).pdf(X_plot[:, 0]))\n\nfig, ax = plt.subplots()\nax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,\n label='input distribution')\n\nfor kernel in ['gaussian', 'tophat', 'epanechnikov']:\n kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)\n log_dens = kde.score_samples(X_plot)\n ax.plot(X_plot[:, 0], np.exp(log_dens), '-',\n label=\"kernel = '{0}'\".format(kernel))\n\nax.text(6, 0.38, \"N={0} points\".format(N))\n\nax.legend(loc='upper left')\nax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')\n\nax.set_xlim(-4, 9)\nax.set_ylim(-0.02, 0.4)\nplt.show()"
+"# Author: Jake Vanderplas <[email protected]>\n#\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import norm\nfrom sklearn.neighbors import KernelDensity\n\n\n#----------------------------------------------------------------------\n# Plot the progression of histograms to kernels\nnp.random.seed(1)\nN = 20\nX = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),\n np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]\nX_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]\nbins = np.linspace(-5, 10, 10)\n\nfig, ax = plt.subplots(2, 2, sharex=True, sharey=True)\nfig.subplots_adjust(hspace=0.05, wspace=0.05)\n\n# histogram 1\nax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', normed=True)\nax[0, 0].text(-3.5, 0.31, \"Histogram\")\n\n# histogram 2\nax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', normed=True)\nax[0, 1].text(-3.5, 0.31, \"Histogram, bins shifted\")\n\n# tophat KDE\nkde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)\nlog_dens = kde.score_samples(X_plot)\nax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')\nax[1, 0].text(-3.5, 0.31, \"Tophat Kernel Density\")\n\n# Gaussian KDE\nkde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)\nlog_dens = kde.score_samples(X_plot)\nax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')\nax[1, 1].text(-3.5, 0.31, \"Gaussian Kernel Density\")\n\nfor axi in ax.ravel():\n axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')\n axi.set_xlim(-4, 9)\n axi.set_ylim(-0.02, 0.34)\n\nfor axi in ax[:, 0]:\n axi.set_ylabel('Normalized Density')\n\nfor axi in ax[1, :]:\n axi.set_xlabel('x')\n\n#----------------------------------------------------------------------\n# Plot all available kernels\nX_plot = np.linspace(-6, 6, 1000)[:, None]\nX_src = np.zeros((1, 1))\n\nfig, ax = plt.subplots(2, 3, sharex=True, sharey=True)\nfig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)\n\n\ndef format_func(x, loc):\n if x == 0:\n return '0'\n elif x == 1:\n return 'h'\n elif x == -1:\n return '-h'\n else:\n return '%ih' % x\n\nfor i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',\n 'exponential', 'linear', 'cosine']):\n axi = ax.ravel()[i]\n log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)\n axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')\n axi.text(-2.6, 0.95, kernel)\n\n axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))\n axi.xaxis.set_major_locator(plt.MultipleLocator(1))\n axi.yaxis.set_major_locator(plt.NullLocator())\n\n axi.set_ylim(0, 1.05)\n axi.set_xlim(-2.9, 2.9)\n\nax[0, 1].set_title('Available Kernels')\n\n#----------------------------------------------------------------------\n# Plot a 1D density example\nN = 100\nnp.random.seed(1)\nX = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),\n np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]\n\nX_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]\n\ntrue_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])\n + 0.7 * norm(5, 1).pdf(X_plot[:, 0]))\n\nfig, ax = plt.subplots()\nax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,\n label='input distribution')\n\nfor kernel in ['gaussian', 'tophat', 'epanechnikov']:\n kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)\n log_dens = kde.score_samples(X_plot)\n ax.plot(X_plot[:, 0], np.exp(log_dens), '-',\n label=\"kernel = '{0}'\".format(kernel))\n\nax.text(6, 0.38, \"N={0} points\".format(N))\n\nax.legend(loc='upper left')\nax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')\n\nax.set_xlim(-4, 9)\nax.set_ylim(-0.02, 0.4)\nplt.show()"
 ],
 "outputs": [],
 "metadata": {

dev/_downloads/plot_kde_1d.py

Lines changed: 4 additions & 4 deletions
@@ -38,8 +38,8 @@
 # Plot the progression of histograms to kernels
 np.random.seed(1)
 N = 20
-X = np.concatenate((np.random.normal(0, 1, 0.3 * N),
-                    np.random.normal(5, 1, 0.7 * N)))[:, np.newaxis]
+X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
+                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
 X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
 bins = np.linspace(-5, 10, 10)

@@ -116,8 +116,8 @@ def format_func(x, loc):
 # Plot a 1D density example
 N = 100
 np.random.seed(1)
-X = np.concatenate((np.random.normal(0, 1, 0.3 * N),
-                    np.random.normal(5, 1, 0.7 * N)))[:, np.newaxis]
+X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
+                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]

 X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
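The KDE example hits a related problem in a different place: np.random.normal takes the number of samples as its size argument, and 0.3 * N is a float, which recent NumPy releases reject rather than silently truncate. A short sketch of the fix, assuming the same N = 20 as the first hunk:

import numpy as np

N = 20
# size must be an integer; 0.3 * N is a float, so the sample counts
# are cast before drawing.
a = np.random.normal(0, 1, int(0.3 * N))    # 6 samples centred at 0
b = np.random.normal(5, 1, int(0.7 * N))    # 14 samples centred at 5
X = np.concatenate((a, b))[:, np.newaxis]   # column vector, shape (20, 1)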
