
Commit 7ad51df

Pushing the docs to dev/ for branch: master, commit e6e8345248c09214ad1946c8549204dbbcbaf8a6
1 parent 68dd107 commit 7ad51df

File tree

1,053 files changed: +3186 -3192 lines

Binary file not shown (-82 Bytes).
Binary file not shown (-80 Bytes).

dev/_downloads/plot_stock_market.ipynb

Lines changed: 1 addition & 1 deletion

@@ -26,7 +26,7 @@
 26  26       },
 27  27       "outputs": [],
 28  28       "source": [
 29      -        "from __future__ import print_function\n\n# Author: Gael Varoquaux [email protected]\n# License: BSD 3 clause\n[… unchanged …]# historical data can be obtained for from APIs like the quandl.com and\n# alphavantage.co ones.\nstart_date = datetime(2003, 1, 1).date()\nend_date = datetime(2008, 1, 1).date()\n\nsymbol_dict = {\n[… unchanged …]\n\nplt.show()"
     29  +        "from __future__ import print_function\n\n# Author: Gael Varoquaux [email protected]\n# License: BSD 3 clause\n[… unchanged …]# historical data can be obtained for from APIs like the quandl.com and\n# alphavantage.co ones.\n\nsymbol_dict = {\n[… unchanged …]\n\nplt.show()"
 30  30       ]
 31  31       }
 32  32       ],
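The removed start_date/end_date assignments are dead code in this version of the example: the quotes are read from static CSV files in the scikit-learn/examples-data repository, which already cover the 2003-2008 window, so no date bounds are passed to the loader. A minimal sketch of that loading step, fetching a single symbol for illustration (the 'open'/'close' column names follow the example's own usage):

    import pandas as pd

    # Pre-packaged 2003-2008 quote history used by the example; the time
    # window is fixed by the data files, so no start/end dates are needed.
    url = ('https://raw.githubusercontent.com/scikit-learn/examples-data/'
           'master/financial-data/{}.csv')
    quotes = pd.read_csv(url.format('AAPL'))  # 'AAPL' is one of the listed symbols
    print(quotes['open'].head())
    print(quotes['close'].head())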

dev/_downloads/plot_stock_market.py

Lines changed: 0 additions & 2 deletions

@@ -85,8 +85,6 @@
 85  85   # that we get high-tech firms, and before the 2008 crash). This kind of
 86  86   # historical data can be obtained for from APIs like the quandl.com and
 87  87   # alphavantage.co ones.
 88      -start_date = datetime(2003, 1, 1).date()
 89      -end_date = datetime(2008, 1, 1).date()
 90  88
 91  89   symbol_dict = {
 92  90       'TOT': 'Total',
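Because the two assignments sit at module level, a linter will not necessarily flag them as unused. A quick hypothetical sanity check, run after the removal, is to confirm that nothing in the module still references the deleted names (the path is assumed relative to the docs build output):

    # Hypothetical check: the deleted names should no longer appear anywhere
    # in the module once their assignments are gone.
    source = open('dev/_downloads/plot_stock_market.py').read()
    for name in ('start_date', 'end_date'):
        assert name not in source, '%s is still referenced' % name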

dev/_downloads/scikit-learn-docs.pdf

Binary file not shown (-31.7 KB).

dev/_images/iris.png

0 Bytes (no size change).
