Commit be38c00

Pushing the docs to dev/ for branch: main, commit 7d9f1cae7f49ff6bca4e474a4c9e8f4e7b88a357
1 parent 67dfa89 commit be38c00

1,227 files changed: +4626 -4374 lines changed


dev/_downloads/51bc3899ceeec0ecf99c5f72ff1fd241/wikipedia_principal_eigenvector.py

Lines changed: 15 additions & 5 deletions
@@ -47,8 +47,9 @@
 from urllib.request import urlopen


-# #############################################################################
-# Where to download the data, if not already on disk
+# %%
+# Download data, if not already on disk
+# -------------------------------------
 redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
 redirects_filename = redirects_url.rsplit("/", 1)[1]

@@ -68,10 +69,9 @@
         print()


-# #############################################################################
+# %%
 # Loading the redirect files
-
-
+# --------------------------
 def index(redirects, index_map, k):
     """Find the index of an article name after redirect resolution"""
     k = redirects.get(k, k)
@@ -119,6 +119,9 @@ def get_redirects(redirects_filename):
     return redirects


+# %%
+# Computing the Adjacency matrix
+# ------------------------------
 def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
     """Extract the adjacency graph as a scipy sparse matrix

@@ -166,6 +169,10 @@ def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
 )
 names = {i: name for name, i in index_map.items()}

+
+# %%
+# Computing Principal Singular Vector using Randomized SVD
+# --------------------------------------------------------
 print("Computing the principal singular vectors using randomized_svd")
 t0 = time()
 U, s, V = randomized_svd(X, 5, n_iter=3)
@@ -178,6 +185,9 @@ def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
 pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])


+# %%
+# Computing Centrality scores
+# ---------------------------
 def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
     """Power iteration computation of the principal eigenvector

dev/_downloads/948a4dfa149766b475b1cf2515f289d1/wikipedia_principal_eigenvector.ipynb

Lines changed: 91 additions & 1 deletion
@@ -26,7 +26,97 @@
 },
 "outputs": [],
 "source": [
-"# Author: Olivier Grisel <[email protected]>\n# License: BSD 3 clause\n\nfrom bz2 import BZ2File\nimport os\nfrom datetime import datetime\nfrom pprint import pprint\nfrom time import time\n\nimport numpy as np\n\nfrom scipy import sparse\n\nfrom sklearn.decomposition import randomized_svd\nfrom urllib.request import urlopen\n\n\n# #############################################################################\n# Where to download the data, if not already on disk\nredirects_url = \"http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2\"\nredirects_filename = redirects_url.rsplit(\"/\", 1)[1]\n\npage_links_url = \"http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2\"\npage_links_filename = page_links_url.rsplit(\"/\", 1)[1]\n\nresources = [\n (redirects_url, redirects_filename),\n (page_links_url, page_links_filename),\n]\n\nfor url, filename in resources:\n if not os.path.exists(filename):\n print(\"Downloading data from '%s', please wait...\" % url)\n opener = urlopen(url)\n open(filename, \"wb\").write(opener.read())\n print()\n\n\n# #############################################################################\n# Loading the redirect files\n\n\ndef index(redirects, index_map, k):\n \"\"\"Find the index of an article name after redirect resolution\"\"\"\n k = redirects.get(k, k)\n return index_map.setdefault(k, len(index_map))\n\n\nDBPEDIA_RESOURCE_PREFIX_LEN = len(\"http://dbpedia.org/resource/\")\nSHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)\n\n\ndef short_name(nt_uri):\n \"\"\"Remove the < and > URI markers and the common URI prefix\"\"\"\n return nt_uri[SHORTNAME_SLICE]\n\n\ndef get_redirects(redirects_filename):\n \"\"\"Parse the redirections and build a transitively closed map out of it\"\"\"\n redirects = {}\n print(\"Parsing the NT redirect file\")\n for l, line in enumerate(BZ2File(redirects_filename)):\n split = line.split()\n if len(split) != 4:\n print(\"ignoring malformed line: \" + line)\n continue\n redirects[short_name(split[0])] = short_name(split[2])\n if l % 1000000 == 0:\n print(\"[%s] line: %08d\" % (datetime.now().isoformat(), l))\n\n # compute the transitive closure\n print(\"Computing the transitive closure of the redirect relation\")\n for l, source in enumerate(redirects.keys()):\n transitive_target = None\n target = redirects[source]\n seen = {source}\n while True:\n transitive_target = target\n target = redirects.get(target)\n if target is None or target in seen:\n break\n seen.add(target)\n redirects[source] = transitive_target\n if l % 1000000 == 0:\n print(\"[%s] line: %08d\" % (datetime.now().isoformat(), l))\n\n return redirects\n\n\ndef get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):\n \"\"\"Extract the adjacency graph as a scipy sparse matrix\n\n Redirects are resolved first.\n\n Returns X, the scipy sparse adjacency matrix, redirects as python\n dict from article names to article names and index_map a python dict\n from article names to python int (article indexes).\n \"\"\"\n\n print(\"Computing the redirect map\")\n redirects = get_redirects(redirects_filename)\n\n print(\"Computing the integer index map\")\n index_map = dict()\n links = list()\n for l, line in enumerate(BZ2File(page_links_filename)):\n split = line.split()\n if len(split) != 4:\n print(\"ignoring malformed line: \" + line)\n continue\n i = index(redirects, index_map, short_name(split[0]))\n j = index(redirects, index_map, short_name(split[2]))\n links.append((i, j))\n if l % 1000000 == 0:\n print(\"[%s] line: %08d\" % (datetime.now().isoformat(), l))\n\n if limit is not None and l >= limit - 1:\n break\n\n print(\"Computing the adjacency matrix\")\n X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)\n for i, j in links:\n X[i, j] = 1.0\n del links\n print(\"Converting to CSR representation\")\n X = X.tocsr()\n print(\"CSR conversion done\")\n return X, redirects, index_map\n\n\n# stop after 5M links to make it possible to work in RAM\nX, redirects, index_map = get_adjacency_matrix(\n redirects_filename, page_links_filename, limit=5000000\n)\nnames = {i: name for name, i in index_map.items()}\n\nprint(\"Computing the principal singular vectors using randomized_svd\")\nt0 = time()\nU, s, V = randomized_svd(X, 5, n_iter=3)\nprint(\"done in %0.3fs\" % (time() - t0))\n\n# print the names of the wikipedia related strongest components of the\n# principal singular vector which should be similar to the highest eigenvector\nprint(\"Top wikipedia pages according to principal singular vectors\")\npprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])\npprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])\n\n\ndef centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):\n \"\"\"Power iteration computation of the principal eigenvector\n\n This method is also known as Google PageRank and the implementation\n is based on the one from the NetworkX project (BSD licensed too)\n with copyrights by:\n\n Aric Hagberg <[email protected]>\n Dan Schult <[email protected]>\n Pieter Swart <[email protected]>\n \"\"\"\n n = X.shape[0]\n X = X.copy()\n incoming_counts = np.asarray(X.sum(axis=1)).ravel()\n\n print(\"Normalizing the graph\")\n for i in incoming_counts.nonzero()[0]:\n X.data[X.indptr[i] : X.indptr[i + 1]] *= 1.0 / incoming_counts[i]\n dangle = np.asarray(np.where(np.isclose(X.sum(axis=1), 0), 1.0 / n, 0)).ravel()\n\n scores = np.full(n, 1.0 / n, dtype=np.float32) # initial guess\n for i in range(max_iter):\n print(\"power iteration #%d\" % i)\n prev_scores = scores\n scores = (\n alpha * (scores * X + np.dot(dangle, prev_scores))\n + (1 - alpha) * prev_scores.sum() / n\n )\n # check convergence: normalized l_inf norm\n scores_max = np.abs(scores).max()\n if scores_max == 0.0:\n scores_max = 1.0\n err = np.abs(scores - prev_scores).max() / scores_max\n print(\"error: %0.6f\" % err)\n if err < n * tol:\n return scores\n\n return scores\n\n\nprint(\"Computing principal eigenvector score using a power iteration method\")\nt0 = time()\nscores = centrality_scores(X, max_iter=100)\nprint(\"done in %0.3fs\" % (time() - t0))\npprint([names[i] for i in np.abs(scores).argsort()[-10:]])"
+"# Author: Olivier Grisel <[email protected]>\n# License: BSD 3 clause\n\nfrom bz2 import BZ2File\nimport os\nfrom datetime import datetime\nfrom pprint import pprint\nfrom time import time\n\nimport numpy as np\n\nfrom scipy import sparse\n\nfrom sklearn.decomposition import randomized_svd\nfrom urllib.request import urlopen"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"## Download data, if not already on disk\n\n"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {
+"collapsed": false
+},
+"outputs": [],
+"source": [
+"redirects_url = \"http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2\"\nredirects_filename = redirects_url.rsplit(\"/\", 1)[1]\n\npage_links_url = \"http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2\"\npage_links_filename = page_links_url.rsplit(\"/\", 1)[1]\n\nresources = [\n (redirects_url, redirects_filename),\n (page_links_url, page_links_filename),\n]\n\nfor url, filename in resources:\n if not os.path.exists(filename):\n print(\"Downloading data from '%s', please wait...\" % url)\n opener = urlopen(url)\n open(filename, \"wb\").write(opener.read())\n print()"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"## Loading the redirect files\n\n"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {
+"collapsed": false
+},
+"outputs": [],
+"source": [
+"def index(redirects, index_map, k):\n \"\"\"Find the index of an article name after redirect resolution\"\"\"\n k = redirects.get(k, k)\n return index_map.setdefault(k, len(index_map))\n\n\nDBPEDIA_RESOURCE_PREFIX_LEN = len(\"http://dbpedia.org/resource/\")\nSHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)\n\n\ndef short_name(nt_uri):\n \"\"\"Remove the < and > URI markers and the common URI prefix\"\"\"\n return nt_uri[SHORTNAME_SLICE]\n\n\ndef get_redirects(redirects_filename):\n \"\"\"Parse the redirections and build a transitively closed map out of it\"\"\"\n redirects = {}\n print(\"Parsing the NT redirect file\")\n for l, line in enumerate(BZ2File(redirects_filename)):\n split = line.split()\n if len(split) != 4:\n print(\"ignoring malformed line: \" + line)\n continue\n redirects[short_name(split[0])] = short_name(split[2])\n if l % 1000000 == 0:\n print(\"[%s] line: %08d\" % (datetime.now().isoformat(), l))\n\n # compute the transitive closure\n print(\"Computing the transitive closure of the redirect relation\")\n for l, source in enumerate(redirects.keys()):\n transitive_target = None\n target = redirects[source]\n seen = {source}\n while True:\n transitive_target = target\n target = redirects.get(target)\n if target is None or target in seen:\n break\n seen.add(target)\n redirects[source] = transitive_target\n if l % 1000000 == 0:\n print(\"[%s] line: %08d\" % (datetime.now().isoformat(), l))\n\n return redirects"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"## Computing the Adjacency matrix\n\n"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {
+"collapsed": false
+},
+"outputs": [],
+"source": [
+"def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):\n \"\"\"Extract the adjacency graph as a scipy sparse matrix\n\n Redirects are resolved first.\n\n Returns X, the scipy sparse adjacency matrix, redirects as python\n dict from article names to article names and index_map a python dict\n from article names to python int (article indexes).\n \"\"\"\n\n print(\"Computing the redirect map\")\n redirects = get_redirects(redirects_filename)\n\n print(\"Computing the integer index map\")\n index_map = dict()\n links = list()\n for l, line in enumerate(BZ2File(page_links_filename)):\n split = line.split()\n if len(split) != 4:\n print(\"ignoring malformed line: \" + line)\n continue\n i = index(redirects, index_map, short_name(split[0]))\n j = index(redirects, index_map, short_name(split[2]))\n links.append((i, j))\n if l % 1000000 == 0:\n print(\"[%s] line: %08d\" % (datetime.now().isoformat(), l))\n\n if limit is not None and l >= limit - 1:\n break\n\n print(\"Computing the adjacency matrix\")\n X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)\n for i, j in links:\n X[i, j] = 1.0\n del links\n print(\"Converting to CSR representation\")\n X = X.tocsr()\n print(\"CSR conversion done\")\n return X, redirects, index_map\n\n\n# stop after 5M links to make it possible to work in RAM\nX, redirects, index_map = get_adjacency_matrix(\n redirects_filename, page_links_filename, limit=5000000\n)\nnames = {i: name for name, i in index_map.items()}"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"## Computing Principal Singular Vector using Randomized SVD\n\n"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {
+"collapsed": false
+},
+"outputs": [],
+"source": [
+"print(\"Computing the principal singular vectors using randomized_svd\")\nt0 = time()\nU, s, V = randomized_svd(X, 5, n_iter=3)\nprint(\"done in %0.3fs\" % (time() - t0))\n\n# print the names of the wikipedia related strongest components of the\n# principal singular vector which should be similar to the highest eigenvector\nprint(\"Top wikipedia pages according to principal singular vectors\")\npprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])\npprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"## Computing Centrality scores\n\n"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {
+"collapsed": false
+},
+"outputs": [],
+"source": [
+"def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):\n \"\"\"Power iteration computation of the principal eigenvector\n\n This method is also known as Google PageRank and the implementation\n is based on the one from the NetworkX project (BSD licensed too)\n with copyrights by:\n\n Aric Hagberg <[email protected]>\n Dan Schult <[email protected]>\n Pieter Swart <[email protected]>\n \"\"\"\n n = X.shape[0]\n X = X.copy()\n incoming_counts = np.asarray(X.sum(axis=1)).ravel()\n\n print(\"Normalizing the graph\")\n for i in incoming_counts.nonzero()[0]:\n X.data[X.indptr[i] : X.indptr[i + 1]] *= 1.0 / incoming_counts[i]\n dangle = np.asarray(np.where(np.isclose(X.sum(axis=1), 0), 1.0 / n, 0)).ravel()\n\n scores = np.full(n, 1.0 / n, dtype=np.float32) # initial guess\n for i in range(max_iter):\n print(\"power iteration #%d\" % i)\n prev_scores = scores\n scores = (\n alpha * (scores * X + np.dot(dangle, prev_scores))\n + (1 - alpha) * prev_scores.sum() / n\n )\n # check convergence: normalized l_inf norm\n scores_max = np.abs(scores).max()\n if scores_max == 0.0:\n scores_max = 1.0\n err = np.abs(scores - prev_scores).max() / scores_max\n print(\"error: %0.6f\" % err)\n if err < n * tol:\n return scores\n\n return scores\n\n\nprint(\"Computing principal eigenvector score using a power iteration method\")\nt0 = time()\nscores = centrality_scores(X, max_iter=100)\nprint(\"done in %0.3fs\" % (time() - t0))\npprint([names[i] for i in np.abs(scores).argsort()[-10:]])"
 ]
 }
 ],
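The cell layout added above (one "## ..." markdown cell per section followed by one code cell) is what a "# %%"-sectioned script maps to when the notebook is generated. A minimal sketch of that mapping, using nbformat directly rather than the actual sphinx-gallery converter; the section contents and the output file name below are illustrative only:

import nbformat
from nbformat.v4 import new_code_cell, new_markdown_cell, new_notebook

# Illustrative (title, code) pairs standing in for the real "# %%" sections.
sections = [
    ("Download data, if not already on disk",
     'redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"'),
    ("Computing Centrality scores",
     "print('power iteration would go here')"),
]

cells = []
for title, code in sections:
    cells.append(new_markdown_cell("## " + title))  # e.g. "## Download data, ..."
    cells.append(new_code_cell(code))               # one code cell per section

nb = new_notebook(cells=cells)
nbformat.write(nb, "sectioned_example_sketch.ipynb")  # hypothetical output path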

dev/_downloads/scikit-learn-docs.zip

4.49 KB (binary file not shown)
