Skip to content

Commit 0b7d53b

Browse files
committed
Created using Colaboratory
1 parent 9618b34 commit 0b7d53b

File tree

1 file changed

+203
-0
lines changed

1 file changed

+203
-0
lines changed

oct22.ipynb

Lines changed: 203 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,203 @@
1+
{
2+
"nbformat": 4,
3+
"nbformat_minor": 0,
4+
"metadata": {
5+
"colab": {
6+
"name": "oct22.ipynb",
7+
"version": "0.3.2",
8+
"provenance": [],
9+
"toc_visible": true,
10+
"include_colab_link": true
11+
},
12+
"kernelspec": {
13+
"name": "python3",
14+
"display_name": "Python 3"
15+
},
16+
"accelerator": "GPU"
17+
},
18+
"cells": [
19+
{
20+
"cell_type": "markdown",
21+
"metadata": {
22+
"id": "view-in-github",
23+
"colab_type": "text"
24+
},
25+
"source": [
26+
"[View in Colaboratory](https://colab.research.google.com/github/RonRichman/100daysofcode-with-python-course/blob/master/oct22.ipynb)"
27+
]
28+
},
29+
{
30+
"metadata": {
31+
"id": "xJuP_KS7VXOa",
32+
"colab_type": "text"
33+
},
34+
"cell_type": "markdown",
35+
"source": [
36+
"## Installing fastai in GPU enabled runtime\n",
37+
"\n",
38+
"Enable GPU runtime from the runtime menu option-->Change runtime type\n",
39+
" \n",
40+
"\n",
41+
"\n",
42+
"1. Enable GPU runtime from the runtime menu option-->Change runtime type\n",
43+
"2. Under the Hardware Accelerator, choose GPU\n",
44+
"3. Follow the steps below to install the PyTorch GPU version, followed by fastai"
45+
]
46+
},
47+
{
48+
"metadata": {
49+
"id": "sL1gDkp3FrW8",
50+
"colab_type": "code",
51+
"colab": {}
52+
},
53+
"cell_type": "code",
54+
"source": [
55+
"!pip install torch_nightly -f https://download.pytorch.org/whl/nightly/cu92/torch_nightly.html\n"
56+
],
57+
"execution_count": 0,
58+
"outputs": []
59+
},
60+
{
61+
"metadata": {
62+
"id": "oeMIL1FwK0gS",
63+
"colab_type": "code",
64+
"colab": {}
65+
},
66+
"cell_type": "code",
67+
"source": [
68+
"!pip install fastai\n"
69+
],
70+
"execution_count": 0,
71+
"outputs": []
72+
},
73+
{
74+
"metadata": {
75+
"id": "eOh7l8ArLydL",
76+
"colab_type": "code",
77+
"cellView": "both",
78+
"outputId": "18114a18-fa21-4a35-db4f-892e9fb7ecce",
79+
"colab": {
80+
"base_uri": "https://localhost:8080/",
81+
"height": 1003
82+
}
83+
},
84+
"cell_type": "code",
85+
"source": [
86+
"#@title\n",
87+
"import fastai\n",
88+
"fastai.show_install(1)"
89+
],
90+
"execution_count": 0,
91+
"outputs": [
92+
{
93+
"output_type": "stream",
94+
"text": [
95+
"\n",
96+
"\n",
97+
"```text\n",
98+
"=== Software === \n",
99+
"python version : 3.6.6\n",
100+
"fastai version : 1.0.11\n",
101+
"torch version : 1.0.0.dev20181019\n",
102+
"nvidia driver : 396.44\n",
103+
"torch cuda ver : 9.2.148\n",
104+
"torch cuda is : available\n",
105+
"torch cudnn ver : 7104\n",
106+
"torch cudnn is : enabled\n",
107+
"\n",
108+
"=== Hardware === \n",
109+
"nvidia gpus : 1\n",
110+
"torch available : 1\n",
111+
" - gpu0 : 11441MB | Tesla K80\n",
112+
"\n",
113+
"=== Environment === \n",
114+
"platform : Linux-4.14.65+-x86_64-with-Ubuntu-18.04-bionic\n",
115+
"distro : #1 SMP Sun Sep 9 02:18:33 PDT 2018\n",
116+
"conda env : Unknown\n",
117+
"python : /usr/bin/python3\n",
118+
"sys.path : \n",
119+
"/env/python\n",
120+
"/usr/lib/python36.zip\n",
121+
"/usr/lib/python3.6\n",
122+
"/usr/lib/python3.6/lib-dynload\n",
123+
"/usr/local/lib/python3.6/dist-packages\n",
124+
"/usr/lib/python3/dist-packages\n",
125+
"/usr/local/lib/python3.6/dist-packages/IPython/extensions\n",
126+
"/root/.ipython\n",
127+
"\n",
128+
"Sun Oct 21 06:21:51 2018 \n",
129+
"+-----------------------------------------------------------------------------+\n",
130+
"| NVIDIA-SMI 396.44 Driver Version: 396.44 |\n",
131+
"|-------------------------------+----------------------+----------------------+\n",
132+
"| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n",
133+
"| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n",
134+
"|===============================+======================+======================|\n",
135+
"| 0 Tesla K80 Off | 00000000:00:04.0 Off | 0 |\n",
136+
"| N/A 34C P8 31W / 149W | 11MiB / 11441MiB | 0% Default |\n",
137+
"+-------------------------------+----------------------+----------------------+\n",
138+
" \n",
139+
"+-----------------------------------------------------------------------------+\n",
140+
"| Processes: GPU Memory |\n",
141+
"| GPU PID Type Process name Usage |\n",
142+
"|=============================================================================|\n",
143+
"| No running processes found |\n",
144+
"+-----------------------------------------------------------------------------+\n",
145+
"\n",
146+
"```\n",
147+
"\n",
148+
"Please make sure to include opening/closing ``` when you paste into forums/github to make the reports appear formatted as code sections.\n",
149+
"\n",
150+
"Optional package(s) to enhance the diagnostics can be installed with:\n",
151+
"pip install distro\n",
152+
"Once installed, re-run this utility to get the additional information\n"
153+
],
154+
"name": "stdout"
155+
}
156+
]
157+
},
158+
{
159+
"metadata": {
160+
"id": "FgF7KvT9L214",
161+
"colab_type": "code",
162+
"outputId": "7b766b11-bd7c-4e0f-eb7e-3a741f6cb6fd",
163+
"colab": {
164+
"base_uri": "https://localhost:8080/",
165+
"height": 34
166+
}
167+
},
168+
"cell_type": "code",
169+
"source": [
170+
"import torch\n",
171+
"torch.cuda.is_available()"
172+
],
173+
"execution_count": 0,
174+
"outputs": [
175+
{
176+
"output_type": "execute_result",
177+
"data": {
178+
"text/plain": [
179+
"True"
180+
]
181+
},
182+
"metadata": {
183+
"tags": []
184+
},
185+
"execution_count": 5
186+
}
187+
]
188+
},
189+
{
190+
"metadata": {
191+
"id": "U5tq4yh7QE5J",
192+
"colab_type": "code",
193+
"colab": {}
194+
},
195+
"cell_type": "code",
196+
"source": [
197+
""
198+
],
199+
"execution_count": 0,
200+
"outputs": []
201+
}
202+
]
203+
}

0 commit comments

Comments
 (0)