
Commit 8428a87
Various small fixes
1 parent f4445b6

4 files changed: 31 additions and 51 deletions

chapter02_mathematical-building-blocks.ipynb

Lines changed: 4 additions & 7 deletions
@@ -131,9 +131,9 @@
 },
 "outputs": [],
 "source": [
-"from tensorflow.keras import models\n",
+"from tensorflow import keras\n",
 "from tensorflow.keras import layers\n",
-"model = models.Sequential([\n",
+"model = keras.Sequential([\n",
 "    layers.Dense(512, activation=\"relu\"),\n",
 "    layers.Dense(10, activation=\"softmax\")\n",
 "])"
@@ -477,8 +477,8 @@
 },
 "outputs": [],
 "source": [
-"digit = train_images[4]\n",
 "import matplotlib.pyplot as plt\n",
+"digit = train_images[4]\n",
 "plt.imshow(digit, cmap=plt.cm.binary)\n",
 "plt.show()"
 ]
@@ -1099,7 +1099,7 @@
 "b = tf.Variable(tf.zeros((2,)))\n",
 "x = tf.random.uniform((2, 2))\n",
 "with tf.GradientTape() as tape:\n",
-"    y = tf.matmul(W, x) + b\n",
+"    y = tf.matmul(x, W) + b\n",
 "grad_of_y_wrt_W_and_b = tape.gradient(y, [W, b])"
 ]
 },
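Why `tf.matmul(x, W)`: with inputs laid out one sample per row, the forward pass of a dense layer is matmul(x, W) + b, where W has shape (input_dim, output_dim). The old argument order only ran because W and x were both (2, 2). A minimal sketch, with non-square shapes chosen purely for illustration:

import tensorflow as tf

x = tf.random.uniform((3, 4))               # 3 samples, 4 features each
W = tf.Variable(tf.random.uniform((4, 2)))  # maps 4 features to 2 outputs
b = tf.Variable(tf.zeros((2,)))

y = tf.matmul(x, W) + b                     # shape (3, 2): one row per sample
# tf.matmul(W, x) would raise an incompatible-shapes error here; it only
# ran in the notebook because W and x happened to both be (2, 2).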
@@ -1194,7 +1194,6 @@
 "import tensorflow as tf\n",
 "\n",
 "class NaiveDense:\n",
-"\n",
 "    def __init__(self, input_size, output_size, activation):\n",
 "        self.activation = activation\n",
 "\n",
@@ -1232,7 +1231,6 @@
 "outputs": [],
 "source": [
 "class NaiveSequential:\n",
-"\n",
 "    def __init__(self, layers):\n",
 "        self.layers = layers\n",
 "\n",
@@ -1283,7 +1281,6 @@
 "outputs": [],
 "source": [
 "class BatchGenerator:\n",
-"\n",
 "    def __init__(self, images, labels, batch_size=128):\n",
 "        self.index = 0\n",
 "        self.images = images\n",

chapter03_introduction-to-keras-and-tf.ipynb

Lines changed: 6 additions & 6 deletions
@@ -610,7 +610,7 @@
 },
 "outputs": [],
 "source": [
-"for step in range(20):\n",
+"for step in range(40):\n",
 "    loss = training_step(inputs, targets)\n",
 "    print(f\"Loss at step {step}: {loss:.4f}\")"
 ]
@@ -827,7 +827,7 @@
 "colab_type": "text"
 },
 "source": [
-"### Understanding the \"fit\" method"
+"### Understanding the `fit` method"
 ]
 },
 {
@@ -902,10 +902,10 @@
 "shuffled_targets = targets[indices_permutation]\n",
 "\n",
 "num_validation_samples = int(0.3 * len(inputs))\n",
-"val_inputs = shuffled_inputs[-num_validation_samples:]\n",
-"val_targets = shuffled_targets[-num_validation_samples:]\n",
-"training_inputs = shuffled_inputs[:num_validation_samples]\n",
-"training_targets = shuffled_targets[:num_validation_samples]\n",
+"val_inputs = shuffled_inputs[:num_validation_samples]\n",
+"val_targets = shuffled_targets[:num_validation_samples]\n",
+"training_inputs = shuffled_inputs[num_validation_samples:]\n",
+"training_targets = shuffled_targets[num_validation_samples:]\n",
 "model.fit(\n",
 "    training_inputs,\n",
 "    training_targets,\n",
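Why the slice swap: the previous code trained on the first `num_validation_samples` samples and validated on the last `num_validation_samples`, so the model saw only 30% of the data and the middle 40% was never used at all. The corrected slices are complementary. A toy check (values are illustrative only):

import numpy as np

inputs = np.arange(10)                             # stand-in for shuffled data
num_validation_samples = int(0.3 * len(inputs))    # 3

val_inputs = inputs[:num_validation_samples]       # first 30%: [0 1 2]
training_inputs = inputs[num_validation_samples:]  # remaining 70%: [3 ... 9]

# Disjoint and exhaustive: every sample lands in exactly one split.
assert len(val_inputs) + len(training_inputs) == len(inputs)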

chapter04_getting-started-with-neural-networks.ipynb

Lines changed: 20 additions & 37 deletions
@@ -130,7 +130,7 @@
 "colab_type": "text"
 },
 "source": [
-"**Encoding the integer sequences via one-hot encoding**"
+"**Encoding the integer sequences via multi-hot encoding**"
 ]
 },
 {
@@ -145,7 +145,8 @@
 "def vectorize_sequences(sequences, dimension=10000):\n",
 "    results = np.zeros((len(sequences), dimension))\n",
 "    for i, sequence in enumerate(sequences):\n",
-"        results[i, sequence] = 1.\n",
+"        for j in sequence:\n",
+"            results[i, j] = 1.\n",
 "    return results\n",
 "x_train = vectorize_sequences(train_data)\n",
 "x_test = vectorize_sequences(test_data)"
@@ -281,9 +282,6 @@
 },
 "outputs": [],
 "source": [
-"model.compile(optimizer=\"rmsprop\",\n",
-"              loss=\"binary_crossentropy\",\n",
-"              metrics=[\"accuracy\"])\n",
 "history = model.fit(partial_x_train,\n",
 "                    partial_y_train,\n",
 "                    epochs=20,\n",
@@ -574,11 +572,6 @@
 },
 "outputs": [],
 "source": [
-"def vectorize_sequences(sequences, dimension=10000):\n",
-"    results = np.zeros((len(sequences), dimension))\n",
-"    for i, sequence in enumerate(sequences):\n",
-"        results[i, sequence] = 1.\n",
-"    return results\n",
 "x_train = vectorize_sequences(train_data)\n",
 "x_test = vectorize_sequences(test_data)"
 ]
@@ -605,8 +598,8 @@
 "    for i, label in enumerate(labels):\n",
 "        results[i, label] = 1.\n",
 "    return results\n",
-"one_hot_train_labels = to_one_hot(train_labels)\n",
-"one_hot_test_labels = to_one_hot(test_labels)"
+"y_train = to_one_hot(train_labels)\n",
+"y_test = to_one_hot(test_labels)"
 ]
 },
 {
@@ -618,8 +611,8 @@
 "outputs": [],
 "source": [
 "from tensorflow.keras.utils import to_categorical\n",
-"one_hot_train_labels = to_categorical(train_labels)\n",
-"one_hot_test_labels = to_categorical(test_labels)"
+"y_train = to_categorical(train_labels)\n",
+"y_test = to_categorical(test_labels)"
 ]
 },
 {
@@ -705,8 +698,8 @@
 "source": [
 "x_val = x_train[:1000]\n",
 "partial_x_train = x_train[1000:]\n",
-"y_val = one_hot_train_labels[:1000]\n",
-"partial_y_train = one_hot_train_labels[1000:]"
+"y_val = y_train[:1000]\n",
+"partial_y_train = y_train[1000:]"
 ]
 },
 {
@@ -816,12 +809,11 @@
 "model.compile(optimizer=\"rmsprop\",\n",
 "              loss=\"categorical_crossentropy\",\n",
 "              metrics=[\"accuracy\"])\n",
-"model.fit(partial_x_train,\n",
-"          partial_y_train,\n",
+"model.fit(x_train,\n",
+"          y_train,\n",
 "          epochs=9,\n",
-"          batch_size=512,\n",
-"          validation_data=(x_val, y_val))\n",
-"results = model.evaluate(x_test, one_hot_test_labels)"
+"          batch_size=512)\n",
+"results = model.evaluate(x_test, y_test)"
 ]
 },
 {
@@ -847,7 +839,7 @@
 "test_labels_copy = copy.copy(test_labels)\n",
 "np.random.shuffle(test_labels_copy)\n",
 "hits_array = np.array(test_labels) == np.array(test_labels_copy)\n",
-"float(np.sum(hits_array)) / len(test_labels)"
+"hits_array.mean()"
 ]
 },
 {
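The replacement is equivalent: `.mean()` on a boolean array treats True as 1.0 and False as 0.0, so it returns the fraction of hits directly. For example:

import numpy as np

hits_array = np.array([True, False, True, True])
assert hits_array.mean() == float(np.sum(hits_array)) / len(hits_array)  # 0.75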
@@ -1182,7 +1174,7 @@
 "    axis=0)\n",
 "model = build_model()\n",
 "model.fit(partial_train_data, partial_train_targets,\n",
-"          epochs=num_epochs, batch_size=1, verbose=0)\n",
+"          epochs=num_epochs, batch_size=16, verbose=0)\n",
 "val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)\n",
 "all_scores.append(val_mae)"
 ]
@@ -1243,7 +1235,7 @@
 "model = build_model()\n",
 "history = model.fit(partial_train_data, partial_train_targets,\n",
 "                    validation_data=(val_data, val_targets),\n",
-"                    epochs=num_epochs, batch_size=1, verbose=0)\n",
+"                    epochs=num_epochs, batch_size=16, verbose=0)\n",
 "mae_history = history.history[\"val_mae\"]\n",
 "all_mae_histories.append(mae_history)"
 ]
@@ -1298,7 +1290,7 @@
 "colab_type": "text"
 },
 "source": [
-"**Plotting smoothed validation scores, excluding the first 10 data points**"
+"**Plotting validation scores, excluding the first 10 data points**"
 ]
 },
 {
@@ -1309,17 +1301,8 @@
 },
 "outputs": [],
 "source": [
-"def smooth_curve(points, factor=0.9):\n",
-"    smoothed_points = []\n",
-"    for point in points:\n",
-"        if smoothed_points:\n",
-"            previous = smoothed_points[-1]\n",
-"            smoothed_points.append(previous * factor + point * (1 - factor))\n",
-"        else:\n",
-"            smoothed_points.append(point)\n",
-"    return smoothed_points\n",
-"smooth_mae_history = smooth_curve(average_mae_history[10:])\n",
-"plt.plot(range(1, len(smooth_mae_history) + 1), smooth_mae_history)\n",
+"truncated_mae_history = average_mae_history[10:]\n",
+"plt.plot(range(1, len(truncated_mae_history) + 1), truncated_mae_history)\n",
 "plt.xlabel(\"Epochs\")\n",
 "plt.ylabel(\"Validation MAE\")\n",
 "plt.show()"
@@ -1344,7 +1327,7 @@
 "source": [
 "model = build_model()\n",
 "model.fit(train_data, train_targets,\n",
-"          epochs=80, batch_size=16, verbose=0)\n",
+"          epochs=130, batch_size=16, verbose=0)\n",
 "test_mse_score, test_mae_score = model.evaluate(test_data, test_targets)"
 ]
 },

chapter08_intro-to-dl-for-computer-vision.ipynb

Lines changed: 1 addition & 1 deletion
@@ -507,7 +507,7 @@
 "colab_type": "text"
 },
 "source": [
-"**Applying a transformation to Dataset elements using `map()``**"
+"**Applying a transformation to Dataset elements using `map()`**"
 ]
 },
 {
