
Commit 564df50: Update notebooks for TF 2.6

Parent: 91e3525
14 files changed: +68 / −64 lines changed

chapter02_mathematical-building-blocks.ipynb

Lines changed: 6 additions & 2 deletions
@@ -1135,7 +1135,7 @@
 },
 "outputs": [],
 "source": [
-"model = models.Sequential([\n",
+"model = keras.Sequential([\n",
 "    layers.Dense(512, activation=\"relu\"),\n",
 "    layers.Dense(10, activation=\"softmax\")\n",
 "])"
@@ -1280,12 +1280,16 @@
 },
 "outputs": [],
 "source": [
+"import math\n",
+"\n",
 "class BatchGenerator:\n",
 "    def __init__(self, images, labels, batch_size=128):\n",
+"        assert len(images) == len(labels)\n",
 "        self.index = 0\n",
 "        self.images = images\n",
 "        self.labels = labels\n",
 "        self.batch_size = batch_size\n",
+"        self.num_batches = math.ceil(len(images) / batch_size)\n",
 "\n",
 "    def next(self):\n",
 "        images = self.images[self.index : self.index + self.batch_size]\n",
@@ -1374,7 +1378,7 @@
 "    for epoch_counter in range(epochs):\n",
 "        print(f\"Epoch {epoch_counter}\")\n",
 "        batch_generator = BatchGenerator(images, labels)\n",
-"        for batch_counter in range(len(images) // batch_size):\n",
+"        for batch_counter in range(batch_generator.num_batches):\n",
 "            images_batch, labels_batch = batch_generator.next()\n",
 "            loss = one_training_step(model, images_batch, labels_batch)\n",
 "            if batch_counter % 100 == 0:\n",

chapter08_intro-to-dl-for-computer-vision.ipynb

Lines changed: 9 additions & 9 deletions
@@ -341,7 +341,7 @@
 "from tensorflow.keras import layers\n",
 "\n",
 "inputs = keras.Input(shape=(180, 180, 3))\n",
-"x = layers.experimental.preprocessing.Rescaling(1./255)(inputs)\n",
+"x = layers.Rescaling(1./255)(inputs)\n",
 "x = layers.Conv2D(filters=32, kernel_size=3, activation=\"relu\")(x)\n",
 "x = layers.MaxPooling2D(pool_size=2)(x)\n",
 "x = layers.Conv2D(filters=64, kernel_size=3, activation=\"relu\")(x)\n",
@@ -415,7 +415,7 @@
 },
 "outputs": [],
 "source": [
-"from tensorflow.keras.preprocessing import image_dataset_from_directory\n",
+"from tensorflow.keras.utils import image_dataset_from_directory\n",
 "\n",
 "train_dataset = image_dataset_from_directory(\n",
 "    new_base_dir / \"train\",\n",
@@ -663,9 +663,9 @@
 "source": [
 "data_augmentation = keras.Sequential(\n",
 "    [\n",
-"        layers.experimental.preprocessing.RandomFlip(\"horizontal\"),\n",
-"        layers.experimental.preprocessing.RandomRotation(0.1),\n",
-"        layers.experimental.preprocessing.RandomZoom(0.2),\n",
+"        layers.RandomFlip(\"horizontal\"),\n",
+"        layers.RandomRotation(0.1),\n",
+"        layers.RandomZoom(0.2),\n",
 "    ]\n",
 ")"
 ]
@@ -715,7 +715,7 @@
 "source": [
 "inputs = keras.Input(shape=(180, 180, 3))\n",
 "x = data_augmentation(inputs)\n",
-"x = layers.experimental.preprocessing.Rescaling(1./255)(x)\n",
+"x = layers.Rescaling(1./255)(x)\n",
 "x = layers.Conv2D(filters=32, kernel_size=3, activation=\"relu\")(x)\n",
 "x = layers.MaxPooling2D(pool_size=2)(x)\n",
 "x = layers.Conv2D(filters=64, kernel_size=3, activation=\"relu\")(x)\n",
@@ -1055,9 +1055,9 @@
 "source": [
 "data_augmentation = keras.Sequential(\n",
 "    [\n",
-"        layers.experimental.preprocessing.RandomFlip(\"horizontal\"),\n",
-"        layers.experimental.preprocessing.RandomRotation(0.1),\n",
-"        layers.experimental.preprocessing.RandomZoom(0.2),\n",
+"        layers.RandomFlip(\"horizontal\"),\n",
+"        layers.RandomRotation(0.1),\n",
+"        layers.RandomZoom(0.2),\n",
 "    ]\n",
 ")\n",
 "\n",

chapter09_part01_image-segmentation.ipynb

Lines changed: 3 additions & 3 deletions
@@ -82,7 +82,7 @@
 "outputs": [],
 "source": [
 "import matplotlib.pyplot as plt\n",
-"from tensorflow.keras.preprocessing.image import load_img, img_to_array\n",
+"from tensorflow.keras.utils import load_img, img_to_array\n",
 "\n",
 "plt.axis(\"off\")\n",
 "plt.imshow(load_img(input_img_paths[9]))"
@@ -157,7 +157,7 @@
 "\n",
 "def get_model(img_size, num_classes):\n",
 "    inputs = keras.Input(shape=img_size + (3,))\n",
-"    x = layers.experimental.preprocessing.Rescaling(1./255)(inputs)\n",
+"    x = layers.Rescaling(1./255)(inputs)\n",
 "\n",
 "    x = layers.Conv2D(64, 3, strides=2, activation=\"relu\", padding=\"same\")(x)\n",
 "    x = layers.Conv2D(64, 3, activation=\"relu\", padding=\"same\")(x)\n",
@@ -230,7 +230,7 @@
 },
 "outputs": [],
 "source": [
-"from tensorflow.keras.preprocessing.image import array_to_img\n",
+"from tensorflow.keras.utils import array_to_img\n",
 "\n",
 "model = keras.models.load_model(\"oxford_segmentation.keras\")\n",
 "\n",

chapter09_part02_modern-convnet-architecture-patterns.ipynb

Lines changed: 6 additions & 6 deletions
@@ -99,7 +99,7 @@
 "outputs": [],
 "source": [
 "inputs = keras.Input(shape=(32, 32, 3))\n",
-"x = layers.experimental.preprocessing.Rescaling(1./255)(inputs)\n",
+"x = layers.Rescaling(1./255)(inputs)\n",
 "\n",
 "def residual_block(x, filters, pooling=False):\n",
 "    residual = x\n",
@@ -186,7 +186,7 @@
 "outputs": [],
 "source": [
 "import os, shutil, pathlib\n",
-"from tensorflow.keras.preprocessing import image_dataset_from_directory\n",
+"from tensorflow.keras.utils import image_dataset_from_directory\n",
 "\n",
 "original_dir = pathlib.Path(\"train\")\n",
 "new_base_dir = pathlib.Path(\"cats_vs_dogs_small\")\n",
@@ -228,9 +228,9 @@
 "source": [
 "data_augmentation = keras.Sequential(\n",
 "    [\n",
-"        layers.experimental.preprocessing.RandomFlip(\"horizontal\"),\n",
-"        layers.experimental.preprocessing.RandomRotation(0.1),\n",
-"        layers.experimental.preprocessing.RandomZoom(0.2),\n",
+"        layers.RandomFlip(\"horizontal\"),\n",
+"        layers.RandomRotation(0.1),\n",
+"        layers.RandomZoom(0.2),\n",
 "    ]\n",
 ")"
 ]
@@ -246,7 +246,7 @@
 "inputs = keras.Input(shape=(180, 180, 3))\n",
 "x = data_augmentation(inputs)\n",
 "\n",
-"x = layers.experimental.preprocessing.Rescaling(1./255)(x)\n",
+"x = layers.Rescaling(1./255)(x)\n",
 "x = layers.Conv2D(filters=32, kernel_size=5, use_bias=False)(x)\n",
 "\n",
 "for size in [32, 64, 128, 256, 512]:\n",

chapter09_part03_interpreting-what-convnets-learn.ipynb

Lines changed: 10 additions & 10 deletions
@@ -79,9 +79,9 @@
 "    origin=\"https://img-datasets.s3.amazonaws.com/cat.jpg\")\n",
 "\n",
 "def get_img_array(img_path, target_size):\n",
-"    img = keras.preprocessing.image.load_img(\n",
+"    img = keras.utils.load_img(\n",
 "        img_path, target_size=target_size)\n",
-"    array = keras.preprocessing.image.img_to_array(img)\n",
+"    array = keras.utils.img_to_array(img)\n",
 "    array = np.expand_dims(array, axis=0)\n",
 "    return array\n",
 "\n",
@@ -493,7 +493,7 @@
 "            :,\n",
 "        ] = image\n",
 "\n",
-"keras.preprocessing.image.save_img(\n",
+"keras.utils.save_img(\n",
 "    f\"filters_for_layer_{layer_name}.png\", stitched_filters)"
 ]
 },
@@ -548,8 +548,8 @@
 "    origin=\"https://img-datasets.s3.amazonaws.com/elephant.jpg\")\n",
 "\n",
 "def get_img_array(img_path, target_size):\n",
-"    img = keras.preprocessing.image.load_img(img_path, target_size=target_size)\n",
-"    array = keras.preprocessing.image.img_to_array(img)\n",
+"    img = keras.utils.load_img(img_path, target_size=target_size)\n",
+"    array = keras.utils.img_to_array(img)\n",
 "    array = np.expand_dims(array, axis=0)\n",
 "    array = keras.applications.xception.preprocess_input(array)\n",
 "    return array\n",
@@ -724,21 +724,21 @@
 "source": [
 "import matplotlib.cm as cm\n",
 "\n",
-"img = keras.preprocessing.image.load_img(img_path)\n",
-"img = keras.preprocessing.image.img_to_array(img)\n",
+"img = keras.utils.load_img(img_path)\n",
+"img = keras.utils.img_to_array(img)\n",
 "\n",
 "heatmap = np.uint8(255 * heatmap)\n",
 "\n",
 "jet = cm.get_cmap(\"jet\")\n",
 "jet_colors = jet(np.arange(256))[:, :3]\n",
 "jet_heatmap = jet_colors[heatmap]\n",
 "\n",
-"jet_heatmap = keras.preprocessing.image.array_to_img(jet_heatmap)\n",
+"jet_heatmap = keras.utils.array_to_img(jet_heatmap)\n",
 "jet_heatmap = jet_heatmap.resize((img.shape[1], img.shape[0]))\n",
-"jet_heatmap = keras.preprocessing.image.img_to_array(jet_heatmap)\n",
+"jet_heatmap = keras.utils.img_to_array(jet_heatmap)\n",
 "\n",
 "superimposed_img = jet_heatmap * 0.4 + img\n",
-"superimposed_img = keras.preprocessing.image.array_to_img(superimposed_img)\n",
+"superimposed_img = keras.utils.array_to_img(superimposed_img)\n",
 "\n",
 "save_path = \"elephant_cam.jpg\"\n",
 "superimposed_img.save(save_path)"

chapter10_dl-for-timeseries.ipynb

Lines changed: 4 additions & 4 deletions
@@ -213,7 +213,7 @@
 "import numpy as np\n",
 "from tensorflow import keras\n",
 "int_sequence = np.arange(10)\n",
-"dummy_dataset = keras.preprocessing.timeseries_dataset_from_array(\n",
+"dummy_dataset = keras.utils.timeseries_dataset_from_array(\n",
 "    data=int_sequence[:-3],\n",
 "    targets=int_sequence[3:],\n",
 "    sequence_length=3,\n",
@@ -247,7 +247,7 @@
 "delay = sampling_rate * (sequence_length + 24 - 1)\n",
 "batch_size = 256\n",
 "\n",
-"train_dataset = keras.preprocessing.timeseries_dataset_from_array(\n",
+"train_dataset = keras.utils.timeseries_dataset_from_array(\n",
 "    raw_data[:-delay],\n",
 "    targets=temperature[delay:],\n",
 "    sampling_rate=sampling_rate,\n",
@@ -257,7 +257,7 @@
 "    start_index=0,\n",
 "    end_index=num_train_samples)\n",
 "\n",
-"val_dataset = keras.preprocessing.timeseries_dataset_from_array(\n",
+"val_dataset = keras.utils.timeseries_dataset_from_array(\n",
 "    raw_data[:-delay],\n",
 "    targets=temperature[delay:],\n",
 "    sampling_rate=sampling_rate,\n",
@@ -267,7 +267,7 @@
 "    start_index=num_train_samples,\n",
 "    end_index=num_train_samples + num_val_samples)\n",
 "\n",
-"test_dataset = keras.preprocessing.timeseries_dataset_from_array(\n",
+"test_dataset = keras.utils.timeseries_dataset_from_array(\n",
 "    raw_data[:-delay],\n",
 "    targets=temperature[delay:],\n",
 "    sampling_rate=sampling_rate,\n",

chapter11_part01_introduction.ipynb

Lines changed: 4 additions & 4 deletions
@@ -153,7 +153,7 @@
 },
 "outputs": [],
 "source": [
-"from tensorflow.keras.layers.experimental.preprocessing import TextVectorization\n",
+"from tensorflow.keras.layers import TextVectorization\n",
 "text_vectorization = TextVectorization(\n",
 "    output_mode=\"int\",\n",
 ")"
@@ -336,13 +336,13 @@
 "from tensorflow import keras\n",
 "batch_size = 32\n",
 "\n",
-"train_ds = keras.preprocessing.text_dataset_from_directory(\n",
+"train_ds = keras.utils.text_dataset_from_directory(\n",
 "    \"aclImdb/train\", batch_size=batch_size\n",
 ")\n",
-"val_ds = keras.preprocessing.text_dataset_from_directory(\n",
+"val_ds = keras.utils.text_dataset_from_directory(\n",
 "    \"aclImdb/val\", batch_size=batch_size\n",
 ")\n",
-"test_ds = keras.preprocessing.text_dataset_from_directory(\n",
+"test_ds = keras.utils.text_dataset_from_directory(\n",
 "    \"aclImdb/test\", batch_size=batch_size\n",
 ")"
 ]
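For the text chapters the pattern repeats once more: TextVectorization is importable straight from tensorflow.keras.layers in TF 2.6, and text_dataset_from_directory lives in keras.utils. A minimal sketch combining the two, assuming the aclImdb directory layout from the notebook; parts 02 through 04 below make the identical change:

from tensorflow import keras
from tensorflow.keras import layers

# Core layer now; no experimental.preprocessing import needed.
text_vectorization = layers.TextVectorization(
    max_tokens=20000,
    output_mode="int",
)

train_ds = keras.utils.text_dataset_from_directory(
    "aclImdb/train", batch_size=32
)
# Fit the vocabulary on the raw text alone.
text_vectorization.adapt(train_ds.map(lambda x, y: x))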

chapter11_part02_sequence-models.ipynb

Lines changed: 6 additions & 6 deletions
@@ -82,13 +82,13 @@
 "        shutil.move(train_dir / category / fname,\n",
 "                    val_dir / category / fname)\n",
 "\n",
-"train_ds = keras.preprocessing.text_dataset_from_directory(\n",
+"train_ds = keras.utils.text_dataset_from_directory(\n",
 "    \"aclImdb/train\", batch_size=batch_size\n",
 ")\n",
-"val_ds = keras.preprocessing.text_dataset_from_directory(\n",
+"val_ds = keras.utils.text_dataset_from_directory(\n",
 "    \"aclImdb/val\", batch_size=batch_size\n",
 ")\n",
-"test_ds = keras.preprocessing.text_dataset_from_directory(\n",
+"test_ds = keras.utils.text_dataset_from_directory(\n",
 "    \"aclImdb/test\", batch_size=batch_size\n",
 ")\n",
 "text_only_train_ds = train_ds.map(lambda x, y: x)"
@@ -111,10 +111,11 @@
 },
 "outputs": [],
 "source": [
-"from tensorflow.keras.layers.experimental.preprocessing import TextVectorization\n",
+"from tensorflow.keras import layers\n",
+"\n",
 "max_length = 600\n",
 "max_tokens = 20000\n",
-"text_vectorization = TextVectorization(\n",
+"text_vectorization = layers.TextVectorization(\n",
 "    max_tokens=max_tokens,\n",
 "    output_mode=\"int\",\n",
 "    output_sequence_length=max_length,\n",
@@ -144,7 +145,6 @@
 "outputs": [],
 "source": [
 "import tensorflow as tf\n",
-"from tensorflow.keras import layers\n",
 "inputs = keras.Input(shape=(None,), dtype=\"int64\")\n",
 "embedded = tf.one_hot(inputs, depth=max_tokens)\n",
 "x = layers.Bidirectional(layers.LSTM(32))(embedded)\n",

chapter11_part03_transformer.ipynb

Lines changed: 6 additions & 5 deletions
@@ -109,13 +109,13 @@
 "        shutil.move(train_dir / category / fname,\n",
 "                    val_dir / category / fname)\n",
 "\n",
-"train_ds = keras.preprocessing.text_dataset_from_directory(\n",
+"train_ds = keras.utils.text_dataset_from_directory(\n",
 "    \"aclImdb/train\", batch_size=batch_size\n",
 ")\n",
-"val_ds = keras.preprocessing.text_dataset_from_directory(\n",
+"val_ds = keras.utils.text_dataset_from_directory(\n",
 "    \"aclImdb/val\", batch_size=batch_size\n",
 ")\n",
-"test_ds = keras.preprocessing.text_dataset_from_directory(\n",
+"test_ds = keras.utils.text_dataset_from_directory(\n",
 "    \"aclImdb/test\", batch_size=batch_size\n",
 ")\n",
 "text_only_train_ds = train_ds.map(lambda x, y: x)"
@@ -138,10 +138,11 @@
 },
 "outputs": [],
 "source": [
-"from tensorflow.keras.layers.experimental.preprocessing import TextVectorization\n",
+"from tensorflow.keras import layers\n",
+"\n",
 "max_length = 600\n",
 "max_tokens = 20000\n",
-"text_vectorization = TextVectorization(\n",
+"text_vectorization = layers.TextVectorization(\n",
 "    max_tokens=max_tokens,\n",
 "    output_mode=\"int\",\n",
 "    output_sequence_length=max_length,\n",

chapter11_part04_sequence-to-sequence-learning.ipynb

Lines changed: 2 additions & 3 deletions
@@ -103,7 +103,6 @@
 },
 "outputs": [],
 "source": [
-"from keras.layers.experimental.preprocessing import TextVectorization\n",
 "import tensorflow as tf\n",
 "import string\n",
 "import re\n",
@@ -120,12 +119,12 @@
 "vocab_size = 15000\n",
 "sequence_length = 20\n",
 "\n",
-"source_vectorization = TextVectorization(\n",
+"source_vectorization = layers.TextVectorization(\n",
 "    max_tokens=vocab_size,\n",
 "    output_mode=\"int\",\n",
 "    output_sequence_length=sequence_length,\n",
 ")\n",
-"target_vectorization = TextVectorization(\n",
+"target_vectorization = layers.TextVectorization(\n",
 "    max_tokens=vocab_size,\n",
 "    output_mode=\"int\",\n",
 "    output_sequence_length=sequence_length + 1,\n",
