Initial commit
59
ML/TensorFlow/Basics/tutorial10-save-model.py
Normal file
@@ -0,0 +1,59 @@
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.datasets import mnist

# To avoid GPU errors
physical_devices = tf.config.list_physical_devices("GPU")
tf.config.experimental.set_memory_growth(physical_devices[0], True)

(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 28 * 28).astype("float32") / 255.0
x_test = x_test.reshape(-1, 28 * 28).astype("float32") / 255.0

# Alright, so here we have some code which should feel familiar from previous
# tutorials. Here is what we want to cover:
# 1. How to save and load model weights
# 2. How to save and load an entire model (serializing the model), which saves:
#    - the weights
#    - the model architecture
#    - the training configuration (model.compile())
#    - the optimizer and its state

model1 = keras.Sequential([layers.Dense(64, activation="relu"), layers.Dense(10)])

inputs = keras.Input(784)
x = layers.Dense(64, activation="relu")(inputs)
outputs = layers.Dense(10)(x)
model2 = keras.Model(inputs=inputs, outputs=outputs)


class MyModel(keras.Model):
    def __init__(self):
        super(MyModel, self).__init__()
        self.dense1 = layers.Dense(64, activation="relu")
        self.dense2 = layers.Dense(10)

    def call(self, input_tensor):
        x = tf.nn.relu(self.dense1(input_tensor))
        return self.dense2(x)


# SavedModel format or HDF5 format
model3 = MyModel()
# model = keras.models.load_model('saved_model/')
# model.load_weights('checkpoint_folder/')

# pick one of the three (equivalent) models defined above to train and save
model = model3

model.compile(
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    optimizer=keras.optimizers.Adam(),
    metrics=["accuracy"],
)

model.fit(x_train, y_train, batch_size=32, epochs=2, verbose=2)
model.evaluate(x_test, y_test, batch_size=32, verbose=2)
# model.save_weights('checkpoint_folder/')
model.save("saved_model/")
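
# A minimal sketch of loading the artifacts saved above back in (assumes the
# fit/save calls have already run). load_model restores the architecture,
# weights, and compile/optimizer state, so no rebuilding is needed:
loaded_model = keras.models.load_model("saved_model/")
loaded_model.evaluate(x_test, y_test, batch_size=32, verbose=2)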
106
ML/TensorFlow/Basics/tutorial11-transfer-learning.py
Normal file
@@ -0,0 +1,106 @@
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.datasets import mnist
import tensorflow_hub as hub

# To avoid GPU errors
physical_devices = tf.config.list_physical_devices("GPU")
tf.config.experimental.set_memory_growth(physical_devices[0], True)

# ================================================ #
#                 Pretrained-Model                 #
# ================================================ #

(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1).astype("float32") / 255.0
x_test = x_test.reshape(-1, 28, 28, 1).astype("float32") / 255.0

# assumes a SavedModel has previously been exported to the "pretrained" folder
model = keras.models.load_model("pretrained")

# Freeze all model layer weights
model.trainable = False

# Can also set trainable for specific layers
for layer in model.layers:
    # assert should be true because of one-liner above
    assert layer.trainable == False
    layer.trainable = False

print(model.summary())  # for finding base input and output
base_inputs = model.layers[0].input
base_output = model.layers[-2].output
output = layers.Dense(10)(base_output)
new_model = keras.Model(base_inputs, output)

# This model is actually identical to the model we
# loaded (this is just for demonstration and
# not something you would do in practice).
print(new_model.summary())

# As usual we compile and fit, this time on new_model
new_model.compile(
    optimizer=keras.optimizers.Adam(),
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=["accuracy"],
)

new_model.fit(x_train, y_train, batch_size=32, epochs=3, verbose=2)

# =================================================== #
#                Pretrained Keras Model               #
# =================================================== #

# Random data for demonstration (3 examples w. 3 classes)
x = tf.random.normal(shape=(3, 299, 299, 3))
y = tf.constant([0, 1, 2])

model = keras.applications.InceptionV3(include_top=True)
print(model.summary())

# for the input you can also do model.input; for base_outputs you can
# obviously choose something other than simply removing the last layer :)
base_inputs = model.layers[0].input
base_outputs = model.layers[-2].output
classifier = layers.Dense(3)(base_outputs)
new_model = keras.Model(inputs=base_inputs, outputs=classifier)
new_model.compile(
    optimizer=keras.optimizers.Adam(),
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=["accuracy"],
)

print(new_model.summary())
new_model.fit(x, y, epochs=15, verbose=2)

# ================================================= #
#                Pretrained Hub Model               #
# ================================================= #

# Random data for demonstration (3 examples w. 3 classes)
x = tf.random.normal(shape=(3, 299, 299, 3))
y = tf.constant([0, 1, 2])

url = "https://tfhub.dev/google/imagenet/inception_v3/feature_vector/4"

base_model = hub.KerasLayer(url, input_shape=(299, 299, 3))
model = keras.Sequential(
    [
        base_model,
        layers.Dense(128, activation="relu"),
        layers.Dense(64, activation="relu"),
        layers.Dense(10),
    ]
)

model.compile(
    optimizer=keras.optimizers.Adam(),
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=["accuracy"],
)

model.fit(x, y, batch_size=32, epochs=15, verbose=2)
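
# A minimal sketch (assuming the "pretrained" folder does not exist yet) of
# how such a SavedModel could be produced in the first place; the architecture
# here is an arbitrary illustrative choice:
pretrained = keras.Sequential(
    [
        keras.Input((28, 28, 1)),
        layers.Flatten(),
        layers.Dense(64, activation="relu"),
        layers.Dense(10),
    ]
)
pretrained.compile(
    optimizer=keras.optimizers.Adam(),
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=["accuracy"],
)
pretrained.fit(x_train, y_train, batch_size=32, epochs=1, verbose=2)
pretrained.save("pretrained")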
135
ML/TensorFlow/Basics/tutorial12-tensorflowdatasets.py
Normal file
@@ -0,0 +1,135 @@
import os

import matplotlib.pyplot

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow_datasets as tfds

physical_devices = tf.config.list_physical_devices("GPU")
tf.config.experimental.set_memory_growth(physical_devices[0], True)

(ds_train, ds_test), ds_info = tfds.load(
    "mnist",
    split=["train", "test"],
    shuffle_files=True,
    as_supervised=True,  # will return tuple (img, label), otherwise dict
    with_info=True,  # able to get info about dataset
)

# fig = tfds.show_examples(ds_train, ds_info, rows=4, cols=4)
# print(ds_info)


def normalize_img(image, label):
    """Normalizes images"""
    return tf.cast(image, tf.float32) / 255.0, label


AUTOTUNE = tf.data.experimental.AUTOTUNE
BATCH_SIZE = 128

# Setup for train dataset
ds_train = ds_train.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_train = ds_train.cache()
ds_train = ds_train.shuffle(ds_info.splits["train"].num_examples)
ds_train = ds_train.batch(BATCH_SIZE)
ds_train = ds_train.prefetch(AUTOTUNE)

# Setup for test dataset (note: chained off ds_test, not ds_train)
ds_test = ds_test.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_test = ds_test.batch(128)
ds_test = ds_test.prefetch(AUTOTUNE)

model = keras.Sequential(
    [
        keras.Input((28, 28, 1)),
        layers.Conv2D(32, 3, activation="relu"),
        layers.Flatten(),
        tf.keras.layers.Dense(10, activation="softmax"),
    ]
)

model.compile(
    optimizer=keras.optimizers.Adam(0.001),
    loss=keras.losses.SparseCategoricalCrossentropy(),
    metrics=["accuracy"],
)

model.fit(ds_train, epochs=5, verbose=2)
model.evaluate(ds_test)


(ds_train, ds_test), ds_info = tfds.load(
    "imdb_reviews",
    split=["train", "test"],
    shuffle_files=True,
    as_supervised=True,  # will return tuple (text, label), otherwise dict
    with_info=True,  # able to get info about dataset
)

tokenizer = tfds.features.text.Tokenizer()


def build_vocabulary():
    vocabulary = set()
    for text, _ in ds_train:
        vocabulary.update(tokenizer.tokenize(text.numpy().lower()))
    return vocabulary


vocabulary = build_vocabulary()

encoder = tfds.features.text.TokenTextEncoder(
    list(vocabulary), oov_token="<UNK>", lowercase=True, tokenizer=tokenizer
)


def my_enc(text_tensor, label):
    encoded_text = encoder.encode(text_tensor.numpy())
    return encoded_text, label


def encode_map_fn(text, label):
    # py_func doesn't set the shape of the returned tensors.
    encoded_text, label = tf.py_function(
        my_enc, inp=[text, label], Tout=(tf.int64, tf.int64)
    )

    # `tf.data.Dataset`s work best if all components have a shape set,
    # so set the shapes manually:
    encoded_text.set_shape([None])
    label.set_shape([])

    return encoded_text, label


AUTOTUNE = tf.data.experimental.AUTOTUNE
ds_train = ds_train.map(encode_map_fn, num_parallel_calls=AUTOTUNE)
ds_train = ds_train.cache()
ds_train = ds_train.shuffle(1000)
ds_train = ds_train.padded_batch(32, padded_shapes=([None], ()))
ds_train = ds_train.prefetch(AUTOTUNE)

ds_test = ds_test.map(encode_map_fn)
ds_test = ds_test.padded_batch(32, padded_shapes=([None], ()))

model = keras.Sequential(
    [
        layers.Masking(mask_value=0),
        layers.Embedding(input_dim=len(vocabulary) + 2, output_dim=32),
        layers.GlobalAveragePooling1D(),
        layers.Dense(64, activation="relu"),
        layers.Dense(1),
    ]
)

model.compile(
    loss=keras.losses.BinaryCrossentropy(from_logits=True),
    optimizer=keras.optimizers.Adam(3e-4, clipnorm=1),
    metrics=["accuracy"],
)

model.fit(ds_train, epochs=15, verbose=2)
model.evaluate(ds_test)
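
# A quick sanity check (sketch) that the encoder round-trips text as expected;
# the sample sentence is arbitrary:
sample = "this movie was great"
ids = encoder.encode(sample)
print(ids)  # list of integer token ids
print(encoder.decode(ids))  # should reproduce the lowercased, tokenized text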
87
ML/TensorFlow/Basics/tutorial13-data-augmentation.py
Normal file
@@ -0,0 +1,87 @@
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow_datasets as tfds

(ds_train, ds_test), ds_info = tfds.load(
    "cifar10",
    split=["train", "test"],
    shuffle_files=True,
    as_supervised=True,  # will return tuple (img, label), otherwise dict
    with_info=True,  # able to get info about dataset
)


def normalize_img(image, label):
    """Normalizes images"""
    return tf.cast(image, tf.float32) / 255.0, label


AUTOTUNE = tf.data.experimental.AUTOTUNE
BATCH_SIZE = 32


def augment(image, label):
    new_height = new_width = 32
    image = tf.image.resize(image, (new_height, new_width))

    if tf.random.uniform((), minval=0, maxval=1) < 0.1:
        image = tf.tile(tf.image.rgb_to_grayscale(image), [1, 1, 3])

    image = tf.image.random_brightness(image, max_delta=0.1)
    image = tf.image.random_contrast(image, lower=0.1, upper=0.2)

    # a dog flipped left-to-right (or upside down) is still a dog ;)
    image = tf.image.random_flip_left_right(image)  # 50%
    # image = tf.image.random_flip_up_down(image)  # 50%

    return image, label


# Setup for train dataset
ds_train = ds_train.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_train = ds_train.cache()
ds_train = ds_train.shuffle(ds_info.splits["train"].num_examples)
# ds_train = ds_train.map(augment)
ds_train = ds_train.batch(BATCH_SIZE)
ds_train = ds_train.prefetch(AUTOTUNE)

# Setup for test dataset (note: chained off ds_test, not ds_train)
ds_test = ds_test.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_test = ds_test.batch(BATCH_SIZE)
ds_test = ds_test.prefetch(AUTOTUNE)

# TF >= 2.3.0
data_augmentation = keras.Sequential(
    [
        layers.experimental.preprocessing.Resizing(height=32, width=32),
        layers.experimental.preprocessing.RandomFlip(mode="horizontal"),
        layers.experimental.preprocessing.RandomContrast(factor=0.1),
    ]
)

model = keras.Sequential(
    [
        keras.Input((32, 32, 3)),
        data_augmentation,
        layers.Conv2D(4, 3, padding="same", activation="relu"),
        layers.Conv2D(8, 3, padding="same", activation="relu"),
        layers.MaxPooling2D(),
        layers.Conv2D(16, 3, padding="same", activation="relu"),
        layers.Flatten(),
        layers.Dense(64, activation="relu"),
        layers.Dense(10),
    ]
)

model.compile(
    optimizer=keras.optimizers.Adam(3e-4),
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=["accuracy"],
)

model.fit(ds_train, epochs=5, verbose=2)
model.evaluate(ds_test)
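
# A minimal sketch for eyeballing what the preprocessing layers do to a batch
# (assumes matplotlib is installed; training=True activates the random ops):
import matplotlib.pyplot as plt

images, _ = next(iter(ds_train))
augmented = data_augmentation(images, training=True)
plt.figure(figsize=(6, 6))
for i in range(9):
    plt.subplot(3, 3, i + 1)
    plt.imshow(tf.clip_by_value(augmented[i], 0.0, 1.0))
    plt.axis("off")
plt.show()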
73
ML/TensorFlow/Basics/tutorial14-callbacks.py
Normal file
@@ -0,0 +1,73 @@
import os

import matplotlib.pyplot

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow_datasets as tfds

physical_devices = tf.config.list_physical_devices("GPU")
tf.config.experimental.set_memory_growth(physical_devices[0], True)

(ds_train, ds_test), ds_info = tfds.load(
    "mnist",
    split=["train", "test"],
    shuffle_files=True,
    as_supervised=True,  # will return tuple (img, label), otherwise dict
    with_info=True,  # able to get info about dataset
)


def normalize_img(image, label):
    """Normalizes images"""
    return tf.cast(image, tf.float32) / 255.0, label


AUTOTUNE = tf.data.experimental.AUTOTUNE
BATCH_SIZE = 128

# Setup for train dataset
ds_train = ds_train.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_train = ds_train.cache()
ds_train = ds_train.shuffle(ds_info.splits["train"].num_examples)
ds_train = ds_train.batch(BATCH_SIZE)
ds_train = ds_train.prefetch(AUTOTUNE)

model = keras.Sequential(
    [
        keras.Input((28, 28, 1)),
        layers.Conv2D(32, 3, activation="relu"),
        layers.Flatten(),
        tf.keras.layers.Dense(10, activation="softmax"),
    ]
)

save_callback = keras.callbacks.ModelCheckpoint(
    "checkpoint/",
    save_weights_only=True,
    monitor="accuracy",  # name must match a metric passed to model.compile
    save_best_only=False,
)

# reduce the learning rate once the monitored loss has stopped decreasing
lr_scheduler = keras.callbacks.ReduceLROnPlateau(
    monitor="loss", factor=0.1, patience=3, mode="min", verbose=1
)


class OurOwnCallback(keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        if logs.get("accuracy") > 0.70:
            print("Accuracy over 70%, quitting training")
            self.model.stop_training = True


model.compile(
    optimizer=keras.optimizers.Adam(0.01),
    loss=keras.losses.SparseCategoricalCrossentropy(),
    metrics=["accuracy"],
)

model.fit(
    ds_train,
    epochs=10,
    callbacks=[save_callback, lr_scheduler, OurOwnCallback()],
    verbose=2,
)
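
# One more common callback, as a sketch: a custom learning-rate schedule via
# keras.callbacks.LearningRateScheduler. The warm-up length and 0.99 decay
# factor are illustrative choices, not from the original tutorial:
def scheduler(epoch, lr):
    if epoch < 2:
        return lr
    return lr * 0.99


lr_schedule_callback = keras.callbacks.LearningRateScheduler(scheduler, verbose=1)
# pass it alongside the others: model.fit(..., callbacks=[..., lr_schedule_callback])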
82
ML/TensorFlow/Basics/tutorial15-customizing-modelfit.py
Normal file
@@ -0,0 +1,82 @@
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.datasets import mnist

(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1).astype("float32") / 255.0
x_test = x_test.reshape(-1, 28, 28, 1).astype("float32") / 255.0

model = keras.Sequential(
    [
        layers.Input(shape=(28, 28, 1)),
        layers.Conv2D(64, (3, 3), padding="same"),
        layers.ReLU(),
        layers.Conv2D(128, (3, 3), padding="same"),
        layers.ReLU(),
        layers.Flatten(),
        layers.Dense(10),
    ],
    name="model",
)


class CustomFit(keras.Model):
    def __init__(self, model):
        super(CustomFit, self).__init__()
        self.model = model

    def compile(self, optimizer, loss):
        super(CustomFit, self).compile()
        self.optimizer = optimizer
        self.loss = loss

    def train_step(self, data):
        x, y = data

        with tf.GradientTape() as tape:
            # Calculate predictions
            y_pred = self.model(x, training=True)

            # Loss
            loss = self.loss(y, y_pred)

        # Gradients
        training_vars = self.trainable_variables
        gradients = tape.gradient(loss, training_vars)

        # Step with optimizer
        self.optimizer.apply_gradients(zip(gradients, training_vars))
        acc_metric.update_state(y, y_pred)

        return {"loss": loss, "accuracy": acc_metric.result()}

    def test_step(self, data):
        # Unpack the data
        x, y = data

        # Compute predictions
        y_pred = self.model(x, training=False)

        # Updates the metrics tracking the loss
        loss = self.loss(y, y_pred)

        # Update the metrics.
        acc_metric.update_state(y, y_pred)
        return {"loss": loss, "accuracy": acc_metric.result()}


acc_metric = keras.metrics.SparseCategoricalAccuracy(name="accuracy")

training = CustomFit(model)
training.compile(
    optimizer=keras.optimizers.Adam(learning_rate=3e-4),
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
)

training.fit(x_train, y_train, batch_size=64, epochs=2)
training.evaluate(x_test, y_test, batch_size=64)
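
# A possible refinement (sketch, not in the original): exposing the metric via
# the `metrics` property lets Keras reset it automatically between epochs and
# between fit() and evaluate(); everything else is inherited from CustomFit:
class CustomFitAutoReset(CustomFit):
    @property
    def metrics(self):
        return [acc_metric]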
80
ML/TensorFlow/Basics/tutorial16-customloops.py
Normal file
@@ -0,0 +1,80 @@
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.datasets import mnist
import tensorflow_datasets as tfds

physical_devices = tf.config.list_physical_devices("GPU")
tf.config.experimental.set_memory_growth(physical_devices[0], True)

(ds_train, ds_test), ds_info = tfds.load(
    "mnist",
    split=["train", "test"],
    shuffle_files=True,
    as_supervised=True,
    with_info=True,
)


def normalize_img(image, label):
    """Normalizes images"""
    return tf.cast(image, tf.float32) / 255.0, label


AUTOTUNE = tf.data.experimental.AUTOTUNE
BATCH_SIZE = 128

# Setup for train dataset
ds_train = ds_train.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_train = ds_train.cache()
ds_train = ds_train.shuffle(ds_info.splits["train"].num_examples)
ds_train = ds_train.batch(BATCH_SIZE)
ds_train = ds_train.prefetch(AUTOTUNE)

# Setup for test dataset (note: chained off ds_test, not ds_train)
ds_test = ds_test.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_test = ds_test.batch(128)
ds_test = ds_test.prefetch(AUTOTUNE)

model = keras.Sequential(
    [
        keras.Input((28, 28, 1)),
        layers.Conv2D(32, 3, activation="relu"),
        layers.Flatten(),
        layers.Dense(10, activation="softmax"),
    ]
)

num_epochs = 5
optimizer = keras.optimizers.Adam()
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
acc_metric = keras.metrics.SparseCategoricalAccuracy()

# Training Loop
for epoch in range(num_epochs):
    print(f"\nStart of Training Epoch {epoch}")
    for batch_idx, (x_batch, y_batch) in enumerate(ds_train):
        with tf.GradientTape() as tape:
            y_pred = model(x_batch, training=True)
            loss = loss_fn(y_batch, y_pred)

        gradients = tape.gradient(loss, model.trainable_weights)
        optimizer.apply_gradients(zip(gradients, model.trainable_weights))
        acc_metric.update_state(y_batch, y_pred)

    train_acc = acc_metric.result()
    print(f"Accuracy over epoch {train_acc}")
    acc_metric.reset_states()

# Test Loop
for batch_idx, (x_batch, y_batch) in enumerate(ds_test):
    y_pred = model(x_batch, training=False)  # inference mode for evaluation
    acc_metric.update_state(y_batch, y_pred)

test_acc = acc_metric.result()
print(f"Accuracy over Test Set: {test_acc}")
acc_metric.reset_states()
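
# A common speedup, sketched under the assumption that nothing in the step
# needs eager execution: wrap the inner step in tf.function so it runs as a
# compiled graph. Same math as the loop body above.
@tf.function
def train_step(x_batch, y_batch):
    with tf.GradientTape() as tape:
        y_pred = model(x_batch, training=True)
        loss = loss_fn(y_batch, y_pred)
    gradients = tape.gradient(loss, model.trainable_weights)
    optimizer.apply_gradients(zip(gradients, model.trainable_weights))
    acc_metric.update_state(y_batch, y_pred)
    return loss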
107
ML/TensorFlow/Basics/tutorial17-tensorboard/1_tb_callback.py
Normal file
@@ -0,0 +1,107 @@
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

import io
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import tensorflow_datasets as tfds

from tensorflow import keras
from tensorflow.keras import layers

# Make sure we don't get any GPU errors
physical_devices = tf.config.list_physical_devices("GPU")
tf.config.experimental.set_memory_growth(physical_devices[0], True)

(ds_train, ds_test), ds_info = tfds.load(
    "cifar10",
    split=["train", "test"],
    shuffle_files=True,
    as_supervised=True,
    with_info=True,
)


def normalize_img(image, label):
    """Normalizes images"""
    return tf.cast(image, tf.float32) / 255.0, label


def augment(image, label):
    if tf.random.uniform((), minval=0, maxval=1) < 0.1:
        image = tf.tile(tf.image.rgb_to_grayscale(image), [1, 1, 3])

    image = tf.image.random_brightness(image, max_delta=0.1)
    image = tf.image.random_flip_left_right(image)

    return image, label


AUTOTUNE = tf.data.experimental.AUTOTUNE
BATCH_SIZE = 32

# Setup for train dataset
ds_train = ds_train.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_train = ds_train.cache()
ds_train = ds_train.shuffle(ds_info.splits["train"].num_examples)
ds_train = ds_train.map(augment)
ds_train = ds_train.batch(BATCH_SIZE)
ds_train = ds_train.prefetch(AUTOTUNE)

# Setup for test dataset (note: chained off ds_test, not ds_train)
ds_test = ds_test.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_test = ds_test.batch(BATCH_SIZE)
ds_test = ds_test.prefetch(AUTOTUNE)

class_names = [
    "Airplane",
    "Automobile",
    "Bird",
    "Cat",
    "Deer",
    "Dog",
    "Frog",
    "Horse",
    "Ship",
    "Truck",
]


def get_model():
    model = keras.Sequential(
        [
            layers.Input((32, 32, 3)),
            layers.Conv2D(8, 3, padding="same", activation="relu"),
            layers.Conv2D(16, 3, padding="same", activation="relu"),
            layers.MaxPooling2D((2, 2)),
            layers.Flatten(),
            layers.Dense(64, activation="relu"),
            layers.Dropout(0.1),
            layers.Dense(10),
        ]
    )

    return model


model = get_model()

model.compile(
    optimizer=keras.optimizers.Adam(learning_rate=0.001),
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=["accuracy"],
)

tensorboard_callback = keras.callbacks.TensorBoard(
    log_dir="tb_callback_dir", histogram_freq=1,
)

model.fit(
    ds_train,
    epochs=5,
    validation_data=ds_test,
    callbacks=[tensorboard_callback],
    verbose=2,
)
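
# To inspect the logged run (usual workflow, sketched): start TensorBoard from
# a shell in this directory and open the printed URL in a browser:
#   tensorboard --logdir tb_callback_dir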
144
ML/TensorFlow/Basics/tutorial17-tensorboard/2_tb_scalars.py
Normal file
@@ -0,0 +1,144 @@
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

import io
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import tensorflow_datasets as tfds

from tensorflow import keras
from tensorflow.keras import layers

# Make sure we don't get any GPU errors
physical_devices = tf.config.list_physical_devices("GPU")
tf.config.experimental.set_memory_growth(physical_devices[0], True)

(ds_train, ds_test), ds_info = tfds.load(
    "cifar10",
    split=["train", "test"],
    shuffle_files=True,
    as_supervised=True,
    with_info=True,
)


def normalize_img(image, label):
    """Normalizes images"""
    return tf.cast(image, tf.float32) / 255.0, label


AUTOTUNE = tf.data.experimental.AUTOTUNE
BATCH_SIZE = 32


def augment(image, label):
    if tf.random.uniform((), minval=0, maxval=1) < 0.1:
        image = tf.tile(tf.image.rgb_to_grayscale(image), [1, 1, 3])

    image = tf.image.random_brightness(image, max_delta=0.1)
    image = tf.image.random_flip_left_right(image)

    return image, label


# Setup for train dataset
ds_train = ds_train.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_train = ds_train.cache()
ds_train = ds_train.shuffle(ds_info.splits["train"].num_examples)
ds_train = ds_train.map(augment)
ds_train = ds_train.batch(BATCH_SIZE)
ds_train = ds_train.prefetch(AUTOTUNE)

# Setup for test dataset (note: chained off ds_test, not ds_train)
ds_test = ds_test.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_test = ds_test.batch(BATCH_SIZE)
ds_test = ds_test.prefetch(AUTOTUNE)

class_names = [
    "Airplane",
    "Automobile",
    "Bird",
    "Cat",
    "Deer",
    "Dog",
    "Frog",
    "Horse",
    "Ship",
    "Truck",
]


def get_model():
    model = keras.Sequential(
        [
            layers.Input((32, 32, 3)),
            layers.Conv2D(8, 3, padding="same", activation="relu"),
            layers.Conv2D(16, 3, padding="same", activation="relu"),
            layers.MaxPooling2D((2, 2)),
            layers.Flatten(),
            layers.Dense(64, activation="relu"),
            layers.Dropout(0.1),
            layers.Dense(10),
        ]
    )

    return model


model = get_model()
num_epochs = 1
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = keras.optimizers.Adam(learning_rate=0.001)
acc_metric = keras.metrics.SparseCategoricalAccuracy()
train_writer = tf.summary.create_file_writer("logs/train/")
test_writer = tf.summary.create_file_writer("logs/test/")
train_step = test_step = 0


# one separate TensorBoard run per learning rate
for lr in [1e-1, 1e-2, 1e-3, 1e-4, 1e-5]:
    train_step = test_step = 0
    train_writer = tf.summary.create_file_writer("logs/train/" + str(lr))
    test_writer = tf.summary.create_file_writer("logs/test/" + str(lr))
    model = get_model()
    optimizer = keras.optimizers.Adam(learning_rate=lr)

    for epoch in range(num_epochs):
        # Iterate through training set
        for batch_idx, (x, y) in enumerate(ds_train):
            with tf.GradientTape() as tape:
                y_pred = model(x, training=True)
                loss = loss_fn(y, y_pred)

            gradients = tape.gradient(loss, model.trainable_weights)
            optimizer.apply_gradients(zip(gradients, model.trainable_weights))
            acc_metric.update_state(y, y_pred)

            with train_writer.as_default():
                tf.summary.scalar("Loss", loss, step=train_step)
                tf.summary.scalar(
                    "Accuracy", acc_metric.result(), step=train_step,
                )
                train_step += 1

        # Reset accuracy in between epochs (and in between train and test)
        acc_metric.reset_states()

        # Iterate through test set
        for batch_idx, (x, y) in enumerate(ds_test):
            y_pred = model(x, training=False)
            loss = loss_fn(y, y_pred)
            acc_metric.update_state(y, y_pred)

            with test_writer.as_default():
                tf.summary.scalar("Loss", loss, step=test_step)
                tf.summary.scalar(
                    "Accuracy", acc_metric.result(), step=test_step,
                )
                test_step += 1

        acc_metric.reset_states()

    # Reset accuracy in between epochs (and in between train and test)
    acc_metric.reset_states()
112
ML/TensorFlow/Basics/tutorial17-tensorboard/3_tb_images.py
Normal file
@@ -0,0 +1,112 @@
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

import io
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import tensorflow_datasets as tfds

from tensorflow import keras
from tensorflow.keras import layers

from utils import plot_to_image, image_grid

# Make sure we don't get any GPU errors
physical_devices = tf.config.list_physical_devices("GPU")
tf.config.experimental.set_memory_growth(physical_devices[0], True)

(ds_train, ds_test), ds_info = tfds.load(
    "cifar10",
    split=["train", "test"],
    shuffle_files=True,
    as_supervised=True,
    with_info=True,
)


def normalize_img(image, label):
    """Normalizes images"""
    return tf.cast(image, tf.float32) / 255.0, label


AUTOTUNE = tf.data.experimental.AUTOTUNE
BATCH_SIZE = 32


def augment(image, label):
    if tf.random.uniform((), minval=0, maxval=1) < 0.1:
        image = tf.tile(tf.image.rgb_to_grayscale(image), [1, 1, 3])

    image = tf.image.random_brightness(image, max_delta=0.1)
    image = tf.image.random_flip_left_right(image)

    # matplotlib wants [0,1] values
    image = tf.clip_by_value(image, clip_value_min=0, clip_value_max=1)

    return image, label


# Setup for train dataset
ds_train = ds_train.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_train = ds_train.cache()
ds_train = ds_train.shuffle(ds_info.splits["train"].num_examples)
ds_train = ds_train.map(augment)
ds_train = ds_train.batch(BATCH_SIZE)
ds_train = ds_train.prefetch(AUTOTUNE)

# Setup for test dataset (note: chained off ds_test, not ds_train)
ds_test = ds_test.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_test = ds_test.batch(BATCH_SIZE)
ds_test = ds_test.prefetch(AUTOTUNE)

class_names = [
    "Airplane",
    "Automobile",
    "Bird",
    "Cat",
    "Deer",
    "Dog",
    "Frog",
    "Horse",
    "Ship",
    "Truck",
]


def get_model():
    model = keras.Sequential(
        [
            layers.Input((32, 32, 3)),
            layers.Conv2D(8, 3, padding="same", activation="relu"),
            layers.Conv2D(16, 3, padding="same", activation="relu"),
            layers.MaxPooling2D((2, 2)),
            layers.Flatten(),
            layers.Dense(64, activation="relu"),
            layers.Dropout(0.1),
            layers.Dense(10),
        ]
    )

    return model


model = get_model()
num_epochs = 1
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = keras.optimizers.Adam(learning_rate=0.001)
acc_metric = keras.metrics.SparseCategoricalAccuracy()
writer = tf.summary.create_file_writer("logs/train/")
step = 0


for epoch in range(num_epochs):
    for batch_idx, (x, y) in enumerate(ds_train):
        figure = image_grid(x, y, class_names)

        with writer.as_default():
            tf.summary.image(
                "Visualize Images", plot_to_image(figure), step=step,
            )
            step += 1
124
ML/TensorFlow/Basics/tutorial17-tensorboard/4_tb_confusion.py
Normal file
@@ -0,0 +1,124 @@
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

import io
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import tensorflow_datasets as tfds

from tensorflow import keras
from tensorflow.keras import layers

from utils import get_confusion_matrix, plot_confusion_matrix

# Make sure we don't get any GPU errors
physical_devices = tf.config.list_physical_devices("GPU")
tf.config.experimental.set_memory_growth(physical_devices[0], True)

(ds_train, ds_test), ds_info = tfds.load(
    "cifar10",
    split=["train", "test"],
    shuffle_files=True,
    as_supervised=True,
    with_info=True,
)


def normalize_img(image, label):
    """Normalizes images"""
    return tf.cast(image, tf.float32) / 255.0, label


AUTOTUNE = tf.data.experimental.AUTOTUNE
BATCH_SIZE = 32


def augment(image, label):
    if tf.random.uniform((), minval=0, maxval=1) < 0.1:
        image = tf.tile(tf.image.rgb_to_grayscale(image), [1, 1, 3])

    image = tf.image.random_brightness(image, max_delta=0.1)
    image = tf.image.random_flip_left_right(image)

    return image, label


# Setup for train dataset
ds_train = ds_train.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_train = ds_train.cache()
ds_train = ds_train.shuffle(ds_info.splits["train"].num_examples)
ds_train = ds_train.map(augment)
ds_train = ds_train.batch(BATCH_SIZE)
ds_train = ds_train.prefetch(AUTOTUNE)

# Setup for test dataset (note: chained off ds_test, not ds_train)
ds_test = ds_test.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_test = ds_test.batch(BATCH_SIZE)
ds_test = ds_test.prefetch(AUTOTUNE)

class_names = [
    "Airplane",
    "Automobile",
    "Bird",
    "Cat",
    "Deer",
    "Dog",
    "Frog",
    "Horse",
    "Ship",
    "Truck",
]


def get_model():
    model = keras.Sequential(
        [
            layers.Input((32, 32, 3)),
            layers.Conv2D(8, 3, padding="same", activation="relu"),
            layers.Conv2D(16, 3, padding="same", activation="relu"),
            layers.MaxPooling2D((2, 2)),
            layers.Flatten(),
            layers.Dense(64, activation="relu"),
            layers.Dropout(0.1),
            layers.Dense(10),
        ]
    )

    return model


model = get_model()
num_epochs = 5
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = keras.optimizers.Adam(learning_rate=0.001)
acc_metric = keras.metrics.SparseCategoricalAccuracy()
train_writer = tf.summary.create_file_writer("logs/train/")
test_writer = tf.summary.create_file_writer("logs/test/")
train_step = test_step = 0


for epoch in range(num_epochs):
    confusion = np.zeros((len(class_names), len(class_names)))

    # Iterate through training set
    for batch_idx, (x, y) in enumerate(ds_train):
        with tf.GradientTape() as tape:
            y_pred = model(x, training=True)
            loss = loss_fn(y, y_pred)

        gradients = tape.gradient(loss, model.trainable_weights)
        optimizer.apply_gradients(zip(gradients, model.trainable_weights))
        acc_metric.update_state(y, y_pred)
        confusion += get_confusion_matrix(y, y_pred, class_names)

    with train_writer.as_default():
        tf.summary.image(
            "Confusion Matrix",
            # average over the number of batches seen this epoch
            plot_confusion_matrix(confusion / (batch_idx + 1), class_names),
            step=epoch,
        )

    # Reset accuracy in between epochs (and in between train and test)
    acc_metric.reset_states()
35
ML/TensorFlow/Basics/tutorial17-tensorboard/5_tb_graph.py
Normal file
@@ -0,0 +1,35 @@
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

import io
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import tensorflow_datasets as tfds

from tensorflow import keras
from tensorflow.keras import layers

# Make sure we don't get any GPU errors
physical_devices = tf.config.list_physical_devices("GPU")
tf.config.experimental.set_memory_growth(physical_devices[0], True)

writer = tf.summary.create_file_writer("logs/graph_vis")


@tf.function
def my_func(x, y):
    return tf.nn.relu(tf.matmul(x, y))


x = tf.random.uniform((3, 3))
y = tf.random.uniform((3, 3))

tf.summary.trace_on(graph=True, profiler=True)
out = my_func(x, y)

with writer.as_default():
    tf.summary.trace_export(
        name="function_trace", step=0, profiler_outdir="logs/graph_vis"
    )
137
ML/TensorFlow/Basics/tutorial17-tensorboard/6_tb_hparams.py
Normal file
@@ -0,0 +1,137 @@
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

import io
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import tensorflow_datasets as tfds

from tensorboard.plugins.hparams import api as hp
from tensorflow import keras
from tensorflow.keras import layers

# Make sure we don't get any GPU errors
physical_devices = tf.config.list_physical_devices("GPU")
tf.config.experimental.set_memory_growth(physical_devices[0], True)

(ds_train, ds_test), ds_info = tfds.load(
    "cifar10",
    split=["train", "test"],
    shuffle_files=True,
    as_supervised=True,
    with_info=True,
)


def normalize_img(image, label):
    """Normalizes images"""
    return tf.cast(image, tf.float32) / 255.0, label


AUTOTUNE = tf.data.experimental.AUTOTUNE
BATCH_SIZE = 32


def augment(image, label):
    if tf.random.uniform((), minval=0, maxval=1) < 0.1:
        image = tf.tile(tf.image.rgb_to_grayscale(image), [1, 1, 3])

    image = tf.image.random_brightness(image, max_delta=0.1)
    image = tf.image.random_flip_left_right(image)

    return image, label


# Setup for train dataset
ds_train = ds_train.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_train = ds_train.cache()
ds_train = ds_train.shuffle(ds_info.splits["train"].num_examples)
ds_train = ds_train.map(augment)
ds_train = ds_train.batch(BATCH_SIZE)
ds_train = ds_train.prefetch(AUTOTUNE)

# Setup for test dataset (note: chained off ds_test, not ds_train)
ds_test = ds_test.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_test = ds_test.batch(BATCH_SIZE)
ds_test = ds_test.prefetch(AUTOTUNE)

class_names = [
    "Airplane",
    "Automobile",
    "Bird",
    "Cat",
    "Deer",
    "Dog",
    "Frog",
    "Horse",
    "Ship",
    "Truck",
]


def train_model_one_epoch(hparams):
    units = hparams[HP_NUM_UNITS]
    drop_rate = hparams[HP_DROPOUT]
    learning_rate = hparams[HP_LR]

    optimizer = keras.optimizers.Adam(learning_rate=learning_rate)
    model = keras.Sequential(
        [
            layers.Input((32, 32, 3)),
            layers.Conv2D(8, 3, padding="same", activation="relu"),
            layers.Conv2D(16, 3, padding="same", activation="relu"),
            layers.MaxPooling2D((2, 2)),
            layers.Flatten(),
            layers.Dense(units, activation="relu"),
            layers.Dropout(drop_rate),
            layers.Dense(10),
        ]
    )

    for batch_idx, (x, y) in enumerate(ds_train):
        with tf.GradientTape() as tape:
            y_pred = model(x, training=True)
            loss = loss_fn(y, y_pred)

        gradients = tape.gradient(loss, model.trainable_weights)
        optimizer.apply_gradients(zip(gradients, model.trainable_weights))
        acc_metric.update_state(y, y_pred)

    # write to TB
    run_dir = (
        "logs/train/"
        + str(units)
        + "units_"
        + str(drop_rate)
        + "dropout_"
        + str(learning_rate)
        + "learning_rate"
    )

    with tf.summary.create_file_writer(run_dir).as_default():
        hp.hparams(hparams)
        accuracy = acc_metric.result()
        tf.summary.scalar("accuracy", accuracy, step=1)

    acc_metric.reset_states()


loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = keras.optimizers.Adam(learning_rate=0.001)
acc_metric = keras.metrics.SparseCategoricalAccuracy()
HP_NUM_UNITS = hp.HParam("num units", hp.Discrete([32, 64, 128]))
HP_DROPOUT = hp.HParam("dropout", hp.Discrete([0.1, 0.2, 0.3, 0.5]))
HP_LR = hp.HParam("learning_rate", hp.Discrete([1e-3, 1e-4, 1e-5]))

# grid search over every combination of the three hyperparameters
for lr in HP_LR.domain.values:
    for units in HP_NUM_UNITS.domain.values:
        for rate in HP_DROPOUT.domain.values:
            hparams = {
                HP_LR: lr,
                HP_NUM_UNITS: units,
                HP_DROPOUT: rate,
            }

            train_model_one_epoch(hparams)
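
# Optional sketch: registering the experiment (normally done before the
# training loop) gives the HParams dashboard its column metadata; the metric
# name must match the scalar tag written in train_model_one_epoch:
with tf.summary.create_file_writer("logs/train").as_default():
    hp.hparams_config(
        hparams=[HP_NUM_UNITS, HP_DROPOUT, HP_LR],
        metrics=[hp.Metric("accuracy", display_name="Accuracy")],
    )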
@@ -0,0 +1,69 @@
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

import io
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import tensorflow_datasets as tfds

from tensorflow import keras
from tensorflow.keras import layers

from utils import plot_to_projector

# Make sure we don't get any GPU errors
physical_devices = tf.config.list_physical_devices("GPU")
tf.config.experimental.set_memory_growth(physical_devices[0], True)

(ds_train, ds_test), ds_info = tfds.load(
    "mnist",
    split=["train", "test"],
    shuffle_files=True,
    as_supervised=True,
    with_info=True,
)


def normalize_img(image, label):
    """Casts images to float32 (kept in the 0-255 range for the sprite image)"""
    return tf.cast(image, tf.float32), label


AUTOTUNE = tf.data.experimental.AUTOTUNE
BATCH_SIZE = 500


def augment(image, label):
    return image, label


# Setup for train dataset
ds_train = ds_train.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_train = ds_train.cache()
ds_train = ds_train.shuffle(ds_info.splits["train"].num_examples)
ds_train = ds_train.map(augment)
ds_train = ds_train.batch(BATCH_SIZE)
ds_train = ds_train.prefetch(AUTOTUNE)

# Setup for test dataset (note: chained off ds_test, not ds_train)
ds_test = ds_test.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_test = ds_test.batch(BATCH_SIZE)
ds_test = ds_test.prefetch(AUTOTUNE)

# MNIST digit labels (the dataset loaded above is mnist, not cifar10)
class_names = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]

x_batch, y_batch = next(iter(ds_train))
plot_to_projector(x_batch, x_batch, y_batch, class_names, log_dir="proj")
167
ML/TensorFlow/Basics/tutorial17-tensorboard/utils.py
Normal file
@@ -0,0 +1,167 @@
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
import numpy as np
import io
import sklearn.metrics
from tensorboard.plugins import projector
import cv2
import os
import shutil


# Stolen from the tensorflow official guide:
# https://www.tensorflow.org/tensorboard/image_summaries
def plot_to_image(figure):
    """Converts the matplotlib plot specified by 'figure' to a PNG image and
    returns it. The supplied figure is closed and inaccessible after this call."""

    # Save the plot to a PNG in memory.
    buf = io.BytesIO()
    plt.savefig(buf, format="png")

    # Closing the figure prevents it from being displayed directly inside
    # the notebook.
    plt.close(figure)
    buf.seek(0)

    # Convert PNG buffer to TF image
    image = tf.image.decode_png(buf.getvalue(), channels=4)

    # Add the batch dimension
    image = tf.expand_dims(image, 0)
    return image


def image_grid(data, labels, class_names):
    # Data should be in (BATCH_SIZE, H, W, C)
    assert data.ndim == 4

    figure = plt.figure(figsize=(10, 10))
    num_images = data.shape[0]
    size = int(np.ceil(np.sqrt(num_images)))

    for i in range(data.shape[0]):
        plt.subplot(size, size, i + 1, title=class_names[labels[i]])
        plt.xticks([])
        plt.yticks([])
        plt.grid(False)

        # if grayscale
        if data.shape[3] == 1:
            plt.imshow(data[i], cmap=plt.cm.binary)
        else:
            plt.imshow(data[i])

    return figure


def get_confusion_matrix(y_labels, logits, class_names):
    preds = np.argmax(logits, axis=1)
    cm = sklearn.metrics.confusion_matrix(
        y_labels, preds, labels=np.arange(len(class_names)),
    )

    return cm


def plot_confusion_matrix(cm, class_names):
    size = len(class_names)
    figure = plt.figure(figsize=(size, size))
    plt.imshow(cm, interpolation="nearest", cmap=plt.cm.Blues)
    plt.title("Confusion Matrix")

    indices = np.arange(len(class_names))
    plt.xticks(indices, class_names, rotation=45)
    plt.yticks(indices, class_names)

    # Normalize Confusion Matrix
    cm = np.around(cm.astype("float") / cm.sum(axis=1)[:, np.newaxis], decimals=3)

    threshold = cm.max() / 2.0
    for i in range(size):
        for j in range(size):
            color = "white" if cm[i, j] > threshold else "black"
            # rows of cm are true labels (y-axis), columns are predictions (x-axis)
            plt.text(
                j, i, cm[i, j], horizontalalignment="center", color=color,
            )

    plt.tight_layout()
    plt.xlabel("Predicted label")
    plt.ylabel("True label")

    cm_image = plot_to_image(figure)
    return cm_image


# Stolen from:
# https://gist.github.com/AndrewBMartin/ab06f4708124ccb4cacc4b158c3cef12
def create_sprite(data):
    """
    Tile images into a sprite image.
    Add any necessary padding.
    """

    # For B&W or greyscale images
    if len(data.shape) == 3:
        data = np.tile(data[..., np.newaxis], (1, 1, 1, 3))

    n = int(np.ceil(np.sqrt(data.shape[0])))
    padding = ((0, n ** 2 - data.shape[0]), (0, 0), (0, 0), (0, 0))
    data = np.pad(data, padding, mode="constant", constant_values=0)

    # Tile images into sprite
    data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3, 4))
    # print(data.shape) => (n, image_height, n, image_width, 3)

    data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
    # print(data.shape) => (n * image_height, n * image_width, 3)
    return data


def plot_to_projector(
    x,
    feature_vector,
    y,
    class_names,
    log_dir="default_log_dir",
    meta_file="metadata.tsv",
):
    assert x.ndim == 4  # (BATCH, H, W, C)

    if os.path.isdir(log_dir):
        shutil.rmtree(log_dir)

    # Create a new clean fresh folder :)
    os.mkdir(log_dir)

    SPRITES_FILE = os.path.join(log_dir, "sprites.png")
    sprite = create_sprite(x)
    cv2.imwrite(SPRITES_FILE, sprite)

    # Generate label names
    labels = [class_names[y[i]] for i in range(int(y.shape[0]))]

    with open(os.path.join(log_dir, meta_file), "w") as f:
        for label in labels:
            f.write("{}\n".format(label))

    if feature_vector.ndim != 2:
        print(
            "NOTE: Feature vector is not of form (BATCH, FEATURES),"
            " reshaping to try and get it to this form!"
        )
        feature_vector = tf.reshape(feature_vector, [feature_vector.shape[0], -1])

    feature_vector = tf.Variable(feature_vector)
    checkpoint = tf.train.Checkpoint(embedding=feature_vector)
    checkpoint.save(os.path.join(log_dir, "embeddings.ckpt"))

    # Set up config
    config = projector.ProjectorConfig()
    embedding = config.embeddings.add()
    embedding.tensor_name = "embedding/.ATTRIBUTES/VARIABLE_VALUE"
    embedding.metadata_path = meta_file
    embedding.sprite.image_path = "sprites.png"
    embedding.sprite.single_image_dim.extend((x.shape[1], x.shape[2]))
    projector.visualize_embeddings(log_dir, config)
@@ -0,0 +1,142 @@
# Imports needed
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.preprocessing.image import ImageDataGenerator

img_height = 28
img_width = 28
batch_size = 2

model = keras.Sequential(
    [
        layers.Input((28, 28, 1)),
        layers.Conv2D(16, 3, padding="same"),
        layers.Conv2D(32, 3, padding="same"),
        layers.MaxPooling2D(),
        layers.Flatten(),
        layers.Dense(10),
    ]
)

# METHOD 1
# ==================================================== #
#              Using dataset_from_directory            #
# ==================================================== #
ds_train = tf.keras.preprocessing.image_dataset_from_directory(
    "data/mnist_subfolders/",
    labels="inferred",
    label_mode="int",  # also: categorical, binary
    # class_names=['0', '1', '2', '3', ...]
    color_mode="grayscale",
    batch_size=batch_size,
    image_size=(img_height, img_width),  # reshape if not in this size
    shuffle=True,
    seed=123,
    validation_split=0.1,
    subset="training",
)

ds_validation = tf.keras.preprocessing.image_dataset_from_directory(
    "data/mnist_subfolders/",
    labels="inferred",
    label_mode="int",  # also: categorical, binary
    # class_names=['0', '1', '2', '3', ...]
    color_mode="grayscale",
    batch_size=batch_size,
    image_size=(img_height, img_width),  # reshape if not in this size
    shuffle=True,
    seed=123,
    validation_split=0.1,
    subset="validation",
)


def augment(x, y):
    image = tf.image.random_brightness(x, max_delta=0.05)
    return image, y


ds_train = ds_train.map(augment)

# Custom Loops
for epoch in range(10):
    for x, y in ds_train:
        # train here
        pass


model.compile(
    optimizer=keras.optimizers.Adam(),
    loss=[keras.losses.SparseCategoricalCrossentropy(from_logits=True)],
    metrics=["accuracy"],
)

model.fit(ds_train, epochs=10, verbose=2)


# METHOD 2
# ================================================================== #
#              ImageDataGenerator and flow_from_directory            #
# ================================================================== #

datagen = ImageDataGenerator(
    rescale=1.0 / 255,
    rotation_range=5,
    zoom_range=(0.95, 0.95),
    horizontal_flip=False,
    vertical_flip=False,
    data_format="channels_last",
    validation_split=0.0,
    dtype=tf.float32,
)

train_generator = datagen.flow_from_directory(
    "data/mnist_subfolders/",
    target_size=(img_height, img_width),
    batch_size=batch_size,
    color_mode="grayscale",
    class_mode="sparse",
    shuffle=True,
    subset="training",
    seed=123,
)


def training():
    pass


# Custom Loops (the generator loops forever, so we break manually)
for epoch in range(10):
    num_batches = 0

    for x, y in train_generator:
        num_batches += 1

        # do training
        training()

        if num_batches == 25:  # len(train_dataset)/batch_size
            break

# Redo model.compile to reset the optimizer state
model.compile(
    optimizer=keras.optimizers.Adam(),
    loss=[keras.losses.SparseCategoricalCrossentropy(from_logits=True)],
    metrics=["accuracy"],
)

# using model.fit (note steps_per_epoch)
model.fit(
    train_generator,
    epochs=10,
    steps_per_epoch=25,
    verbose=2,
    # if we had a validation generator:
    # validation_data=validation_generator,
    # validation_steps=len(validation_set)/batch_size,
)
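
# Sketch of the validation generator hinted at above; this assumes
# validation_split in the ImageDataGenerator is raised above 0.0
# (e.g. validation_split=0.1), otherwise the "validation" subset is empty:
validation_generator = datagen.flow_from_directory(
    "data/mnist_subfolders/",
    target_size=(img_height, img_width),
    batch_size=batch_size,
    color_mode="grayscale",
    class_mode="sparse",
    shuffle=True,
    subset="validation",
    seed=123,
)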
@@ -0,0 +1,52 @@
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import tensorflow as tf
import pandas as pd
from tensorflow import keras
from tensorflow.keras import layers

directory = "data/mnist_images_csv/"
df = pd.read_csv(directory + "train.csv")

file_paths = df["file_name"].values
labels = df["label"].values
ds_train = tf.data.Dataset.from_tensor_slices((file_paths, labels))


def read_image(image_file, label):
    image = tf.io.read_file(directory + image_file)
    image = tf.image.decode_image(image, channels=1, dtype=tf.float32)
    return image, label


def augment(image, label):
    # data augmentation here
    return image, label


ds_train = ds_train.map(read_image).map(augment).batch(2)

for epoch in range(10):
    for x, y in ds_train:
        # train here
        pass

model = keras.Sequential(
    [
        layers.Input((28, 28, 1)),
        layers.Conv2D(16, 3, padding="same"),
        layers.Conv2D(32, 3, padding="same"),
        layers.MaxPooling2D(),
        layers.Flatten(),
        layers.Dense(10),
    ]
)

model.compile(
    optimizer=keras.optimizers.Adam(),
    loss=[keras.losses.SparseCategoricalCrossentropy(from_logits=True)],
    metrics=["accuracy"],
)

model.fit(ds_train, epochs=10, verbose=2)
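
# A fuller version of the same input pipeline (sketch; the shuffle buffer size
# is an illustrative choice, and AUTOTUNE usage follows the earlier tutorials):
AUTOTUNE = tf.data.experimental.AUTOTUNE
ds_train = (
    tf.data.Dataset.from_tensor_slices((file_paths, labels))
    .map(read_image, num_parallel_calls=AUTOTUNE)
    .map(augment, num_parallel_calls=AUTOTUNE)
    .shuffle(buffer_size=len(file_paths))
    .batch(2)
    .prefetch(AUTOTUNE)
)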
@@ -0,0 +1,46 @@
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import tensorflow as tf
import pandas as pd
from tensorflow import keras
from tensorflow.keras import layers
import pathlib  # pathlib is in the standard library

batch_size = 2
img_height = 28
img_width = 28

directory = "data/mnist_images_only/"
ds_train = tf.data.Dataset.list_files(str(pathlib.Path(directory + "*.jpg")))


def process_path(file_path):
    image = tf.io.read_file(file_path)
    image = tf.image.decode_jpeg(image, channels=1)
    # the label is encoded in the file name, e.g. "3_2.jpg" -> label 3;
    # note that "\\" assumes Windows path separators (use "/" elsewhere)
    label = tf.strings.split(file_path, "\\")
    label = tf.strings.substr(label, pos=0, len=1)[2]
    label = tf.strings.to_number(label, out_type=tf.int64)
    return image, label


ds_train = ds_train.map(process_path).batch(batch_size)

model = keras.Sequential(
    [
        layers.Input((28, 28, 1)),
        layers.Conv2D(16, 3, padding="same"),
        layers.Conv2D(32, 3, padding="same"),
        layers.MaxPooling2D(),
        layers.Flatten(),
        layers.Dense(10),
    ]
)

model.compile(
    optimizer=keras.optimizers.Adam(),
    loss=[keras.losses.SparseCategoricalCrossentropy(from_logits=True)],
    metrics=["accuracy"],
)

model.fit(ds_train, epochs=10, verbose=2)
(50 binary image files added, 427-646 bytes each)
@@ -0,0 +1,52 @@
file_name,label
0_1.jpg, 0
0_2.jpg, 0
0_3.jpg, 0
0_4.jpg, 0
0_5.jpg, 0
1_1.jpg, 1
1_2.jpg, 1
1_3.jpg, 1
1_4.jpg, 1
1_5.jpg, 1
2_1.jpg, 2
2_2.jpg, 2
2_3.jpg, 2
2_4.jpg, 2
2_5.jpg, 2
3_1.jpg, 3
3_2.jpg, 3
3_3.jpg, 3
3_4.jpg, 3
3_5.jpg, 3
4_1.jpg, 4
4_2.jpg, 4
4_3.jpg, 4
4_4.jpg, 4
4_5.jpg, 4
5_1.jpg, 5
5_2.jpg, 5
5_3.jpg, 5
5_4.jpg, 5
5_5.jpg, 5
6_1.jpg, 6
6_2.jpg, 6
6_3.jpg, 6
6_4.jpg, 6
6_5.jpg, 6
7_1.jpg, 7
7_2.jpg, 7
7_3.jpg, 7
7_4.jpg, 7
7_5.jpg, 7
8_1.jpg, 8
8_2.jpg, 8
8_3.jpg, 8
8_4.jpg, 8
8_5.jpg, 8
9_1.jpg, 9
9_2.jpg, 9
9_3.jpg, 9
9_4.jpg, 9
9_5.jpg, 9
(30 binary image files added, 427-646 bytes each)