Initial commit

Aladdin Persson
2021-01-30 21:49:15 +01:00
commit 65b8c80495
432 changed files with 1290844 additions and 0 deletions


@@ -0,0 +1,107 @@
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import io
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import tensorflow_datasets as tfds
from tensorflow import keras
from tensorflow.keras import layers

# Make sure we don't get any GPU errors
physical_devices = tf.config.list_physical_devices("GPU")
if physical_devices:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)

(ds_train, ds_test), ds_info = tfds.load(
    "cifar10",
    split=["train", "test"],
    shuffle_files=True,
    as_supervised=True,
    with_info=True,
)


def normalize_img(image, label):
    """Normalizes images to the [0, 1] range"""
    return tf.cast(image, tf.float32) / 255.0, label


def augment(image, label):
    # With 10% probability, convert to grayscale (tiled back to 3 channels)
    if tf.random.uniform((), minval=0, maxval=1) < 0.1:
        image = tf.tile(tf.image.rgb_to_grayscale(image), [1, 1, 3])

    image = tf.image.random_brightness(image, max_delta=0.1)
    image = tf.image.random_flip_left_right(image)
    return image, label


AUTOTUNE = tf.data.experimental.AUTOTUNE
BATCH_SIZE = 32

# Setup for train dataset
ds_train = ds_train.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_train = ds_train.cache()
ds_train = ds_train.shuffle(ds_info.splits["train"].num_examples)
ds_train = ds_train.map(augment)
ds_train = ds_train.batch(BATCH_SIZE)
ds_train = ds_train.prefetch(AUTOTUNE)

# Setup for test dataset
ds_test = ds_test.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_test = ds_test.batch(BATCH_SIZE)
ds_test = ds_test.prefetch(AUTOTUNE)

class_names = [
    "Airplane",
    "Automobile",
    "Bird",
    "Cat",
    "Deer",
    "Dog",
    "Frog",
    "Horse",
    "Ship",
    "Truck",
]


def get_model():
    model = keras.Sequential(
        [
            layers.Input((32, 32, 3)),
            layers.Conv2D(8, 3, padding="same", activation="relu"),
            layers.Conv2D(16, 3, padding="same", activation="relu"),
            layers.MaxPooling2D((2, 2)),
            layers.Flatten(),
            layers.Dense(64, activation="relu"),
            layers.Dropout(0.1),
            layers.Dense(10),
        ]
    )
    return model


model = get_model()

model.compile(
    optimizer=keras.optimizers.Adam(learning_rate=0.001),
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=["accuracy"],
)

tensorboard_callback = keras.callbacks.TensorBoard(
    log_dir="tb_callback_dir", histogram_freq=1,
)

model.fit(
    ds_train,
    epochs=5,
    validation_data=ds_test,
    callbacks=[tensorboard_callback],
    verbose=2,
)
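
To inspect what the callback wrote (per-epoch scalars plus weight histograms, since histogram_freq=1), point the tensorboard CLI that ships with TensorFlow at the log directory used above:

    tensorboard --logdir tb_callback_dir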


@@ -0,0 +1,144 @@
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import io
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import tensorflow_datasets as tfds
from tensorflow import keras
from tensorflow.keras import layers

# Make sure we don't get any GPU errors
physical_devices = tf.config.list_physical_devices("GPU")
if physical_devices:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)

(ds_train, ds_test), ds_info = tfds.load(
    "cifar10",
    split=["train", "test"],
    shuffle_files=True,
    as_supervised=True,
    with_info=True,
)


def normalize_img(image, label):
    """Normalizes images to the [0, 1] range"""
    return tf.cast(image, tf.float32) / 255.0, label


AUTOTUNE = tf.data.experimental.AUTOTUNE
BATCH_SIZE = 32


def augment(image, label):
    # With 10% probability, convert to grayscale (tiled back to 3 channels)
    if tf.random.uniform((), minval=0, maxval=1) < 0.1:
        image = tf.tile(tf.image.rgb_to_grayscale(image), [1, 1, 3])

    image = tf.image.random_brightness(image, max_delta=0.1)
    image = tf.image.random_flip_left_right(image)
    return image, label


# Setup for train dataset
ds_train = ds_train.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_train = ds_train.cache()
ds_train = ds_train.shuffle(ds_info.splits["train"].num_examples)
ds_train = ds_train.map(augment)
ds_train = ds_train.batch(BATCH_SIZE)
ds_train = ds_train.prefetch(AUTOTUNE)

# Setup for test dataset
ds_test = ds_test.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_test = ds_test.batch(BATCH_SIZE)
ds_test = ds_test.prefetch(AUTOTUNE)

class_names = [
    "Airplane",
    "Automobile",
    "Bird",
    "Cat",
    "Deer",
    "Dog",
    "Frog",
    "Horse",
    "Ship",
    "Truck",
]


def get_model():
    model = keras.Sequential(
        [
            layers.Input((32, 32, 3)),
            layers.Conv2D(8, 3, padding="same", activation="relu"),
            layers.Conv2D(16, 3, padding="same", activation="relu"),
            layers.MaxPooling2D((2, 2)),
            layers.Flatten(),
            layers.Dense(64, activation="relu"),
            layers.Dropout(0.1),
            layers.Dense(10),
        ]
    )
    return model


num_epochs = 1
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
acc_metric = keras.metrics.SparseCategoricalAccuracy()

# Train a fresh model for each learning rate, writing every run to its own
# log subdirectory so the runs show up separately in TensorBoard
for lr in [1e-1, 1e-2, 1e-3, 1e-4, 1e-5]:
    train_step = test_step = 0
    train_writer = tf.summary.create_file_writer("logs/train/" + str(lr))
    test_writer = tf.summary.create_file_writer("logs/test/" + str(lr))
    model = get_model()
    optimizer = keras.optimizers.Adam(learning_rate=lr)

    for epoch in range(num_epochs):
        # Iterate through training set
        for batch_idx, (x, y) in enumerate(ds_train):
            with tf.GradientTape() as tape:
                y_pred = model(x, training=True)
                loss = loss_fn(y, y_pred)

            gradients = tape.gradient(loss, model.trainable_weights)
            optimizer.apply_gradients(zip(gradients, model.trainable_weights))
            acc_metric.update_state(y, y_pred)

            with train_writer.as_default():
                tf.summary.scalar("Loss", loss, step=train_step)
                tf.summary.scalar(
                    "Accuracy", acc_metric.result(), step=train_step,
                )
                train_step += 1

        # Reset accuracy in between epochs (and between train and test)
        acc_metric.reset_states()

        # Iterate through test set
        for batch_idx, (x, y) in enumerate(ds_test):
            y_pred = model(x, training=False)
            loss = loss_fn(y, y_pred)
            acc_metric.update_state(y, y_pred)

            with test_writer.as_default():
                tf.summary.scalar("Loss", loss, step=test_step)
                tf.summary.scalar(
                    "Accuracy", acc_metric.result(), step=test_step,
                )
                test_step += 1

        acc_metric.reset_states()
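
Since every learning rate writes to its own logs/train/<lr> and logs/test/<lr> subdirectory, launching TensorBoard on the parent directory overlays the five runs for direct comparison:

    tensorboard --logdir logs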


@@ -0,0 +1,112 @@
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import io
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import tensorflow_datasets as tfds
from tensorflow import keras
from tensorflow.keras import layers
from utils import plot_to_image, image_grid

# Make sure we don't get any GPU errors
physical_devices = tf.config.list_physical_devices("GPU")
if physical_devices:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)

(ds_train, ds_test), ds_info = tfds.load(
    "cifar10",
    split=["train", "test"],
    shuffle_files=True,
    as_supervised=True,
    with_info=True,
)


def normalize_img(image, label):
    """Normalizes images to the [0, 1] range"""
    return tf.cast(image, tf.float32) / 255.0, label


AUTOTUNE = tf.data.experimental.AUTOTUNE
BATCH_SIZE = 32


def augment(image, label):
    # With 10% probability, convert to grayscale (tiled back to 3 channels)
    if tf.random.uniform((), minval=0, maxval=1) < 0.1:
        image = tf.tile(tf.image.rgb_to_grayscale(image), [1, 1, 3])

    image = tf.image.random_brightness(image, max_delta=0.1)
    image = tf.image.random_flip_left_right(image)

    # matplotlib wants [0,1] values
    image = tf.clip_by_value(image, clip_value_min=0, clip_value_max=1)
    return image, label


# Setup for train dataset
ds_train = ds_train.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_train = ds_train.cache()
ds_train = ds_train.shuffle(ds_info.splits["train"].num_examples)
ds_train = ds_train.map(augment)
ds_train = ds_train.batch(BATCH_SIZE)
ds_train = ds_train.prefetch(AUTOTUNE)

# Setup for test dataset
ds_test = ds_test.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_test = ds_test.batch(BATCH_SIZE)
ds_test = ds_test.prefetch(AUTOTUNE)

class_names = [
    "Airplane",
    "Automobile",
    "Bird",
    "Cat",
    "Deer",
    "Dog",
    "Frog",
    "Horse",
    "Ship",
    "Truck",
]


def get_model():
    model = keras.Sequential(
        [
            layers.Input((32, 32, 3)),
            layers.Conv2D(8, 3, padding="same", activation="relu"),
            layers.Conv2D(16, 3, padding="same", activation="relu"),
            layers.MaxPooling2D((2, 2)),
            layers.Flatten(),
            layers.Dense(64, activation="relu"),
            layers.Dropout(0.1),
            layers.Dense(10),
        ]
    )
    return model


model = get_model()
num_epochs = 1
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = keras.optimizers.Adam(learning_rate=0.001)
acc_metric = keras.metrics.SparseCategoricalAccuracy()
writer = tf.summary.create_file_writer("logs/train/")
step = 0

# Log a labeled grid of (augmented) training images for every batch
for epoch in range(num_epochs):
    for batch_idx, (x, y) in enumerate(ds_train):
        figure = image_grid(x, y, class_names)

        with writer.as_default():
            tf.summary.image(
                "Visualize Images", plot_to_image(figure), step=step,
            )
            step += 1
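
The grids logged with tf.summary.image appear under TensorBoard's Images tab, one entry per step:

    tensorboard --logdir logs/train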


@@ -0,0 +1,124 @@
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import io
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import tensorflow_datasets as tfds
from tensorflow import keras
from tensorflow.keras import layers
from utils import get_confusion_matrix, plot_confusion_matrix

# Make sure we don't get any GPU errors
physical_devices = tf.config.list_physical_devices("GPU")
if physical_devices:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)

(ds_train, ds_test), ds_info = tfds.load(
    "cifar10",
    split=["train", "test"],
    shuffle_files=True,
    as_supervised=True,
    with_info=True,
)


def normalize_img(image, label):
    """Normalizes images to the [0, 1] range"""
    return tf.cast(image, tf.float32) / 255.0, label


AUTOTUNE = tf.data.experimental.AUTOTUNE
BATCH_SIZE = 32


def augment(image, label):
    # With 10% probability, convert to grayscale (tiled back to 3 channels)
    if tf.random.uniform((), minval=0, maxval=1) < 0.1:
        image = tf.tile(tf.image.rgb_to_grayscale(image), [1, 1, 3])

    image = tf.image.random_brightness(image, max_delta=0.1)
    image = tf.image.random_flip_left_right(image)
    return image, label


# Setup for train dataset
ds_train = ds_train.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_train = ds_train.cache()
ds_train = ds_train.shuffle(ds_info.splits["train"].num_examples)
ds_train = ds_train.map(augment)
ds_train = ds_train.batch(BATCH_SIZE)
ds_train = ds_train.prefetch(AUTOTUNE)

# Setup for test dataset
ds_test = ds_test.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_test = ds_test.batch(BATCH_SIZE)
ds_test = ds_test.prefetch(AUTOTUNE)

class_names = [
    "Airplane",
    "Automobile",
    "Bird",
    "Cat",
    "Deer",
    "Dog",
    "Frog",
    "Horse",
    "Ship",
    "Truck",
]


def get_model():
    model = keras.Sequential(
        [
            layers.Input((32, 32, 3)),
            layers.Conv2D(8, 3, padding="same", activation="relu"),
            layers.Conv2D(16, 3, padding="same", activation="relu"),
            layers.MaxPooling2D((2, 2)),
            layers.Flatten(),
            layers.Dense(64, activation="relu"),
            layers.Dropout(0.1),
            layers.Dense(10),
        ]
    )
    return model


model = get_model()
num_epochs = 5
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = keras.optimizers.Adam(learning_rate=0.001)
acc_metric = keras.metrics.SparseCategoricalAccuracy()
train_writer = tf.summary.create_file_writer("logs/train/")
test_writer = tf.summary.create_file_writer("logs/test/")
train_step = test_step = 0

for epoch in range(num_epochs):
    confusion = np.zeros((len(class_names), len(class_names)))

    # Iterate through training set
    for batch_idx, (x, y) in enumerate(ds_train):
        with tf.GradientTape() as tape:
            y_pred = model(x, training=True)
            loss = loss_fn(y, y_pred)

        gradients = tape.gradient(loss, model.trainable_weights)
        optimizer.apply_gradients(zip(gradients, model.trainable_weights))
        acc_metric.update_state(y, y_pred)
        confusion += get_confusion_matrix(y, y_pred, class_names)

    with train_writer.as_default():
        tf.summary.image(
            "Confusion Matrix",
            # batch_idx is zero-based, so the number of batches is batch_idx + 1
            plot_confusion_matrix(confusion / (batch_idx + 1), class_names),
            step=epoch,
        )

    # Reset accuracy in between epochs (and between train and test)
    acc_metric.reset_states()
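
One confusion-matrix plot is written per epoch (step=epoch), so the step slider in TensorBoard's Images tab scrubs through how the matrix evolves over training:

    tensorboard --logdir logs/train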


@@ -0,0 +1,35 @@
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import io
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import tensorflow_datasets as tfds
from tensorflow import keras
from tensorflow.keras import layers

# Make sure we don't get any GPU errors
physical_devices = tf.config.list_physical_devices("GPU")
if physical_devices:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)

writer = tf.summary.create_file_writer("logs/graph_vis")


@tf.function
def my_func(x, y):
    return tf.nn.relu(tf.matmul(x, y))


x = tf.random.uniform((3, 3))
y = tf.random.uniform((3, 3))

# Trace the graph (and profile) of the first call to my_func
tf.summary.trace_on(graph=True, profiler=True)
out = my_func(x, y)

with writer.as_default():
    tf.summary.trace_export(
        name="function_trace", step=0, profiler_outdir="logs/graph_vis/"
    )
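
The exported trace shows up under TensorBoard's Graphs tab (and, if the profiler plugin is available, under the Profile tab as well):

    tensorboard --logdir logs/graph_vis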


@@ -0,0 +1,137 @@
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import io
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import tensorflow_datasets as tfds
from tensorboard.plugins.hparams import api as hp
from tensorflow import keras
from tensorflow.keras import layers

# Make sure we don't get any GPU errors
physical_devices = tf.config.list_physical_devices("GPU")
if physical_devices:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)

(ds_train, ds_test), ds_info = tfds.load(
    "cifar10",
    split=["train", "test"],
    shuffle_files=True,
    as_supervised=True,
    with_info=True,
)


def normalize_img(image, label):
    """Normalizes images to the [0, 1] range"""
    return tf.cast(image, tf.float32) / 255.0, label


AUTOTUNE = tf.data.experimental.AUTOTUNE
BATCH_SIZE = 32


def augment(image, label):
    # With 10% probability, convert to grayscale (tiled back to 3 channels)
    if tf.random.uniform((), minval=0, maxval=1) < 0.1:
        image = tf.tile(tf.image.rgb_to_grayscale(image), [1, 1, 3])

    image = tf.image.random_brightness(image, max_delta=0.1)
    image = tf.image.random_flip_left_right(image)
    return image, label


# Setup for train dataset
ds_train = ds_train.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_train = ds_train.cache()
ds_train = ds_train.shuffle(ds_info.splits["train"].num_examples)
ds_train = ds_train.map(augment)
ds_train = ds_train.batch(BATCH_SIZE)
ds_train = ds_train.prefetch(AUTOTUNE)

# Setup for test dataset
ds_test = ds_test.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_test = ds_test.batch(BATCH_SIZE)
ds_test = ds_test.prefetch(AUTOTUNE)

class_names = [
    "Airplane",
    "Automobile",
    "Bird",
    "Cat",
    "Deer",
    "Dog",
    "Frog",
    "Horse",
    "Ship",
    "Truck",
]


def train_model_one_epoch(hparams):
    units = hparams[HP_NUM_UNITS]
    drop_rate = hparams[HP_DROPOUT]
    learning_rate = hparams[HP_LR]
    optimizer = keras.optimizers.Adam(learning_rate=learning_rate)

    model = keras.Sequential(
        [
            layers.Input((32, 32, 3)),
            layers.Conv2D(8, 3, padding="same", activation="relu"),
            layers.Conv2D(16, 3, padding="same", activation="relu"),
            layers.MaxPooling2D((2, 2)),
            layers.Flatten(),
            layers.Dense(units, activation="relu"),
            layers.Dropout(drop_rate),
            layers.Dense(10),
        ]
    )

    for batch_idx, (x, y) in enumerate(ds_train):
        with tf.GradientTape() as tape:
            y_pred = model(x, training=True)
            loss = loss_fn(y, y_pred)

        gradients = tape.gradient(loss, model.trainable_weights)
        optimizer.apply_gradients(zip(gradients, model.trainable_weights))
        acc_metric.update_state(y, y_pred)

    # write to TB
    run_dir = (
        "logs/train/"
        + str(units)
        + "units_"
        + str(drop_rate)
        + "dropout_"
        + str(learning_rate)
        + "learning_rate"
    )

    with tf.summary.create_file_writer(run_dir).as_default():
        hp.hparams(hparams)
        accuracy = acc_metric.result()
        tf.summary.scalar("accuracy", accuracy, step=1)

    acc_metric.reset_states()


loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
acc_metric = keras.metrics.SparseCategoricalAccuracy()

HP_NUM_UNITS = hp.HParam("num_units", hp.Discrete([32, 64, 128]))
HP_DROPOUT = hp.HParam("dropout", hp.Discrete([0.1, 0.2, 0.3, 0.5]))
HP_LR = hp.HParam("learning_rate", hp.Discrete([1e-3, 1e-4, 1e-5]))

# Grid search over every hyperparameter combination, one epoch each
for lr in HP_LR.domain.values:
    for units in HP_NUM_UNITS.domain.values:
        for rate in HP_DROPOUT.domain.values:
            hparams = {
                HP_LR: lr,
                HP_NUM_UNITS: units,
                HP_DROPOUT: rate,
            }
            train_model_one_epoch(hparams)
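
Because hp.hparams() is logged once per run, TensorBoard's HParams tab tabulates all 3 x 4 x 3 = 36 unit/dropout/learning-rate combinations against their accuracy:

    tensorboard --logdir logs/train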


@@ -0,0 +1,69 @@
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import io
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import tensorflow_datasets as tfds
from tensorflow import keras
from tensorflow.keras import layers
from utils import plot_to_projector

# Make sure we don't get any GPU errors
physical_devices = tf.config.list_physical_devices("GPU")
if physical_devices:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)

(ds_train, ds_test), ds_info = tfds.load(
    "mnist",
    split=["train", "test"],
    shuffle_files=True,
    as_supervised=True,
    with_info=True,
)


def normalize_img(image, label):
    """Casts images to float32 (values stay in [0, 255] for the sprite image)"""
    return tf.cast(image, tf.float32), label


AUTOTUNE = tf.data.experimental.AUTOTUNE
BATCH_SIZE = 500


def augment(image, label):
    # No augmentation here; kept as a placeholder to mirror the other scripts
    return image, label


# Setup for train dataset
ds_train = ds_train.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_train = ds_train.cache()
ds_train = ds_train.shuffle(ds_info.splits["train"].num_examples)
ds_train = ds_train.map(augment)
ds_train = ds_train.batch(BATCH_SIZE)
ds_train = ds_train.prefetch(AUTOTUNE)

# Setup for test dataset
ds_test = ds_test.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_test = ds_test.batch(BATCH_SIZE)
ds_test = ds_test.prefetch(AUTOTUNE)

# MNIST labels are the digits themselves
class_names = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]

# Embed one batch of raw images (the images double as their own feature vectors)
x_batch, y_batch = next(iter(ds_train))
plot_to_projector(x_batch, x_batch, y_batch, class_names, log_dir="proj")
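
The checkpoint, sprite image, and metadata file written by plot_to_projector drive TensorBoard's Projector tab, which can run PCA or t-SNE on the embedded batch:

    tensorboard --logdir proj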


@@ -0,0 +1,167 @@
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
import numpy as np
import io
import sklearn.metrics
from tensorboard.plugins import projector
import cv2
import os
import shutil


# Stolen from tensorflow official guide:
# https://www.tensorflow.org/tensorboard/image_summaries
def plot_to_image(figure):
    """Converts the matplotlib plot specified by 'figure' to a PNG image and
    returns it. The supplied figure is closed and inaccessible after this call."""
    # Save the plot to a PNG in memory.
    buf = io.BytesIO()
    plt.savefig(buf, format="png")

    # Closing the figure prevents it from being displayed directly inside
    # the notebook.
    plt.close(figure)
    buf.seek(0)

    # Convert PNG buffer to TF image
    image = tf.image.decode_png(buf.getvalue(), channels=4)

    # Add the batch dimension
    image = tf.expand_dims(image, 0)
    return image


def image_grid(data, labels, class_names):
    # Data should be in (BATCH_SIZE, H, W, C)
    assert data.ndim == 4

    figure = plt.figure(figsize=(10, 10))
    num_images = data.shape[0]
    size = int(np.ceil(np.sqrt(num_images)))

    for i in range(num_images):
        plt.subplot(size, size, i + 1, title=class_names[int(labels[i])])
        plt.xticks([])
        plt.yticks([])
        plt.grid(False)

        # if grayscale
        if data.shape[3] == 1:
            plt.imshow(data[i], cmap=plt.cm.binary)
        else:
            plt.imshow(data[i])

    return figure


def get_confusion_matrix(y_labels, logits, class_names):
    preds = np.argmax(logits, axis=1)
    cm = sklearn.metrics.confusion_matrix(
        y_labels, preds, labels=np.arange(len(class_names)),
    )
    return cm


def plot_confusion_matrix(cm, class_names):
    size = len(class_names)
    figure = plt.figure(figsize=(size, size))
    plt.imshow(cm, interpolation="nearest", cmap=plt.cm.Blues)
    plt.title("Confusion Matrix")

    indices = np.arange(len(class_names))
    plt.xticks(indices, class_names, rotation=45)
    plt.yticks(indices, class_names)

    # Normalize Confusion Matrix
    cm = np.around(cm.astype("float") / cm.sum(axis=1)[:, np.newaxis], decimals=3,)

    threshold = cm.max() / 2.0
    for i in range(size):
        for j in range(size):
            color = "white" if cm[i, j] > threshold else "black"
            # row i is the true class (y axis), column j the prediction (x axis)
            plt.text(
                j, i, cm[i, j], horizontalalignment="center", color=color,
            )

    plt.tight_layout()
    plt.xlabel("Predicted label")
    plt.ylabel("True label")

    cm_image = plot_to_image(figure)
    return cm_image


# Stolen from:
# https://gist.github.com/AndrewBMartin/ab06f4708124ccb4cacc4b158c3cef12
def create_sprite(data):
    """
    Tile images into sprite image.
    Add any necessary padding
    """
    # For B&W or greyscale images
    if len(data.shape) == 3:
        data = np.tile(data[..., np.newaxis], (1, 1, 1, 3))

    n = int(np.ceil(np.sqrt(data.shape[0])))
    padding = ((0, n ** 2 - data.shape[0]), (0, 0), (0, 0), (0, 0))
    data = np.pad(data, padding, mode="constant", constant_values=0)

    # Tile images into sprite
    data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3, 4))
    # print(data.shape) => (n, image_height, n, image_width, 3)

    data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
    # print(data.shape) => (n * image_height, n * image_width, 3)
    return data


def plot_to_projector(
    x,
    feature_vector,
    y,
    class_names,
    log_dir="default_log_dir",
    meta_file="metadata.tsv",
):
    assert x.ndim == 4  # (BATCH, H, W, C)

    # Create a new clean fresh folder :)
    if os.path.isdir(log_dir):
        shutil.rmtree(log_dir)
    os.mkdir(log_dir)

    SPRITES_FILE = os.path.join(log_dir, "sprites.png")
    sprite = create_sprite(x)
    cv2.imwrite(SPRITES_FILE, sprite)

    # Generate label names
    labels = [class_names[int(y[i])] for i in range(int(y.shape[0]))]

    with open(os.path.join(log_dir, meta_file), "w") as f:
        for label in labels:
            f.write("{}\n".format(label))

    if feature_vector.ndim != 2:
        print(
            "NOTE: Feature vector is not of form (BATCH, FEATURES)"
            " reshaping to try and get it to this form!"
        )
        feature_vector = tf.reshape(feature_vector, [feature_vector.shape[0], -1])

    feature_vector = tf.Variable(feature_vector)
    checkpoint = tf.train.Checkpoint(embedding=feature_vector)
    checkpoint.save(os.path.join(log_dir, "embeddings.ckpt"))

    # Set up config
    config = projector.ProjectorConfig()
    embedding = config.embeddings.add()
    embedding.tensor_name = "embedding/.ATTRIBUTES/VARIABLE_VALUE"
    embedding.metadata_path = meta_file
    embedding.sprite.image_path = "sprites.png"
    embedding.sprite.single_image_dim.extend((x.shape[1], x.shape[2]))
    projector.visualize_embeddings(log_dir, config)
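
A minimal usage sketch for the plotting helpers above; the random batch, labels, and names here are purely illustrative stand-ins for real data:

    import numpy as np

    x = np.random.rand(16, 32, 32, 3).astype("float32")  # (BATCH, H, W, C) in [0, 1]
    y = np.random.randint(0, 10, size=(16,))
    names = [str(i) for i in range(10)]

    fig = image_grid(x, y, names)  # labeled matplotlib grid of the batch
    img = plot_to_image(fig)  # uint8 tensor (1, H, W, 4), ready for tf.summary.image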