mirror of
https://github.com/aladdinpersson/Machine-Learning-Collection.git
synced 2026-02-21 19:27:58 +00:00
Initial commit
91
ML/TensorFlow/more_advanced/DCGAN/main.py
Normal file
@@ -0,0 +1,91 @@
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt

# Let GPU memory grow on demand instead of pre-allocating it all;
# the guard avoids an IndexError on CPU-only machines.
physical_devices = tf.config.list_physical_devices("GPU")
if physical_devices:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)

dataset = keras.preprocessing.image_dataset_from_directory(
    directory="celeb_dataset",
    label_mode=None,
    image_size=(64, 64),
    batch_size=32,
    shuffle=True,
    seed=None,
    validation_split=None,
).map(lambda x: x / 255.0)
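
# Optional sanity check (not part of the original script): pull one batch to confirm
# the shape and value range the models below rely on. Assumes celeb_dataset/ really
# contains readable images.
for image_batch in dataset.take(1):
    print(image_batch.shape)  # expected: (32, 64, 64, 3)
    print(float(tf.reduce_min(image_batch)), float(tf.reduce_max(image_batch)))  # within [0, 1]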

# Discriminator: 64x64x3 image -> probability that the image is real.
discriminator = keras.Sequential(
    [
        keras.Input(shape=(64, 64, 3)),
        layers.Conv2D(64, kernel_size=4, strides=2, padding="same"),
        layers.LeakyReLU(alpha=0.2),
        layers.Conv2D(128, kernel_size=4, strides=2, padding="same"),
        layers.LeakyReLU(alpha=0.2),
        layers.Conv2D(128, kernel_size=4, strides=2, padding="same"),
        layers.LeakyReLU(alpha=0.2),
        layers.Flatten(),
        layers.Dropout(0.2),
        layers.Dense(1, activation="sigmoid"),
    ],
    name="discriminator",
)
discriminator.summary()
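
# Shape walk-through (added note): each stride-2 conv halves the spatial size,
# 64x64 -> 32x32 -> 16x16 -> 8x8, so Flatten sees 8 * 8 * 128 = 8192 features
# feeding the single sigmoid "real vs. fake" unit.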

latent_dim = 128
generator = keras.Sequential(
    [
        keras.Input(shape=(latent_dim,)),
        layers.Dense(8 * 8 * 128),
        layers.Reshape((8, 8, 128)),
        layers.Conv2DTranspose(128, kernel_size=4, strides=2, padding="same"),
        layers.LeakyReLU(alpha=0.2),
        layers.Conv2DTranspose(256, kernel_size=4, strides=2, padding="same"),
        layers.LeakyReLU(alpha=0.2),
        layers.Conv2DTranspose(512, kernel_size=4, strides=2, padding="same"),
        layers.LeakyReLU(alpha=0.2),
        layers.Conv2D(3, kernel_size=5, padding="same", activation="sigmoid"),
    ],
    name="generator",
)
generator.summary()
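
# Shape walk-through (added note): the dense output is reshaped to 8x8x128, each
# stride-2 transposed conv doubles the spatial size (8x8 -> 16x16 -> 32x32 -> 64x64),
# and the final 3-filter conv with a sigmoid yields a 64x64x3 image in [0, 1],
# matching the normalized training images.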

opt_gen = keras.optimizers.Adam(1e-4)
opt_disc = keras.optimizers.Adam(1e-4)
loss_fn = keras.losses.BinaryCrossentropy()
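
# How the binary cross-entropy targets below map onto the usual GAN objectives
# (explanatory note, added for reference):
#   loss_fn(ones,  D(x))    = -log(D(x))        -> push D(x) toward 1 on real images
#   loss_fn(zeros, D(G(z))) = -log(1 - D(G(z))) -> push D(G(z)) toward 0 on fakes
#   loss_fn(ones,  D(G(z))) = -log(D(G(z)))     -> the non-saturating generator loss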

for epoch in range(10):
    for idx, real in enumerate(tqdm(dataset)):
        batch_size = real.shape[0]
        random_latent_vectors = tf.random.normal(shape=(batch_size, latent_dim))
        fake = generator(random_latent_vectors)

        if idx % 100 == 0:
            # Save one generated sample; make sure the output directory exists.
            os.makedirs("gen_images", exist_ok=True)
            img = keras.preprocessing.image.array_to_img(fake[0])
            img.save("gen_images/generated_img_%03d_%d.png" % (epoch, idx))

        ### Train Discriminator: max log(D(x)) + log(1 - D(G(z)))
        with tf.GradientTape() as disc_tape:
            loss_disc_real = loss_fn(tf.ones((batch_size, 1)), discriminator(real))
            loss_disc_fake = loss_fn(tf.zeros((batch_size, 1)), discriminator(fake))
            loss_disc = (loss_disc_real + loss_disc_fake) / 2

        grads = disc_tape.gradient(loss_disc, discriminator.trainable_weights)
        opt_disc.apply_gradients(zip(grads, discriminator.trainable_weights))

        ### Train Generator: min log(1 - D(G(z))) <-> max log(D(G(z)))
        with tf.GradientTape() as gen_tape:
            fake = generator(random_latent_vectors)
            output = discriminator(fake)
            loss_gen = loss_fn(tf.ones((batch_size, 1)), output)

        grads = gen_tape.gradient(loss_gen, generator.trainable_weights)
        opt_gen.apply_gradients(zip(grads, generator.trainable_weights))
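
# Optional (not in the original script): persist the trained generator so new samples
# can be drawn later without retraining; the filename is an arbitrary choice and
# assumes the TF 2.x / Keras 2 HDF5 weight format.
generator.save_weights("generator_weights.h5")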
87
ML/TensorFlow/more_advanced/DCGAN/train.py
Normal file
@@ -0,0 +1,87 @@
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt

# Let GPU memory grow on demand instead of pre-allocating it all;
# the guard avoids an IndexError on CPU-only machines.
physical_devices = tf.config.list_physical_devices("GPU")
if physical_devices:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)

dataset = keras.preprocessing.image_dataset_from_directory(
    directory="celeb_dataset",
    label_mode=None,
    image_size=(64, 64),
    batch_size=32,
    shuffle=True,
).map(lambda x: x / 255.0)

discriminator = keras.Sequential(
    [
        keras.Input(shape=(64, 64, 3)),
        layers.Conv2D(64, kernel_size=4, strides=2, padding="same"),
        layers.LeakyReLU(0.2),
        layers.Conv2D(128, kernel_size=4, strides=2, padding="same"),
        layers.LeakyReLU(0.2),
        layers.Conv2D(128, kernel_size=4, strides=2, padding="same"),
        layers.LeakyReLU(0.2),
        layers.Flatten(),
        layers.Dropout(0.2),
        layers.Dense(1, activation="sigmoid"),
    ]
)

discriminator.summary()

latent_dim = 128
generator = keras.Sequential(
    [
        layers.Input(shape=(latent_dim,)),
        layers.Dense(8 * 8 * 128),
        layers.Reshape((8, 8, 128)),
        layers.Conv2DTranspose(128, kernel_size=4, strides=2, padding="same"),
        layers.LeakyReLU(0.2),
        layers.Conv2DTranspose(256, kernel_size=4, strides=2, padding="same"),
        layers.LeakyReLU(0.2),
        layers.Conv2DTranspose(512, kernel_size=4, strides=2, padding="same"),
        layers.LeakyReLU(0.2),
        layers.Conv2D(3, kernel_size=5, padding="same", activation="sigmoid"),
    ]
)
generator.summary()

opt_gen = keras.optimizers.Adam(1e-4)
opt_disc = keras.optimizers.Adam(1e-4)
loss_fn = keras.losses.BinaryCrossentropy()

for epoch in range(10):
    for idx, real in enumerate(tqdm(dataset)):
        batch_size = real.shape[0]
        random_latent_vectors = tf.random.normal(shape=(batch_size, latent_dim))
        fake = generator(random_latent_vectors)

        if idx % 100 == 0:
            # Save one generated sample; make sure the output directory exists.
            os.makedirs("generated_images", exist_ok=True)
            img = keras.preprocessing.image.array_to_img(fake[0])
            img.save(f"generated_images/generated_img{epoch}_{idx}_.png")

        ### Train Discriminator: max log(D(x)) + log(1 - D(G(z)))
        with tf.GradientTape() as disc_tape:
            loss_disc_real = loss_fn(tf.ones((batch_size, 1)), discriminator(real))
            loss_disc_fake = loss_fn(tf.zeros((batch_size, 1)), discriminator(fake))
            loss_disc = (loss_disc_real + loss_disc_fake) / 2

        grads = disc_tape.gradient(loss_disc, discriminator.trainable_weights)
        opt_disc.apply_gradients(zip(grads, discriminator.trainable_weights))

        ### Train Generator: min log(1 - D(G(z))) <-> max log(D(G(z)))
        with tf.GradientTape() as gen_tape:
            fake = generator(random_latent_vectors)
            output = discriminator(fake)
            loss_gen = loss_fn(tf.ones((batch_size, 1)), output)

        grads = gen_tape.gradient(loss_gen, generator.trainable_weights)
        opt_gen.apply_gradients(zip(grads, generator.trainable_weights))
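
# Optional refactor (not part of the original script): the two per-batch updates above
# can be compiled into a single graph-mode step with tf.function, which is usually
# noticeably faster than the eager loop. A minimal sketch, reusing the models,
# optimizers, loss_fn, and latent_dim defined above:
@tf.function
def train_step(real):
    batch_size = tf.shape(real)[0]
    random_latent_vectors = tf.random.normal(shape=(batch_size, latent_dim))

    # Discriminator update: real images labeled 1, generated images labeled 0.
    with tf.GradientTape() as disc_tape:
        fake = generator(random_latent_vectors)
        loss_disc_real = loss_fn(tf.ones((batch_size, 1)), discriminator(real))
        loss_disc_fake = loss_fn(tf.zeros((batch_size, 1)), discriminator(fake))
        loss_disc = (loss_disc_real + loss_disc_fake) / 2
    grads = disc_tape.gradient(loss_disc, discriminator.trainable_weights)
    opt_disc.apply_gradients(zip(grads, discriminator.trainable_weights))

    # Generator update: try to make the discriminator output 1 on the fakes.
    with tf.GradientTape() as gen_tape:
        output = discriminator(generator(random_latent_vectors))
        loss_gen = loss_fn(tf.ones((batch_size, 1)), output)
    grads = gen_tape.gradient(loss_gen, generator.trainable_weights)
    opt_gen.apply_gradients(zip(grads, generator.trainable_weights))
    return loss_disc, loss_gen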