From 2a397b17e2153ad07210e924cffdae1fa4db9877 Mon Sep 17 00:00:00 2001
From: Aladdin Persson
Date: Sat, 6 Mar 2021 21:09:08 +0100
Subject: [PATCH] cyclegan

---
 .../{ => randaugment}/Randaugment.pdf         |   Bin
 ML/PaperReviews/{ => unet}/UNET.pdf           |   Bin
 ML/Pytorch/GANs/CycleGAN/README.md            |  37 ++++
 ML/Pytorch/GANs/CycleGAN/config.py            |  29 ++++
 ML/Pytorch/GANs/CycleGAN/dataset.py           |  41 +++++
 .../GANs/CycleGAN/discriminator_model.py      |  53 ++++++
 ML/Pytorch/GANs/CycleGAN/generator_model.py   |  72 ++++++++
 ML/Pytorch/GANs/CycleGAN/train.py             | 158 ++++++++++++++++++
 ML/Pytorch/GANs/CycleGAN/utils.py             |  35 ++++
 .../Pix2Pix/__pycache__/config.cpython-38.pyc | Bin 1069 -> 0 bytes
 .../__pycache__/dataset.cpython-38.pyc        | Bin 1582 -> 0 bytes
 .../discriminator_model.cpython-38.pyc        | Bin 2051 -> 0 bytes
 .../generator_model.cpython-38.pyc            | Bin 3118 -> 0 bytes
 .../Pix2Pix/__pycache__/utils.cpython-38.pyc  | Bin 1342 -> 0 bytes
 14 files changed, 425 insertions(+)
 rename ML/PaperReviews/{ => randaugment}/Randaugment.pdf (100%)
 rename ML/PaperReviews/{ => unet}/UNET.pdf (100%)
 create mode 100644 ML/Pytorch/GANs/CycleGAN/README.md
 create mode 100644 ML/Pytorch/GANs/CycleGAN/config.py
 create mode 100644 ML/Pytorch/GANs/CycleGAN/dataset.py
 create mode 100644 ML/Pytorch/GANs/CycleGAN/discriminator_model.py
 create mode 100644 ML/Pytorch/GANs/CycleGAN/generator_model.py
 create mode 100644 ML/Pytorch/GANs/CycleGAN/train.py
 create mode 100644 ML/Pytorch/GANs/CycleGAN/utils.py
 delete mode 100644 ML/Pytorch/GANs/Pix2Pix/__pycache__/config.cpython-38.pyc
 delete mode 100644 ML/Pytorch/GANs/Pix2Pix/__pycache__/dataset.cpython-38.pyc
 delete mode 100644 ML/Pytorch/GANs/Pix2Pix/__pycache__/discriminator_model.cpython-38.pyc
 delete mode 100644 ML/Pytorch/GANs/Pix2Pix/__pycache__/generator_model.cpython-38.pyc
 delete mode 100644 ML/Pytorch/GANs/Pix2Pix/__pycache__/utils.cpython-38.pyc

diff --git a/ML/PaperReviews/Randaugment.pdf b/ML/PaperReviews/randaugment/Randaugment.pdf
similarity index 100%
rename from ML/PaperReviews/Randaugment.pdf
rename to ML/PaperReviews/randaugment/Randaugment.pdf
diff --git a/ML/PaperReviews/UNET.pdf b/ML/PaperReviews/unet/UNET.pdf
similarity index 100%
rename from ML/PaperReviews/UNET.pdf
rename to ML/PaperReviews/unet/UNET.pdf
diff --git a/ML/Pytorch/GANs/CycleGAN/README.md b/ML/Pytorch/GANs/CycleGAN/README.md
new file mode 100644
index 0000000..30529c6
--- /dev/null
+++ b/ML/Pytorch/GANs/CycleGAN/README.md
@@ -0,0 +1,37 @@
+# CycleGAN
+A clean, simple, and readable implementation of CycleGAN in PyTorch. I've tried to replicate the original paper as closely as possible, so if you read the paper the implementation should be pretty much identical. The results from this implementation are, I would say, on par with the paper; I'll include some example results below.
+
+## Results
+The model was trained on the Horse<->Zebra dataset.
+
+|1st row: Input / 2nd row: Generated / 3rd row: Target|
+|:---:|
+|![](results/results_anime.png)|
+|![](results/results_maps.png)|
+
+
+### Horses and Zebras Dataset
+The dataset can be downloaded from Kaggle: [link](https://www.kaggle.com/suyashdamle/cyclegan).
+
+### Download pretrained weights
+Pretrained weights [will upload soon]().
+
+Extract the zip file and put the .pth.tar files in the directory with all the Python files. Make sure you set LOAD_MODEL = True in config.py.
+
+### Training
+Edit the config.py file to match the setup you want to use. Then run train.py.
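+
+A sketch of the directory layout the code expects, following TRAIN_DIR in config.py and the folder names hard-coded in train.py (adjust the paths to wherever you extracted the Kaggle zip):
+
+```
+data/train/horses/     <- TRAIN_DIR + "/horses"
+data/train/zebras/     <- TRAIN_DIR + "/zebras"
+cyclegan_test/horse1/  <- validation horses (hard-coded in train.py)
+cyclegan_test/zebra1/  <- validation zebras (hard-coded in train.py)
+saved_images/          <- create this folder; train.py writes sample images here
+```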
+
+## CycleGAN paper
+### Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial Networks by Jun-Yan Zhu, Taesung Park, Phillip Isola, Alexei A. Efros
+
+#### Abstract
+Image-to-image translation is a class of vision and graphics problems where the goal is to learn the mapping between an input image and an output image using a training set of aligned image pairs. However, for many tasks, paired training data will not be available. We present an approach for learning to translate an image from a source domain X to a target domain Y in the absence of paired examples. Our goal is to learn a mapping G:X→Y such that the distribution of images from G(X) is indistinguishable from the distribution Y using an adversarial loss. Because this mapping is highly under-constrained, we couple it with an inverse mapping F:Y→X and introduce a cycle consistency loss to push F(G(X))≈X (and vice versa). Qualitative results are presented on several tasks where paired training data does not exist, including collection style transfer, object transfiguration, season transfer, photo enhancement, etc. Quantitative comparisons against several prior methods demonstrate the superiority of our approach.
+
+```
+@misc{zhu2020unpaired,
+      title={Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial Networks},
+      author={Jun-Yan Zhu and Taesung Park and Phillip Isola and Alexei A. Efros},
+      year={2020},
+      eprint={1703.10593},
+      archivePrefix={arXiv},
+      primaryClass={cs.CV}
+}
+```
diff --git a/ML/Pytorch/GANs/CycleGAN/config.py b/ML/Pytorch/GANs/CycleGAN/config.py
new file mode 100644
index 0000000..040d71c
--- /dev/null
+++ b/ML/Pytorch/GANs/CycleGAN/config.py
@@ -0,0 +1,29 @@
+import torch
+import albumentations as A
+from albumentations.pytorch import ToTensorV2
+
+DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
+TRAIN_DIR = "data/train"
+VAL_DIR = "data/val"
+BATCH_SIZE = 1
+LEARNING_RATE = 1e-5
+LAMBDA_IDENTITY = 0.0
+LAMBDA_CYCLE = 10
+NUM_WORKERS = 4
+NUM_EPOCHS = 10
+LOAD_MODEL = True
+SAVE_MODEL = True
+CHECKPOINT_GEN_H = "genh.pth.tar"
+CHECKPOINT_GEN_Z = "genz.pth.tar"
+CHECKPOINT_CRITIC_H = "critich.pth.tar"
+CHECKPOINT_CRITIC_Z = "criticz.pth.tar"
+
+transforms = A.Compose(
+    [
+        A.Resize(width=256, height=256),
+        A.HorizontalFlip(p=0.5),
+        A.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], max_pixel_value=255),
+        ToTensorV2(),
+    ],
+    additional_targets={"image0": "image"},
+)
\ No newline at end of file
diff --git a/ML/Pytorch/GANs/CycleGAN/dataset.py b/ML/Pytorch/GANs/CycleGAN/dataset.py
new file mode 100644
index 0000000..66e5b79
--- /dev/null
+++ b/ML/Pytorch/GANs/CycleGAN/dataset.py
@@ -0,0 +1,41 @@
+from PIL import Image
+import os
+from torch.utils.data import Dataset
+import numpy as np
+
+class HorseZebraDataset(Dataset):
+    def __init__(self, root_zebra, root_horse, transform=None):
+        self.root_zebra = root_zebra
+        self.root_horse = root_horse
+        self.transform = transform
+
+        self.zebra_images = os.listdir(root_zebra)
+        self.horse_images = os.listdir(root_horse)
+        self.length_dataset = max(len(self.zebra_images), len(self.horse_images)) # e.g. 1000 zebra images, 1500 horse images
+        self.zebra_len = len(self.zebra_images)
+        self.horse_len = len(self.horse_images)
+
+    def __len__(self):
+        return self.length_dataset
+
+    def __getitem__(self, index):
+        zebra_img = self.zebra_images[index % self.zebra_len]
+        horse_img = self.horse_images[index % self.horse_len]
+
+        zebra_path = os.path.join(self.root_zebra, zebra_img)
+        horse_path = os.path.join(self.root_horse, horse_img)
+
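+        # Load with PIL and convert to RGB numpy arrays: albumentations
+        # operates on numpy images, and .convert("RGB") guards against
+        # grayscale or RGBA files in the dataset folders.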
+        zebra_img = np.array(Image.open(zebra_path).convert("RGB"))
+        horse_img = np.array(Image.open(horse_path).convert("RGB"))
+
+        if self.transform:
+            augmentations = self.transform(image=zebra_img, image0=horse_img)
+            zebra_img = augmentations["image"]
+            horse_img = augmentations["image0"]
+
+        return zebra_img, horse_img
+
+
+
+
+
diff --git a/ML/Pytorch/GANs/CycleGAN/discriminator_model.py b/ML/Pytorch/GANs/CycleGAN/discriminator_model.py
new file mode 100644
index 0000000..d61c2ac
--- /dev/null
+++ b/ML/Pytorch/GANs/CycleGAN/discriminator_model.py
@@ -0,0 +1,53 @@
+import torch
+import torch.nn as nn
+
+class Block(nn.Module):
+    def __init__(self, in_channels, out_channels, stride):
+        super().__init__()
+        self.conv = nn.Sequential(
+            nn.Conv2d(in_channels, out_channels, 4, stride, 1, bias=True, padding_mode="reflect"),
+            nn.InstanceNorm2d(out_channels),
+            nn.LeakyReLU(0.2, inplace=True),
+        )
+
+    def forward(self, x):
+        return self.conv(x)
+
+
+class Discriminator(nn.Module):
+    def __init__(self, in_channels=3, features=[64, 128, 256, 512]):
+        super().__init__()
+        self.initial = nn.Sequential(
+            nn.Conv2d(
+                in_channels,
+                features[0],
+                kernel_size=4,
+                stride=2,
+                padding=1,
+                padding_mode="reflect",
+            ),
+            nn.LeakyReLU(0.2, inplace=True),
+        )
+
+        layers = []
+        in_channels = features[0]
+        for feature in features[1:]:
+            layers.append(Block(in_channels, feature, stride=1 if feature==features[-1] else 2))
+            in_channels = feature
+        layers.append(nn.Conv2d(in_channels, 1, kernel_size=4, stride=1, padding=1, padding_mode="reflect"))
+        self.model = nn.Sequential(*layers)
+
+    def forward(self, x):
+        x = self.initial(x)
+        return torch.sigmoid(self.model(x))
+
+def test():
+    x = torch.randn((5, 3, 256, 256))
+    model = Discriminator(in_channels=3)
+    preds = model(x)
+    print(preds.shape)
+
+
+if __name__ == "__main__":
+    test()
+
diff --git a/ML/Pytorch/GANs/CycleGAN/generator_model.py b/ML/Pytorch/GANs/CycleGAN/generator_model.py
new file mode 100644
index 0000000..1028845
--- /dev/null
+++ b/ML/Pytorch/GANs/CycleGAN/generator_model.py
@@ -0,0 +1,72 @@
+import torch
+import torch.nn as nn
+
+class ConvBlock(nn.Module):
+    def __init__(self, in_channels, out_channels, down=True, use_act=True, **kwargs):
+        super().__init__()
+        self.conv = nn.Sequential(
+            nn.Conv2d(in_channels, out_channels, padding_mode="reflect", **kwargs)
+            if down
+            else nn.ConvTranspose2d(in_channels, out_channels, **kwargs),
+            nn.InstanceNorm2d(out_channels),
+            nn.ReLU(inplace=True) if use_act else nn.Identity()
+        )
+
+    def forward(self, x):
+        return self.conv(x)
+
+class ResidualBlock(nn.Module):
+    def __init__(self, channels):
+        super().__init__()
+        self.block = nn.Sequential(
+            ConvBlock(channels, channels, kernel_size=3, padding=1),
+            ConvBlock(channels, channels, use_act=False, kernel_size=3, padding=1),
+        )
+
+    def forward(self, x):
+        return x + self.block(x)
+
+class Generator(nn.Module):
+    def __init__(self, img_channels, num_features=64, num_residuals=9):
+        super().__init__()
+        self.initial = nn.Sequential(
+            nn.Conv2d(img_channels, num_features, kernel_size=7, stride=1, padding=3, padding_mode="reflect"),
+            nn.InstanceNorm2d(num_features),
+            nn.ReLU(inplace=True),
+        )
+        self.down_blocks = nn.ModuleList(
+            [
+                ConvBlock(num_features, num_features*2, kernel_size=3, stride=2, padding=1),
+                ConvBlock(num_features*2, num_features*4, kernel_size=3, stride=2, padding=1),
+            ]
+        )
+        self.res_blocks = nn.Sequential(
+            *[ResidualBlock(num_features*4) for _ in range(num_residuals)]
+        )
+        self.up_blocks = nn.ModuleList(
+            [
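+                # down=False switches ConvBlock to ConvTranspose2d; with
+                # stride=2 and output_padding=1 each block exactly doubles the
+                # spatial size, mirroring the two stride-2 down blocks above.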
+                ConvBlock(num_features*4, num_features*2, down=False, kernel_size=3, stride=2, padding=1, output_padding=1),
+                ConvBlock(num_features*2, num_features*1, down=False, kernel_size=3, stride=2, padding=1, output_padding=1),
+            ]
+        )
+
+        self.last = nn.Conv2d(num_features*1, img_channels, kernel_size=7, stride=1, padding=3, padding_mode="reflect")
+
+    def forward(self, x):
+        x = self.initial(x)
+        for layer in self.down_blocks:
+            x = layer(x)
+        x = self.res_blocks(x)
+        for layer in self.up_blocks:
+            x = layer(x)
+        return torch.tanh(self.last(x))
+
+def test():
+    img_channels = 3
+    img_size = 256
+    x = torch.randn((2, img_channels, img_size, img_size))
+    gen = Generator(img_channels, num_residuals=9)
+    print(gen(x).shape)
+
+if __name__ == "__main__":
+    test()
diff --git a/ML/Pytorch/GANs/CycleGAN/train.py b/ML/Pytorch/GANs/CycleGAN/train.py
new file mode 100644
index 0000000..6e07afb
--- /dev/null
+++ b/ML/Pytorch/GANs/CycleGAN/train.py
@@ -0,0 +1,158 @@
+import torch
+from dataset import HorseZebraDataset
+import sys
+from utils import save_checkpoint, load_checkpoint
+from torch.utils.data import DataLoader
+import torch.nn as nn
+import torch.optim as optim
+import config
+from tqdm import tqdm
+from torchvision.utils import save_image
+from discriminator_model import Discriminator
+from generator_model import Generator
+
+def train_fn(disc_H, disc_Z, gen_Z, gen_H, loader, opt_disc, opt_gen, l1, mse, d_scaler, g_scaler):
+    H_reals = 0
+    H_fakes = 0
+    loop = tqdm(loader, leave=True)
+
+    for idx, (zebra, horse) in enumerate(loop):
+        zebra = zebra.to(config.DEVICE)
+        horse = horse.to(config.DEVICE)
+
+        # Train Discriminators H and Z
+        with torch.cuda.amp.autocast():
+            fake_horse = gen_H(zebra)
+            D_H_real = disc_H(horse)
+            D_H_fake = disc_H(fake_horse.detach())
+            H_reals += D_H_real.mean().item()
+            H_fakes += D_H_fake.mean().item()
+            D_H_real_loss = mse(D_H_real, torch.ones_like(D_H_real))
+            D_H_fake_loss = mse(D_H_fake, torch.zeros_like(D_H_fake))
+            D_H_loss = D_H_real_loss + D_H_fake_loss
+
+            fake_zebra = gen_Z(horse)
+            D_Z_real = disc_Z(zebra)
+            D_Z_fake = disc_Z(fake_zebra.detach())
+            D_Z_real_loss = mse(D_Z_real, torch.ones_like(D_Z_real))
+            D_Z_fake_loss = mse(D_Z_fake, torch.zeros_like(D_Z_fake))
+            D_Z_loss = D_Z_real_loss + D_Z_fake_loss
+
+            # put it together
+            D_loss = (D_H_loss + D_Z_loss)/2
+
+        opt_disc.zero_grad()
+        d_scaler.scale(D_loss).backward()
+        d_scaler.step(opt_disc)
+        d_scaler.update()
+
+        # Train Generators H and Z
+        with torch.cuda.amp.autocast():
+            # adversarial loss for both generators
+            D_H_fake = disc_H(fake_horse)
+            D_Z_fake = disc_Z(fake_zebra)
+            loss_G_H = mse(D_H_fake, torch.ones_like(D_H_fake))
+            loss_G_Z = mse(D_Z_fake, torch.ones_like(D_Z_fake))
+
+            # cycle loss
+            cycle_zebra = gen_Z(fake_horse)
+            cycle_horse = gen_H(fake_zebra)
+            cycle_zebra_loss = l1(zebra, cycle_zebra)
+            cycle_horse_loss = l1(horse, cycle_horse)
+
+            # identity loss (remove these for efficiency if you set lambda_identity=0)
+            identity_zebra = gen_Z(zebra)
+            identity_horse = gen_H(horse)
+            identity_zebra_loss = l1(zebra, identity_zebra)
+            identity_horse_loss = l1(horse, identity_horse)
+
+            # add it all together
+            G_loss = (
+                loss_G_Z
+                + loss_G_H
+                + cycle_zebra_loss * config.LAMBDA_CYCLE
+                + cycle_horse_loss * config.LAMBDA_CYCLE
+                + identity_horse_loss * config.LAMBDA_IDENTITY
+                + identity_zebra_loss * config.LAMBDA_IDENTITY
+            )
+
+        opt_gen.zero_grad()
+        g_scaler.scale(G_loss).backward()
+        g_scaler.step(opt_gen)
+        g_scaler.update()
+
+        if idx % 200 == 0:
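+            # Undo config's Normalize(mean=0.5, std=0.5): *0.5+0.5 maps the
+            # generator's tanh outputs in [-1, 1] back to [0, 1] for save_image.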
f"saved_images/horse_{idx}.png") + save_image(fake_zebra*0.5+0.5, f"saved_images/zebra_{idx}.png") + + loop.set_postfix(H_real=H_reals/(idx+1), H_fake=H_fakes/(idx+1)) + + + +def main(): + disc_H = Discriminator(in_channels=3).to(config.DEVICE) + disc_Z = Discriminator(in_channels=3).to(config.DEVICE) + gen_Z = Generator(img_channels=3, num_residuals=9).to(config.DEVICE) + gen_H = Generator(img_channels=3, num_residuals=9).to(config.DEVICE) + opt_disc = optim.Adam( + list(disc_H.parameters()) + list(disc_Z.parameters()), + lr=config.LEARNING_RATE, + betas=(0.5, 0.999), + ) + + opt_gen = optim.Adam( + list(gen_Z.parameters()) + list(gen_H.parameters()), + lr=config.LEARNING_RATE, + betas=(0.5, 0.999), + ) + + L1 = nn.L1Loss() + mse = nn.MSELoss() + + if config.LOAD_MODEL: + load_checkpoint( + config.CHECKPOINT_GEN_H, gen_H, opt_gen, config.LEARNING_RATE, + ) + load_checkpoint( + config.CHECKPOINT_GEN_Z, gen_Z, opt_gen, config.LEARNING_RATE, + ) + load_checkpoint( + config.CHECKPOINT_CRITIC_H, disc_H, opt_disc, config.LEARNING_RATE, + ) + load_checkpoint( + config.CHECKPOINT_CRITIC_Z, disc_Z, opt_disc, config.LEARNING_RATE, + ) + + dataset = HorseZebraDataset( + root_horse=config.TRAIN_DIR+"/horses", root_zebra=config.TRAIN_DIR+"/zebras", transform=config.transforms + ) + val_dataset = HorseZebraDataset( + root_horse="cyclegan_test/horse1", root_zebra="cyclegan_test/zebra1", transform=config.transforms + ) + val_loader = DataLoader( + val_dataset, + batch_size=1, + shuffle=False, + pin_memory=True, + ) + loader = DataLoader( + dataset, + batch_size=config.BATCH_SIZE, + shuffle=True, + num_workers=config.NUM_WORKERS, + pin_memory=True + ) + g_scaler = torch.cuda.amp.GradScaler() + d_scaler = torch.cuda.amp.GradScaler() + + for epoch in range(config.NUM_EPOCHS): + train_fn(disc_H, disc_Z, gen_Z, gen_H, loader, opt_disc, opt_gen, L1, mse, d_scaler, g_scaler) + + if config.SAVE_MODEL: + save_checkpoint(gen_H, opt_gen, filename=config.CHECKPOINT_GEN_H) + save_checkpoint(gen_Z, opt_gen, filename=config.CHECKPOINT_GEN_Z) + save_checkpoint(disc_H, opt_disc, filename=config.CHECKPOINT_CRITIC_H) + save_checkpoint(disc_Z, opt_disc, filename=config.CHECKPOINT_CRITIC_Z) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/ML/Pytorch/GANs/CycleGAN/utils.py b/ML/Pytorch/GANs/CycleGAN/utils.py new file mode 100644 index 0000000..25044a9 --- /dev/null +++ b/ML/Pytorch/GANs/CycleGAN/utils.py @@ -0,0 +1,35 @@ +import random, torch, os, numpy as np +import torch.nn as nn +import config +import copy + +def save_checkpoint(model, optimizer, filename="my_checkpoint.pth.tar"): + print("=> Saving checkpoint") + checkpoint = { + "state_dict": model.state_dict(), + "optimizer": optimizer.state_dict(), + } + torch.save(checkpoint, filename) + + +def load_checkpoint(checkpoint_file, model, optimizer, lr): + print("=> Loading checkpoint") + checkpoint = torch.load(checkpoint_file, map_location=config.DEVICE) + model.load_state_dict(checkpoint["state_dict"]) + optimizer.load_state_dict(checkpoint["optimizer"]) + + # If we don't do this then it will just have learning rate of old checkpoint + # and it will lead to many hours of debugging \: + for param_group in optimizer.param_groups: + param_group["lr"] = lr + + +def seed_everything(seed=42): + os.environ["PYTHONHASHSEED"] = str(seed) + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = 
+    torch.backends.cudnn.deterministic = True
+    torch.backends.cudnn.benchmark = False
\ No newline at end of file
diff --git a/ML/Pytorch/GANs/Pix2Pix/__pycache__/config.cpython-38.pyc b/ML/Pytorch/GANs/Pix2Pix/__pycache__/config.cpython-38.pyc
deleted file mode 100644
index 7f69f658d78fc7ce1fc5b1937a62c00a4d369753..0000000000000000000000000000000000000000
GIT binary patch
[binary payload omitted]
diff --git a/ML/Pytorch/GANs/Pix2Pix/__pycache__/dataset.cpython-38.pyc b/ML/Pytorch/GANs/Pix2Pix/__pycache__/dataset.cpython-38.pyc
deleted file mode 100644
index 5447aac82471f48c7f42f87d8106001f51a71a26..0000000000000000000000000000000000000000
GIT binary patch
[binary payload omitted]
diff --git a/ML/Pytorch/GANs/Pix2Pix/__pycache__/discriminator_model.cpython-38.pyc b/ML/Pytorch/GANs/Pix2Pix/__pycache__/discriminator_model.cpython-38.pyc
deleted file mode 100644
index e6ca8171784a84a6f125368b123e9e93bf74ceca..0000000000000000000000000000000000000000
GIT binary patch
[binary payload omitted]
diff --git a/ML/Pytorch/GANs/Pix2Pix/__pycache__/generator_model.cpython-38.pyc b/ML/Pytorch/GANs/Pix2Pix/__pycache__/generator_model.cpython-38.pyc
deleted file mode 100644
index d9da67fbc99cfb3ca8949f34baf53520daf080f2..0000000000000000000000000000000000000000
GIT binary patch
[binary payload omitted]
diff --git a/ML/Pytorch/GANs/Pix2Pix/__pycache__/utils.cpython-38.pyc b/ML/Pytorch/GANs/Pix2Pix/__pycache__/utils.cpython-38.pyc
deleted file mode 100644
GIT binary patch
[binary payload omitted]