diff --git a/ML/Pytorch/CNN_architectures/pytorch_inceptionet.py b/ML/Pytorch/CNN_architectures/pytorch_inceptionet.py
index c70cde7..38faef9 100644
--- a/ML/Pytorch/CNN_architectures/pytorch_inceptionet.py
+++ b/ML/Pytorch/CNN_architectures/pytorch_inceptionet.py
@@ -51,7 +51,7 @@ class GoogLeNet(nn.Module):

         self.avgpool = nn.AvgPool2d(kernel_size=7, stride=1)
         self.dropout = nn.Dropout(p=0.4)
-        self.fc1 = nn.Linear(1024, 1000)
+        self.fc1 = nn.Linear(1024, num_classes)

         if self.aux_logits:
             self.aux1 = InceptionAux(512, num_classes)
diff --git a/ML/Pytorch/CNN_architectures/pytorch_resnet.py b/ML/Pytorch/CNN_architectures/pytorch_resnet.py
index 4822d12..1279179 100644
--- a/ML/Pytorch/CNN_architectures/pytorch_resnet.py
+++ b/ML/Pytorch/CNN_architectures/pytorch_resnet.py
@@ -23,7 +23,7 @@ class block(nn.Module):
         super(block, self).__init__()
         self.expansion = 4
         self.conv1 = nn.Conv2d(
-            in_channels, intermediate_channels, kernel_size=1, stride=1, padding=0
+            in_channels, intermediate_channels, kernel_size=1, stride=1, padding=0, bias=False
         )
         self.bn1 = nn.BatchNorm2d(intermediate_channels)
         self.conv2 = nn.Conv2d(
@@ -32,6 +32,7 @@ class block(nn.Module):
             kernel_size=3,
             stride=stride,
             padding=1,
+            bias=False
         )
         self.bn2 = nn.BatchNorm2d(intermediate_channels)
         self.conv3 = nn.Conv2d(
@@ -40,6 +41,7 @@ class block(nn.Module):
             kernel_size=1,
             stride=1,
             padding=0,
+            bias=False
         )
         self.bn3 = nn.BatchNorm2d(intermediate_channels * self.expansion)
         self.relu = nn.ReLU()
@@ -70,7 +72,7 @@ class ResNet(nn.Module):
     def __init__(self, block, layers, image_channels, num_classes):
         super(ResNet, self).__init__()
         self.in_channels = 64
-        self.conv1 = nn.Conv2d(image_channels, 64, kernel_size=7, stride=2, padding=3)
+        self.conv1 = nn.Conv2d(image_channels, 64, kernel_size=7, stride=2, padding=3, bias=False)
         self.bn1 = nn.BatchNorm2d(64)
         self.relu = nn.ReLU()
         self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
@@ -122,6 +124,7 @@ class ResNet(nn.Module):
                     intermediate_channels * 4,
                     kernel_size=1,
                     stride=stride,
+                    bias=False
                 ),
                 nn.BatchNorm2d(intermediate_channels * 4),
             )
diff --git a/ML/Pytorch/GANs/ProGAN/README.md b/ML/Pytorch/GANs/ProGAN/README.md
index b232701..fa1b5a2 100644
--- a/ML/Pytorch/GANs/ProGAN/README.md
+++ b/ML/Pytorch/GANs/ProGAN/README.md
@@ -1,21 +1,18 @@
 # ProGAN
-A clean, simple and readable implementation of ProGAN in PyTorch. I've tried to replicate the original paper as closely as possible, so if you read the paper the implementation should be pretty much identical. The results from this implementation I would say is on par with the paper, I'll include some examples results below.
+A clean, simple and readable implementation of ProGAN in PyTorch. I've tried to replicate the original paper as closely as possible, so if you read the paper the implementation should be pretty much identical. The results are, I would say, close to the paper's, but I did not train up to 1024x1024 images because it took too long, and I used fewer than 512 channels to keep the model smaller, both of which could worsen the results somewhat. I'll include some example results below.

 ## Results
-The model was trained on the Maps dataset and for fun I also tried using it to colorize anime.
-
 ||
 |:---:|
-|![](results/64_examples.png)|
 |![](results/result1.png)|
+|![](results/64_examples.png)|

 ### Celeb-HQ dataset
 The dataset can be downloaded from Kaggle: [link](https://www.kaggle.com/lamsimon/celebahq).
-
 ### Download pretrained weights
-Pretrained weights [here]().
+Download pretrained weights [here](https://github.com/aladdinpersson/Machine-Learning-Collection/releases/download/1.0/ProGAN_weights.zip). Extract the zip file and put the .pth.tar files in the directory with all the Python files, and make sure you set LOAD_MODEL = True in config.py.
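
Note on the `pytorch_inceptionet.py` hunk: the classifier head was hardcoded to 1000 outputs, so any `num_classes` other than 1000 produced a logits/targets shape mismatch at the loss. A minimal sketch of the symptom (the feature size 1024 comes from the file above; the 10-class setting is just for illustration):

```python
import torch
import torch.nn as nn

features = torch.randn(4, 1024)  # batch of pooled GoogLeNet features

fc_old = nn.Linear(1024, 1000)  # hardcoded head, ignores num_classes
fc_new = nn.Linear(1024, 10)    # nn.Linear(1024, num_classes) with num_classes=10

print(fc_old(features).shape)  # torch.Size([4, 1000]) -> mismatches 10-class targets
print(fc_new(features).shape)  # torch.Size([4, 10])
```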
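Note on the recurring `bias=False` change in `pytorch_resnet.py`: every one of these convolutions is immediately followed by `nn.BatchNorm2d`, which subtracts the per-channel mean (cancelling any constant offset) and adds its own learnable shift, so the conv bias is redundant parameters. A minimal sketch of the pattern (channel sizes are illustrative, not taken from the files above):

```python
import torch
import torch.nn as nn

# Conv -> BatchNorm -> ReLU: BatchNorm re-centers activations and has a
# learnable shift (beta) that plays the role of the conv bias, so setting
# bias=False drops parameters without changing what the block can express.
conv_bn = nn.Sequential(
    nn.Conv2d(64, 128, kernel_size=3, padding=1, bias=False),
    nn.BatchNorm2d(128),
    nn.ReLU(),
)

x = torch.randn(2, 64, 32, 32)
print(conv_bn(x).shape)  # torch.Size([2, 128, 32, 32])
```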
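Note on the README's pretrained-weights instructions: with the .pth.tar files extracted next to the Python files and LOAD_MODEL = True set, the repo's own loading utilities take over. If you want to inspect a checkpoint manually first, a sketch like the following works; the filename and the "state_dict" key are assumptions, so check what the repo's save/load code actually stores:

```python
import torch

# Hypothetical inspection of one extracted checkpoint; the filename and
# the keys inside it are assumptions, not confirmed by the diff above.
checkpoint = torch.load("generator.pth.tar", map_location="cpu")
print(checkpoint.keys())  # see which keys the save utility actually wrote
# model.load_state_dict(checkpoint["state_dict"])  # typical pattern, if present
```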