From b1e23795281428bacb022a75bb36012bf00017e3 Mon Sep 17 00:00:00 2001
From: Aladdin Persson
Date: Wed, 24 Mar 2021 22:09:25 +0100
Subject: [PATCH] test

---
 ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py    | 157 -------------------
 ML/Pytorch/Basics/pytorch_simple_CNN.py      | 119 --------------
 ML/Pytorch/Basics/pytorch_simple_fullynet.py | 120 --------------
 3 files changed, 396 deletions(-)
 delete mode 100644 ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py
 delete mode 100644 ML/Pytorch/Basics/pytorch_simple_CNN.py
 delete mode 100644 ML/Pytorch/Basics/pytorch_simple_fullynet.py

diff --git a/ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py b/ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py
deleted file mode 100644
index f12d855..0000000
--- a/ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py
+++ /dev/null
@@ -1,157 +0,0 @@
-"""
-Example code of a simple RNN, GRU, LSTM on the MNIST dataset.
-
-Programmed by Aladdin Persson
-* 2020-05-09 Initial coding
-
-"""
-
-# Imports
-import torch
-import torchvision  # torch package for vision related things
-import torch.nn.functional as F  # Parameterless functions, like (some) activation functions
-import torchvision.datasets as datasets  # Standard datasets
-import torchvision.transforms as transforms  # Transformations we can perform on our dataset for augmentation
-from torch import optim  # For optimizers like SGD, Adam, etc.
-from torch import nn  # All neural network modules
-from torch.utils.data import DataLoader  # Gives easier dataset management by creating mini batches etc.
-from tqdm import tqdm  # For a nice progress bar!
-
-# Set device
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-# Hyperparameters
-input_size = 28
-hidden_size = 256
-num_layers = 2
-num_classes = 10
-sequence_length = 28
-learning_rate = 0.005
-batch_size = 64
-num_epochs = 3
-
-# Recurrent neural network (many-to-one)
-class RNN(nn.Module):
-    def __init__(self, input_size, hidden_size, num_layers, num_classes):
-        super(RNN, self).__init__()
-        self.hidden_size = hidden_size
-        self.num_layers = num_layers
-        self.rnn = nn.RNN(input_size, hidden_size, num_layers, batch_first=True)
-        self.fc = nn.Linear(hidden_size * sequence_length, num_classes)
-
-    def forward(self, x):
-        # Set initial hidden state
-        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
-
-        # Forward propagate RNN
-        out, _ = self.rnn(x, h0)
-        out = out.reshape(out.shape[0], -1)
-
-        # Decode the hidden states from all time steps
-        out = self.fc(out)
-        return out
-
-
-# Recurrent neural network with GRU (many-to-one)
-class RNN_GRU(nn.Module):
-    def __init__(self, input_size, hidden_size, num_layers, num_classes):
-        super(RNN_GRU, self).__init__()
-        self.hidden_size = hidden_size
-        self.num_layers = num_layers
-        self.gru = nn.GRU(input_size, hidden_size, num_layers, batch_first=True)
-        self.fc = nn.Linear(hidden_size * sequence_length, num_classes)
-
-    def forward(self, x):
-        # Set initial hidden state
-        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
-
-        # Forward propagate GRU
-        out, _ = self.gru(x, h0)
-        out = out.reshape(out.shape[0], -1)
-
-        # Decode the hidden states from all time steps
-        out = self.fc(out)
-        return out
-
-
-# Recurrent neural network with LSTM (many-to-one)
-class RNN_LSTM(nn.Module):
-    def __init__(self, input_size, hidden_size, num_layers, num_classes):
-        super(RNN_LSTM, self).__init__()
-        self.hidden_size = hidden_size
-        self.num_layers = num_layers
-        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
-        self.fc = nn.Linear(hidden_size * sequence_length, num_classes)
-
-    def forward(self, x):
-        # Set initial hidden and cell states
-        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
-        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
-
-        # Forward propagate LSTM
-        out, _ = self.lstm(
-            x, (h0, c0)
-        )  # out: tensor of shape (batch_size, seq_length, hidden_size)
-        out = out.reshape(out.shape[0], -1)
-
-        # Decode the hidden states from all time steps
-        out = self.fc(out)
-        return out
-
-
-# Load Data
-train_dataset = datasets.MNIST(root="dataset/", train=True, transform=transforms.ToTensor(), download=True)
-test_dataset = datasets.MNIST(root="dataset/", train=False, transform=transforms.ToTensor(), download=True)
-train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
-test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)
-
-# Initialize network (try out just using a simple RNN or GRU, and then compare with the LSTM)
-model = RNN_LSTM(input_size, hidden_size, num_layers, num_classes).to(device)
-
-# Loss and optimizer
-criterion = nn.CrossEntropyLoss()
-optimizer = optim.Adam(model.parameters(), lr=learning_rate)
-
-# Train Network
-for epoch in range(num_epochs):
-    for batch_idx, (data, targets) in enumerate(tqdm(train_loader)):
-        # Get data to cuda if possible
-        data = data.to(device=device).squeeze(1)
-        targets = targets.to(device=device)
-
-        # forward
-        scores = model(data)
-        loss = criterion(scores, targets)
-
-        # backward
-        optimizer.zero_grad()
-        loss.backward()
-
-        # gradient descent update step/adam step
-        optimizer.step()
-
-# Check accuracy on training & test to see how well our model performs
-def check_accuracy(loader, model):
-    num_correct = 0
-    num_samples = 0
-
-    # Set model to eval
-    model.eval()
-
-    with torch.no_grad():
-        for x, y in loader:
-            x = x.to(device=device).squeeze(1)
-            y = y.to(device=device)
-
-            scores = model(x)
-            _, predictions = scores.max(1)
-            num_correct += (predictions == y).sum()
-            num_samples += predictions.size(0)
-
-    # Toggle model back to train
-    model.train()
-    return num_correct / num_samples
-
-
-print(f"Accuracy on training set: {check_accuracy(train_loader, model)*100:.2f}")
-print(f"Accuracy on test set: {check_accuracy(test_loader, model)*100:.2f}")
diff --git a/ML/Pytorch/Basics/pytorch_simple_CNN.py b/ML/Pytorch/Basics/pytorch_simple_CNN.py
deleted file mode 100644
index d18d948..0000000
--- a/ML/Pytorch/Basics/pytorch_simple_CNN.py
+++ /dev/null
@@ -1,119 +0,0 @@
-"""
-A simple walkthrough of how to code a convolutional neural network (CNN)
-using the PyTorch library. For demonstration we train it on the very
-common MNIST dataset of handwritten digits. In this code we go through
-how to create the network, initialize a loss function and optimizer,
-check accuracy, and more.
-
-Programmed by Aladdin Persson
-* 2020-04-08: Initial coding
-* 2021-03-24: More detailed comments and small revision of the code
-
-"""
-
-# Imports
-import torch
-import torchvision  # torch package for vision related things
-import torch.nn.functional as F  # Parameterless functions, like (some) activation functions
-import torchvision.datasets as datasets  # Standard datasets
-import torchvision.transforms as transforms  # Transformations we can perform on our dataset for augmentation
-from torch import optim  # For optimizers like SGD, Adam, etc.
-from torch import nn  # All neural network modules
-from torch.utils.data import DataLoader  # Gives easier dataset management by creating mini batches etc.
-from tqdm import tqdm  # For a nice progress bar!
-
-# Simple CNN
-class CNN(nn.Module):
-    def __init__(self, in_channels=1, num_classes=10):
-        super(CNN, self).__init__()
-        self.conv1 = nn.Conv2d(
-            in_channels=in_channels,
-            out_channels=8,
-            kernel_size=(3, 3),
-            stride=(1, 1),
-            padding=(1, 1),
-        )
-        self.pool = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
-        self.conv2 = nn.Conv2d(
-            in_channels=8,
-            out_channels=16,
-            kernel_size=(3, 3),
-            stride=(1, 1),
-            padding=(1, 1),
-        )
-        self.fc1 = nn.Linear(16 * 7 * 7, num_classes)
-
-    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        x = self.pool(x)
-        x = F.relu(self.conv2(x))
-        x = self.pool(x)
-        x = x.reshape(x.shape[0], -1)
-        x = self.fc1(x)
-        return x
-
-
-# Set device
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-# Hyperparameters
-in_channels = 1
-num_classes = 10
-learning_rate = 0.001
-batch_size = 64
-num_epochs = 3
-
-# Load Data
-train_dataset = datasets.MNIST(root="dataset/", train=True, transform=transforms.ToTensor(), download=True)
-test_dataset = datasets.MNIST(root="dataset/", train=False, transform=transforms.ToTensor(), download=True)
-train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
-test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)
-
-# Initialize network
-model = CNN(in_channels=in_channels, num_classes=num_classes).to(device)
-
-# Loss and optimizer
-criterion = nn.CrossEntropyLoss()
-optimizer = optim.Adam(model.parameters(), lr=learning_rate)
-
-# Train Network
-for epoch in range(num_epochs):
-    for batch_idx, (data, targets) in enumerate(tqdm(train_loader)):
-        # Get data to cuda if possible
-        data = data.to(device=device)
-        targets = targets.to(device=device)
-
-        # forward
-        scores = model(data)
-        loss = criterion(scores, targets)
-
-        # backward
-        optimizer.zero_grad()
-        loss.backward()
-
-        # gradient descent or adam step
-        optimizer.step()
-
-# Check accuracy on training & test to see how well our model performs
-def check_accuracy(loader, model):
-    num_correct = 0
-    num_samples = 0
-    model.eval()
-
-    with torch.no_grad():
-        for x, y in loader:
-            x = x.to(device=device)
-            y = y.to(device=device)
-
-            scores = model(x)
-            _, predictions = scores.max(1)
-            num_correct += (predictions == y).sum()
-            num_samples += predictions.size(0)
-
-
-    model.train()
-    return num_correct / num_samples
-
-
-print(f"Accuracy on training set: {check_accuracy(train_loader, model)*100:.2f}")
-print(f"Accuracy on test set: {check_accuracy(test_loader, model)*100:.2f}")
\ No newline at end of file
diff --git a/ML/Pytorch/Basics/pytorch_simple_fullynet.py b/ML/Pytorch/Basics/pytorch_simple_fullynet.py
deleted file mode 100644
index fa0a9f4..0000000
--- a/ML/Pytorch/Basics/pytorch_simple_fullynet.py
+++ /dev/null
@@ -1,120 +0,0 @@
-"""
-A simple walkthrough of how to code a fully connected neural network
-using the PyTorch library. For demonstration we train it on the very
-common MNIST dataset of handwritten digits. In this code we go through
-how to create the network, initialize a loss function and optimizer,
-check accuracy, and more.
-
-Programmed by Aladdin Persson
-* 2020-04-08: Initial coding
-* 2021-03-24: Added more detailed comments; also removed the part of
-  check_accuracy that would only work specifically on MNIST.
-
-"""
-
-# Imports
-import torch
-import torchvision  # torch package for vision related things
-import torch.nn.functional as F  # Parameterless functions, like (some) activation functions
-import torchvision.datasets as datasets  # Standard datasets
-import torchvision.transforms as transforms  # Transformations we can perform on our dataset for augmentation
-from torch import optim  # For optimizers like SGD, Adam, etc.
-from torch import nn  # All neural network modules
-from torch.utils.data import DataLoader  # Gives easier dataset management by creating mini batches etc.
-from tqdm import tqdm  # For a nice progress bar!
-
-# Here we create our simple neural network. We subclass and inherit from
-# nn.Module; this is the most general way to create your networks and
-# allows for more flexibility. I encourage you to also check out nn.Sequential,
-# which would be easier to use in this scenario, but I wanted to show you
-# something that "always" works.
-class NN(nn.Module):
-    def __init__(self, input_size, num_classes):
-        super(NN, self).__init__()
-        # Our first linear layer takes input_size, in this case 784 nodes, to 50,
-        # and our second linear layer takes 50 to the num_classes we have, in
-        # this case 10.
-        self.fc1 = nn.Linear(input_size, 50)
-        self.fc2 = nn.Linear(50, num_classes)
-
-    def forward(self, x):
-        """
-        x here is the MNIST images, and we run them through fc1 and fc2 that we created above.
-        We also add a ReLU activation function in between, and for that (since it has no parameters)
-        I recommend using nn.functional (F).
-        """
-
-        x = F.relu(self.fc1(x))
-        x = self.fc2(x)
-        return x
-
-
-# Set device to cuda (GPU) if it's available, otherwise run on the CPU
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-# Hyperparameters of our neural network; these depend on the dataset, and
-# also on experimenting to see what works well (the learning rate, for example).
-input_size = 784
-num_classes = 10
-learning_rate = 0.001
-batch_size = 64
-num_epochs = 3
-
-# Load Training and Test data
-train_dataset = datasets.MNIST(root="dataset/", train=True, transform=transforms.ToTensor(), download=True)
-test_dataset = datasets.MNIST(root="dataset/", train=False, transform=transforms.ToTensor(), download=True)
-train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
-test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)
-
-# Initialize network
-model = NN(input_size=input_size, num_classes=num_classes).to(device)
-
-# Loss and optimizer
-criterion = nn.CrossEntropyLoss()
-optimizer = optim.Adam(model.parameters(), lr=learning_rate)
-
-# Train Network
-for epoch in range(num_epochs):
-    for batch_idx, (data, targets) in enumerate(tqdm(train_loader)):
-        # Get data to cuda if possible
-        data = data.to(device=device)
-        targets = targets.to(device=device)
-
-        # Get to correct shape
-        data = data.reshape(data.shape[0], -1)
-
-        # forward
-        scores = model(data)
-        loss = criterion(scores, targets)
-
-        # backward
-        optimizer.zero_grad()
-        loss.backward()
-
-        # gradient descent or adam step
-        optimizer.step()
-
-
-# Check accuracy on training & test to see how well our model performs
-def check_accuracy(loader, model):
-    num_correct = 0
-    num_samples = 0
-    model.eval()
-
-    with torch.no_grad():
-        for x, y in loader:
-            x = x.to(device=device)
-            y = y.to(device=device)
-            x = x.reshape(x.shape[0], -1)
-
-            scores = model(x)
-            _, predictions = scores.max(1)
-            num_correct += (predictions == y).sum()
-            num_samples += predictions.size(0)
-
-    model.train()
-    return num_correct / num_samples
-
-
-print(f"Accuracy on training set: {check_accuracy(train_loader, model)*100:.2f}")
-print(f"Accuracy on test set: {check_accuracy(test_loader, model)*100:.2f}")
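
A note on the nn.Sequential suggestion in the deleted pytorch_simple_fullynet.py: the comment above the NN class points readers at nn.Sequential as an easier way to express the same two-layer network. Below is a minimal sketch of that alternative, not taken from the removed file; it assumes the same 784 -> 50 -> 10 layer sizes, and the variable names are illustrative only.

    # Minimal sketch (assumption: mirrors the removed NN class's layer sizes).
    import torch
    from torch import nn

    model = nn.Sequential(
        nn.Linear(784, 50),  # fc1: flattened 28x28 MNIST image -> 50 hidden units
        nn.ReLU(),           # same parameterless activation applied via F.relu in NN.forward
        nn.Linear(50, 10),   # fc2: 50 hidden units -> 10 digit classes
    )

    x = torch.randn(64, 784)  # dummy batch shaped like flattened MNIST images
    print(model(x).shape)     # torch.Size([64, 10])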