# pytorch_lr_ratescheduler.py
# From: Machine-Learning-Collection/ML/Pytorch/Basics/
"""
Example code of how to use a learning rate scheduler simple, in this
case with a (very) small and simple Feedforward Network training on MNIST
dataset with a learning rate scheduler. In this case ReduceLROnPlateau
scheduler is used, but can easily be changed to any of the other schedulers
2022-12-19 16:13:53 +01:00
available. I think simply reducing LR by 1/10 or so, when loss plateaus is
a good default.
2021-01-30 21:49:15 +01:00
Programmed by Aladdin Persson <aladdin.persson at hotmail dot com>
* 2020-04-10 Initial programming
2022-12-19 16:13:53 +01:00
* 2022-12-19 Updated comments, made sure it works with latest PyTorch
2021-01-30 21:49:15 +01:00
"""
# Imports
import torch
import torch.nn as nn # All neural network modules, nn.Linear, nn.Conv2d, BatchNorm, Loss functions
import torch.optim as optim # For all Optimization algorithms, SGD, Adam, etc.
from torch.utils.data import (
DataLoader,
) # Gives easier dataset managment and creates mini batches
import torchvision.datasets as datasets # Has standard datasets we can import in a nice way
import torchvision.transforms as transforms # Transformations we can perform on our dataset
# Set device: use CUDA when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Hyperparameters
num_classes = 10
learning_rate = (
    0.1  # way too high learning rate, but we want to see the scheduler in action
)
batch_size = 128
num_epochs = 100

# Define a very simple model: one hidden-layer MLP on flattened 28x28 images.
model = nn.Sequential(nn.Linear(784, 50), nn.ReLU(), nn.Linear(50, 10)).to(device)

# Load Data (downloads MNIST to dataset/ on first run)
train_dataset = datasets.MNIST(
    root="dataset/", train=True, transform=transforms.ToTensor(), download=True
)
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)

# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# Define Scheduler: multiply the LR by `factor` (i.e. cut it by 10x) when the
# metric passed to scheduler.step() has not improved for `patience` epochs.
# NOTE(review): the `verbose` kwarg is deprecated and removed in newer PyTorch
# releases (2.2+); drop it there and log optimizer.param_groups[0]["lr"] instead.
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
    optimizer, factor=0.1, patience=10, verbose=True
)
# Train Network.
# Off-by-one fixed: range(1, num_epochs) ran only num_epochs - 1 epochs;
# range(1, num_epochs + 1) runs the full num_epochs while keeping 1-based logs.
for epoch in range(1, num_epochs + 1):
    losses = []
    for batch_idx, (data, targets) in enumerate(train_loader):
        # Flatten MNIST images from (N, 1, 28, 28) to (N, 784) for the MLP.
        data = data.reshape(data.shape[0], -1)
        # Get data to cuda if possible
        data = data.to(device=device)
        targets = targets.to(device=device)

        # forward
        scores = model(data)
        loss = criterion(scores, targets)
        losses.append(loss.item())

        # backward
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    mean_loss = sum(losses) / len(losses)
    mean_loss = round(mean_loss, 2)  # we should see difference in loss at 2 decimals

    # After each epoch do scheduler.step; ReduceLROnPlateau requires the metric
    # it monitors (here the epoch's mean training loss). A validation loss can
    # be used instead; stepping per batch would need a larger `patience`.
    scheduler.step(mean_loss)
    print(f"Average loss for epoch {epoch} was {mean_loss}")
2021-01-30 21:49:15 +01:00
# Check accuracy on training & test to see how good our model is
def check_accuracy(loader, model, device=None):
    """Compute and print the model's classification accuracy over ``loader``.

    Args:
        loader: iterable of ``(data, targets)`` batches (e.g. a DataLoader);
            ``data`` is flattened to ``(batch, -1)`` before the forward pass.
        model: network to evaluate; switched to eval mode for the pass and
            restored to train mode afterwards.
        device: device to run on. Defaults to the device of the model's
            parameters (backward compatible with the original, which read a
            module-level ``device`` global).

    Returns:
        Accuracy as a percentage (float); 0.0 for an empty loader.
    """
    if device is None:
        device = next(model.parameters()).device
    num_correct = 0
    num_samples = 0
    model.eval()
    with torch.no_grad():
        for x, y in loader:
            x = x.to(device=device)
            # Flatten images to (batch, features) to match the Linear input.
            x = x.reshape(x.shape[0], -1)
            y = y.to(device=device)
            scores = model(x)
            _, predictions = scores.max(1)
            num_correct += (predictions == y).sum()
            num_samples += predictions.size(0)
    # Guard against an empty loader instead of raising ZeroDivisionError.
    accuracy = float(num_correct) / float(num_samples) * 100 if num_samples else 0.0
    print(f"Got {num_correct} / {num_samples} with accuracy {accuracy:.2f}")
    model.train()
    return accuracy
# Report final accuracy on the training set after the LR-scheduled run.
check_accuracy(train_loader, model)