mirror of
https://github.com/aladdinpersson/Machine-Learning-Collection.git
synced 2026-02-20 13:50:41 +00:00
35 lines
1.1 KiB
Python
35 lines
1.1 KiB
Python
import torch
|
|
import torch.nn as nn
|
|
|
|
|
|
def gradient_penalty(critic, real, fake, device="cpu"):
    """Compute the WGAN-GP gradient penalty (Gulrajani et al. 2017).

    Samples per-example points on the line between `real` and `fake`
    images, evaluates the critic there, and penalizes the squared
    deviation of the per-sample input-gradient L2 norm from 1.

    Args:
        critic: callable mapping an image batch to per-sample scores.
        real:   real images, shape (BATCH_SIZE, C, H, W).
        fake:   generated images, same shape as `real`.
        device: torch device for the interpolation coefficients.

    Returns:
        Scalar tensor: mean((||grad||_2 - 1)^2) over the batch.
    """
    BATCH_SIZE, C, H, W = real.shape
    # One coefficient per sample; broadcasting over (C, H, W) replaces the
    # original .repeat(1, C, H, W) (no BATCH*C*H*W materialized tensor) and
    # creating directly on `device` skips a host->device transfer.
    alpha = torch.rand((BATCH_SIZE, 1, 1, 1), device=device)
    interpolated_images = real * alpha + fake * (1 - alpha)

    # If neither input carries grad (e.g. both detached), the interpolation
    # is a leaf tensor and autograd.grad would fail; enable grad explicitly.
    # No-op in normal training where `fake` comes from the generator.
    if not interpolated_images.requires_grad:
        interpolated_images.requires_grad_(True)

    # Critic scores at the interpolated points.
    mixed_scores = critic(interpolated_images)

    # d(scores)/d(images). create_graph=True keeps the penalty itself
    # differentiable so it can be backpropagated through during training.
    gradient = torch.autograd.grad(
        inputs=interpolated_images,
        outputs=mixed_scores,
        grad_outputs=torch.ones_like(mixed_scores),
        create_graph=True,
        retain_graph=True,
    )[0]

    # Per-sample L2 norm of the flattened gradient, penalized toward 1.
    gradient = gradient.view(gradient.shape[0], -1)
    gradient_norm = gradient.norm(2, dim=1)
    return torch.mean((gradient_norm - 1) ** 2)
|
|
|
|
|
|
def save_checkpoint(state, filename="celeba_wgan_gp.pth.tar"):
    """Serialize a training-state dict to disk.

    Args:
        state:    picklable object (typically a dict of state_dicts).
        filename: destination path; defaults to "celeba_wgan_gp.pth.tar".
    """
    print("=> Saving checkpoint")
    torch.save(obj=state, f=filename)
|
|
|
|
|
|
def load_checkpoint(checkpoint, gen, disc):
    """Restore generator and discriminator weights from a checkpoint.

    Args:
        checkpoint: mapping with state_dicts under the keys 'gen' and 'disc'.
        gen:        generator module to load 'gen' weights into (in place).
        disc:       discriminator module to load 'disc' weights into (in place).
    """
    print("=> Loading checkpoint")
    for key, model in (("gen", gen), ("disc", disc)):
        model.load_state_dict(checkpoint[key])