Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Gpu selection + Tensorboard + Faster MNIST #106

Open
wants to merge 9 commits into
base: master
Choose a base branch
from
30 changes: 30 additions & 0 deletions data/optimized_MNIST.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
import torch
from torchvision.datasets import MNIST

# Prefer the GPU when one is present; fall back to CPU so this module can be
# imported (and the dataset used) on CUDA-less machines instead of crashing
# at the first .to(device) call.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


class FastMNIST(MNIST):
    """MNIST variant that preprocesses the whole dataset once, up front.

    The stock ``MNIST`` dataset runs its transform pipeline per sample inside
    ``__getitem__``.  Here the scaling/normalisation is applied a single time
    in ``__init__`` and both tensors are moved to the target device in
    advance, so ``__getitem__`` reduces to a plain tensor index — much faster
    for a dataset small enough to fit in device memory.
    """

    def __init__(self, *args, device=device, **kwargs):
        """Download/load MNIST, then normalise and device-load the tensors.

        Args:
            *args, **kwargs: forwarded unchanged to ``torchvision.MNIST``
                (root, train, download, ...).
            device (torch.device, keyword-only): where the preprocessed
                tensors are stored.  Defaults to the module-level ``device``,
                preserving the original behaviour for existing callers.
        """
        super().__init__(*args, **kwargs)

        # Add a channel dimension (N, 1, 28, 28) and scale uint8 pixel
        # values from [0, 255] down to [0, 1].
        self.data = self.data.unsqueeze(1).float().div(255)

        # Normalize with the canonical MNIST mean and std (in-place ops to
        # avoid extra copies of the full dataset).
        self.data = self.data.sub_(0.1307).div_(0.3081)

        # Put both data and targets on the target device once, in advance.
        self.data, self.targets = self.data.to(device), self.targets.to(device)

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        return self.data[index], self.targets[index]
62 changes: 49 additions & 13 deletions implementations/cgan/cgan.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,12 +3,16 @@
import numpy as np
import math

import torchvision
import torchvision.transforms as transforms
from torchvision.utils import save_image

from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
from torch.utils.tensorboard import SummaryWriter

from data import optimized_MNIST

import torch.nn as nn
import torch.nn.functional as F
Expand All @@ -28,6 +32,7 @@
parser.add_argument("--img_size", type=int, default=32, help="size of each image dimension")
parser.add_argument("--channels", type=int, default=1, help="number of image channels")
parser.add_argument("--sample_interval", type=int, default=400, help="interval between image sampling")
parser.add_argument("--gpu", type=int, default=0, help="gpu index")
opt = parser.parse_args()
print(opt)

Expand Down Expand Up @@ -99,24 +104,41 @@ def forward(self, img, labels):
discriminator = Discriminator()

if cuda:
torch.cuda.set_device(opt.gpu)
print("using GPU " + str(opt.gpu) + ": " + torch.cuda.get_device_name(opt.gpu))
generator.cuda()
discriminator.cuda()
adversarial_loss.cuda()
else:
print("using CPU")


# Configure data loader
os.makedirs("../../data/mnist", exist_ok=True)
dataloader = torch.utils.data.DataLoader(
datasets.MNIST(
"../../data/mnist",
train=True,
download=True,
transform=transforms.Compose(
[transforms.Resize(opt.img_size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5])]
if opt.img_size == 28:
print("using optimized MNIST dataset")
dataloader = torch.utils.data.DataLoader(
optimized_MNIST.FastMNIST(
"../../data/mnist",
train=True,
download=True,
),
),
batch_size=opt.batch_size,
shuffle=True,
)
batch_size=opt.batch_size,
shuffle=True,
)
else:
os.makedirs("../../data/mnist", exist_ok=True)
dataloader = torch.utils.data.DataLoader(
datasets.MNIST(
"../../data/mnist",
train=True,
download=True,
transform=transforms.Compose(
[transforms.Resize(opt.img_size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5])]
),
),
batch_size=opt.batch_size,
shuffle=True,
)

# Optimizers
optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
Expand All @@ -125,6 +147,11 @@ def forward(self, img, labels):
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor

# TensorBoard writer for training curves; reuse its auto-generated run name
# so sampled images land in a matching results/ subfolder.
writer = SummaryWriter()
save_folder = 'results/'+list(writer.all_writers.keys())[0]
os.makedirs(save_folder)
# `!=`, not `is not`: identity comparison with an int literal depends on
# interning and raises a SyntaxWarning on CPython >= 3.8.
if len(os.listdir(save_folder)) != 0:
    raise Exception('Directory is not empty!')

def sample_image(n_row, batches_done):
"""Saves a grid of generated digits ranging from 0 to n_classes"""
Expand All @@ -134,14 +161,16 @@ def sample_image(n_row, batches_done):
labels = np.array([num for _ in range(n_row) for num in range(n_row)])
labels = Variable(LongTensor(labels))
gen_imgs = generator(z, labels)
save_image(gen_imgs.data, "images/%d.png" % batches_done, nrow=n_row, normalize=True)
save_image(gen_imgs.data, save_folder+"/%d.png" % batches_done, nrow=n_row, normalize=True)


# ----------
# Training
# ----------

for epoch in range(opt.n_epochs):
g_loss_epoch = 0
d_loss_epoch = 0
for i, (imgs, labels) in enumerate(dataloader):

batch_size = imgs.shape[0]
Expand Down Expand Up @@ -194,6 +223,9 @@ def sample_image(n_row, batches_done):
d_loss.backward()
optimizer_D.step()

g_loss_epoch += g_loss.item() / len(dataloader)
d_loss_epoch += d_loss.item() / len(dataloader)

print(
"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]"
% (epoch, opt.n_epochs, i, len(dataloader), d_loss.item(), g_loss.item())
Expand All @@ -202,3 +234,7 @@ def sample_image(n_row, batches_done):
batches_done = epoch * len(dataloader) + i
if batches_done % opt.sample_interval == 0:
sample_image(n_row=10, batches_done=batches_done)
writer.add_scalar('Loss/generator', g_loss_epoch, epoch)
writer.add_scalar('Loss/discriminator', d_loss_epoch, epoch)
img_grid = torchvision.utils.make_grid(gen_imgs.data[:25])
writer.add_image('Sample generated images', img_grid, epoch)
64 changes: 51 additions & 13 deletions implementations/gan/gan.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,12 +3,16 @@
import numpy as np
import math

import torchvision
import torchvision.transforms as transforms
from torchvision.utils import save_image

from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
from torch.utils.tensorboard import SummaryWriter

from data import optimized_MNIST

import torch.nn as nn
import torch.nn.functional as F
Expand All @@ -27,6 +31,7 @@
parser.add_argument("--img_size", type=int, default=28, help="size of each image dimension")
parser.add_argument("--channels", type=int, default=1, help="number of image channels")
parser.add_argument("--sample_interval", type=int, default=400, help="interval betwen image samples")
parser.add_argument("--gpu", type=int, default=0, help="gpu index")
opt = parser.parse_args()
print(opt)

Expand Down Expand Up @@ -89,36 +94,62 @@ def forward(self, img):
discriminator = Discriminator()

if cuda:
torch.cuda.set_device(opt.gpu)
print("using GPU " + str(opt.gpu) + ": " + torch.cuda.get_device_name(opt.gpu))
generator.cuda()
discriminator.cuda()
adversarial_loss.cuda()
else:
print("using CPU")

# Configure data loader
os.makedirs("../../data/mnist", exist_ok=True)
dataloader = torch.utils.data.DataLoader(
datasets.MNIST(
"../../data/mnist",
train=True,
download=True,
transform=transforms.Compose(
[transforms.Resize(opt.img_size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5])]
if opt.img_size == 28:
print("using optimized MNIST dataset")
dataloader = torch.utils.data.DataLoader(
optimized_MNIST.FastMNIST(
"../../data/mnist",
train=True,
download=True,
),
batch_size=opt.batch_size,
shuffle=True,
)
else:
os.makedirs("../../data/mnist", exist_ok=True)
dataloader = torch.utils.data.DataLoader(
datasets.MNIST(
"../../data/mnist",
train=True,
download=True,
transform=transforms.Compose(
[transforms.Resize(opt.img_size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5])]
),
),
),
batch_size=opt.batch_size,
shuffle=True,
)
batch_size=opt.batch_size,
shuffle=True,
)


# Optimizers
optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))

Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor

# TensorBoard writer for training curves; reuse its auto-generated run name
# so sampled images land in a matching results/ subfolder.
writer = SummaryWriter()
save_folder = 'results/'+list(writer.all_writers.keys())[0]
os.makedirs(save_folder)
# `!=`, not `is not`: identity comparison with an int literal depends on
# interning and raises a SyntaxWarning on CPython >= 3.8.
if len(os.listdir(save_folder)) != 0:
    raise Exception('Directory is not empty!')


# ----------
# Training
# ----------

for epoch in range(opt.n_epochs):
g_loss_epoch = 0
d_loss_epoch = 0
for i, (imgs, _) in enumerate(dataloader):

# Adversarial ground truths
Expand Down Expand Up @@ -160,11 +191,18 @@ def forward(self, img):
d_loss.backward()
optimizer_D.step()

g_loss_epoch += g_loss.item() / len(dataloader)
d_loss_epoch += d_loss.item() / len(dataloader)
print(
"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]"
% (epoch, opt.n_epochs, i, len(dataloader), d_loss.item(), g_loss.item())
)

batches_done = epoch * len(dataloader) + i
if batches_done % opt.sample_interval == 0:
save_image(gen_imgs.data[:25], "images/%d.png" % batches_done, nrow=5, normalize=True)
save_image(gen_imgs.data[:25], save_folder+"/%d.png" % batches_done, nrow=5, normalize=True)

writer.add_scalar('Loss/generator', g_loss_epoch, epoch)
writer.add_scalar('Loss/discriminator', d_loss_epoch, epoch)
img_grid = torchvision.utils.make_grid(gen_imgs.data[:25])
writer.add_image('Sample generated images', img_grid, epoch)
29 changes: 26 additions & 3 deletions implementations/pix2pix/pix2pix.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,15 +7,18 @@
import datetime
import sys

import torchvision
import torchvision.transforms as transforms
from torchvision.utils import save_image

from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
from torch.utils.tensorboard import SummaryWriter

from models import *
from datasets import *

from implementations.pix2pix.models import *
from implementations.pix2pix.datasets import *

import torch.nn as nn
import torch.nn.functional as F
Expand All @@ -38,6 +41,7 @@
"--sample_interval", type=int, default=500, help="interval between sampling of images from generators"
)
parser.add_argument("--checkpoint_interval", type=int, default=-1, help="interval between model checkpoints")
parser.add_argument("--gpu", type=int, default=0, help="gpu index")
opt = parser.parse_args()
print(opt)

Expand All @@ -61,10 +65,14 @@
discriminator = Discriminator()

if cuda:
torch.cuda.set_device(opt.gpu)
print("using GPU " + str(opt.gpu) + ": " + torch.cuda.get_device_name(opt.gpu))
generator = generator.cuda()
discriminator = discriminator.cuda()
criterion_GAN.cuda()
criterion_pixelwise.cuda()
else:
print("using CPU")

if opt.epoch != 0:
# Load pretrained models
Expand Down Expand Up @@ -103,6 +111,11 @@
# Tensor type
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor

# TensorBoard writer for training curves; reuse its auto-generated run name
# so sampled images land in a matching results/ subfolder.
writer = SummaryWriter()
save_folder = 'results/'+list(writer.all_writers.keys())[0]
os.makedirs(save_folder)
# `!=`, not `is not`: identity comparison with an int literal depends on
# interning and raises a SyntaxWarning on CPython >= 3.8.
if len(os.listdir(save_folder)) != 0:
    raise Exception('Directory is not empty!')

def sample_images(batches_done):
"""Saves a generated sample from the validation set"""
Expand All @@ -111,7 +124,9 @@ def sample_images(batches_done):
real_B = Variable(imgs["A"].type(Tensor))
fake_B = generator(real_A)
img_sample = torch.cat((real_A.data, fake_B.data, real_B.data), -2)
save_image(img_sample, "images/%s/%s.png" % (opt.dataset_name, batches_done), nrow=5, normalize=True)
save_image(img_sample, "%s/%s.png" % (save_folder, batches_done), nrow=5, normalize=True)
img_grid = torchvision.utils.make_grid(img_sample.data)
writer.add_image('Sample generated images (step = batches_done)', img_grid, batches_done)


# ----------
Expand All @@ -121,6 +136,8 @@ def sample_images(batches_done):
prev_time = time.time()

for epoch in range(opt.epoch, opt.n_epochs):
loss_G_epoch = 0
loss_D_epoch = 0
for i, batch in enumerate(dataloader):

# Model inputs
Expand Down Expand Up @@ -181,6 +198,9 @@ def sample_images(batches_done):
time_left = datetime.timedelta(seconds=batches_left * (time.time() - prev_time))
prev_time = time.time()

loss_G_epoch += loss_G.item() / len(dataloader)
loss_D_epoch += loss_D.item() / len(dataloader)

# Print log
sys.stdout.write(
"\r[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f, pixel: %f, adv: %f] ETA: %s"
Expand All @@ -205,3 +225,6 @@ def sample_images(batches_done):
# Save model checkpoints
torch.save(generator.state_dict(), "saved_models/%s/generator_%d.pth" % (opt.dataset_name, epoch))
torch.save(discriminator.state_dict(), "saved_models/%s/discriminator_%d.pth" % (opt.dataset_name, epoch))

writer.add_scalar('Loss/generator', loss_G_epoch, epoch)
writer.add_scalar('Loss/discriminator', loss_D_epoch, epoch)
9 changes: 5 additions & 4 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -1,8 +1,9 @@
torch>=1.4.0  # torchvision~=0.5.0 (below) requires torch 1.4
torchvision
torchvision~=0.5.0
matplotlib
numpy
scipy
pillow
numpy~=1.18.1
scipy~=1.4.1
pillow~=7.0.0
urllib3
scikit-image
tensorboard~=2.1.0