train_ebb.py
"""Training script for the bokeh rendering model on the EBB dataset."""

import argparse

import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import lightning.pytorch as pl
from lightning.pytorch.callbacks import ModelCheckpoint

from net.model import bokeh
from utils.dataset_utils import BokehDataset_ebb as BokehDataset
from utils.loss_utils import ssim_loss
from utils.schedulers import LinearWarmupCosineAnnealingLR


class BokehModel(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.net = bokeh()
        self.loss_fn = nn.L1Loss()

    def forward(self, x, x1, x2):
        return self.net(x, x1, x2)

    def training_step(self, batch, batch_idx):
        # training_step defines the training loop; it is independent of forward.
        # clean_name is unpacked but not used during training.
        ([clean_name], degrad_patch, clean_patch, depth_patch, mask_patch) = batch
        restored = self.net(degrad_patch, depth_patch, mask_patch)
        # Combined objective: pixel-wise L1 loss plus an SSIM term.
        loss = self.loss_fn(restored, clean_patch) + ssim_loss(restored, clean_patch)
        # Logged to TensorBoard (if installed) by default.
        self.log("train_loss", loss)
        return loss
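
    # Note (an assumption, not verified against utils.loss_utils): ssim_loss is
    # taken to return a dissimilarity of the form 1 - SSIM(restored, clean_patch),
    # so minimizing it increases structural similarity alongside the L1 term.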

    def lr_scheduler_step(self, scheduler, metric):
        # Step the scheduler once per epoch using the current epoch index.
        scheduler.step(self.current_epoch)

    def configure_optimizers(self):
        optimizer = optim.AdamW(self.parameters(), lr=2e-4)
        # 5 warmup epochs, then cosine annealing out to epoch 150.
        scheduler = LinearWarmupCosineAnnealingLR(
            optimizer=optimizer, warmup_epochs=5, max_epochs=150)
        return [optimizer], [scheduler]


def main():
    parser = argparse.ArgumentParser()
    # Input parameters
    parser.add_argument('--cuda', type=int, default=0)
    parser.add_argument('--bokeh_path', type=str, default="./data/EBB/train/",
                        help='path to the EBB training images')
    opt = parser.parse_args()
    print("Options")
    print(opt)

    trainset = BokehDataset(opt)
    trainloader = DataLoader(trainset, batch_size=2, pin_memory=True, shuffle=True,
                             drop_last=True, num_workers=1)

    # Save a checkpoint after every epoch and keep all of them (save_top_k=-1).
    checkpoint_callback = ModelCheckpoint(dirpath='./ckptebb', every_n_epochs=1, save_top_k=-1)

    model = BokehModel()
    trainer = pl.Trainer(max_epochs=150, accelerator="gpu", devices=1,
                         callbacks=[checkpoint_callback])
    trainer.fit(model=model, train_dataloaders=trainloader)


if __name__ == '__main__':
    main()
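
# Example invocation (the data path below is just the script's default; adjust
# it to your own layout):
#   python train_ebb.py --bokeh_path ./data/EBB/train/
# Checkpoints are written to ./ckptebb once per epoch; a run can be resumed by
# passing ckpt_path to trainer.fit (a standard lightning.pytorch option).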