dcgan_models.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from spectral import SpectralNorm  # spectral-norm wrapper, expected from the repo's local spectral.py
import numpy as np

class Generator_DC(nn.Module):
    """DCGAN generator built from spectrally normalized transposed convolutions."""

    def __init__(self, batch_size=64, image_size=64, z_dim=100, conv_dim=64, rgb_channel=3):
        super(Generator_DC, self).__init__()
        self.imsize = image_size
        layer1 = []
        layer2 = []
        layer3 = []
        last = []

        # Number of intermediate upsampling blocks; for image_size=64 this gives
        # repeat_num=3 and an initial channel multiplier of 8.
        repeat_num = int(np.log2(self.imsize)) - 3
        mult = 2 ** repeat_num  # 8

        # Project the latent vector to a (conv_dim * mult) x 4 x 4 feature map.
        layer1.append(SpectralNorm(nn.ConvTranspose2d(z_dim, conv_dim * mult, 4)))
        layer1.append(nn.BatchNorm2d(conv_dim * mult))
        layer1.append(nn.ReLU())

        curr_dim = conv_dim * mult

        # Each following block doubles the spatial resolution and halves the channels.
        layer2.append(SpectralNorm(nn.ConvTranspose2d(curr_dim, int(curr_dim / 2), 4, 2, 1)))
        layer2.append(nn.BatchNorm2d(int(curr_dim / 2)))
        layer2.append(nn.ReLU())

        curr_dim = int(curr_dim / 2)

        layer3.append(SpectralNorm(nn.ConvTranspose2d(curr_dim, int(curr_dim / 2), 4, 2, 1)))
        layer3.append(nn.BatchNorm2d(int(curr_dim / 2)))
        layer3.append(nn.ReLU())

        if self.imsize == 64:
            # One extra upsampling block is needed to reach a 64 x 64 output.
            layer4 = []
            curr_dim = int(curr_dim / 2)
            layer4.append(SpectralNorm(nn.ConvTranspose2d(curr_dim, int(curr_dim / 2), 4, 2, 1)))
            layer4.append(nn.BatchNorm2d(int(curr_dim / 2)))
            layer4.append(nn.ReLU())
            self.l4 = nn.Sequential(*layer4)
            curr_dim = int(curr_dim / 2)

        self.l1 = nn.Sequential(*layer1)
        self.l2 = nn.Sequential(*layer2)
        self.l3 = nn.Sequential(*layer3)

        # Final upsampling to an RGB image, squashed to [-1, 1] by tanh.
        last.append(nn.ConvTranspose2d(curr_dim, rgb_channel, 4, 2, 1))
        last.append(nn.Tanh())
        self.last = nn.Sequential(*last)
    def forward(self, z):
        # Reshape the latent batch into 1 x 1 spatial maps: batch x z_dim x 1 x 1.
        z = z.view(z.size(0), z.size(1), 1, 1)
        out = self.l1(z)      # batch x 512 x 4 x 4   (for the default conv_dim=64)
        out = self.l2(out)    # batch x 256 x 8 x 8
        out = self.l3(out)    # batch x 128 x 16 x 16
        out = self.l4(out)    # batch x 64 x 32 x 32  (l4 is only defined when image_size == 64)
        out = self.last(out)  # batch x 3 x 64 x 64
        # The two trailing None values keep a three-element return signature.
        return out, None, None

class Discriminator_DC(nn.Module):
    """DCGAN discriminator built from spectrally normalized convolutions."""

    def __init__(self, batch_size=64, image_size=64, conv_dim=64, rgb_channel=3):
        super(Discriminator_DC, self).__init__()
        self.imsize = image_size
        layer1 = []
        layer2 = []
        layer3 = []
        last = []

        # Each block halves the spatial resolution and doubles the channels.
        layer1.append(SpectralNorm(nn.Conv2d(rgb_channel, conv_dim, 4, 2, 1)))
        layer1.append(nn.LeakyReLU(0.1))
        curr_dim = conv_dim

        layer2.append(SpectralNorm(nn.Conv2d(curr_dim, curr_dim * 2, 4, 2, 1)))
        layer2.append(nn.LeakyReLU(0.1))
        curr_dim = curr_dim * 2

        layer3.append(SpectralNorm(nn.Conv2d(curr_dim, curr_dim * 2, 4, 2, 1)))
        layer3.append(nn.LeakyReLU(0.1))
        curr_dim = curr_dim * 2

        if self.imsize == 64:
            # One extra downsampling block is needed for 64 x 64 inputs.
            layer4 = []
            layer4.append(SpectralNorm(nn.Conv2d(curr_dim, curr_dim * 2, 4, 2, 1)))
            layer4.append(nn.LeakyReLU(0.1))
            self.l4 = nn.Sequential(*layer4)
            curr_dim = curr_dim * 2

        self.l1 = nn.Sequential(*layer1)
        self.l2 = nn.Sequential(*layer2)
        self.l3 = nn.Sequential(*layer3)

        # Collapse the final 4 x 4 feature map to a single real/fake score per image.
        last.append(nn.Conv2d(curr_dim, 1, 4))
        self.last = nn.Sequential(*last)
    def forward(self, x):
        # x: batch x 3 x 64 x 64 input images.
        out = self.l1(x)      # batch x 64 x 32 x 32   (for the default conv_dim=64)
        out = self.l2(out)    # batch x 128 x 16 x 16
        out = self.l3(out)    # batch x 256 x 8 x 8
        out = self.l4(out)    # batch x 512 x 4 x 4    (l4 is only defined when image_size == 64)
        out = self.last(out)  # batch x 1 x 1 x 1
        # Squeeze to a vector of per-image scores; the two trailing None values
        # mirror Generator_DC's three-element return signature.
        return out.squeeze(), None, None
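
# A minimal smoke test (an illustrative sketch, not part of the original training
# code): build both networks at the default 64 x 64 resolution, decode random
# latent vectors with the generator, and score the result with the discriminator.
# It assumes the repo's spectral.py (providing SpectralNorm) is importable.
if __name__ == "__main__":
    G = Generator_DC(image_size=64, z_dim=100, conv_dim=64)
    D = Discriminator_DC(image_size=64, conv_dim=64)

    z = torch.randn(8, 100)             # batch of 8 latent vectors
    fake_images, _, _ = G(z)            # 8 x 3 x 64 x 64, values in [-1, 1]
    fake_scores, _, _ = D(fake_images)  # 8 per-image real/fake scores

    print(fake_images.shape, fake_scores.shape)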