convNet.py
import numpy as np
import torch.nn as nn
import torch.nn.functional as F


class ConvNet(nn.Module):
    """Three-block convolutional network for 10-class classification of
    num_channels x 28 x 28 images. Each block applies conv -> batch norm ->
    max pool -> ReLU; the head is two fully connected layers with dropout
    followed by a 10-way output layer.
    """

    def __init__(self, params):
        super().__init__()
        self.num_channels = params.num_channels

        # Convolutional blocks: each conv keeps the spatial size (kernel 3, padding 1, stride 1)
        # and multiplies the channel count; the max pool in forward() halves the spatial size.
        self.conv1 = nn.Conv2d(in_channels=self.num_channels, out_channels=self.num_channels * 6,
                               kernel_size=3, padding=1, stride=1)
        self.bn1 = nn.BatchNorm2d(self.num_channels * 6)
        self.conv2 = nn.Conv2d(in_channels=self.num_channels * 6, out_channels=self.num_channels * 12,
                               kernel_size=3, padding=1, stride=1)
        self.bn2 = nn.BatchNorm2d(self.num_channels * 12)
        self.conv3 = nn.Conv2d(in_channels=self.num_channels * 12, out_channels=self.num_channels * 24,
                               kernel_size=3, padding=1, stride=1)
        self.bn3 = nn.BatchNorm2d(self.num_channels * 24)

        # Fully connected classifier head on the flattened num_channels*24 x 3 x 3 feature map
        self.fc1 = nn.Linear(in_features=self.num_channels * 24 * 3 * 3, out_features=120)
        self.fcbn1 = nn.BatchNorm1d(120)
        self.fc2 = nn.Linear(in_features=120, out_features=60)
        self.fcbn2 = nn.BatchNorm1d(60)
        self.out = nn.Linear(in_features=60, out_features=10)
        self.dropout_rate = params.dropout_rate
    def forward(self, s):
        # Input s: batch_size x num_channels x 28 x 28
        s = self.bn1(self.conv1(s))      # batch_size x num_channels*6 x 28 x 28
        s = F.relu(F.max_pool2d(s, 2))   # batch_size x num_channels*6 x 14 x 14
        s = self.bn2(self.conv2(s))      # batch_size x num_channels*12 x 14 x 14
        s = F.relu(F.max_pool2d(s, 2))   # batch_size x num_channels*12 x 7 x 7
        s = self.bn3(self.conv3(s))      # batch_size x num_channels*24 x 7 x 7
        s = F.relu(F.max_pool2d(s, 2))   # batch_size x num_channels*24 x 3 x 3

        # Flatten the feature map of each image
        s = s.reshape(-1, self.num_channels * 24 * 3 * 3)  # batch_size x num_channels*24*3*3

        # Apply two fully connected layers with dropout
        s = F.dropout(F.relu(self.fcbn1(self.fc1(s))),
                      p=self.dropout_rate, training=self.training)  # batch_size x 120
        s = F.dropout(F.relu(self.fcbn2(self.fc2(s))),
                      p=self.dropout_rate, training=self.training)  # batch_size x 60
        s = self.out(s)                  # batch_size x 10 (raw logits)
        # s = F.softmax(s, dim=1)
        return s


def loss_fn(outputs, labels):
    """Compute the cross-entropy loss given the raw logits and the true labels."""
    return F.cross_entropy(outputs, labels)


def accuracy(outputs, labels):
    """
    Compute the accuracy, given the outputs and labels for all images.

    Args:
        outputs: (np.ndarray) dimension batch_size x 10 - output of the model (raw logits;
            the argmax is the same whether or not a softmax has been applied)
        labels: (np.ndarray) dimension batch_size, where each element is a value in [0 .. 9]

    Returns: (float) accuracy in [0, 1]
    """
    outputs = np.argmax(outputs, axis=1)
    return np.sum(outputs == labels) / float(labels.size)


# Metric functions to evaluate, keyed by name
metrics = {
    'accuracy': accuracy,
}
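

# ---------------------------------------------------------------------------
# Minimal smoke test: an illustrative sketch, not part of the original module.
# It assumes 1-channel 28x28 inputs (e.g. MNIST-sized images); SimpleNamespace
# with hypothetical values stands in for whatever params object the training
# script would normally supply.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import torch
    from types import SimpleNamespace

    params = SimpleNamespace(num_channels=1, dropout_rate=0.5)  # hypothetical hyperparameters
    model = ConvNet(params)
    model.eval()  # put batch norm / dropout into inference mode for the check

    dummy_images = torch.randn(4, 1, 28, 28)   # batch of 4 random "images"
    dummy_labels = torch.randint(0, 10, (4,))  # random class labels in [0 .. 9]

    with torch.no_grad():
        logits = model(dummy_images)           # expected shape: (4, 10)

    print('output shape:', tuple(logits.shape))
    print('loss:', loss_fn(logits, dummy_labels).item())
    print('accuracy:', accuracy(logits.numpy(), dummy_labels.numpy()))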