-
Notifications
You must be signed in to change notification settings - Fork 3
/
Copy pathmodel.py
58 lines (51 loc) · 1.62 KB
/
model.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
import torch.nn as nn
import torchvision.transforms as transforms
from collections import OrderedDict
# Preprocessing pipeline: scale every input image to 32x32 (the size the
# network's docstring assumes) and convert it to a float tensor.
_pipeline_steps = [
    transforms.Resize((32, 32)),
    transforms.ToTensor(),
]
transformation = transforms.Compose(_pipeline_steps)
class Model(nn.Module):
    """LeNet-style convolutional network for 1x32x32 single-channel images.

    Architecture (activations are ReLU, not the classic LeNet tanh):
        Input  - 1x32x32
        C1     - 6@28x28   (5x5 conv)
        ReLU
        S2     - 6@14x14   (2x2 max-pool, stride 2)
        C3     - 16@10x10  (5x5 conv)
        ReLU
        S4     - 16@5x5    (2x2 max-pool, stride 2)
        C5     - 120@1x1   (5x5 conv)
        ReLU
        F6     - 84        (linear)
        ReLU
        F7     - 10        (linear, LogSoftmax output)

    The forward pass returns log-probabilities over 10 classes
    (suitable for nn.NLLLoss).
    """

    def __init__(self):
        super().__init__()
        # Convolutional feature extractor. OrderedDict keys become the
        # submodule names in state_dict, so they must stay stable for
        # checkpoint compatibility.
        self.convnet = nn.Sequential(
            OrderedDict(
                [
                    ("c1", nn.Conv2d(1, 6, kernel_size=(5, 5))),
                    ("relu1", nn.ReLU()),
                    ("s2", nn.MaxPool2d(kernel_size=(2, 2), stride=2)),
                    ("c3", nn.Conv2d(6, 16, kernel_size=(5, 5))),
                    ("relu3", nn.ReLU()),
                    ("s4", nn.MaxPool2d(kernel_size=(2, 2), stride=2)),
                    ("c5", nn.Conv2d(16, 120, kernel_size=(5, 5))),
                    ("relu5", nn.ReLU()),
                ]
            )
        )
        # Fully-connected classifier head.
        self.fc = nn.Sequential(
            OrderedDict(
                [
                    ("f6", nn.Linear(120, 84)),
                    ("relu6", nn.ReLU()),
                    ("f7", nn.Linear(84, 10)),
                    # NOTE: key "sig7" is misleading (this is a LogSoftmax,
                    # not a sigmoid), but renaming it would break existing
                    # saved state_dicts, so it is kept as-is.
                    ("sig7", nn.LogSoftmax(dim=-1)),
                ]
            )
        )

    def forward(self, img):
        """Run the network on a batch of images.

        Args:
            img: tensor of shape (batch, 1, 32, 32).

        Returns:
            Tensor of shape (batch, 10) containing log-probabilities.
        """
        output = self.convnet(img)
        # C5 produces (batch, 120, 1, 1); flatten to (batch, 120) for fc.
        output = output.view(img.size(0), -1)
        output = self.fc(output)
        return output