model.py
import torch.nn as nn
import torch.nn.functional as F
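

# VGG-style CNN: five conv blocks (conv -> BatchNorm -> LeakyReLU, twice per
# block, then 2x2 max pooling) followed by two fully connected layers with
# dropout. The fc1 input size (512 * 4 * 4) implies 128x128 inputs, halved by
# each of the five pooling stages (128 -> 64 -> 32 -> 16 -> 8 -> 4).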
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # Block 1
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=11, padding=5)
        self.bn1 = nn.BatchNorm2d(64)
        self.conv2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=7, padding=3)
        self.bn2 = nn.BatchNorm2d(64)
        # Block 2
        self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=7, padding=3)
        self.bn3 = nn.BatchNorm2d(128)
        self.conv4 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=5, padding=2)
        self.bn4 = nn.BatchNorm2d(128)
        # Block 3
        self.conv5 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=5, padding=2)
        self.bn5 = nn.BatchNorm2d(256)
        self.conv6 = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1)
        self.bn6 = nn.BatchNorm2d(256)
        # Block 4
        self.conv7 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, padding=1)
        self.bn7 = nn.BatchNorm2d(512)
        self.conv8 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1)
        self.bn8 = nn.BatchNorm2d(512)
        # Block 5
        self.conv9 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1)
        self.bn9 = nn.BatchNorm2d(512)
        self.conv10 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1)
        self.bn10 = nn.BatchNorm2d(512)
        # Dropout layer for the fully connected classifier
        self.dropout_fc = nn.Dropout(0.5)
        # Fully connected layers
        self.fc1 = nn.Linear(512 * 4 * 4, 1024)
        self.fc2 = nn.Linear(1024, 10)

    def forward(self, x):
        x = self.pre_process(x)
        # Block 1
        x = F.leaky_relu(self.bn1(self.conv1(x)))
        x = F.leaky_relu(self.bn2(self.conv2(x)))
        x = F.max_pool2d(x, kernel_size=2, stride=2)  # 128 -> 64
        # Block 2
        x = F.leaky_relu(self.bn3(self.conv3(x)))
        x = F.leaky_relu(self.bn4(self.conv4(x)))
        x = F.max_pool2d(x, kernel_size=2, stride=2)  # 64 -> 32
        # Block 3
        x = F.leaky_relu(self.bn5(self.conv5(x)))
        x = F.leaky_relu(self.bn6(self.conv6(x)))
        x = F.max_pool2d(x, kernel_size=2, stride=2)  # 32 -> 16
        # Block 4
        x = F.leaky_relu(self.bn7(self.conv7(x)))
        x = F.leaky_relu(self.bn8(self.conv8(x)))
        x = F.max_pool2d(x, kernel_size=2, stride=2)  # 16 -> 8
        # Block 5
        x = F.leaky_relu(self.bn9(self.conv9(x)))
        x = F.leaky_relu(self.bn10(self.conv10(x)))
        x = F.max_pool2d(x, kernel_size=2, stride=2)  # 8 -> 4
        # Flatten
        x = x.view(x.size(0), -1)  # batch_size x (512 * 4 * 4)
        # Fully connected layers
        x = F.relu(self.fc1(x))
        x = self.dropout_fc(x)  # Apply dropout
        x = self.fc2(x)
        return x
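
    # pre_process only casts inputs to float32 (useful when a DataLoader
    # yields uint8 image tensors); no other preprocessing happens here.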
    def pre_process(self, x):
        return x.float()
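

if __name__ == "__main__":
    # Minimal smoke test (a sketch, not part of the original file): the
    # pooling comments in forward() assume 128x128 inputs, so feed a dummy
    # batch of that shape and confirm the classifier emits 10 logits per image.
    import torch

    model = Net()
    model.eval()
    with torch.no_grad():
        dummy = torch.randn(2, 3, 128, 128)  # batch of 2 RGB 128x128 images (assumed size)
        logits = model(dummy)
    print(logits.shape)  # expected: torch.Size([2, 10])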