model.py
import torch
import torch.nn as nn
from torchvision import models
import timm


class COVNet(nn.Module):
    """ResNet-152 backbone: per-slice features are pooled, then max-pooled over slices."""

    def __init__(self, n_classes):
        super().__init__()
        model = models.resnet152(pretrained=True)
        # Keep the convolutional trunk; drop the final avgpool and fc layers.
        layer_list = list(model.children())[:-2]
        self.pretrained_model = nn.Sequential(*layer_list)
        self.pooling_layer = nn.AdaptiveAvgPool2d(1)
        self.classifer = nn.Linear(2048, n_classes)
        self.n_classes = n_classes

    def forward(self, x):
        # x: (1, num_slices, C, H, W) -> treat the slice dimension as the batch.
        x = torch.squeeze(x, dim=0)
        features = self.pretrained_model(x)
        pooled_features = self.pooling_layer(features)
        pooled_features = pooled_features.view(pooled_features.size(0), -1)
        # Max over slices yields a single study-level feature vector.
        flattened_features = torch.max(pooled_features, 0, keepdim=True)[0]
        output = self.classifer(flattened_features)
        return output
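

# Hypothetical helper (not in the original file): a minimal sketch of the input
# contract shared by the models in this module. Each forward pass takes one scan
# at a time, shaped (1, num_slices, 3, H, W); the batch dimension is squeezed
# away, each slice is encoded independently, and the per-slice features are
# max-pooled into a single (1, n_classes) prediction. Shapes are illustrative.
def _example_forward(model: nn.Module, num_slices: int = 16) -> torch.Tensor:
    """Run a dummy (1, num_slices, 3, 224, 224) scan through `model`."""
    dummy_scan = torch.randn(1, num_slices, 3, 224, 224)
    with torch.no_grad():
        return model(dummy_scan)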


# COVNetL: ResNet-152 variant that keeps the backbone's own avgpool layer
# instead of adding a separate adaptive pooling layer.
class COVNetL(nn.Module):
    def __init__(self, n_classes):
        super().__init__()
        model = models.resnet152(pretrained=True)
        # Drop only the final fc layer; the backbone's avgpool is retained.
        layer_list = list(model.children())[:-1]
        self.pretrained_model = nn.Sequential(*layer_list)
        self.classifer = nn.Linear(2048, n_classes)
        self.n_classes = n_classes

    def forward(self, x):
        x = torch.squeeze(x, dim=0)
        # The backbone already pools spatially, so its output is (N, 2048, 1, 1).
        pooled_features = self.pretrained_model(x)
        pooled_features = pooled_features.view(pooled_features.size(0), -1)
        flattened_features = torch.max(pooled_features, 0, keepdim=True)[0]
        output = self.classifer(flattened_features)
        return output


# COVNetT: Swin Transformer backbone (swin_base_patch4_window7_224).
class COVNetT(nn.Module):
    def __init__(self, n_classes):
        super().__init__()
        model = timm.create_model('swin_base_patch4_window7_224', pretrained=True)
        # Drop the last two children (final norm and classification head).
        layer_list = list(model.children())[:-2]
        self.pretrained_model = nn.Sequential(*layer_list)
        self.pooling_layer = nn.AdaptiveAvgPool1d(1)
        self.head = nn.Linear(1024, n_classes)
        self.n_classes = n_classes

    @staticmethod
    def reshape_transform(tensor, height=7, width=7):
        # Reshape the (N, L, C) token sequence back into a (N, H, W, C) feature
        # map, e.g. for CAM-style visualization.
        result = tensor.reshape(tensor.size(0), height, width, tensor.size(2))
        return result

    def forward(self, x):
        x = torch.squeeze(x, dim=0)
        # Backbone outputs a token sequence of shape (N, L, 1024).
        features = self.pretrained_model(x)
        # Transpose to (N, 1024, L) so AdaptiveAvgPool1d pools over tokens.
        features = torch.transpose(features, 2, 1)
        pooled_features = self.pooling_layer(features)
        pooled_features = pooled_features.view(pooled_features.size(0), -1)
        flattened_features = torch.max(pooled_features, 0, keepdim=True)[0]
        output = self.head(flattened_features)
        return output


# COVNetBiT: Big Transfer (BiT) backbone.
# resnetv2_101x1_bitm_in21k fits in memory; resnetv2_50x3_bitm_in21k runs out of memory.
class COVNetBiT(nn.Module):
    def __init__(self, n_classes):
        super().__init__()
        model = timm.create_model('resnetv2_101x1_bitm_in21k', pretrained=True)
        layer_list = list(model.children())[:-2]
        self.pretrained_model = nn.Sequential(*layer_list)
        self.pooling_layer = nn.AdaptiveAvgPool2d(1)
        self.classifer = nn.Linear(2048, n_classes)
        self.n_classes = n_classes

    def forward(self, x):
        x = torch.squeeze(x, dim=0)
        features = self.pretrained_model(x)
        pooled_features = self.pooling_layer(features)
        pooled_features = pooled_features.view(pooled_features.size(0), -1)
        flattened_features = torch.max(pooled_features, 0, keepdim=True)[0]
        output = self.classifer(flattened_features)
        return output


# COVNetEffi: EfficientNetV2 backbone (tf_efficientnetv2_m_in21k), 1280 output channels.
class COVNetEffi(nn.Module):
    def __init__(self, n_classes):
        super().__init__()
        model = timm.create_model('tf_efficientnetv2_m_in21k', pretrained=True)
        layer_list = list(model.children())[:-2]
        self.pretrained_model = nn.Sequential(*layer_list)
        self.pooling_layer = nn.AdaptiveAvgPool2d(1)
        self.classifer = nn.Linear(1280, n_classes)
        self.n_classes = n_classes

    def forward(self, x):
        x = torch.squeeze(x, dim=0)
        features = self.pretrained_model(x)
        pooled_features = self.pooling_layer(features)
        pooled_features = pooled_features.view(pooled_features.size(0), -1)
        flattened_features = torch.max(pooled_features, 0, keepdim=True)[0]
        output = self.classifer(flattened_features)
        return output
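

# Minimal smoke test (an assumption, not part of the original file): builds each
# variant with a hypothetical 3-class setup and checks that a dummy scan maps to
# a (1, 3) logit tensor via the `_example_forward` helper above. Downloading the
# pretrained weights requires network access, and the timm-based variants assume
# the timm version this module was written against.
if __name__ == '__main__':
    for model_cls in (COVNet, COVNetL, COVNetT, COVNetBiT, COVNetEffi):
        net = model_cls(n_classes=3).eval()
        logits = _example_forward(net)
        print(model_cls.__name__, tuple(logits.shape))  # expected: (1, 3)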