model.py
import torch
from torch import nn


class Model(nn.Module):
    """LSTM + attention branch fused with a handcrafted-feature branch."""

    def __init__(self):
        super(Model, self).__init__()
        # Sequence branch: 17 input features per time step, 50 hidden units.
        self.lstm = nn.LSTM(batch_first=True, input_size=17, hidden_size=50, num_layers=1)
        self.attention = Attention3dBlock()
        # 1500 = 30 time steps * 50 LSTM output dims after flattening.
        self.linear = nn.Sequential(
            nn.Linear(in_features=1500, out_features=50),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.2),
            nn.Linear(in_features=50, out_features=10),
            nn.ReLU(inplace=True)
        )
        # Handcrafted-feature branch: 34 input features -> 10.
        self.handcrafted = nn.Sequential(
            nn.Linear(in_features=34, out_features=10),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.2)
        )
        # Fused head: 10 (sequence branch) + 10 (handcrafted branch) -> 1 output.
        self.output = nn.Sequential(
            nn.Linear(in_features=20, out_features=1)
        )

    def forward(self, inputs, handcrafted_feature):
        y = self.handcrafted(handcrafted_feature)
        x, (hn, cn) = self.lstm(inputs)
        x = self.attention(x)
        # Flatten (batch, 30, 50) -> (batch, 1500).
        x = x.reshape(-1, 1500)
        x = self.linear(x)
        # Concatenate the two branches along the feature dimension.
        out = torch.cat((x, y), dim=1)
        out = self.output(out)
        return out

class Attention3dBlock(nn.Module):
    """Soft attention over the time steps of the LSTM output sequence."""

    def __init__(self):
        super(Attention3dBlock, self).__init__()
        # Acts along the time axis (window size 30); softmax yields per-step weights.
        self.linear = nn.Sequential(
            nn.Linear(in_features=30, out_features=30),
            nn.Softmax(dim=2),
        )

    # inputs: batch size * window size (time steps) * LSTM output dims
    def forward(self, inputs):
        # (batch, 30, 50) -> (batch, 50, 30) so the linear layer acts on the time axis.
        x = inputs.permute(0, 2, 1)
        x = self.linear(x)
        # Back to (batch, 30, 50): attention weights per time step and feature.
        x_probs = x.permute(0, 2, 1)
        # Element-wise reweighting of the LSTM outputs.
        output = x_probs * inputs
        return output
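

# Illustrative usage sketch (not part of the original file). The tensor shapes
# are inferred from the layer sizes above: a window of 30 time steps with 17
# features per step for the LSTM branch, and 34 handcrafted features per sample.
if __name__ == "__main__":
    model = Model()
    batch_size = 4
    sequence_input = torch.randn(batch_size, 30, 17)   # (batch, window, per-step features)
    handcrafted_input = torch.randn(batch_size, 34)    # (batch, handcrafted features)
    prediction = model(sequence_input, handcrafted_input)
    print(prediction.shape)  # expected: torch.Size([4, 1])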