LSTM_rnn.py · 73 lines (62 loc) · 2.51 KB
import torch
import numpy as np
import torch.nn as nn
from torch.autograd import Variable


class LSTM(nn.Module):
    def __init__(self,
                 input_size,
                 hidden_size,
                 output_size,
                 n_layers,
                 batch_size,
                 bidirectional,
                 use_cuda):
        super(LSTM, self).__init__()
        self.use_cuda = use_cuda
        self.n_layers = n_layers
        self.batch_size = batch_size
        self.hidden_size = hidden_size
        self.bidirectional = bidirectional
        if bidirectional:
            # A bidirectional LSTM doubles the feature dimension of its output,
            # so the linear readout takes 2 * hidden_size features.
            self.hidden_layer = nn.LSTM(input_size,
                                        hidden_size,
                                        n_layers,
                                        dropout=0.05,
                                        bidirectional=True,
                                        batch_first=True)
            self.output_layer = nn.Linear(2 * hidden_size, output_size)
        else:
            self.hidden_layer = nn.LSTM(input_size,
                                        hidden_size,
                                        n_layers,
                                        dropout=0.05,
                                        bidirectional=False,
                                        batch_first=True)
            self.output_layer = nn.Linear(hidden_size, output_size)

    def forward(self, inp):
        # Initialise the hidden and cell states with zeros.
        if self.use_cuda:
            hidden, cell = self.initHidden().cuda(), self.initHidden().cuda()
        else:
            hidden, cell = self.initHidden(), self.initHidden()
        outputs = []
        # Step through the sequence one timestep at a time, carrying the state forward.
        for i in range(inp.size(1)):
            rnn_out, hidden = self.hidden_layer(inp[:, i:i+1, :], (hidden, cell))
            # Squeeze only the sequence dimension so a batch of size 1 keeps its batch axis.
            outputs.append(self.output_layer(rnn_out.squeeze(1)))
            hidden, cell = hidden[0], hidden[1]
        return torch.stack(outputs, dim=1), hidden

    def pred(self, inp):
        # Convenience wrapper that returns only the per-timestep outputs.
        out = self.forward(inp)
        return out[0]

    def initHidden(self):
        # State shape is (num_layers * num_directions, batch, hidden_size).
        if self.bidirectional:
            out = Variable(torch.zeros(2 * self.n_layers,
                                       self.batch_size,
                                       self.hidden_size))
        else:
            out = Variable(torch.zeros(self.n_layers,
                                       self.batch_size,
                                       self.hidden_size))
        return out

    def batch(self, x_tr, y_tr):
        # Shuffle the training pairs along the batch dimension.
        index = np.random.permutation(x_tr.size()[0])
        return x_tr[index, :, :], y_tr[index, :, :]
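
A minimal usage sketch (not part of the original file): it builds the model with illustrative hyperparameters, shuffles a random batch with batch(), and runs pred() through an MSE loss. The shapes, values, and the use of MSELoss are assumptions for demonstration only.

if __name__ == "__main__":
    # Illustrative sizes only; batch_size must match the leading dimension of x.
    model = LSTM(input_size=3, hidden_size=16, output_size=1,
                 n_layers=1, batch_size=8, bidirectional=False, use_cuda=False)
    x = torch.randn(8, 20, 3)   # (batch, seq_len, features), matching batch_first=True
    y = torch.randn(8, 20, 1)   # one target value per timestep (assumed task)
    x, y = model.batch(x, y)    # shuffle pairs along the batch dimension
    preds = model.pred(x)       # (8, 20, 1): one prediction per timestep
    loss = nn.MSELoss()(preds, y)
    loss.backward()
    print(preds.shape, float(loss))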