-
Notifications
You must be signed in to change notification settings - Fork 0
/
models.py
266 lines (224 loc) · 9.33 KB
/
models.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
import nn
class PerceptronModel(object):
    def __init__(self, dimensions):
        """
        Initialize a new Perceptron instance.

        A perceptron classifies data points as either belonging to a particular
        class (+1) or not (-1). `dimensions` is the dimensionality of the data.
        For example, dimensions=2 would mean that the perceptron must classify
        2D points.
        """
        # Weight vector, stored as a (1 x dimensions) parameter node.
        self.w = nn.Parameter(1, dimensions)

    def get_weights(self):
        """
        Return a Parameter instance with the current weights of the perceptron.
        """
        return self.w

    def run(self, x):
        """
        Calculate the score assigned by the perceptron to a data point x.

        Inputs:
            x: a node with shape (1 x dimensions)
        Returns: a node containing a single number (the score)
        """
        return nn.dot_product(x, self.w)

    def get_prediction(self, x):
        """
        Calculate the predicted class for a single data point `x`.

        Returns: 1 or -1
        """
        score = self.run(x).item()
        return 1 if score >= 0 else -1

    def train(self, dataset):
        """
        Train the perceptron until it completes a full pass over the
        dataset with no misclassifications.
        """
        step = 1  # classic perceptron: unit learning rate
        while True:
            mistakes = 0
            for features, label in dataset.iterate_once(1):
                target = label.item()
                if self.get_prediction(features) * target < 0:
                    # Misclassified: nudge the weights by target * features.
                    self.w.update(features, target * step)
                    mistakes += 1
            if mistakes == 0:
                break
class RegressionModel(object):
    """
    A neural network model for approximating a function that maps from real
    numbers to real numbers. The network should be sufficiently large to be able
    to approximate sin(x) on the interval [-2pi, 2pi] to reasonable precision.
    """
    def __init__(self):
        # Two hidden layers (60 and 30 units) plus a scalar output layer,
        # each with its own bias row vector.
        self.w0 = nn.Parameter(1, 60)
        self.w1 = nn.Parameter(60, 30)
        self.w2 = nn.Parameter(30, 1)
        self.b0 = nn.Parameter(1, 60)
        self.b1 = nn.Parameter(1, 30)
        self.b2 = nn.Parameter(1, 1)

    def run(self, x):
        """
        Run the model for a batch of examples.

        Inputs:
            x: a node with shape (batch_size x 1)
        Returns:
            A node with shape (batch_size x 1) containing predicted y-values
        """
        hidden1 = nn.relu(nn.add_bias(nn.matmul(x, self.w0), self.b0))
        hidden2 = nn.relu(nn.add_bias(nn.matmul(hidden1, self.w1), self.b1))
        # Output layer is linear (no ReLU) so predictions can be negative.
        return nn.add_bias(nn.matmul(hidden2, self.w2), self.b2)

    def get_loss(self, x, y):
        """
        Compute the loss for a batch of examples.

        Inputs:
            x: a node with shape (batch_size x 1)
            y: a node with shape (batch_size x 1), containing the true y-values
                to be used for training
        Returns: a loss node
        """
        return nn.square_loss(self.run(x), y)

    def train(self, dataset):
        """
        Train the model until a batch's loss drops below 0.005.
        """
        params = [self.w0, self.w1, self.w2, self.b0, self.b1, self.b2]
        # Negative multiplier: update() adds grad * multiplier, so this
        # steps against the gradient (descent).
        rate = -0.01
        while True:
            for x, y in dataset.iterate_once(20):
                loss = self.get_loss(x, y)
                if loss.item() < 0.005:
                    return
                for param, grad in zip(params, nn.gradients(loss, params)):
                    param.update(grad, rate)
class DigitClassificationModel(object):
    """
    A model for handwritten digit classification using the MNIST dataset.
    Each handwritten digit is a 28x28 pixel grayscale image, which is flattened
    into a 784-dimensional vector for the purposes of this model. Each entry in
    the vector is a floating point number between 0 and 1.
    The goal is to sort each digit into one of 10 classes (number 0 through 9).
    (See RegressionModel for more information about the APIs of different
    methods here. We recommend that you implement the RegressionModel before
    working on this part of the project.)
    """
    def __init__(self):
        # 784 -> 256 -> 32 -> 10 fully-connected network with per-layer biases.
        self.w0 = nn.Parameter(784, 256)
        self.w1 = nn.Parameter(256, 32)
        self.w2 = nn.Parameter(32, 10)
        self.b0 = nn.Parameter(1, 256)
        self.b1 = nn.Parameter(1, 32)
        self.b2 = nn.Parameter(1, 10)

    def run(self, x):
        """
        Run the model for a batch of examples.

        Your model should predict a node with shape (batch_size x 10),
        containing scores. Higher scores correspond to greater probability of
        the image belonging to a particular class.

        Inputs:
            x: a node with shape (batch_size x 784)
        Output:
            A node with shape (batch_size x 10) containing predicted scores
                (also called logits)
        """
        hidden1 = nn.relu(nn.add_bias(nn.matmul(x, self.w0), self.b0))
        hidden2 = nn.relu(nn.add_bias(nn.matmul(hidden1, self.w1), self.b1))
        # Final layer stays linear: softmax_loss expects raw logits.
        return nn.add_bias(nn.matmul(hidden2, self.w2), self.b2)

    def get_loss(self, x, y):
        """
        Compute the loss for a batch of examples.

        The correct labels `y` are represented as a node with shape
        (batch_size x 10). Each row is a one-hot vector encoding the correct
        digit class (0-9).

        Inputs:
            x: a node with shape (batch_size x 784)
            y: a node with shape (batch_size x 10)
        Returns: a loss node
        """
        return nn.softmax_loss(self.run(x), y)

    def train(self, dataset):
        """
        Train the model until validation accuracy exceeds 98%.
        """
        params = [self.w0, self.w1, self.w2, self.b0, self.b1, self.b2]
        # Negative multiplier turns update()'s additive step into descent.
        rate = -0.1
        while True:
            for x, y in dataset.iterate_once(60):
                loss = self.get_loss(x, y)
                for param, grad in zip(params, nn.gradients(loss, params)):
                    param.update(grad, rate)
            # Check stopping criterion once per full epoch.
            if dataset.get_validation_accuracy() > 0.98:
                return
class LanguageIDModel(object):
    """
    A model for language identification at a single-word granularity.

    (See RegressionModel for more information about the APIs of different
    methods here. We recommend that you implement the RegressionModel before
    working on this part of the project.)
    """
    def __init__(self):
        # Our dataset contains words from five different languages, and the
        # combined alphabets of the five languages contain a total of 47 unique
        # characters.
        # You can refer to self.num_chars or len(self.languages) in your code
        self.num_chars = 47
        self.languages = ["English", "Spanish", "Finnish", "Dutch", "Polish"]
        # Simple Elman-style RNN:
        #   w_x:  projects each one-hot character into the hidden space
        #   w_h:  carries the previous hidden state forward
        #   b_h:  hidden-layer bias
        #   w_out/b_out: map the final hidden state to per-language logits
        hidden_size = 200
        self.w_x = nn.Parameter(self.num_chars, hidden_size)
        self.w_h = nn.Parameter(hidden_size, hidden_size)
        self.b_h = nn.Parameter(1, hidden_size)
        self.w_out = nn.Parameter(hidden_size, len(self.languages))
        self.b_out = nn.Parameter(1, len(self.languages))

    def run(self, xs):
        """
        Runs the model for a batch of examples.

        Although words have different lengths, our data processing guarantees
        that within a single batch, all words will be of the same length (L).

        Here `xs` will be a list of length L. Each element of `xs` will be a
        node with shape (batch_size x self.num_chars), where every row in the
        array is a one-hot vector encoding of a character.

        The RNN summarizes the list `xs` into a single hidden state of shape
        (batch_size x hidden_size), then maps it to a node of shape
        (batch_size x 5) containing scores, where higher scores correspond to
        greater probability of the word originating from a particular language.

        Inputs:
            xs: a list with L elements (one per character), where each element
                is a node with shape (batch_size x self.num_chars)
        Returns:
            A node with shape (batch_size x 5) containing predicted scores
                (also called logits)
        """
        # First character initializes the hidden state (no previous state).
        hidden = nn.relu(nn.add_bias(nn.matmul(xs[0], self.w_x), self.b_h))
        for x in xs[1:]:
            # NOTE(review): assumes the nn module provides nn.add for
            # elementwise node addition, mirroring the nn.matmul/nn.add_bias
            # naming used elsewhere in this file — confirm against nn.py.
            combined = nn.add(nn.matmul(x, self.w_x),
                              nn.matmul(hidden, self.w_h))
            hidden = nn.relu(nn.add_bias(combined, self.b_h))
        # Linear readout: softmax_loss expects raw logits.
        return nn.add_bias(nn.matmul(hidden, self.w_out), self.b_out)

    def get_loss(self, xs, y):
        """
        Computes the loss for a batch of examples.

        The correct labels `y` are represented as a node with shape
        (batch_size x 5). Each row is a one-hot vector encoding the correct
        language.

        Inputs:
            xs: a list with L elements (one per character), where each element
                is a node with shape (batch_size x self.num_chars)
            y: a node with shape (batch_size x 5)
        Returns: a loss node
        """
        return nn.softmax_loss(self.run(xs), y)

    def train(self, dataset):
        """
        Trains the model by gradient descent until validation accuracy
        exceeds 85%.
        """
        params = [self.w_x, self.w_h, self.b_h, self.w_out, self.b_out]
        # Negative multiplier: update() adds grad * multiplier (descent),
        # matching the convention of the other models in this file.
        rate = -0.1
        while True:
            for xs, y in dataset.iterate_once(60):
                loss = self.get_loss(xs, y)
                for param, grad in zip(params, nn.gradients(loss, params)):
                    param.update(grad, rate)
            if dataset.get_validation_accuracy() > 0.85:
                return