# seimies_2.py: trains a Siamese network for one-shot image recognition
import os
import pickle
import time

import numpy as np
import numpy.random as rng
# scipy.misc.imread was removed in SciPy 1.2; imageio.imread is a drop-in replacement here
from imageio import imread
from sklearn.utils import shuffle

from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras.layers import Conv2D, MaxPooling2D, Input, Lambda, Flatten, Dense
from keras.regularizers import l2
from keras import backend as K
train_folder = "fool_2/images_background/"
val_folder = 'fool_2/images_evaluation/'
save_path = 'data_33/'
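
# Make sure the output directory exists; the pickle.dump calls below fail otherwise.
os.makedirs(save_path, exist_ok=True)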

def loadimgs(path, n=0):
    '''
    path => path of the train directory or the test directory
    '''
    X = []
    y = []
    cat_dict = {}
    lang_dict = {}
    curr_y = n
    # we load every alphabet separately so we can isolate them later
    for alphabet in os.listdir(path):
        print("loading alphabet: " + alphabet)
        lang_dict[alphabet] = [curr_y, None]
        alphabet_path = os.path.join(path, alphabet)
        # every letter/category gets its own entry in the array, so load separately
        for letter in os.listdir(alphabet_path):
            cat_dict[curr_y] = (alphabet, letter)
            category_images = []
            letter_path = os.path.join(alphabet_path, letter)
            # read all the images in the current category
            for filename in os.listdir(letter_path):
                image_path = os.path.join(letter_path, filename)
                image = imread(image_path)
                category_images.append(image)
                y.append(curr_y)
            try:
                X.append(np.stack(category_images))
            # edge case - last one
            except ValueError as e:
                print(e)
                print("error - category_images:", category_images)
            curr_y += 1
            lang_dict[alphabet][1] = curr_y - 1
    y = np.vstack(y)
    X = np.stack(X)
    return X, y, lang_dict
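
# Assuming every category holds the same number of images (e.g. 20 per character
# in Omniglot-style data), X comes back with shape
# (n_classes, n_examples, height, width[, channels]), and lang_dict maps each
# alphabet to its [low, high] class-index range; get_batch and make_oneshot_task
# below rely on that layout.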

X, y, c = loadimgs(train_folder)
with open(os.path.join(save_path, "train.pickle"), "wb") as f:
    pickle.dump((X, c), f)

Xval, yval, cval = loadimgs(val_folder)
with open(os.path.join(save_path, "val.pickle"), "wb") as f:
    pickle.dump((Xval, cval), f)

def initialize_weights(shape, name=None):
    """
    The paper (http://www.cs.utoronto.ca/~gkoch/files/msc-thesis.pdf)
    suggests initializing CNN layer weights with mean 0.0 and standard deviation 0.01.
    """
    return np.random.normal(loc=0.0, scale=1e-2, size=shape)

def initialize_bias(shape, name=None):
    """
    The paper (http://www.cs.utoronto.ca/~gkoch/files/msc-thesis.pdf)
    suggests initializing CNN layer biases with mean 0.5 and standard deviation 0.01.
    """
    return np.random.normal(loc=0.5, scale=1e-2, size=shape)
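
# Note: plain functions returning NumPy arrays work as initializers on older Keras
# releases; on newer versions the equivalent would be
# keras.initializers.RandomNormal(mean=0.0, stddev=0.01) (mean=0.5 for the biases).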

def get_siamese_model(input_shape):
    """
    Model architecture based on the one provided in
    http://www.cs.utoronto.ca/~gkoch/files/msc-thesis.pdf
    """
    # Define the tensors for the two input images
    left_input = Input(input_shape)
    right_input = Input(input_shape)

    # Convolutional neural network shared by both inputs
    model = Sequential()
    model.add(Conv2D(64, (10, 10), activation='relu', input_shape=input_shape,
                     kernel_initializer=initialize_weights, kernel_regularizer=l2(2e-4)))
    model.add(MaxPooling2D())
    model.add(Conv2D(128, (7, 7), activation='relu',
                     kernel_initializer=initialize_weights,
                     bias_initializer=initialize_bias, kernel_regularizer=l2(2e-4)))
    model.add(MaxPooling2D())
    model.add(Conv2D(128, (4, 4), activation='relu', kernel_initializer=initialize_weights,
                     bias_initializer=initialize_bias, kernel_regularizer=l2(2e-4)))
    model.add(MaxPooling2D())
    model.add(Conv2D(256, (4, 4), activation='relu', kernel_initializer=initialize_weights,
                     bias_initializer=initialize_bias, kernel_regularizer=l2(2e-4)))
    model.add(Flatten())
    model.add(Dense(4096, activation='sigmoid',
                    kernel_regularizer=l2(1e-3),
                    kernel_initializer=initialize_weights, bias_initializer=initialize_bias))

    # Generate the encodings (feature vectors) for the two images
    encoded_l = model(left_input)
    encoded_r = model(right_input)

    # Custom layer to compute the absolute difference between the encodings
    L1_layer = Lambda(lambda tensors: K.abs(tensors[0] - tensors[1]))
    L1_distance = L1_layer([encoded_l, encoded_r])

    # Dense layer with a sigmoid unit to generate the similarity score
    prediction = Dense(1, activation='sigmoid', bias_initializer=initialize_bias)(L1_distance)

    # Connect the inputs with the outputs
    siamese_net = Model(inputs=[left_input, right_input], outputs=prediction)
    return siamese_net
model = get_siamese_model((105, 105, 3))
model.summary()
optimizer = Adam(lr=0.00006)
model.compile(loss="binary_crossentropy", optimizer=optimizer)
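
# Binary cross-entropy matches the pairwise setup: the network emits a single
# sigmoid "same class" probability per pair, and get_batch below labels
# same-class pairs 1 and different-class pairs 0.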

with open(os.path.join(save_path, "train.pickle"), "rb") as f:
    (Xtrain, train_classes) = pickle.load(f)
print("Training alphabets:\n")
print(list(train_classes.keys()))

with open(os.path.join(save_path, "val.pickle"), "rb") as f:
    (Xval, val_classes) = pickle.load(f)
print("Validation alphabets:", end="\n\n")
print(list(val_classes.keys()))

def get_batch(batch_size, s="train"):
    """Create a batch of n pairs, half same class, half different class"""
    if s == 'train':
        X = Xtrain
        categories = train_classes
    else:
        X = Xval
        categories = val_classes
    n_classes, n_examples, w, h = X.shape[:-1]

    # randomly sample several classes to use in the batch
    categories = rng.choice(n_classes, size=(batch_size,), replace=True)
    # initialize 2 empty arrays for the input image batch
    pairs = [np.zeros((batch_size, h, w, 3)) for i in range(2)]
    # initialize vector for the targets
    targets = np.zeros((batch_size,))
    # make one half of it '1's, so the 2nd half of the batch has the same class
    targets[batch_size // 2:] = 1
    for i in range(batch_size):
        category = categories[i]
        idx_1 = rng.randint(0, n_examples)
        pairs[0][i, :, :, :] = X[category, idx_1].reshape(w, h, 3)
        idx_2 = rng.randint(0, n_examples)
        # pick images of the same class for the 1st half, different for the 2nd
        if i >= batch_size // 2:
            category_2 = category
        else:
            # add a random number to the category modulo n_classes to ensure the 2nd image has a different category
            category_2 = (category + rng.randint(1, n_classes)) % n_classes
        pairs[1][i, :, :, :] = X[category_2, idx_2].reshape(w, h, 3)
    return pairs, targets
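
# Example (shapes assume the 105x105 RGB setup above):
#   pairs, targets = get_batch(32)
#   pairs[0].shape == pairs[1].shape == (32, 105, 105, 3)
#   targets.shape == (32,), with the second half set to 1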

def generate(batch_size, s="train"):
    """A generator for batches, so model.fit_generator can be used."""
    while True:
        pairs, targets = get_batch(batch_size, s)
        yield (pairs, targets)
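
# Sketch of how this generator could be used (the training loop below calls
# get_batch directly instead):
#   model.fit_generator(generate(batch_size), steps_per_epoch=100, epochs=10)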

def make_oneshot_task(N, s="val", language=None):
    """Create pairs of test image, support set for testing N-way one-shot learning."""
    if s == 'train':
        X = Xtrain
        categories = train_classes
    else:
        X = Xval
        categories = val_classes
    # X carries a trailing channel axis, just like in get_batch
    n_classes, n_examples, w, h = X.shape[:-1]
    indices = rng.randint(0, n_examples, size=(N,))
    if language is not None:  # if a language is specified, select characters from that language
        low, high = categories[language]
        if N > high - low:
            raise ValueError("This language ({}) has fewer than {} letters".format(language, N))
        categories = rng.choice(range(low, high), size=(N,), replace=False)
    else:  # if no language is specified, just pick a bunch of random letters
        categories = rng.choice(range(n_classes), size=(N,), replace=False)
    true_category = categories[0]
    ex1, ex2 = rng.choice(n_examples, replace=False, size=(2,))
    test_image = np.asarray([X[true_category, ex1, :, :]] * N).reshape(N, w, h, 3)
    support_set = X[categories, indices, :, :]
    support_set[0, :, :] = X[true_category, ex2]
    # keep 3 channels so the support set matches the model's (105, 105, 3) input
    support_set = support_set.reshape(N, w, h, 3)
    targets = np.zeros((N,))
    targets[0] = 1
    targets, test_image, support_set = shuffle(targets, test_image, support_set)
    pairs = [test_image, support_set]
    return pairs, targets
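
# Each call builds one N-way task: pairs[0] repeats the test image N times,
# pairs[1] holds the support set (one image per candidate class), and exactly
# one entry of targets is 1, marking the true class.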

def test_oneshot(model, N, k, s="val", verbose=0):
    """Test the average N-way one-shot learning accuracy of a siamese neural net over k one-shot tasks"""
    n_correct = 0
    if verbose:
        print("Evaluating model on {} random {} way one-shot learning tasks ... \n".format(k, N))
    for i in range(k):
        inputs, targets = make_oneshot_task(N, s)
        probs = model.predict(inputs)
        if np.argmax(probs) == np.argmax(targets):
            n_correct += 1
    percent_correct = (100.0 * n_correct / k)
    if verbose:
        print("Got an average of {}% {} way one-shot learning accuracy \n".format(percent_correct, N))
    return percent_correct

# Hyperparameters
evaluate_every = 200 # interval for evaluating on one-shot tasks
batch_size = 32
n_iter = 20000 # No. of training iterations
N_way = 20 # how many classes for testing one-shot tasks
n_val = 250 # how many one-shot tasks to validate on
best = -1
model_path = './weights/'
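# Create the weights directory up front so model.save_weights below does not fail.
os.makedirs(model_path, exist_ok=True)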
print("Starting training process!")
print("-------------------------------------")
t_start = time.time()

for i in range(1, n_iter + 1):
    (inputs, targets) = get_batch(batch_size)
    loss = model.train_on_batch(inputs, targets)
    if i % evaluate_every == 0:
        print("\n ------------- \n")
        print("Time for {0} iterations: {1} mins".format(i, (time.time() - t_start) / 60.0))
        print("Train Loss: {0}".format(loss))
        val_acc = test_oneshot(model, N_way, n_val, verbose=True)
        model.save_weights(os.path.join(model_path, 'weights.{}.h5'.format(i)))
        if val_acc >= best:
            print("Current best: {0}, previous best: {1}".format(val_acc, best))
            best = val_acc