# -*- coding: utf-8 -*-
# models.py: WDCNN and siamese-WDCNN model builders.
import numpy as np
from keras.layers import Input, Conv1D, Lambda, Dense, Flatten, MaxPooling1D, Dropout
from keras.models import Model, Sequential
from keras import backend as K
from keras.optimizers import Adam
def load_siamese_net(input_shape=(2048, 2)):
    """Build a siamese network with a shared WDCNN branch for pair similarity."""
    left_input = Input(input_shape)
    right_input = Input(input_shape)

    # WDCNN branch: a wide first kernel followed by small kernels.
    convnet = Sequential()
    convnet.add(Conv1D(filters=16, kernel_size=64, strides=16, activation='relu',
                       padding='same', input_shape=input_shape))
    convnet.add(MaxPooling1D(strides=2))
    convnet.add(Conv1D(filters=32, kernel_size=3, strides=1, activation='relu', padding='same'))
    convnet.add(MaxPooling1D(strides=2))
    convnet.add(Conv1D(filters=64, kernel_size=2, strides=1, activation='relu', padding='same'))
    convnet.add(MaxPooling1D(strides=2))
    convnet.add(Conv1D(filters=64, kernel_size=3, strides=1, activation='relu', padding='same'))
    convnet.add(MaxPooling1D(strides=2))
    convnet.add(Conv1D(filters=64, kernel_size=3, strides=1, activation='relu'))
    convnet.add(MaxPooling1D(strides=2))
    convnet.add(Flatten())
    convnet.add(Dense(100, activation='sigmoid'))
    # print('WDCNN convnet summary:')
    # convnet.summary()

    # Call the convnet Sequential model on each input tensor so the weights are shared.
    encoded_l = convnet(left_input)
    encoded_r = convnet(right_input)

    # Merge the two encodings with their element-wise L1 distance, then score similarity.
    L1_layer = Lambda(lambda tensors: K.abs(tensors[0] - tensors[1]))
    L1_distance = L1_layer([encoded_l, encoded_r])
    D1_layer = Dropout(0.5)(L1_distance)
    prediction = Dense(1, activation='sigmoid')(D1_layer)

    siamese_net = Model(inputs=[left_input, right_input], outputs=prediction)
    # optimizer = Adam(0.00006)
    optimizer = Adam()
    # TODO: get the layerwise learning rates and momentum annealing scheme
    # described in the paper working.
    siamese_net.compile(loss='binary_crossentropy', optimizer=optimizer)
    # print('\nsiamese_net summary:')
    # siamese_net.summary()
    # print(siamese_net.count_params())
    return siamese_net
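
# Example pair-training call (a minimal sketch; the batch size of 8 is an
# illustrative assumption, not something this module fixes):
#
#   siamese_net = load_siamese_net()
#   left = np.random.rand(8, 2048, 2).astype('float32')
#   right = np.random.rand(8, 2048, 2).astype('float32')
#   same = np.random.randint(0, 2, size=(8, 1)).astype('float32')
#   loss = siamese_net.train_on_batch([left, right], same)
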
def load_wdcnn_net(input_shape=(2048, 2), nclasses=10):
    """Build a plain WDCNN classifier: the same branch as above with a softmax head."""
    left_input = Input(input_shape)

    # WDCNN branch (identical architecture to the siamese branch).
    convnet = Sequential()
    convnet.add(Conv1D(filters=16, kernel_size=64, strides=16, activation='relu',
                       padding='same', input_shape=input_shape))
    convnet.add(MaxPooling1D(strides=2))
    convnet.add(Conv1D(filters=32, kernel_size=3, strides=1, activation='relu', padding='same'))
    convnet.add(MaxPooling1D(strides=2))
    convnet.add(Conv1D(filters=64, kernel_size=2, strides=1, activation='relu', padding='same'))
    convnet.add(MaxPooling1D(strides=2))
    convnet.add(Conv1D(filters=64, kernel_size=3, strides=1, activation='relu', padding='same'))
    convnet.add(MaxPooling1D(strides=2))
    convnet.add(Conv1D(filters=64, kernel_size=3, strides=1, activation='relu'))
    convnet.add(MaxPooling1D(strides=2))
    convnet.add(Flatten())
    convnet.add(Dense(100, activation='sigmoid'))
    # print('convnet summary:')
    # convnet.summary()

    encoded_cnn = convnet(left_input)
    prediction_cnn = Dense(nclasses, activation='softmax')(Dropout(0.5)(encoded_cnn))
    wdcnn_net = Model(inputs=left_input, outputs=prediction_cnn)
    # optimizer = Adam(0.00006)
    optimizer = Adam()
    wdcnn_net.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    # print('\nwdcnn_net summary:')
    # wdcnn_net.summary()
    # print(wdcnn_net.count_params())
    return wdcnn_net
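
# Minimal smoke test (a sketch assuming a working Keras backend; the shapes and
# random labels below are illustrative, not mandated by the training code):
if __name__ == '__main__':
    siamese = load_siamese_net()
    wdcnn = load_wdcnn_net()
    # One dummy batch per model, just to confirm both graphs compile and train.
    x = np.random.rand(4, 2048, 2).astype('float32')
    pair_labels = np.random.randint(0, 2, size=(4, 1)).astype('float32')
    class_labels = np.eye(10)[np.random.randint(0, 10, size=4)]  # one-hot, 10 classes
    print('siamese loss:', siamese.train_on_batch([x, x], pair_labels))
    print('wdcnn loss/acc:', wdcnn.train_on_batch(x, class_labels))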