-
Notifications
You must be signed in to change notification settings - Fork 8
/
model.py
113 lines (89 loc) · 3.96 KB
/
model.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras.layers import Lambda, Conv2D, MaxPooling2D, Dropout, Dense, Flatten
from utils import INPUT_SHAPE, batch_generator
import argparse
import os
import tensorflow.contrib.keras as keras
# Fix NumPy's global RNG for reproducibility (presumably affects random
# augmentation inside utils.batch_generator — TODO confirm against utils.py).
np.random.seed(0)
def load_data(args):
    """
    Read the driving log and partition it into training and validation sets.

    Returns (X_train, X_valid, y_train, y_valid), where X holds the
    center/left/right image paths and y holds the steering angles.
    """
    log_path = os.path.join(args.data_dir, 'driving_log.csv')
    frame = pd.read_csv(log_path)

    image_paths = frame[['center', 'left', 'right']].values
    steering = frame['steering'].values

    # random_state=0 keeps the split deterministic across runs.
    return train_test_split(image_paths, steering,
                            test_size=args.test_size, random_state=0)
def build_model(args):
    """
    Construct the (modified) NVIDIA end-to-end steering network.

    Architecture: input normalization, five ELU convolutions, dropout,
    then four dense layers ending in a single steering-angle output.
    """
    # NOTE(review): this uses the Keras 1 call signature (Conv2D(n, h, w),
    # subsample=) to stay consistent with fit_generator below — do not mix
    # in Keras 2 forms.
    layers = [
        # Scale pixel values from [0, 255] into [-1, 1].
        Lambda(lambda x: x / 127.5 - 1.0, input_shape=INPUT_SHAPE),
        Conv2D(24, 5, 5, activation='elu', subsample=(2, 2)),
        Conv2D(36, 5, 5, activation='elu', subsample=(2, 2)),
        Conv2D(48, 5, 5, activation='elu', subsample=(2, 2)),
        Conv2D(64, 3, 3, activation='elu'),
        Conv2D(64, 3, 3, activation='elu'),
        # NOTE(review): Dropout's argument is the fraction to DROP, but the
        # flag is named keep_prob — the two only coincide at the 0.5 default.
        # Confirm intended semantics before changing the default.
        Dropout(args.keep_prob),
        Flatten(),
        Dense(100, activation='elu'),
        Dense(50, activation='elu'),
        Dense(10, activation='elu'),
        Dense(1),
    ]

    model = Sequential()
    for layer in layers:
        model.add(layer)
    model.summary()
    return model
def train_model(model, args, X_train, X_valid, y_train, y_valid):
    """
    Compile the model and fit it with on-the-fly batch generators.

    Writes a checkpoint per epoch as model-NNN.h5; with --save_best_only
    set, only epochs that improve val_loss are kept.
    """
    # Checkpoint on validation loss; the file name embeds the epoch number.
    checkpoint = ModelCheckpoint(
        'model-{epoch:03d}.h5',
        monitor='val_loss',
        verbose=0,
        save_best_only=args.save_best_only,
        mode='auto',
    )

    model.compile(loss='mean_squared_error', optimizer=Adam(lr=args.learning_rate))

    # Keras 1 fit_generator: the training generator augments (True), the
    # validation generator does not (False).
    train_gen = batch_generator(args.data_dir, X_train, y_train, args.batch_size, True)
    valid_gen = batch_generator(args.data_dir, X_valid, y_valid, args.batch_size, False)
    model.fit_generator(
        train_gen,
        samples_per_epoch=args.samples_per_epoch,
        nb_epoch=args.nb_epoch,
        max_q_size=1,
        validation_data=valid_gen,
        nb_val_samples=len(X_valid),
        callbacks=[checkpoint],
        verbose=1,
    )
def s2b(s):
    """
    Interpret a string as a boolean flag.

    Case-insensitive: 'true', 'yes', 'y' and '1' map to True;
    anything else maps to False.
    """
    return s.lower() in ('true', 'yes', 'y', '1')
def main():
    """
    Parse command-line options, load the data set and run training.
    """
    parser = argparse.ArgumentParser(description='Behavioral Cloning Training Program')
    # One row per option: (flag, help text, dest, type, default).
    option_table = [
        ('-d', 'data directory', 'data_dir', str, 'data'),
        ('-t', 'test size fraction', 'test_size', float, 0.2),
        ('-k', 'drop out probability', 'keep_prob', float, 0.5),
        ('-n', 'number of epochs', 'nb_epoch', int, 1),
        ('-s', 'samples per epoch', 'samples_per_epoch', int, 20000),
        ('-b', 'batch size', 'batch_size', int, 40),
        ('-o', 'save best models only', 'save_best_only', s2b, 'true'),
        ('-l', 'learning rate', 'learning_rate', float, 1.0e-4),
    ]
    for flag, help_text, dest, value_type, default in option_table:
        parser.add_argument(flag, help=help_text, dest=dest,
                            type=value_type, default=default)
    args = parser.parse_args()

    # Echo the effective configuration before training starts.
    divider = '-' * 30
    print(divider)
    print('Parameters')
    print(divider)
    for key, value in vars(args).items():
        print('{:<20} := {}'.format(key, value))
    print(divider)

    data = load_data(args)
    model = build_model(args)
    train_model(model, args, *data)
if __name__ == '__main__':
main()