model.py
import csv
import cv2
import numpy as np
import sys  # required for the "floyd" command-line argument
import sklearn
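# sklearn is used below for the train/validation split and for shuffling batches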
print("Started Behavioural Cloning Model!")
lines = []
floyd = 0
data_path = '../CarND-P3-Data/'
save_path = 'model.h5'
# enable floyd mode to switch the data and output paths
if "floyd" in sys.argv:
    # floyd = 1
    data_path = '/input/'
    save_path = '/output/model.h5'
    print("floydmode activated!")
# read the driving log CSV
with open(data_path + 'driving_log.csv') as csvfile:
    reader = csv.reader(csvfile)
    next(reader, None)  # skip the header line
    for line in reader:
        lines.append(line)
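
# each row holds the centre, left and right image paths followed by the
# steering angle (the remaining columns are not used here)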
from sklearn.model_selection import train_test_split
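# hold out 20% of the driving-log rows for validation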
train_samples, validation_samples = train_test_split(lines, test_size=0.2)
# steering correction added to the left-camera angle and subtracted from the
# right-camera angle so the side cameras can be used as extra training data
correction = 0.2

# Generator for memory-efficient data loading
def generator(samples, batch_size=32):
    num_samples = len(samples)
    while True:  # loop forever so the generator never terminates
        sklearn.utils.shuffle(samples)
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset + batch_size]
            images = []
            angles = []
            # add all three camera images of each sample to the batch
            for batch_sample in batch_samples:
                # note: cv2.imread returns images in BGR channel order
                center_image = cv2.imread(data_path + 'IMG/' + batch_sample[0].split('/')[-1])
                left_image = cv2.imread(data_path + 'IMG/' + batch_sample[1].split('/')[-1])
                right_image = cv2.imread(data_path + 'IMG/' + batch_sample[2].split('/')[-1])
                center_angle = float(batch_sample[3])
                images.append(center_image)
                angles.append(center_angle)
                images.append(left_image)
                angles.append(center_angle + correction)
                images.append(right_image)
                angles.append(center_angle - correction)
                # also add horizontally flipped copies with negated angles
                images.append(np.fliplr(center_image))
                angles.append(-center_angle)
                images.append(np.fliplr(left_image))
                angles.append(-(center_angle + correction))
                images.append(np.fliplr(right_image))
                angles.append(-(center_angle - correction))
            X_train = np.array(images)
            y_train = np.array(angles)
            yield sklearn.utils.shuffle(X_train, y_train)
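
# each logged row yields six (image, angle) pairs (three cameras plus their
# horizontal flips), so a batch_size of 32 rows produces 192 training images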
print("Loaded Data!")
from keras.models import Sequential
from keras.layers import Flatten, Dense, Conv2D, Lambda, Cropping2D, Dropout
train_generator = generator(train_samples, batch_size=32)
validation_generator = generator(validation_samples, batch_size=32)

# NVIDIA-style architecture (adapted from "End to End Learning for Self-Driving Cars")
# note: written against the Keras 1 API, where `subsample` is the convolution stride
model = Sequential()
# normalise pixels to [-0.5, 0.5]; input shape matches the 160x320x3 simulator images
model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160, 320, 3)))
# crop 50 rows of sky and 20 rows of car hood, leaving 90x320x3
model.add(Cropping2D(cropping=((50, 20), (0, 0))))
# Convolution: 24 filters, 5x5 kernel, stride 2
model.add(Conv2D(24, 5, 5, subsample=(2, 2), activation="relu"))
# Convolution: 32 filters, 5x5 kernel, stride 2
model.add(Conv2D(32, 5, 5, subsample=(2, 2), activation="relu"))
# Convolution: 48 filters, 3x3 kernel, stride 2
model.add(Conv2D(48, 3, 3, subsample=(2, 2), activation="relu"))
# Convolution: 60 filters, 3x3 kernel, stride 2
model.add(Conv2D(60, 3, 3, subsample=(2, 2), activation="relu"))
# Convolution: 60 filters, 3x3 kernel, stride 2
model.add(Conv2D(60, 3, 3, subsample=(2, 2), activation="relu"))
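# with the 90x320 cropped input and default 'valid' padding, the convolutional
# stack ends at a 60-channel 1x8 feature map before flattening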
# Flatten
model.add(Flatten())
# Fully connected 1164 neurons
model.add(Dense(1164))
# Dropout (rate 0.1) to reduce overfitting
model.add(Dropout(0.1))
# Fully connected 100 neurons
model.add(Dense(100))
# Fully connected 50 neurons
model.add(Dense(50))
# Fully connected 10 neurons
model.add(Dense(10))
# output: the predicted steering angle (single regression value)
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
# each logged sample expands to six images in the generator, hence the factor of 6
model.fit_generator(train_generator, samples_per_epoch=len(train_samples) * 6,
                    validation_data=validation_generator,
                    nb_val_samples=len(validation_samples) * 6, nb_epoch=3)
model.save(save_path)
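
# the trained network can later be reloaded for inference, e.g. (sketch):
#   from keras.models import load_model
#   model = load_model(save_path)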

# plot the architecture to model.png (requires pydot and graphviz; newer Keras
# versions provide this as keras.utils.plot_model)
from keras.utils.visualize_util import plot
plot(model, to_file='model.png')