Commit

ready to merge to master
zeroAska committed Apr 16, 2018
1 parent 4074759 commit 998c5c2
Showing 7 changed files with 94 additions and 70 deletions.
20 changes: 12 additions & 8 deletions gen_data_cam.py
@@ -4,16 +4,18 @@
import numpy as np
import random, imutils
batch_size = 32
# test directory
# directory = '/home/eecs568/Documents/TestImages/'
# dataset = 'testSet.csv'

# train directory
directory = './cam4_train/'
dataset = 'dataset_train.csv'
#directory = './nclt_tripple/'
#dataset = 'dataset_train.csv'

# test directory
directory = './nclt_03_31/test/'
dataset = 'dataset_test.csv'

class datasource(object):
def __init__(self, images, poses):
print("Image Data path: "+directory)
print("label path: "+dataset)
self.images = images
self.poses = poses

@@ -80,8 +82,10 @@ def get_data():
p3 = float(p3)
p4 = float(p4)
p5 = float(p5)

filename = directory+'/'+all_imgs[line_num] #fname+".tiff"
try:
filename = directory+'/'+all_imgs[line_num] #fname+".tiff"
except:
pdb.set_trace()
if (os.path.isfile(filename)==False):
pdb.set_trace()
continue
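A note for readers following this hunk: the new try/except around the filename lookup drops into pdb when the CSV holds more rows than there are images on disk, as does the os.path.isfile check below it. A minimal sketch of the same guard without the debugger traps (resolve_image is a hypothetical helper, not part of this commit):

import os

def resolve_image(directory, all_imgs, line_num):
    # CSV rows can outnumber the images actually present on disk
    if line_num >= len(all_imgs):
        return None
    filename = directory + '/' + all_imgs[line_num]
    return filename if os.path.isfile(filename) else None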
8 changes: 8 additions & 0 deletions gen_subset.py
@@ -0,0 +1,8 @@

import subset_gen
#gen = subset_gen.Subset('/home/eecs568/eecs568/new_version/Mobile-Robotics/lu_data/', '/home/eecs568/eecs568/new_version/Mobile-Robotics/lu_data/dataset.csv', 4, 8, '.png', 19, 1e-2)
gen = subset_gen.Subset('./nclt_03_31/', './nclt_03_31/groundtruth_2012-03-31.csv', 7, 4, '.tiff', 12, 1e-5)
#gen = subset_gen.Subset('/home/eecs568/eecs568/new_version/Mobile-Robotics/nclt_01_18/', '/home/eecs568/Documents/groundtruth_2012-01-08.csv', 10, 1, '.tiff', 12, 1e-5)

# is_euler, is_train, is_test
gen.gen_subset(True, False, True)
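The positional arguments are easiest to read against the updated Subset constructor in subset_gen.py below; here is the same call annotated (parameter meanings inferred from that constructor, so treat the comments as a sketch):

import subset_gen

gen = subset_gen.Subset(
    './nclt_03_31/',                            # folder_name: image directory
    './nclt_03_31/groundtruth_2012-03-31.csv',  # label_file: ground-truth CSV
    7,        # train_freq: keep every 7th image in the train split
    4,        # test_freq: keep every 4th image in the test split
    '.tiff',  # tail_str: image filename extension
    12,       # name_timestep_len: timestamp digits kept in filenames
    1e-5)     # accuracy: match tolerance becomes 1.0/accuracy (here 1e5 us)
gen.gen_subset(True, False, True)  # is_euler, is_train, is_test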
51 changes: 29 additions & 22 deletions gen_test_trajectory_euler_angle.py
@@ -1,6 +1,6 @@
import tensorflow as tf
import os, sys
import gen_data_nclt_new
import gen_data_cam
import train
import numpy as np
import pdb
@@ -10,48 +10,51 @@
import matplotlib.pyplot as plt


# os.environ['CUDA_VISIBLE_DEVICES'] = '0'
#os.environ['CUDA_VISIBLE_DEVICES'] = '1'
# second to last argument False for nclt True for others
weightPath = '/home/eecs568/eecs568/Mobile-Robotics/success_models/nclt_new/20180409-130922model_epoch_4.ckpt'
#imagePath = './cam4_train/'
weightPath = '20180415-201632model_epoch_4.ckpt'
imagePath = './nclt_03_31/test/'
#figname = 'nclt_train.png'
imagePath = '/home/eecs568/Documents/TestImages\ 2012-01-08/test/'
figname = 'nclt_test_seq2.png'
# trainer = train.trainer(weightPath, imagePath, 100, False, False)
datasource = gen_data_nclt_new.get_data()
#imagePath = './nclt_11_04/test/'
figname = 'nclt_train_four.png'
trainer = train.trainer(weightPath, imagePath, 100, False, True, True)
datasource = gen_data_cam.get_data()

# initialize plot tool
fig = plt.figure(1)

error = np.zeros([len(datasource.images),3])

for i in range(len(datasource.images)):
iterations = len(datasource.images)
iterations = 340
for i in range(iterations):
np_image = datasource.images[i]
# feed={tf.get_default_graph().get_tensor_by_name('Placeholder:0'): np.expand_dims(np_image, axis=0) }
feed={tf.get_default_graph().get_tensor_by_name('Placeholder:0'): np.expand_dims(np_image, axis=0) }

# ground truth x y z
pose_x= np.asarray(datasource.poses[i][0:2])

# ground truth euler angles
pose_q= np.asarray(datasource.poses[i][3:6])
pose_q= np.asarray(datasource.poses[i][3:5])
# pose_euler_angle = transforms3d.euler.quat2euler(pose_q)

# x_q = trainer.sess.run([tf.get_default_graph().get_tensor_by_name('fc9/fc9:0') ], feed)
# pdb.set_trace()
x_q = trainer.sess.run([tf.get_default_graph().get_tensor_by_name('fc9/fc9:0') ], feed)
# pdb.set_trace()

# x y z
# pred_x = np.squeeze(x_q)[0:3]
pred_x = np.squeeze(x_q)[0:2]

# euler angle
# pred_q = np.squeeze(x_q)[3:6]
pred_q = np.squeeze(x_q)[3:5]
# pred_euler_angle = transforms3d.euler.quat2euler(pred_q)

# scatter plot for pose
plt.scatter(pose_x[0],pose_x[1],c='g')
# plt.scatter(pred_x[0],pred_x[1],c='r')
plt.scatter(pose_x[1],pose_x[0],c='g')
plt.scatter(pred_x[1],pred_x[0],c='r')
plt.plot([pose_x[1],pred_x[1]],[pose_x[0],pred_x[0]],c='k')
# plt.pause(0.01)
# plt.draw()
# error[i,:] = np.array([pose_x[0]-pred_x[0],pose_x[1]-pred_x[1],pose_q[-1]-pred_q[-1]])
plt.draw()
error[i,:] = np.array([pose_x[1]-pred_x[1],pose_x[0]-pred_x[0],pose_q[-1]-pred_q[-1]])

print("iteration {}\n".format(i))

@@ -60,7 +63,11 @@
fig.savefig(figname)

# calculate stddev and mean error
#meanErr = np.sum(error,axis=0)/len(error)
#stdErr = np.std(error,axis=0)
#print("The mean error is {} and standard deviation is {}.".format(meanErr,stdErr))
meanErr = np.sum(error,axis=0)/len(error)
stdErr = np.std(error,axis=0)
print("The mean error is {} and standard deviation is {}.".format(meanErr,stdErr))

goodId = [106,204]
badId = [298,321]
print(error[goodId[0],:],error[goodId[1],:])
print(error[badId[0],:],error[badId[1],:])
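On the re-enabled statistics above: np.sum(error, axis=0)/len(error) is just the per-column mean, so an equivalent spelling of the report (a sketch over the same error array of shape (iterations, 3)) is:

import numpy as np

meanErr = np.mean(error, axis=0)  # same as np.sum(error, axis=0) / len(error)
stdErr = np.std(error, axis=0)
print("The mean error is {} and standard deviation is {}.".format(meanErr, stdErr))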
16 changes: 5 additions & 11 deletions gtsamSolver.py
@@ -102,17 +102,11 @@ def update(self, updateNum = 1):
self.currentEst.atPose2(X(self.currentKey)).theta()]
return self.currentPose

# def currentPos(self, key=0):
# if key == 0:
# key = self.currentKey

# if(self.currentEst):
# currentPos = [self.currentEst.atPose2(X(key)).x(),
# self.currentEst.atPose2(X(key)).y(),
# self.currentEst.atPose2(X(key)).theta()]
# else:
# currentPos = [0,0,0]
# return currentPos
def getEstimate(self,id):
return [self.currentEst.atPose2(X(id)).x(),
self.currentEst.atPose2(X(id)).y(),
self.currentEst.atPose2(X(id)).theta()]


def printGraph(self, output = "\nFactor Graph:\n"):
print(self.graph)
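The new getEstimate helper reads a solved Pose2 back out of the current estimate by key. A hypothetical call, assuming solver is a gtsamSolver instance whose update() has already populated currentEst:

x, y, theta = solver.getEstimate(5)  # pose of node X(5) as [x, y, theta]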
44 changes: 26 additions & 18 deletions subset_gen.py
@@ -3,56 +3,64 @@
from numpy import genfromtxt
import numpy as np
import pdb

class Subset:
def __init__(self, folder_name, label_file, train_freq, test_freq, tail_str):
def __init__(self, folder_name, label_file, train_freq, test_freq, tail_str, name_timestep_len, accuracy=1e-5):
self.imgs = []
self.timestep_len = name_timestep_len
self.folder_name = folder_name
self.train_freq = train_freq
self.test_freq = test_freq
self.train_folder = folder_name + '/train'
self.test_folder = folder_name + '/test'
self.tail_str = tail_str
self.name_has_prefix = None

for name in sorted(os.listdir(folder_name)):
if name[-len(tail_str):] != tail_str:
continue
if name[0:5] == 'frame':
name = name[5:]
self.name_has_prefix = 'frame'
self.imgs.append(name)
self.labels = genfromtxt(label_file, delimiter=',')

# pointer point to the current timestamp that wait to be matched
self.currentMatch = 0
# match tolerance in microseconds
self.matchTol = 1e5
self.matchTol = 1.0/accuracy


def gen_subset(self):
# self.dump_to_folder( self.train_freq, self.train_folder,'dataset_train.csv', True)
self.dump_to_folder( self.test_freq, self.test_folder,'dataset_test.csv', False)
def gen_subset(self, is_euler= False, is_train=True, is_test=True):
if is_train:
self.dump_to_folder( self.train_freq, self.train_folder,'dataset_train.csv', True, is_euler)
if is_test:
self.dump_to_folder( self.test_freq, self.test_folder,'dataset_test.csv', False,is_euler)

def dump_to_folder(self, freq, new_folder, new_label_file, is_train):
def dump_to_folder(self, freq, new_folder, new_label_file, is_train, is_euler):
table_ind = 0
total_num_imgs = len(self.imgs)
table = np.zeros((total_num_imgs // freq , self.labels.shape[1]))
if not os.path.exists(new_folder):
os.makedirs(new_folder)
new_labels = open(new_folder + '/' + new_label_file, "a")
new_labels = open(new_folder + '/' + new_label_file, "w")
for i in tqdm(range(total_num_imgs)):
if (i % freq == 0):
img_i = self.imgs[i][:-len(self.tail_str)]
img_i = self.imgs[i][:-len(self.tail_str)] # only contain number
label_i = self.match(img_i)
if label_i == -1: continue
table[table_ind, :] = self.labels[label_i,:]
if is_train:
table[table_ind, 0] = str(int(self.labels[label_i, 0]))[0:12]
img_i = img_i[0:12]
shutil.copyfile(self.folder_name +'/' + self.imgs[i], new_folder + '/' + img_i + self.tail_str)
to_write = str(int(table[table_ind, 0]))+ ',' + \
str(float(table[table_ind, 1])) + ',' + \
str(float(table[table_ind, 2])) + ',' + \
str(float(table[table_ind, 3])) + ',' + \
str(float(table[table_ind, 4])) + ',' + \
str(float(table[table_ind, 5])) + ',' + \
str(float(table[table_ind, 6])) + '\n'
table[table_ind, 0] = str(int(self.labels[label_i, 0]))[0:self.timestep_len]
img_i = img_i[0:self.timestep_len]

name = self.name_has_prefix + self.imgs[i] if self.name_has_prefix is not None else self.imgs[i]

shutil.copyfile(self.folder_name +'/' + name , new_folder + '/' + img_i + self.tail_str)
to_write = str(int(table[table_ind, 0]))+ ','
for i in range(1,table.shape[1]-1):
to_write = to_write + str(float(table[table_ind, i])) + ','
to_write = to_write + str(float(table[table_ind, table.shape[1]-1]))+ '\n'
new_labels.write(to_write)
table_ind += 1
if table_ind == table.shape[0]: break
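One caution on the generalized CSV writer in this hunk: the inner column loop reuses the name i, shadowing the image-index loop variable; it is harmless here only because nothing after the inner loop reads i in the same iteration. A sketch of the same row formatting without the shadowing:

row = table[table_ind]
to_write = str(int(row[0])) + ',' + ','.join(str(float(v)) for v in row[1:]) + '\n'
new_labels.write(to_write)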
3 changes: 0 additions & 3 deletions test_subset.py

This file was deleted.

22 changes: 14 additions & 8 deletions train.py
@@ -101,8 +101,10 @@ def regen_regression_network(self):
def restore_network(self, path_to_weight):
self.saver = tf.train.import_meta_graph(path_to_weight + ".meta" )
graph = tf.get_default_graph()

self.regression_out = tf.get_default_graph().get_tensor_by_name('fc9/fc9:0')
self.loss = graph.get_operation_by_name("final_loss")
self.loss = graph.get_tensor_by_name("add:0")

self.train_op = tf.get_default_graph().get_operation_by_name("Adam_minimizer")
self.saver.restore(self.sess, path_to_weight)#tf.train.latest_checkpoint('./'))
self.image_inputs = tf.get_default_graph().get_tensor_by_name('Placeholder:0')
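Worth noting in restore_network: the loss is now fetched as a tensor ("add:0") instead of an operation ("final_loss"). In TF1, Session.run on an Operation returns None, while running a Tensor returns its value, so the old line could never report a loss. A sketch of the distinction, assuming a restored graph containing both names:

graph = tf.get_default_graph()
loss_op = graph.get_operation_by_name("final_loss")  # sess.run(loss_op) -> None
loss_t  = graph.get_tensor_by_name("add:0")          # sess.run(loss_t) -> loss value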
@@ -133,7 +135,6 @@ def build_loss(self, beta=100):
self.network.variable_summaries(self.loss, "final_weighted_loss_")

def test(self, img, need_rotate_angle=270, num_random_crops=20):
pdb.set_trace()
if img.shape[2] != 3:
print ("We only accept 3-dimensional rgb images")
if img.shape[0] > img.shape[1]:
@@ -161,11 +162,12 @@ def test(self, img, need_rotate_angle=270, num_random_crops=20):
def train(self, batch_size, epochs):

total_loss = 0
total_batch = 281 #int(self.data_handler.numimages() * self.data_handler.genNum * 1.0 / batch_size) #100
total_batch = 125 #int(self.data_handler.numimages() * self.data_handler.genNum * 1.0 / batch_size) #100
if total_batch==0:
pdb.set_trace()
#print("[trainer] Start Training, size of dataset is " +str(self.data_handler.numimages() * self.data_handler.num_crops ))
#pdb.set_trace()
min_loss = 100000000000.0
for epoch in range(epochs):
#self.data_handler.reset()
#self.data_handler.generateData(500)
@@ -185,15 +187,19 @@
one_batch_image, np_poses_x, np_poses_q = next(data_gen)
one_batch_label = np.hstack((np_poses_x, np_poses_q))
feeds ={self.image_inputs: one_batch_image, self.label_inputs: one_batch_label }
summary, loss, gradients = self.sess.run([self.merged_summary, self.loss, self.compute_gradients ], feed_dict=feeds)
self.sess.run([self.train_op], feed_dict=feeds )
#summary, loss, gradients = self.sess.run([self.merged_summary, self.loss, self.compute_gradients ], feed_dict=feeds)

summary, loss, _= self.sess.run([self.merged_summary, self.loss, self.train_op ], feed_dict=feeds)
#self.sess.run([self.train_op], feed_dict=feeds )
print("[Epoch "+str(epoch)+" trainer] Train one batch of size "+str(batch_size)+", loss is "+str(loss))
total_loss += loss
self.train_writer.add_summary(summary, epoch * total_batch + i)

avg_loss = (total_loss)/total_batch
self.saver.save(self.sess, "./"+self.summary_now+"model_epoch_"+str(epoch)+".ckpt")
if epoch > 0: delete_network_backups("./"+self.summary_now+"model_epoch_"+str(epoch-1)+".ckpt" )
if avg_loss < min_loss:
min_loss = avg_loss
self.saver.save(self.sess, "./"+self.summary_now+"model_epoch_"+str(epoch)+".ckpt")
#if epoch > 0: delete_network_backups("./"+self.summary_now+"model_epoch_"+str(epoch-1)+".ckpt" )
print("[trainer] Epoch " + str(epoch )+ " ends, avg loss =" + "{:.3f}".format(avg_loss))

total_loss = 0
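The checkpointing change above saves a model only when the epoch's average loss improves on the best seen so far, rather than saving every epoch and deleting the previous file. The pattern in isolation (run_one_epoch is a hypothetical stand-in for the batch loop; the diff uses 100000000000.0 as the initial sentinel):

min_loss = float('inf')
for epoch in range(epochs):
    avg_loss = run_one_epoch()   # hypothetical: returns total_loss / total_batch
    if avg_loss < min_loss:      # keep only checkpoints that improve on the best
        min_loss = avg_loss
        saver.save(sess, "./model_epoch_" + str(epoch) + ".ckpt")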
@@ -209,6 +215,6 @@
argv[4] = True
argv[5] = bool(int(False))
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
#os.environ['CUDA_VISIBLE_DEVICES'] = '1'
train_thread1 = trainer(argv[1], argv[2], 100, use_quaternion=argv[4], resume_training=False )
train_thread1.train(32, 10)
