Commit

120 stable version
huangxinghui233 committed Sep 25, 2021
1 parent ea59ea0 commit d3092c9
Showing 6 changed files with 47 additions and 182 deletions.
7 changes: 3 additions & 4 deletions control/control.py
@@ -67,10 +67,9 @@ def lontitudeControlSpeed(speed, lonPid):


def speedupJob(Controller, MyCar):
if MyCar.time >= 100:
Controller.speeduplimit = 98
# else:
# Controller.speeduplimit = 70
if MyCar.time >= Controller.superspeeduplimittime \
and MyCar.overtakeSum != 0:
Controller.speeduplimit = Controller.superspeeduplimit

Controller.speedPid.setSetpoint(Controller.speeduplimit)
# longitudinal control: thorro_ and brake_
2 changes: 2 additions & 0 deletions initial/initial.py
@@ -25,6 +25,8 @@ class ControlData(object):
def __init__(self):

self.speeduplimit = 70
self.superspeeduplimit = 101 # super speedup max speed
self.superspeeduplimittime = 60 # super speedup time threshold
self.followlimit = 40
self.overtakelimit = 40

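Read together with the control/control.py hunk above, the two new ControlData fields decide when speedupJob raises the speed cap: the old hard-coded check (time >= 100, limit 98) becomes a configurable time threshold plus an overtake-count requirement. The following is a minimal, self-contained sketch of that interaction, not repository code; the _FakePid and Car classes are illustrative stand-ins, and only the attribute names that appear in this diff (time, overtakeSum, speedPid.setSetpoint) are taken from it.

# --- Illustrative sketch (not part of the diff): how the new ControlData fields drive speedupJob ---
class _FakePid:
    """Stand-in for the project's speed PID; only setSetpoint is assumed."""
    def __init__(self):
        self.setpoint = None
    def setSetpoint(self, value):
        self.setpoint = value

class ControlData:
    def __init__(self):
        self.speeduplimit = 70
        self.superspeeduplimit = 101       # super speedup max speed
        self.superspeeduplimittime = 60    # super speedup time threshold
        self.speedPid = _FakePid()

class Car:
    """Stand-in exposing the two attributes speedupJob reads."""
    def __init__(self, time, overtakeSum):
        self.time = time
        self.overtakeSum = overtakeSum

def speedupJob(Controller, MyCar):
    # Raise the cap only after the time threshold and once at least one overtake has happened.
    if MyCar.time >= Controller.superspeeduplimittime and MyCar.overtakeSum != 0:
        Controller.speeduplimit = Controller.superspeeduplimit
    Controller.speedPid.setSetpoint(Controller.speeduplimit)

ctrl = ControlData()
speedupJob(ctrl, Car(time=30, overtakeSum=0))   # setpoint stays at 70
speedupJob(ctrl, Car(time=65, overtakeSum=2))   # setpoint rises to 101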
3 changes: 1 addition & 2 deletions main.py
@@ -13,7 +13,6 @@
data["landLine"] = None
data["radar"] = None
data["image"] = None
result = None
distanceData = DistanceData()
previous_distance = DistanceData()
current_distance = DistanceData()
@@ -53,7 +52,7 @@
while True:
distanceprocessing.run(distanceData, previous_distance, current_distance, MyCar)

print("current : ", distanceData.distance_mid, "left : ", distanceData.distance_left, "right : ", distanceData.distance_right)
# print("current : ", distanceData.distance_mid, "left : ", distanceData.distance_left, "right : ", distanceData.distance_right)
# print("car decison : ", MyCar.cardecision)

planning.run(distanceData, MyCar)
146 changes: 6 additions & 140 deletions perception/DrivingDetection.py
@@ -45,7 +45,7 @@ def make_parser(left_num, right_num):
parser.add_argument("-lb", "--leftbound", type=int, default=left_num)
parser.add_argument("-rb", "--rightbound", type=int, default=right_num)
parser.add_argument("-expn", "--experiment-name", type=str, default=None)
parser.add_argument("-n", "--name", type=str, default="yolox-x", help="model name")
parser.add_argument("-n", "--name", type=str, default="yolox-l", help="model name")

parser.add_argument(
"--path", default="./assets/dog.jpg", help="path to images or video"
@@ -62,11 +62,11 @@ def make_parser(left_num, right_num):
parser.add_argument(
"-f",
"--exp_file",
default="./YOLOX/exps/default/yolox_x.py",
default="./YOLOX/exps/default/yolox_l.py",
type=str,
help="pls input your experiment description file",
)
parser.add_argument("-c", "--ckpt", default=".//YOLOX/pretrainedmodel/yolox_x.pth", type=str, help="ckpt for eval")
parser.add_argument("-c", "--ckpt", default=".//YOLOX/pretrainedmodel/yolox_l.pth", type=str, help="ckpt for eval")
parser.add_argument(
"--device",
default="gpu",
@@ -203,61 +203,7 @@ def visual(self, output, img_info, cls_conf=0.35):

vis_res = vis(img, bboxes, scores, cls, cls_conf, self.cls_names)
return vis_res


def image_demo(predictor, vis_folder, path, current_time, save_result):
if os.path.isdir(path):
files = get_image_list(path)
else:
files = [path]
files.sort()
for image_name in files:
outputs, img_info = predictor.inference(image_name)
result_image = predictor.visual(outputs[0], img_info, predictor.confthre)
print("shape: ", result_image.shape)
if save_result:
save_folder = os.path.join(
vis_folder, time.strftime("%Y_%m_%d_%H_%M_%S", current_time)
)
os.makedirs(save_folder, exist_ok=True)
save_file_name = os.path.join(save_folder, os.path.basename(image_name))
logger.info("Saving detection result in {}".format(save_file_name))
cv2.imwrite(save_file_name, result_image)
# cv2.imshow("test", result_image)
ch = cv2.waitKey(0)
if ch == 27 or ch == ord("q") or ch == ord("Q"):
break


def imageflow_demo(predictor, vis_folder, current_time, args):
cap = cv2.VideoCapture(args.path if args.demo == "video" else args.camid)
width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) # float
height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) # float
fps = cap.get(cv2.CAP_PROP_FPS)
save_folder = os.path.join(
vis_folder, time.strftime("%Y_%m_%d_%H_%M_%S", current_time)
)
os.makedirs(save_folder, exist_ok=True)
if args.demo == "video":
save_path = os.path.join(save_folder, args.path.split("/")[-1])
else:
save_path = os.path.join(save_folder, "camera.mp4")
logger.info(f"video save_path is {save_path}")
vid_writer = cv2.VideoWriter(
save_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (int(width), int(height))
)
while True:
ret_val, frame = cap.read()
if ret_val:
outputs, img_info = predictor.inference(frame)
result_frame = predictor.visual(outputs[0], img_info, predictor.confthre)
if args.save_result:
vid_writer.write(result_frame)
ch = cv2.waitKey(1)
if ch == 27 or ch == ord("q") or ch == ord("Q"):
break
else:
break


def driving_runtime(predictor, vis_folder, image, args, MyCar):
# detection model inference
@@ -345,15 +291,15 @@ def driving_runtime(predictor, vis_folder, image, args, MyCar):
point.y_ = outputs[0][i][3] / 1.33333
# when the autonomous driving vehicle is in the middle lane or the right lane,
# there will be a left bounding box to estimate distance.
if (MyCar.midlane == 0 or MyCar.midlane == -7) and \
if (MyCar.midlane == 0 or MyCar.midlane == -7.5) and \
MyCar.changing == False and \
triangle_left.isInTriangle(point):
leftlist1.append(i)
leftlist2.append( (outputs[0][i][2] - outputs[0][i][0]) * (outputs[0][i][3] - outputs[0][i][1]) )

# when the autonomous driving vehicle is in the middle lane or the left lane,
# there will be a right bounding box to estimate distance.
if (MyCar.midlane == 0 or MyCar.midlane == 7) and \
if (MyCar.midlane == 0 or MyCar.midlane == 7.5) and \
MyCar.changing == False and \
triangle_right.isInTriangle(point):
rightlist1.append(i)
@@ -445,7 +391,6 @@ def driving_runtime(predictor, vis_folder, image, args, MyCar):
# visualize the bounding boxes on the original image
result_image = predictor.visual(outputs[0], img_info, predictor.confthre)
img = cv2.cvtColor(result_image, cv2.COLOR_RGB2BGR)
# cv2.putText(img,"wei", (225, 100), 1, 2, (0,0,0))

# visualize the two lines of the left and right bounds
templeft = args.leftbound // 1.3333
@@ -467,82 +412,3 @@ def driving_runtime(predictor, vis_folder, image, args, MyCar):
cv2.waitKey(1)

return distance_left, distance_mid, distance_right


def main(exp, args):
if not args.experiment_name:
args.experiment_name = exp.exp_name

file_name = os.path.join(exp.output_dir, args.experiment_name)
os.makedirs(file_name, exist_ok=True)

vis_folder = None
if args.save_result:
vis_folder = os.path.join(file_name, "vis_res")
os.makedirs(vis_folder, exist_ok=True)

if args.trt:
args.device = "gpu"

logger.info("Args: {}".format(args))

if args.conf is not None:
exp.test_conf = args.conf
if args.nms is not None:
exp.nmsthre = args.nms
if args.tsize is not None:
exp.test_size = (args.tsize, args.tsize)

model = exp.get_model()
logger.info("Model Summary: {}".format(get_model_info(model, exp.test_size)))

if args.device == "gpu":
model.cuda()
if args.fp16:
model.half() # to FP16
model.eval()

if not args.trt:
if args.ckpt is None:
ckpt_file = os.path.join(file_name, "best_ckpt.pth")
else:
ckpt_file = args.ckpt
logger.info("loading checkpoint")
ckpt = torch.load(ckpt_file, map_location="cpu")
# load the model state dict
model.load_state_dict(ckpt["model"])
logger.info("loaded checkpoint done.")

if args.fuse:
logger.info("\tFusing model...")
model = fuse_model(model)

if args.trt:
assert not args.fuse, "TensorRT model is not support model fusing!"
trt_file = os.path.join(file_name, "model_trt.pth")
assert os.path.exists(
trt_file
), "TensorRT model is not found!\n Run python3 tools/trt.py first!"
model.head.decode_in_inference = False
decoder = model.head.decode_outputs
logger.info("Using TensorRT to inference")
else:
trt_file = None
decoder = None

predictor = Predictor(model, exp, COCO_CLASSES, trt_file, decoder, args.device, args.fp16, args.legacy)
current_time = time.localtime()
if args.demo == "image":
image_demo(predictor, vis_folder, args.path, current_time, args.save_result)
elif args.demo == "video" or args.demo == "webcam":
imageflow_demo(predictor, vis_folder, current_time, args)
elif args.demo == "driving":
driving_runtime(predictor, vis_folder, image, args)



if __name__ == "__main__":
args = make_parser().parse_args()
exp = get_exp(args.exp_file, args.name)

main(exp, args)
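Besides dropping the standalone image_demo, imageflow_demo, and main entry points, this file switches the default detector from yolox-x to yolox-l in the model name, experiment file, and checkpoint path. A condensed sketch of the checkpoint-loading sequence implied by the removed main() is shown below, using only the calls that appear in it; the yolox.exp import location is assumed from upstream YOLOX, and the paths follow the new defaults.

# Condensed sketch only; assumes get_exp is importable from yolox.exp as in upstream YOLOX.
import torch
from yolox.exp import get_exp

exp = get_exp("./YOLOX/exps/default/yolox_l.py", "yolox-l")   # new default exp file and model name
model = exp.get_model()
model.cuda()
model.eval()

ckpt = torch.load("./YOLOX/pretrainedmodel/yolox_l.pth", map_location="cpu")   # new default checkpoint
model.load_state_dict(ckpt["model"])   # same state-dict key as in the removed main()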
5 changes: 2 additions & 3 deletions perception/perception.py
@@ -30,7 +30,6 @@ def set_distance_right(self, dis):


def convert_image_to_numpy_ndarray(imageframe_byte):

return numpy.array(Image.open(imageframe_byte))


@@ -51,12 +50,12 @@ def run(perceptionFlag, data, PerceptionArgs, distanceData, MyCar):
image=img,
args=PerceptionArgs["args"],
MyCar=MyCar)
if MyCar.midlane == 7:
if MyCar.midlane == 7.5:
distanceData.set_distance_left(float('inf'))
else:
distanceData.set_distance_left(result_left.item())

if MyCar.midlane == -7:
if MyCar.midlane == -7.5:
distanceData.set_distance_right(float('inf'))
else:
distanceData.set_distance_right(result_right.item())
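The repeated ±7 to ±7.5 edits in DrivingDetection.py and in this file all touch the same lane-offset convention: midlane 7.5 marks the leftmost lane, 0 the middle lane, and -7.5 the rightmost lane, as the decision.py comments below spell out. A small hedged sketch of the boundary handling follows; DistanceData is reduced here to the two setters used in the diff, and clamp_side_distances is an illustrative name, not a repository function.

# Illustrative sketch; LEFTMOST/MIDDLE/RIGHTMOST mirror the midlane values used in this diff.
LEFTMOST, MIDDLE, RIGHTMOST = 7.5, 0, -7.5

class DistanceData:
    """Stand-in exposing only the two setters that appear in perception.py."""
    def __init__(self):
        self.distance_left = None
        self.distance_right = None
    def set_distance_left(self, dis):
        self.distance_left = dis
    def set_distance_right(self, dis):
        self.distance_right = dis

def clamp_side_distances(midlane, result_left, result_right, distanceData):
    # In the leftmost lane there is no lane to the left, so that distance is infinite;
    # the rightmost lane is handled symmetrically for the right distance.
    distanceData.set_distance_left(float('inf') if midlane == LEFTMOST else result_left)
    distanceData.set_distance_right(float('inf') if midlane == RIGHTMOST else result_right)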
66 changes: 33 additions & 33 deletions planning/decision.py
@@ -42,43 +42,43 @@ def run(distanceData, MyCar):
findpath(distance, MyCar) # left or right
return

# stage 2-1
# handles a car on the left blocking the lane change while we are in the speedup stage and need a quick lane change
# overtakeSum counting hack
if(MyCar.cardecision == 'speedup'
and distance_mid > MyCar.saftydistance # well above the 12 m follow threshold
and distance_left < MyCar.saftydistance + 3 # the car on the left is very close to the car ahead
and MyCar.midlane == -7.5 # currently in the rightmost lane
and distance_mid - distance_left < MyCar.saftydistance
# and not MyCar.changing # ensure the overtake is decided only once
# and MyCar.overtakeSum == 9
):
# automatically returns to the follow state once the overtake completes
MyCar.cardecision = 'overtake'
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
# MyCar.midlane = 0
# findpath(distance, MyCar) # left or right
MyCar.direction = 'left'
return
# # stage 2-1
# # handles a car on the left blocking the lane change while we are in the speedup stage and need a quick lane change
# # overtakeSum counting hack
# if(MyCar.cardecision == 'speedup'
# and distance_mid > MyCar.saftydistance # well above the 12 m follow threshold
# and distance_left < MyCar.saftydistance + 3 # the car on the left is very close to the car ahead
# and MyCar.midlane == -7.5 # currently in the rightmost lane
# and distance_mid - distance_left < MyCar.saftydistance
# # and not MyCar.changing # ensure the overtake is decided only once
# # and MyCar.overtakeSum == 9
# ):
# # automatically returns to the follow state once the overtake completes
# MyCar.cardecision = 'overtake'
# print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
# # MyCar.midlane = 0
# # findpath(distance, MyCar) # left or right
# MyCar.direction = 'left'
# return

# stage 2-2
# handles a car on the right blocking the lane change while we are in the speedup stage and need a quick lane change
if(MyCar.cardecision == 'speedup'
and distance_mid > MyCar.saftydistance # well above the 12 m follow threshold
and distance_right < MyCar.saftydistance + 3 # the car on the right is very close to the car ahead
and MyCar.midlane == 7.5 # currently in the leftmost lane
and distance_mid - distance_right < MyCar.saftydistance
# and not MyCar.changing # ensure the overtake is decided only once
# and MyCar.overtakeSum == 9
): #
# if(MyCar.cardecision == 'speedup'
# and distance_mid > MyCar.saftydistance # well above the 12 m follow threshold
# and distance_right < MyCar.saftydistance + 3 # the car on the right is very close to the car ahead
# and MyCar.midlane == 7.5 # currently in the leftmost lane
# and distance_mid - distance_right < MyCar.saftydistance
# # and not MyCar.changing # ensure the overtake is decided only once
# # and MyCar.overtakeSum == 9
# ): #

# automatically returns to the follow state once the overtake completes
MyCar.cardecision = 'overtake'
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
# MyCar.midlane = 0
# findpath(distance, MyCar) # left or right
MyCar.direction = 'right'
return
# # automatically returns to the follow state once the overtake completes
# MyCar.cardecision = 'overtake'
# print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
# # MyCar.midlane = 0
# # findpath(distance, MyCar) # left or right
# MyCar.direction = 'right'
# return

# stage 3
# speedup and get close to the front car
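For reference, the overtake trigger that this commit disables (both the left-blocking and right-blocking variants are now commented out, so control falls through to stage 3) reduces to the following predicate. The function name blocked_overtake_trigger is illustrative only; the attribute names come from the diff.

# Illustrative predicate only; blocked_overtake_trigger is not a function in the repository.
def blocked_overtake_trigger(car, distance_mid, distance_side, outer_lane_value):
    # speedup stage, open gap ahead, adjacent car boxing in that gap,
    # and the vehicle sitting in the outermost lane (-7.5 rightmost, 7.5 leftmost).
    return (car.cardecision == 'speedup'
            and distance_mid > car.saftydistance
            and distance_side < car.saftydistance + 3
            and car.midlane == outer_lane_value
            and distance_mid - distance_side < car.saftydistance)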
