#24 attempting to use better model with Pi
JohnnyD1 committed Nov 23, 2019
1 parent 75e5774 commit 0901408
Showing 2 changed files with 33 additions and 5 deletions.
@@ -9,6 +9,25 @@
import imutils
import time
import cv2
from pipelinev2 import VehicleCounter



# exit zone polygon(s) for VehicleCounter: a 100 px wide strip along the right edge of the frame
EXIT_PTS = np.array([
# left side
# [[0, 0], [50, 0], [50, 480], [0, 480]]
# right side
[[764, 0], [864, 0], [864, 480], [764, 480]]
])

# centre point of a bounding box given as (x, y, width, height)
def get_centroid(x, y, w, h):
x1 = int(w / 2)
y1 = int(h / 2)

cx = x + x1
cy = y + y1

return (cx, cy)

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
@@ -40,6 +59,9 @@
time.sleep(2.0)
fps = FPS().start()


# counter from pipelinev2, configured with the exit zone defined above
vehicle_counter = VehicleCounter(exit_masks=EXIT_PTS)

# loop over the frames from the video stream
while True:
# grab the frame from the threaded video stream and resize it
@@ -62,7 +84,7 @@
# extract the confidence (i.e., probability) associated with
# the prediction
confidence = detections[0, 0, i, 2]

# (bounding box, centroid) pairs to hand to the vehicle counter
matches = []
# filter out weak detections by ensuring the `confidence` is
# greater than the minimum confidence
if confidence > args["confidence"]:
@@ -72,7 +94,8 @@
idx = int(detections[0, 0, i, 1])
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")


# store the box together with its centroid; get_centroid expects width/height, so convert from corner coordinates
matches.append(((startX, startY, endX, endY), get_centroid(startX, startY, endX - startX, endY - startY)))
# draw the prediction on the frame
label = "{}: {:.2f}%".format(CLASSES[idx],
confidence * 100)
@@ -81,6 +104,11 @@
y = startY - 15 if startY - 15 > 15 else startY + 15
cv2.putText(frame, label, (startX, y),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)

# nothing detected in this frame, so skip the counting step
if not matches:
continue
# hand this frame's detections to the counter under the same 'objects' key pipeline.py uses
d = {'objects': matches}
vehicle_counter.run(d)

# show the output frame
cv2.imshow("Frame", frame)
@@ -100,4 +128,4 @@

# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
vs.stop()
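As a quick sanity check on the conversion used above (SSD detections come back as corner coordinates, while get_centroid expects a top-left corner plus width and height), here is a small standalone example; the box values are made up purely for illustration:

def get_centroid(x, y, w, h):
    # centre of a box given as top-left corner plus width and height
    return (x + int(w / 2), y + int(h / 2))

# an SSD detection yields corner coordinates (startX, startY, endX, endY)
startX, startY, endX, endY = 100, 50, 200, 150
cx, cy = get_centroid(startX, startY, endX - startX, endY - startY)
print(cx, cy)  # prints 150 100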
ml_model/scripts/car_detection_shapes/pipeline.py (4 changes: 2 additions & 2 deletions)
@@ -130,7 +130,7 @@ def detect_vehicles(self, fg_mask, context):
continue

centroid = utils.get_centroid(x, y, w, h)

# these matches determine what detect_vehicles hands back as context['objects']
matches.append(((x, y, w, h), centroid))

return matches
@@ -147,7 +147,7 @@ def __call__(self, context):
if self.save_image:
utils.save_frame(fg_mask, self.image_dir +
"/mask_%04d.png" % frame_number, flip=False)

# the detections and the foreground mask are stored on the context for later pipeline stages
context['objects'] = self.detect_vehicles(fg_mask, context)
context['fg_mask'] = fg_mask
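In both this pipeline and the new Pi script, the 'objects' entry carries a list of (bounding box, centroid) pairs for the counter to consume. A minimal sketch of that structure, with hypothetical numbers used only for illustration:

# hypothetical values; only the keys and nesting mirror what this diff actually sets
context = {
    'objects': [((764, 120, 90, 60), (809, 150))],  # one ((x, y, w, h), (cx, cy)) match
    'fg_mask': None,                                 # foreground mask produced by this stage
}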

