-
Notifications
You must be signed in to change notification settings - Fork 24
/
camera.py
104 lines (76 loc) · 3.13 KB
/
camera.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
"""
Simulates detection and tracking of pigs for a single camera
"""
import sys
sys.path.append('deepsort-tracking')
import cv2
import json
from collections import defaultdict
# from deep_sort.tracker import Tracker
# from deep_sort import nn_matching
# from tools import generate_detections as gdet
# from yolov4.annotate import Detector
import multiprocessing as multiproc

# 'fork' lets child processes inherit the per-instance `run` override set in
# Camera.__init__ (and any open capture handles).  Guard the call: calling
# set_start_method() after the context is already initialized raises
# RuntimeError, which would break re-imports of this module.
if multiproc.get_start_method(allow_none=True) is None:
    multiproc.set_start_method('fork')
class Camera(multiproc.context.Process):
    """One camera worker process.

    Either runs live pig detection+tracking on a video ``stream`` or replays
    pre-annotated tracks from a JSON ``simulation_file``.  Results are pushed
    to ``queue`` as ``(frame_id, {track_id: [xmin, ymin, xmax, ymax]})``
    tuples; a final ``(-1, {})`` tuple marks end-of-stream.
    """

    def __init__(self, stream, queue, track_prefix="", simulation_file=None):
        """
        stream          -- cv2.VideoCapture to read frames from (ignored when simulating)
        queue           -- multiprocessing.Queue the per-frame tracks are written to
        track_prefix    -- string prepended to every track id (disambiguates cameras)
        simulation_file -- JSON annotation file; when given, no detector/tracker is built
        """
        super().__init__()
        self.queue = queue
        self.track_prefix = track_prefix
        if simulation_file is None:
            # NOTE(review): gdet / nn_matching / Detector / Tracker come from
            # the deepsort/yolov4 imports that are commented out at the top of
            # this file -- this branch raises NameError until they are restored.
            max_cosine_distance = 0.5
            model_filename = 'networks//mars-small128.pb'
            encoder = gdet.create_box_encoder(model_filename, batch_size=1)
            metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, None)
            video_width = int(stream.get(cv2.CAP_PROP_FRAME_WIDTH))
            video_height = int(stream.get(cv2.CAP_PROP_FRAME_HEIGHT))
            detector = Detector(video_width, video_height)
            self.tracker = Tracker(metric, detector, encoder)
            self.stream = stream
            # Process.run is overridden per-instance so start() executes the
            # selected mode; this relies on the 'fork' start method set above.
            self.run = self.track
        else:
            self.simulation_file = simulation_file
            self.run = self.simulate

    def track(self):
        """Read frames from the capture stream and publish tracker output."""
        frame_id = 0
        while True:
            ret, frame = self.stream.read()
            if not ret:
                break
            tracks = self.tracker.consume(frame)
            self.queue.put((frame_id, {"%s%d" % (self.track_prefix, t): bbox
                                       for t, bbox in tracks.items()}))
            frame_id += 1
        # BUGFIX: was (-1, None); emit (-1, {}) to match simulate() so
        # consumers can rely on the payload always being a dict.
        self.queue.put((-1, {}))

    def get_tracks(self):
        """Blocking read of the next (frame_id, tracks) tuple from the queue."""
        return self.queue.get()

    def simulate(self):
        """Replay the JSON annotations as per-frame track dictionaries."""
        with open(self.simulation_file) as f:
            objects = json.load(f)["objects"]
        tracks_dict = defaultdict(dict)
        for id_dict in objects:
            # renamed loop var (was `f`, shadowing the file handle above)
            for frame in id_dict["frames"]:
                # Annotations store center + size; convert to corner format.
                x, y = frame["bbox"]["x"], frame["bbox"]["y"]
                width, height = frame["bbox"]["width"], frame["bbox"]["height"]
                xmin, xmax = x - (width / 2), x + (width / 2)
                ymin, ymax = y - (height / 2), y + (height / 2)
                tracks_dict[frame["frameNumber"]][id_dict["id"]] = [xmin, ymin, xmax, ymax]
        for frame_id, tracks in tracks_dict.items():
            self.queue.put((frame_id, {"%s%d" % (self.track_prefix, t): bbox
                                       for t, bbox in tracks.items()}))
        self.queue.put((-1, {}))
if __name__ == '__main__':
    import argparse

    # Command line: replay a JSON annotation file through a Camera process.
    cli = argparse.ArgumentParser(description="Simulate using JSON file")
    cli.add_argument('--j', required=True, help="Json file which contains annotations")
    options = cli.parse_args()

    track_queue = multiproc.Queue()
    camera = Camera(None, track_queue, simulation_file=options.j)
    camera.start()

    # Drain the queue until the end-of-stream sentinel (frame_id == -1).
    while True:
        frame_no, frame_tracks = track_queue.get()
        if frame_no == -1:
            break
        print(frame_no, frame_tracks)
    camera.join()