-
Notifications
You must be signed in to change notification settings - Fork 3
/
main.py
106 lines (99 loc) · 2.69 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
import argparse
import os
from pprint import pprint
from src.utils.display import stream
from src.utils.generation import generate
from src.utils.final_stream import stream2
if __name__ == "__main__":
    # Entry point: parse CLI flags, optionally (re)generate face embeddings,
    # then run the streaming face pipeline on a test video or the webcam.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-d",
        "--detector",
        default="Mediapipe",
        help="model used for face detection",
    )
    parser.add_argument(
        "-e",
        "--extract-face",
        action="store_true",
        default=False,
        help="extract faces from video",
    )
    parser.add_argument(
        "-a",
        "--align-face",
        action="store_true",
        default=False,
        help="align extracted faces from video",
    )
    parser.add_argument(
        "-t",
        "--track-face",
        action="store_true",
        default=False,
        help="track extracted faces from video",
    )
    parser.add_argument(
        "-r",
        "--recognize-face",
        action="store_true",
        default=False,
        help="recognize faces in the video",
    )
    parser.add_argument(
        "-c",
        "--webcam",
        action="store_true",
        default=False,
        # Fixed: help text was copy-pasted from --recognize-face.
        help="use the webcam instead of a test video file",
    )
    parser.add_argument(
        "-s",
        "--status",
        default="entry",
        help="attendance status",
    )
    parser.add_argument(
        "-g",
        "--generate",
        action="store_true",
        default=False,
        help="embedding generation",
    )
    args = parser.parse_args()

    # Alignment depends on extraction having produced face crops first.
    if args.align_face and not args.extract_face:
        print("You need to extract faces first. Alignment disabled for now.")

    # Only touch the test-video directory when we actually need a file;
    # in webcam mode stream2 receives filepath=None.
    videopath = None
    if not args.webcam:
        video_dir = "data/test-videos/"
        videos = os.listdir(video_dir)
        # NOTE(review): the third directory entry is hard-coded; os.listdir
        # order is platform-dependent, so which video runs may vary.
        if len(videos) < 3:
            parser.error(
                f"expected at least 3 files in {video_dir}, found {len(videos)}"
            )
        videopath = os.path.join(video_dir, videos[2])

    if args.generate:
        generate()

    stream2(
        filepath=videopath,
        model=args.detector,
        extract_face=args.extract_face,
        align_face=args.align_face,
        recognize_face=args.recognize_face,
        padding=0,
        status=args.status,
    )