Face Recognition using SVM.py
import os
from tkinter import *
import tkinter.font as font
import tkinter.simpledialog as simpledialog
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
import cv2
import face_recognition
from sklearn import svm
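
# Expected on-disk layout (inferred from the paths used below, not from separate
# documentation, so adjust if your folders differ):
#   train/<person>/*.jpg        - training images, one folder per person
#   test/test/<person>/*.jpg    - labelled test images for the accuracy report
#   test/test.jpg               - single frame written by the webcam test mode
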
def train_svm():
    # Train the SVC classifier.
    # The training data is all the face encodings from the known images;
    # the labels are the corresponding person (folder) names.
    encodings = []
    names = []
    train_dir = os.listdir('train/')
    for person in train_dir:
        pix = os.listdir("train/" + person)
        print(person)
        # Loop through each training image for the current person
        for person_img in pix:
            # Get the face encoding for the face in each image file
            face = face_recognition.load_image_file(
                "train/" + person + "/" + person_img)
            face_bounding_boxes = face_recognition.face_locations(face)
            # Use the image only if it contains exactly one face
            if len(face_bounding_boxes) == 1:
                face_enc = face_recognition.face_encodings(face)[0]
                # Add the face encoding with its label (name) to the training data
                encodings.append(face_enc)
                names.append(person)
            else:
                print(person + "/" + person_img + " was skipped and can't be used for training")
    # Create and train the SVC classifier
    clf = svm.SVC(gamma='scale')
    clf.fit(encodings, names)
    return clf
def get_confusion_matrix_and_accuracy(clf):
    # Evaluate the classifier on the labelled test set and print the
    # confusion matrix, classification report and accuracy score.
    names_ytest = []
    names_ypred = []
    test_dir = os.listdir('test/test/')
    for person in test_dir:
        print(person)
        pix = os.listdir("test/test/" + person)
        # Loop through each test image for the current person
        for person_img in pix:
            # Get the face encoding for the face in each image file
            face = face_recognition.load_image_file(
                "test/test/" + person + "/" + person_img)
            face_bounding_boxes = face_recognition.face_locations(face)
            # Use the image only if it contains exactly one face
            if len(face_bounding_boxes) == 1:
                face_enc = face_recognition.face_encodings(face)[0]
                # True label is the folder name; predicted label comes from the classifier
                names_ytest.append(person)
                names_ypred.append(clf.predict([face_enc])[0])
            else:
                print(person + "/" + person_img + " was skipped and can't be used for testing")
    print(names_ytest)
    print(names_ypred)
    print(confusion_matrix(names_ytest, names_ypred))
    print(classification_report(names_ytest, names_ypred))
    print(accuracy_score(names_ytest, names_ypred))
def test_svm(clf):
    # Load the test image with unknown faces into a numpy array
    test_image = face_recognition.load_image_file('test/test.jpg')
    # Find all the faces in the test image using the default HOG-based model
    face_locations = face_recognition.face_locations(test_image)
    num = len(face_locations)
    print("Number of faces detected: ", num)
    # Predict each face in the test image using the trained classifier
    list_names = []
    print("Found:")
    for i in range(num):
        test_image_enc = face_recognition.face_encodings(test_image)[i]
        name = clf.predict([test_image_enc])[0]
        print(name)
        list_names.append(name)
    display_name(list_names)
def test_img_capture(clf):
    # Capture a single frame from the webcam (SPACE), save it as the test
    # image, then run recognition on it.
    window.destroy()
    cam = cv2.VideoCapture(0)
    cv2.namedWindow("Face Recognition")
    while True:
        ret, frame = cam.read()
        if not ret:
            print("failed to grab frame")
            break
        cv2.imshow("Face Recognition", frame)
        k = cv2.waitKey(1)
        if k % 256 == 32:
            # SPACE pressed
            img_name = "test.jpg"
            cv2.imwrite(os.path.join('test', img_name), frame)
            print("{} written!".format(img_name))
            print("Closing now")
            break
    cam.release()
    cv2.destroyAllWindows()
    test_svm(clf)
def train_img_capture():
    # Capture training images for a person from the webcam.
    # SPACE saves a frame, ESC ends the capture session.
    cam = cv2.VideoCapture(0)
    cv2.namedWindow("Face Training")
    img_counter = 0
    file_name = simpledialog.askstring(title="Face Recognition", prompt="What's your Name?:")
    # Create the person's training folder (reuse it if it already exists)
    os.makedirs("train/" + file_name, exist_ok=True)
    while True:
        ret, frame = cam.read()
        if not ret:
            print("failed to grab frame")
            break
        cv2.imshow("Face Training", frame)
        k = cv2.waitKey(1)
        if k % 256 == 27:
            # ESC pressed
            print("Escape hit, closing...")
            break
        elif k % 256 == 32:
            # SPACE pressed
            img_name = file_name + "_{}.jpg".format(img_counter)
            cv2.imwrite(os.path.join("train/" + file_name, img_name), frame)
            print("{} written!".format(img_name))
            img_counter += 1
    cam.release()
    cv2.destroyAllWindows()
def display_name(list_name):
    # Show the recognized names in a simple Tkinter listbox window
    window = Tk()
    label = Label(window, text="Faces Recognized", font=font.Font(size=16))
    listbox = Listbox(window, width=50, bg="white", fg="blue")
    label.pack()
    listbox.pack(fill=BOTH, expand=1)
    for row in list_name:
        listbox.insert(END, row)
    window.mainloop()
# Train an initial model at startup, then build the GUI.
model = train_svm()

window = Tk()
window.config(width=300, height=300, padx=20, pady=50)
label = Label(
    window, text='WELCOME TO FACE RECOGNITION. SELECT AN OPTION BELOW:\n', font=font.Font(size=16))
label.pack()
button = Button(window, text="Train", command=train_img_capture, width=20, bg="red", fg="white", pady=10)
button['font'] = font.Font(size=16)
button.pack()
label = Label(window, text='\n')
label.pack()
# "Test (with updated model)" retrains on the current train/ folder before testing;
# "Test (with existing model)" reuses the model trained at startup.
button = Button(window, text="Test (with updated model)", command=lambda: test_img_capture(train_svm()), width=20, bg="#0052cc", fg="white", pady=20)
button['font'] = font.Font(size=16)
button.pack()
label = Label(window, text='\n')
label.pack()
button = Button(window, text="Test (with existing model)", command=lambda: test_img_capture(model), width=20, bg="#0052cc", fg="white", pady=20)
button['font'] = font.Font(size=16)
button.pack()
label = Label(window, text='\n')
label.pack()
button = Button(window, text="Test and get accuracy", command=lambda: get_confusion_matrix_and_accuracy(model), width=20, bg="#0052cc", fg="white", pady=20)
button['font'] = font.Font(size=16)
button.pack()
# label = Label(window, text="\nInstructions\n1). In Train Mode, enter your name and then press SPACEBAR to capture images. Hit ESC when done.\n2). In Test Mode, press SPACEBAR to capture an image and display the detected faces", font=font.Font(size=14))
# label.pack()
window.mainloop()