Recognize.py
from scipy.spatial import distance
import face_recognition
from datetime import datetime
import cv2
import numpy as np
import dlib


def recognize_attendance():
    # Frame counters for a crude blink/liveness check: attendance is only
    # marked once both closed-eye and open-eye frames have been observed.
    eyes_closed_time = 0
    eyes_opened_time = 0
    known_face_encodings = np.load('encode-data.npy', allow_pickle=True)
    known_faces_data = np.load('known-faces-data.npy', allow_pickle=True)
    # Split the stored (name, id) pairs once, rather than on every match.
    known_names, known_ids = zip(*known_faces_data)
    def markAttendance(name):
        # 'r+' assumes the CSV already exists; see the entry-point sketch at
        # the bottom of the file. Only the first sighting of a name is logged.
        with open('Attendance_Records/Attendance.csv', 'r+') as f:
            myDataList = f.readlines()
            face_names = []
            for line in myDataList:
                entry = line.split(',')
                face_names.append(entry[0])
            if name not in face_names:
                now = datetime.now()
                dtString = now.strftime('%H:%M')
                f.write(f'\n{name},{dtString},{now.strftime("%d-%m-%Y")}')
    cap = cv2.VideoCapture(0)
    # Create the dlib detector once, outside the frame loop.
    detector = dlib.get_frontal_face_detector()
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        # Convert the image from BGR color (which OpenCV uses) to RGB color
        # (which face_recognition uses); cv2.cvtColor also returns the
        # contiguous array that dlib expects, unlike a [:, :, ::-1] slice.
        rgb_small_frame = cv2.cvtColor(small_frame, cv2.COLOR_BGR2RGB)
        face_landmarks_list = face_recognition.face_landmarks(rgb_small_frame)
        for face_landmark in face_landmarks_list:
            left_eye = face_landmark['left_eye']
            right_eye = face_landmark['right_eye']
            ear_left = get_ear(left_eye)
            ear_right = get_ear(right_eye)
            # An eye aspect ratio below 0.2 is treated as a closed eye.
            closed = ear_left < 0.2 and ear_right < 0.2
            if closed:
                eyes_closed_time += 1
            else:
                eyes_opened_time += 1
        # Only run the heavier recognition step once the blink check has seen
        # both closed and open eyes.
        if eyes_closed_time > 1 and eyes_opened_time > 1:
            current_face_locations = face_recognition.face_locations(rgb_small_frame)
            current_face_encodings = face_recognition.face_encodings(rgb_small_frame, current_face_locations)
            dets, scores, idx = detector.run(rgb_small_frame, 1, -1)
            # Keep the last detection score, scaled towards a 0-100 range
            # (dlib's scores are small floats, so the original `% 100` was a
            # no-op); initialise it so it is defined even with no detections.
            last_detection_score = 0.0
            for score in scores:
                last_detection_score = score * 100
            for (top, right, bottom, left), face_encoding in zip(current_face_locations,
                                                                 current_face_encodings):
                matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
                faceDis = face_recognition.face_distance(known_face_encodings, face_encoding)
                matchIndex = np.argmin(faceDis)
                if matches[matchIndex]:
                    name = known_names[matchIndex]
                    person_id = known_ids[matchIndex]
                    markAttendance(name)
                    # Scale back up face locations since the frame we detected in was scaled to 1/4 size
                    top *= 4
                    right *= 4
                    bottom *= 4
                    left *= 4
                    # Draw a rectangle around the face, with a filled label bar below it
                    cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
                    cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
                    font = cv2.FONT_HERSHEY_DUPLEX
                    if len(scores) >= 1 and last_detection_score > 70:
                        last_detection_score = round(last_detection_score, 1)
                        cv2.putText(frame, f'{name} {person_id} {last_detection_score}',
                                    (left + 3, bottom - 6), font, 1.0, (255, 255, 255), 1)
                    else:
                        cv2.putText(frame, 'low confidence of detection',
                                    (left + 3, bottom - 6), font, 1.0, (255, 255, 255), 1)
        # display the frame
        cv2.imshow('Video', frame)
        # wait for 100 milliseconds; 'q' quits
        if cv2.waitKey(100) & 0xFF == ord('q'):
            print('exited..\n')
            cap.release()
            cv2.destroyAllWindows()
            break


def get_ear(eye):
    # Eye aspect ratio over the six eye landmarks: the mean of the two
    # vertical distances divided by the horizontal distance. It falls
    # towards zero as the eye closes.
    A = distance.euclidean(eye[1], eye[5])
    B = distance.euclidean(eye[2], eye[4])
    C = distance.euclidean(eye[0], eye[3])
    ear = (A + B) / (2.0 * C)
    return ear
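

# A minimal entry-point sketch, not part of the original script: it assumes
# 'encode-data.npy' and 'known-faces-data.npy' sit next to this file, and it
# creates the attendance CSV (with an assumed Name,Time,Date header) if it is
# missing, since markAttendance opens it in 'r+' mode.
if __name__ == '__main__':
    import os
    os.makedirs('Attendance_Records', exist_ok=True)
    if not os.path.exists('Attendance_Records/Attendance.csv'):
        with open('Attendance_Records/Attendance.csv', 'w') as f:
            f.write('Name,Time,Date')
    recognize_attendance()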