f_Face_info.py
import cv2
import numpy as np
import face_recognition
from age_detection import f_my_age
from gender_detection import f_my_gender
from race_detection import f_my_race
from emotion_detection import f_emotion_detection
from my_face_recognition import f_main
from random import randint

# instantiate the detectors once at import time
age_detector = f_my_age.Age_Model()
gender_detector = f_my_gender.Gender_Model()
race_detector = f_my_race.Race_Model()
emotion_detector = f_emotion_detection.predict_emotions()
rec_face = f_main.rec()

# ----------------------------------------------
def get_face_info(im):
    # face detection: face_recognition returns boxes as (top, right, bottom, left)
    boxes_face = face_recognition.face_locations(im)
    out = []
    if len(boxes_face) != 0:
        for box_face in boxes_face:
            # keep the original (top, right, bottom, left) box for the recognizer
            box_face_fc = box_face
            x0, y1, x1, y0 = box_face                # x0=top, y1=right, x1=bottom, y0=left
            box_face = np.array([y0, x0, y1, x1])    # reorder to (left, top, right, bottom)
            face_features = {
                "name": [],
                "age": [],
                "gender": [],
                "race": [],
                "emotion": [],
                "bbx_frontal_face": box_face
            }
            # crop the face region
            face_image = im[x0:x1, y0:y1]
            # -------------------- face_recognition --------------------
            face_features["name"] = rec_face.recognize_face2(im, [box_face_fc])[0]
            # -------------------- age_detection ------------------------
            age = age_detector.predict_age(face_image)
            face_features["age"] = str(round(age, 2))
            # -------------------- gender_detection ---------------------
            face_features["gender"] = gender_detector.predict_gender(face_image)
            # -------------------- race_detection -----------------------
            face_features["race"] = race_detector.predict_race(face_image)
            # -------------------- emotion_detection --------------------
            _, emotion = emotion_detector.get_emotion(im, [box_face])
            face_features["emotion"] = emotion[0]
            # -------------------- out -----------------------------------
            out.append(face_features)
    else:
        # no face detected: return a single empty record
        face_features = {
            "name": [],
            "age": [],
            "gender": [],
            "race": [],
            "emotion": [],
            "bbx_frontal_face": []
        }
        out.append(face_features)
    return out
def bounding_box(out, img):
    mm = ""
    for data_face in out:
        box = data_face["bbx_frontal_face"]
        if len(box) == 0:
            continue
        x0, y0, x1, y1 = box  # (left, top, right, bottom)
        img = cv2.rectangle(img, (x0, y0), (x1, y1), (0, 255, 0), 2)
        thickness = 1
        fontSize = 0.5
        step = 13
        na = data_face["name"]
        na1 = na.split('_')
        # build a spoken/written description of the detected person
        if data_face["name"] == "unknown":
            mm += "Unknown person found"
        else:
            mm += " Name is " + na1[0]
        mm += ", Gender is " + data_face["gender"]
        mm += ", Age is " + data_face["age"]
        mm += ", " + data_face["race"]
        mm += ", Emotion is " + data_face["emotion"] + " "
        # persist the description so other components can read it
        ff1 = open("mess1.txt", "w")
        ff1.write(mm)
        ff1.close()
        # random placeholder distance, appended to the age label
        rn = randint(230, 390)
        dst = str(rn) + " cm"
        agg = data_face["age"] + ", " + dst
        try:
            cv2.putText(img, "age: " + agg, (x0, y0 - 7), cv2.FONT_HERSHEY_SIMPLEX, fontSize, (0, 255, 0), thickness)
        except Exception:
            pass
        try:
            cv2.putText(img, "gender: " + data_face["gender"], (x0, y0 - step - 10 * 1), cv2.FONT_HERSHEY_SIMPLEX, fontSize, (0, 255, 0), thickness)
        except Exception:
            pass
        try:
            cv2.putText(img, "race: " + data_face["race"], (x0, y0 - step - 10 * 2), cv2.FONT_HERSHEY_SIMPLEX, fontSize, (0, 255, 0), thickness)
        except Exception:
            pass
        try:
            cv2.putText(img, "emotion: " + data_face["emotion"], (x0, y0 - step - 10 * 3), cv2.FONT_HERSHEY_SIMPLEX, fontSize, (0, 255, 0), thickness)
        except Exception:
            pass
        try:
            cv2.putText(img, "name: " + na1[0], (x0, y0 - step - 10 * 4), cv2.FONT_HERSHEY_SIMPLEX, fontSize, (0, 255, 0), thickness)
        except Exception:
            pass
    return img
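
# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): a minimal example of how
# get_face_info and bounding_box might be wired into a webcam loop. The camera
# index and the window name below are assumptions for illustration only.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    cam = cv2.VideoCapture(0)          # assumed default camera index
    while True:
        ret, frame = cam.read()
        if not ret:
            break
        out = get_face_info(frame)         # detect faces and predict attributes
        frame = bounding_box(out, frame)   # draw boxes and labels on the frame
        cv2.imshow("face_info", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cam.release()
    cv2.destroyAllWindows()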