Commit 2976284
Parent(s): 95f9f46
Update app.py
app.py CHANGED
@@ -65,13 +65,11 @@ if img_file_buffer is not None:
 
     img = np.asarray(image1)
     img = cv2.resize(img,(0,0),None,0.25,0.25)
-    st.write("resize")
 
     #image gray
     img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
     faces = face_cascade.detectMultiScale(
         image=img_gray, scaleFactor=1.3, minNeighbors=5)
-    st.write("gray")
     try:
         for (x, y, w, h) in faces:
             cv2.rectangle(img=img, pt1=(x, y), pt2=(
@@ -90,7 +88,6 @@ if img_file_buffer is not None:
             label_position = (x, y)
             img = cv2.putText(img, output, label_position, cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
             st.image(img, use_column_width=True)
-            st.write("emotion done")
     except:
         st.write("face is not clear")
 
@@ -99,13 +96,10 @@ if img_file_buffer is not None:
     imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
     facesCurFrame = face_recognition.face_locations(imgS)
     encodesCurFrame = face_recognition.face_encodings(imgS,facesCurFrame)
-    st.write("recog")
     for encodeFace,faceLoc in zip(encodesCurFrame,facesCurFrame):
         matches = face_recognition.compare_faces(encodeListknown,encodeFace)
         faceDis = face_recognition.face_distance(encodeListknown,encodeFace)
-        #print(faceDis)
         matchIndex = np.argmin(faceDis)
-        st.write("matching")
         if matches[matchIndex]:
             name = classnames[matchIndex]
             st.write(name)
@@ -113,8 +107,8 @@ if img_file_buffer is not None:
             y1, x2, y2, x1 = y1*4,x2*4,y2*4,x1*4
             cv2.rectangle(image,(x1,y1),(x2,y2),(0,255,0),2)
             cv2.rectangle(image,(x1,y2-35),(x2,y2),(0,255,0),cv2.FILLED)
-            cv2.putText(image,name,(x1+6,y2-6),cv2.FONT_HERSHEY_COMPLEX,1,(255, 255, 255),2
-            st.
+            cv2.putText(image,name,(x1+6,y2-6),cv2.FONT_HERSHEY_COMPLEX,1,(255, 255, 255),2)
+            st.image(image, use_column_width=True)
             ##############
             if name:
                 if output=='happy':
@@ -139,5 +133,5 @@ if img_file_buffer is not None:
 
 
 else:
-    st.write("
+    st.write("Failed")
 
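
For reference, below is a minimal sketch of the recognition-and-annotation step that this commit completes (closing the cv2.putText call, replacing the dangling "st." with st.image, and finishing the st.write("Failed") fallback). The helper name annotate_known_faces is hypothetical, and the sketch assumes the surrounding app.py already provides the full-size BGR frame image, the 0.25-scaled RGB frame imgS, and the known-face data encodeListknown and classnames; it illustrates the flow rather than reproducing the exact file contents.

import cv2
import numpy as np
import face_recognition
import streamlit as st

def annotate_known_faces(image, imgS, encodeListknown, classnames):
    # Hypothetical helper; the Space's app.py runs this logic inline.
    # Locate faces in the downscaled RGB frame and compute their encodings.
    facesCurFrame = face_recognition.face_locations(imgS)
    encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)

    for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):
        # Compare the current face against every known encoding and pick
        # the closest one by face distance.
        matches = face_recognition.compare_faces(encodeListknown, encodeFace)
        faceDis = face_recognition.face_distance(encodeListknown, encodeFace)
        matchIndex = np.argmin(faceDis)

        if matches[matchIndex]:
            name = classnames[matchIndex]
            st.write(name)
            # faceLoc is (top, right, bottom, left) on the 0.25-scaled frame,
            # so scale by 4 to map the box back onto the original image.
            y1, x2, y2, x1 = [v * 4 for v in faceLoc]
            cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.rectangle(image, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)
            cv2.putText(image, name, (x1 + 6, y2 - 6),
                        cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
            st.image(image, use_column_width=True)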