# Attendance1 / app.py
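"""Streamlit attendance demo: takes a webcam snapshot, matches it against the
.jpg reference faces in the working directory with face_recognition, and runs
a Keras facial-emotion classifier over each face found by a Haar cascade."""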
import os
from datetime import datetime

import cv2
import face_recognition
import numpy as np
import requests  # only needed if the attendance POST at the bottom is re-enabled
import streamlit as st
from keras.models import model_from_json
from PIL import Image
# Gather reference images: every .jpg in the working directory is a known
# face, named after its filename (without the extension).
Images = []
classnames = []
myList = os.listdir()
# st.write(myList)
for cls in myList:
    if os.path.splitext(cls)[1] == ".jpg":
        curImg = cv2.imread(cls)
        Images.append(curImg)
        classnames.append(os.path.splitext(cls)[0])
st.write(classnames)
# Load the pre-trained facial-emotion CNN (architecture from JSON, weights
# from HDF5) and OpenCV's Haar cascade for face detection.
with open("facialemotionmodel.json", "r") as json_file:
    model_json = json_file.read()
model = model_from_json(model_json)
model.load_weights("facialemotionmodel.h5")

haar_file = cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
face_cascade = cv2.CascadeClassifier(haar_file)
def findEncodings(Images):
    # One encoding per reference image. Assumes each image contains exactly
    # one detectable face; face_encodings(...)[0] raises IndexError otherwise.
    encodeList = []
    for img in Images:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # cv2 loads BGR; dlib wants RGB
        encode = face_recognition.face_encodings(img)[0]
        encodeList.append(encode)
    return encodeList
def extract_features(image):
    # Reshape a 48x48 grayscale face crop into the (1, 48, 48, 1) batch
    # format the emotion model expects, scaled to [0, 1].
    feature = np.array(image)
    feature = feature.reshape(1, 48, 48, 1)
    return feature / 255.0

labels = {0: 'angry', 1: 'disgust', 2: 'fear', 3: 'happy', 4: 'neutral', 5: 'sad', 6: 'surprise'}
encodeListknown = findEncodings(Images)
st.write('Encoding Complete')
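# Note: Streamlit reruns the whole script on every interaction, so the
# encodings above are recomputed each time; wrapping findEncodings in
# st.cache_data would avoid the repeated work.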
img_file_buffer = st.camera_input("Take a picture")
if img_file_buffer is not None:
    test_image = Image.open(img_file_buffer)
    st.image(test_image, use_column_width=True)
    # np.array gives a writable copy (np.asarray may return a read-only view,
    # which cv2's drawing functions reject). PIL images are already RGB, so
    # no BGR-to-RGB conversion is needed here.
    image = np.array(test_image)

    # Downscale 4x for a faster face search; locations are scaled back later.
    imgS = cv2.resize(image, (0, 0), None, 0.25, 0.25)
    facesCurFrame = face_recognition.face_locations(imgS)
    encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)
    # Compare each detected face with the known encodings and draw a
    # labelled box around the best match.
    for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):
        matches = face_recognition.compare_faces(encodeListknown, encodeFace)
        faceDis = face_recognition.face_distance(encodeListknown, encodeFace)
        # print(faceDis)
        matchIndex = np.argmin(faceDis)
        if matches[matchIndex]:
            name = classnames[matchIndex]
            st.write(name)
            y1, x2, y2, x1 = faceLoc
            y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4  # undo the 0.25 resize
            cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.rectangle(image, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)
            cv2.putText(image, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
    # Emotion detection: the Haar cascade works on grayscale, and the emotion
    # model needs a 48x48 grayscale crop of each detected face.
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    try:
        for (p, q, r, s) in faces:
            cv2.rectangle(image, (p, q), (p + r, q + s), (255, 0, 0), 2)
            face_crop = cv2.resize(gray[q:q + s, p:p + r], (48, 48))
            img = extract_features(face_crop)
            pred = model.predict(img)
            prediction_label = labels[pred.argmax()]
            # print("Predicted Output:", prediction_label)
            cv2.putText(image, prediction_label, (p - 10, q - 10),
                        cv2.FONT_HERSHEY_COMPLEX_SMALL, 2, (0, 0, 255))
        st.write("success")
    except Exception:
        pass
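    # The boxes and labels above are drawn onto `image` but never shown;
    # displaying the annotated frame here is an assumption about the
    # intended final step.
    st.image(image, use_column_width=True)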
# ##############
# url = "https://kiwi-whispering-plier.glitch.me/update"
# data = {
# 'name': name,
# }
# else:
# st.write("Please smile")
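# A minimal sketch of the attendance POST, should it be re-enabled (assumes
# the Glitch endpoint accepts form data with a 'name' field):
# try:
#     response = requests.post(url, data=data, timeout=5)
#     st.write(response.status_code)
# except requests.RequestException:
#     st.write("Could not reach the attendance server")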