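# Emotion-recognition demo for a Hugging Face Space: a Gradio app that finds
# faces with an OpenCV Haar cascade and labels each one with the emotion
# predicted by a pre-trained Keras model.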
import os

import cv2
import gradio as gr
import numpy as np
from keras.models import load_model

# Gradio interface
def process_image(img):
    """
    Parameters:
    img: np.ndarray
        an image (e.g., returned by cv2.imread)
    Returns:
    img: np.ndarray
        an image annotated with the bounding box and label
    """
    # Haar cascade shipped with OpenCV for frontal-face detection
    cv2_face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades +
                                             'haarcascade_frontalface_default.xml')
    img = resize_img(img)
    img, _ = annotate_objects_in_image(img,
                                       cv2_face_cascade,
                                       model.labeldict,
                                       model.predictor)
    return img

def annotate_objects_in_image(img, obj_cascade, labeldict, model):
    """
    Parameters:
    img: np.ndarray
        an image (e.g., returned by cv2.imread)
    obj_cascade: cv2.CascadeClassifier
        OpenCV cascade classifier that can detect certain objects
    labeldict: dict
        a dictionary for decoding the model predictions (e.g., {0: 'happy', 1: 'sad'})
    model: keras.engine.functional.Functional
        a Keras model instance (e.g., returned by keras.models.load_model)
    Returns:
    img: np.ndarray
        an image annotated with the bounding box and label
    emotion: str
        predicted emotion of the face image
    """
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Multi-scale Haar detection on the grayscale image
    # (scaleFactor=1.1, minNeighbors=4)
    objects = obj_cascade.detectMultiScale(img_gray, 1.1, 4)
    emotion = ""
    try:
        for (x, y, w, h) in objects:
            # Crop the face with a 50 px margin, clamped to the image bounds
            # (a negative slice index would silently wrap around in NumPy)
            face = img[max(y - 50, 0):y + h + 50, max(x - 50, 0):x + w + 50]
            emotion, _ = predict_emotion_from_image(face, labeldict, model)
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(img, emotion, (x, y), font, 1, (0, 0, 255), 2, cv2.LINE_AA)
            cv2.rectangle(img, (x - 25, y - 25), (x + w + 25, y + h + 25), (0, 255, 0), 2)
    except Exception:
        # If the crop or prediction fails, return the image without a label
        emotion = ""
    return img, emotion

def predict_emotion_from_image(face_raw, labeldict, model):
    """
    Parameters:
    face_raw: np.ndarray
        a square-like image of a human face (e.g., returned by cv2.imread)
    labeldict: dict
        a dictionary for decoding the model predictions (e.g., {0: 'happy', 1: 'sad'})
    model: keras.engine.functional.Functional
        a Keras model instance (e.g., returned by keras.models.load_model)
    Returns:
    emotion: str
        predicted emotion of the face image
    prob: float
        percent probability of the predicted emotion
    """
    # Resize to the 48x48 input the models were trained on
    face_res = cv2.resize(face_raw, (48, 48))
    # Match the model's expected channel count (cf. ModelClass.channelno):
    # single-channel models such as EDA_CNN.h5 need a grayscale input
    if model.input_shape[-1] == 1:
        face_res = cv2.cvtColor(face_res, cv2.COLOR_BGR2GRAY)[..., np.newaxis]
    face_norm = face_res / 255  # scale pixel values to [0, 1]
    face_norm_4dims = np.expand_dims(face_norm, axis=0)  # add the batch dimension
    prediction_vec = model.predict(face_norm_4dims)
    prediction = np.argmax(prediction_vec)
    emotion = labeldict[prediction]
    prob = prediction_vec.max() * 100
    return emotion, prob

def resize_img(img):
    """
    Parameters:
    img: np.ndarray
        a potentially oversized image (e.g., returned by cv2.imread)
    Returns:
    img: np.ndarray
        the image, downscaled to 640 px wide (aspect ratio preserved)
        if it was taller than 960 px
    """
    if img.shape[0] > 960:
        img_AR = img.shape[0] / img.shape[1]
        #print(f'img shape: {img.shape}; aspect ratio: {img_AR}')
        # cv2.resize takes (width, height)
        img = cv2.resize(img, (640, int(np.round(640 * img_AR))))
    return img

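# Bundles a saved Keras model with the label encoding and input channel
# count it was trained with, so the rest of the app stays model-agnostic.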
class ModelClass:
    def __init__(self, name='EDA_CNN.h5'):
        self.name = name
        self.predictor = load_model(os.path.join("models", name))
        # Each model file was trained with its own label encoding
        if name == "model_mobilenet_oncleandata_valacc078.h5":
            self.labeldict = {0: 'Fear', 1: 'Angry', 2: 'Neutral', 3: 'Happy'}
        else:
            self.labeldict = {0: 'Angry', 1: 'Disgust', 2: 'Fear', 3: 'Happy', 4: 'Sad', 5: 'Surprise', 6: 'Neutral'}
        # EDA_CNN.h5 was trained on grayscale input; the MobileNet variants on RGB
        if name == "EDA_CNN.h5":
            self.channelno = 1
        else:
            self.channelno = 3

#modeltouse = "EDA_CNN.h5"
modeltouse = "MobileNet12blocks_wdgenaug_onrawdata_valacc063.h5"
#modeltouse = "model_mobilenet_oncleandata_valacc078.h5"
model = ModelClass(modeltouse)
image_in = gr.inputs.Image() #shape=(48,48)
image_out = gr.inputs.Image()
examples = ['OnurH_CerenH.jpg', 'OnurA_CerenH.jpg']
#fname = 'Onur_happy.jpg'
#image = cv2.imread(fname)
#process_image(image, model)
intf = gr.Interface(fn=process_image, inputs=image_in, outputs=image_out, examples=examples)
intf.launch(inline=False) |
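# inline=False serves the app at a local URL instead of rendering it
# inline inside a notebook.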