gurwindersingh committed on
Commit
49c5d4a
·
1 Parent(s): 86c88ec

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +59 -40
app.py CHANGED
@@ -4,15 +4,13 @@ import numpy as np
4
  import cv2
5
  import requests
6
  from keras.models import model_from_json
7
-
8
  import face_recognition
9
  import os
10
  from datetime import datetime
11
 
12
- #the following are to do with this interactive notebook code
13
- from matplotlib import pyplot as plt # this lets you draw inline pictures in the notebooks
14
- import pylab # this allows you to control figure size
15
- pylab.rcParams['figure.figsize'] = (10.0, 8.0) # this controls figure size in the notebook
16
 
17
  import io
18
  import streamlit as st
@@ -29,16 +27,22 @@ for cls in myList:
29
  classnames.append(os.path.splitext(cls)[0])
30
  st.write(classnames)
31
 
32
-
33
- json_file = open("facialemotionmodel.json", "r")
34
- model_json = json_file.read()
 
 
35
  json_file.close()
36
- model = model_from_json(model_json)
37
 
38
- model.load_weights("facialemotionmodel.h5")
39
- haar_file=cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
40
- face_cascade=cv2.CascadeClassifier(haar_file)
41
 
 
 
 
 
 
42
 
43
  def findEncodings(Images):
44
  encodeList = []
@@ -53,7 +57,6 @@ def extract_features(image):
53
  feature = feature.reshape(1,48,48,1)
54
  return feature/255.0
55
 
56
- labels = {0 : 'angry', 1 : 'disgust', 2 : 'fear', 3 : 'happy', 4 : 'neutral', 5 : 'sad', 6 : 'surprise'}
57
 
58
  encodeListknown = findEncodings(Images)
59
  st.write('Encoding Complete')
@@ -64,23 +67,48 @@ if img_file_buffer is not None:
64
  test_image = Image.open(img_file_buffer)
65
  image1 = Image.open(img_file_buffer)
66
  st.image(test_image, use_column_width=True)
67
- image = np.asarray(image1)
68
- image1 = np.asarray(test_image)
69
- image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
70
-
71
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
72
  #########################
73
  imgS = cv2.resize(image,(0,0),None,0.25,0.25)
74
  imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
75
  facesCurFrame = face_recognition.face_locations(imgS)
76
  encodesCurFrame = face_recognition.face_encodings(imgS,facesCurFrame)
77
-
78
  for encodeFace,faceLoc in zip(encodesCurFrame,facesCurFrame):
79
  matches = face_recognition.compare_faces(encodeListknown,encodeFace)
80
  faceDis = face_recognition.face_distance(encodeListknown,encodeFace)
81
  #print(faceDis)
82
  matchIndex = np.argmin(faceDis)
83
- st.write("start")
84
  if matches[matchIndex]:
85
  name = classnames[matchIndex]
86
  st.write(name)
@@ -89,28 +117,19 @@ if img_file_buffer is not None:
89
  cv2.rectangle(image,(x1,y1),(x2,y2),(0,255,0),2)
90
  cv2.rectangle(image,(x1,y2-35),(x2,y2),(0,255,0),cv2.FILLED)
91
  cv2.putText(image,name,(x1+6,y2-6),cv2.FONT_HERSHEY_COMPLEX,1,(255, 255, 255),2)
92
-
93
- faces=face_cascade.detectMultiScale(image1,1.3,5)
94
- for (p,q,r,s) in faces:
95
- image = image1[q:q+s,p:p+r]
96
- cv2.rectangle(image1,(p,q),(p+r,q+s),(255,0,0),2)
97
- image = cv2.resize(image,(48,48))
98
- img = extract_features(image)
99
- pred = model.predict(img)
100
- prediction_label = labels[pred.argmax()]
101
- st.write("Predicted Output:", prediction_label)
102
- # cv2.putText(im,prediction_label)
103
- img = cv2.putText(image1, '% s' %(prediction_label), (p-10, q-10),cv2.FONT_HERSHEY_COMPLEX_SMALL,2, (0,0,255))
104
- st.image(img, use_column_width=True)
105
  else:
106
  st.write("FAiled")
107
 
108
 
109
- # ##############
110
- # url = "https://kiwi-whispering-plier.glitch.me/update"
111
-
112
- # data = {
113
- # 'name': name,
114
- # }
115
- # else:
116
- # st.write("Please smile")
 
4
  import cv2
5
  import requests
6
  from keras.models import model_from_json
 
7
  import face_recognition
8
  import os
9
  from datetime import datetime
10
 
11
+ from keras.models import model_from_json
12
+ from keras.preprocessing.image import img_to_array
13
+ from PIL import Image
 
14
 
15
  import io
16
  import streamlit as st
 
27
  classnames.append(os.path.splitext(cls)[0])
28
  st.write(classnames)
29
 
30
+ # load model
31
+ emotion_dict = {0:'angry', 1 :'happy', 2: 'neutral', 3:'sad', 4: 'surprise'}
32
+ # load json and create model
33
+ json_file = open('emotion_model1.json', 'r')
34
+ loaded_model_json = json_file.read()
35
  json_file.close()
36
+ classifier = model_from_json(loaded_model_json)
37
 
38
+ # load weights into new model
39
+ classifier.load_weights("emotion_model1.h5")
 
40
 
41
+ #load face
42
+ try:
43
+ face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
44
+ except Exception:
45
+ st.write("Error loading cascade classifiers")
46
 
47
  def findEncodings(Images):
48
  encodeList = []
 
57
  feature = feature.reshape(1,48,48,1)
58
  return feature/255.0
59
 
 
60
 
61
  encodeListknown = findEncodings(Images)
62
  st.write('Encoding Complete')
 
67
  test_image = Image.open(img_file_buffer)
68
  image1 = Image.open(img_file_buffer)
69
  st.image(test_image, use_column_width=True)
70
+ image = np.asarray(test_image)
 
 
 
71
 
72
+ img = np.asarray(image1)
73
+ img = cv2.resize(img,(0,0),None,0.25,0.25)
74
+ st.write("1")
75
+
76
+ #image gray
77
+ img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
78
+ faces = face_cascade.detectMultiScale(
79
+ image=img_gray, scaleFactor=1.3, minNeighbors=5)
80
+ st.write("2")
81
+ for (x, y, w, h) in faces:
82
+ cv2.rectangle(img=img, pt1=(x, y), pt2=(
83
+ x + w, y + h), color=(255, 0, 0), thickness=2)
84
+ roi_gray = img_gray[y:y + h, x:x + w]
85
+ roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
86
+ if np.sum([roi_gray]) != 0:
87
+ roi = roi_gray.astype('float') / 255.0
88
+ roi = img_to_array(roi)
89
+ roi = np.expand_dims(roi, axis=0)
90
+ prediction = classifier.predict(roi)[0]
91
+ maxindex = int(np.argmax(prediction))
92
+ finalout = emotion_dict[maxindex]
93
+ output = str(finalout)
94
+ st.write("3")
95
+ label_position = (x, y)
96
+ img = cv2.putText(img, output, label_position, cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
97
+ st.image(img, use_column_width=True)
98
+ st.write("4")
99
+
100
  #########################
101
  imgS = cv2.resize(image,(0,0),None,0.25,0.25)
102
  imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
103
  facesCurFrame = face_recognition.face_locations(imgS)
104
  encodesCurFrame = face_recognition.face_encodings(imgS,facesCurFrame)
105
+ st.write("5")
106
  for encodeFace,faceLoc in zip(encodesCurFrame,facesCurFrame):
107
  matches = face_recognition.compare_faces(encodeListknown,encodeFace)
108
  faceDis = face_recognition.face_distance(encodeListknown,encodeFace)
109
  #print(faceDis)
110
  matchIndex = np.argmin(faceDis)
111
+ st.write("6")
112
  if matches[matchIndex]:
113
  name = classnames[matchIndex]
114
  st.write(name)
 
117
  cv2.rectangle(image,(x1,y1),(x2,y2),(0,255,0),2)
118
  cv2.rectangle(image,(x1,y2-35),(x2,y2),(0,255,0),cv2.FILLED)
119
  cv2.putText(image,name,(x1+6,y2-6),cv2.FONT_HERSHEY_COMPLEX,1,(255, 255, 255),2)
120
+ st.write("7")
121
+ ##############
122
+ if name:
123
+ if output=='happy':
124
+ url = "https://kiwi-whispering-plier.glitch.me/update"
125
+
126
+ data = {
127
+ 'name': name,
128
+ }
129
+ else:
130
+ st.write("Please smile")
 
 
131
  else:
132
  st.write("FAiled")
133
 
134
 
135
+