gurwindersingh committed on
Commit
d58f394
·
1 Parent(s): bf7b46c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +43 -55
app.py CHANGED
@@ -1,33 +1,19 @@
1
- from keras.models import load_model
2
- from PIL import Image
3
  import numpy as np
4
  import cv2
5
  import requests
6
-
7
  import face_recognition
8
  import os
9
  from datetime import datetime
10
-
11
- #the following are to do with this interactive notebook code
12
- from matplotlib import pyplot as plt # this lets you draw inline pictures in the notebooks
13
- import pylab # this allows you to control figure size
14
- pylab.rcParams['figure.figsize'] = (10.0, 8.0) # this controls figure size in the notebook
15
-
16
- import io
17
  import streamlit as st
18
- bytes_data=None
19
 
20
  Images = []
21
  classnames = []
22
  myList = os.listdir()
23
- #st.write(myList)
24
  for cls in myList:
25
- if os.path.splitext(cls)[1] == ".jpg" :
26
  curImg = cv2.imread(f'{cls}')
27
  Images.append(curImg)
28
  classnames.append(os.path.splitext(cls)[0])
29
- st.write(classnames)
30
-
31
 
32
  def findEncodings(Images):
33
  encodeList = []
@@ -37,50 +23,52 @@ def findEncodings(Images):
37
  encodeList.append(encode)
38
  return encodeList
39
 
40
-
41
  encodeListknown = findEncodings(Images)
 
42
  st.write('Encoding Complete')
43
 
44
- img_file_buffer=st.camera_input("Take a picture")
45
- if img_file_buffer is not None:
46
-
47
- test_image = Image.open(img_file_buffer)
48
- st.image(test_image, use_column_width=True)
49
-
50
- image = np.asarray(test_image)
51
 
52
- #########################
53
- imgS = cv2.resize(image,(0,0),None,0.25,0.25)
54
- imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
55
- facesCurFrame = face_recognition.face_locations(imgS)
56
- encodesCurFrame = face_recognition.face_encodings(imgS,facesCurFrame)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
57
 
58
- for encodeFace,faceLoc in zip(encodesCurFrame,facesCurFrame):
59
- matches = face_recognition.compare_faces(encodeListknown,encodeFace)
60
- faceDis = face_recognition.face_distance(encodeListknown,encodeFace)
61
- #print(faceDis)
62
- matchIndex = np.argmin(faceDis)
63
 
64
- if matches[matchIndex]:
65
- name = classnames[matchIndex]
66
- st.write(name)
67
- y1, x2, y2, x1 = faceLoc
68
- y1, x2, y2, x1 = y1*4,x2*4,y2*4,x1*4
69
- cv2.rectangle(image,(x1,y1),(x2,y2),(0,255,0),2)
70
- cv2.rectangle(image,(x1,y2-35),(x2,y2),(0,255,0),cv2.FILLED)
71
- cv2.putText(image,name,(x1+6,y2-6),cv2.FONT_HERSHEY_COMPLEX,1,(255, 255, 255),2)
72
 
73
- ##############
74
- url = "https://kiwi-whispering-plier.glitch.me/update"
75
- data1 = {'name':name }
76
- response = requests.post(url, data=data1)
77
-
78
- if response.status_code == 200 :
79
- st.write(" data updated on : " + url)
80
- else : st.write("data not updated ")
81
-
82
-
83
- ##############################
84
- st.image(image)
85
- if bytes_data is None:
86
- st.stop()
 
 
 
1
  import numpy as np
2
  import cv2
3
  import requests
 
4
  import face_recognition
5
  import os
6
  from datetime import datetime
 
 
 
 
 
 
 
7
  import streamlit as st
 
8
 
9
# Build the reference gallery: every *.jpg in the working directory is a
# known face, labelled by its bare file name (without extension).
Images = []      # BGR images as returned by cv2.imread
classnames = []  # parallel list of labels; classnames[i] names Images[i]
myList = os.listdir()
for cls in myList:
    # Case-insensitive extension check so "FOO.JPG" is also picked up.
    if os.path.splitext(cls)[1].lower() == ".jpg":
        curImg = cv2.imread(cls)
        # imread returns None for unreadable/corrupt files; skipping keeps
        # Images and classnames aligned and avoids a crash during encoding.
        if curImg is None:
            continue
        Images.append(curImg)
        classnames.append(os.path.splitext(cls)[0])
 
 
17
 
18
  def findEncodings(Images):
19
  encodeList = []
 
23
  encodeList.append(encode)
24
  return encodeList
25
 
 
26
# Encode the reference gallery once up front; each known face becomes a
# 128-d embedding that webcam faces are compared against.
encodeListknown = findEncodings(Images)

st.write('Encoding Complete')

cap = cv2.VideoCapture(0)  # Use 0 for the default camera
while True:
    ret, frame = cap.read()

    if ret:
        # Detect/encode on a quarter-size RGB copy for speed; face boxes are
        # scaled back up by 4 below when drawing on the full-size frame.
        imgS = cv2.resize(frame, (0, 0), None, 0.25, 0.25)
        imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
        facesCurFrame = face_recognition.face_locations(imgS)
        encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)

        for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):
            # Guard: with an empty gallery, np.argmin on an empty distance
            # array would raise ValueError.
            if len(encodeListknown) == 0:
                break
            matches = face_recognition.compare_faces(encodeListknown, encodeFace)
            faceDis = face_recognition.face_distance(encodeListknown, encodeFace)
            matchIndex = np.argmin(faceDis)

            if matches[matchIndex]:
                name = classnames[matchIndex]
                st.write(name)
                # Scale the quarter-size detection box back to full frame.
                y1, x2, y2, x1 = faceLoc
                y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4
                cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
                cv2.rectangle(frame, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)
                cv2.putText(frame, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)

                # Sending data to a URL. A timeout and best-effort error
                # handling keep a flaky network from killing the whole loop.
                url = "https://kiwi-whispering-plier.glitch.me/update"
                data1 = {'name': name}
                try:
                    response = requests.post(url, data=data1, timeout=10)
                except requests.RequestException:
                    st.write("Data not updated")
                else:
                    if response.status_code == 200:
                        st.write("Data updated on: " + url)
                    else:
                        st.write("Data not updated")

        # Show the flipped frame. channels="BGR" tells Streamlit the frame
        # is OpenCV-ordered, otherwise red and blue are swapped on screen.
        frame = cv2.flip(frame, 1)  # Flip horizontally
        st.image(frame, channels="BGR")

        # NOTE(review): with no cv2.imshow window, waitKey returns -1
        # immediately, so this 'q' check never fires in a Streamlit app.
        # Kept for behavioral compatibility.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break

cap.release()
cv2.destroyAllWindows()