gurwindersingh committed on
Commit
6f55d1e
·
1 Parent(s): c61053c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +43 -85
app.py CHANGED
@@ -1,89 +1,47 @@
1
- from keras.models import load_model
2
- from PIL import Image
3
- import numpy as np
4
- import cv2
5
- import requests
6
-
7
- import face_recognition
8
- import os
9
- from datetime import datetime
10
-
11
- #the following are to do with this interactive notebook code
12
- from matplotlib import pyplot as plt # this lets you draw inline pictures in the notebooks
13
- import pylab # this allows you to control figure size
14
- pylab.rcParams['figure.figsize'] = (10.0, 8.0) # this controls figure size in the notebook
15
-
16
- import io
17
  import streamlit as st
18
- bytes_data=None
19
-
20
- Images = []
21
- classnames = []
22
- myList = os.listdir()
23
- #st.write(myList)
24
- for cls in myList:
25
- if os.path.splitext(cls)[1] == ".jpg" :
26
- curImg = cv2.imread(f'{cls}')
27
- Images.append(curImg)
28
- classnames.append(os.path.splitext(cls)[0])
29
- st.write(classnames)
30
-
31
-
32
- def findEncodings(Images):
33
- encodeList = []
34
- for img in Images:
35
- img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
36
- encode = face_recognition.face_encodings(img)[0]
37
- encodeList.append(encode)
38
- return encodeList
39
 
40
-
41
- encodeListknown = findEncodings(Images)
42
- st.write('Encoding Complete')
43
-
44
- img_file_buffer=st.camera_input("Take a picture")
45
- if img_file_buffer is not None:
46
-
47
- test_image = Image.open(img_file_buffer)
48
- st.image(test_image, use_column_width=True)
49
-
50
- image = np.asarray(test_image)
51
 
52
- #########################
53
- imgS = cv2.resize(image,(0,0),None,0.25,0.25)
54
- imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
55
- facesCurFrame = face_recognition.face_locations(imgS)
56
- encodesCurFrame = face_recognition.face_encodings(imgS,facesCurFrame)
57
-
58
- for encodeFace,faceLoc in zip(encodesCurFrame,facesCurFrame):
59
- matches = face_recognition.compare_faces(encodeListknown,encodeFace)
60
- faceDis = face_recognition.face_distance(encodeListknown,encodeFace)
61
- #print(faceDis)
62
- matchIndex = np.argmin(faceDis)
63
-
64
- if matches[matchIndex]:
65
- name = classnames[matchIndex]
66
- st.write(name)
67
- y1, x2, y2, x1 = faceLoc
68
- y1, x2, y2, x1 = y1*4,x2*4,y2*4,x1*4
69
- cv2.rectangle(image,(x1,y1),(x2,y2),(0,255,0),2)
70
- cv2.rectangle(image,(x1,y2-35),(x2,y2),(0,255,0),cv2.FILLED)
71
- cv2.putText(image,name,(x1+6,y2-6),cv2.FONT_HERSHEY_COMPLEX,1,(255, 255, 255),2)
72
-
73
- ##############
74
- url = "https://kiwi-whispering-plier.glitch.me/update"
75
-
76
- data = {
77
- 'name': name,
78
- }
 
 
 
 
 
 
 
 
 
 
79
 
80
- response = requests.get(url, params=data)
81
-
82
- if response.status_code == 200 :
83
- st.write(" data updated on : https://kiwi-whispering-plier.glitch.me" )
84
- else : st.write("data not updated ")
85
-
86
- ##############################
87
- st.image(image)
88
- if bytes_data is None:
89
- st.stop()
 
1
+ import cv2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  import streamlit as st
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
 
4
def detect_faces():
    """Render the Streamlit real-time face-detection app.

    Draws the page title and sidebar controls, then runs a loop that
    reads frames from the webcam or an uploaded video, detects faces
    with OpenCV's Haar cascade, and displays the annotated frames.
    Returns nothing; all output goes to the Streamlit page.
    """
    st.title("Real-time Face Detection App")
    st.sidebar.title("Parameters")

    # Nested helper: open the chosen video source and stream annotated frames.
    def detect_in_video():
        # Choose between the local webcam and an uploaded video file.
        video_feed = st.sidebar.checkbox("Use Video Stream", True)
        if video_feed:
            # 0 = default camera (change to another index for other cameras).
            video = cv2.VideoCapture(0)
        else:
            uploaded_file = st.sidebar.file_uploader("Upload a video file", type=["mp4", "avi"])
            if uploaded_file is None:
                st.warning("Please upload a video file.")
                return
            # FIX: cv2.VideoCapture cannot read a Streamlit UploadedFile
            # object directly — spill it to a temporary file and open the
            # resulting path instead.
            import tempfile
            with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tmp:
                tmp.write(uploaded_file.read())
                video_path = tmp.name
            video = cv2.VideoCapture(video_path)

        if not video.isOpened():
            st.error("Could not open the video source.")
            return

        face_cascade = cv2.CascadeClassifier(
            cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

        # FIX: reuse a single placeholder so each frame replaces the previous
        # one, instead of st.image() appending a new element every iteration
        # and growing the page without bound.
        frame_slot = st.empty()

        try:
            while True:
                ret, frame = video.read()
                if not ret:
                    # ret is False both at normal end-of-stream and on a
                    # capture failure — stop cleanly rather than erroring.
                    break

                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                faces = face_cascade.detectMultiScale(
                    gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))

                # Draw a green box around every detected face.
                for (x, y, w, h) in faces:
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)

                frame_slot.image(frame, channels="BGR", use_column_width=True)
        finally:
            # Always release the capture, even if the loop raises.
            # NOTE: the original's cv2.waitKey(1)/'q' break and
            # cv2.destroyAllWindows() were removed — they are highgui
            # window calls that do nothing in a Streamlit app (waitKey
            # returns -1 with no window, so that break never fired).
            video.release()

    detect_in_video()
45
 
46
# Script entry point: launch the Streamlit face-detection UI when this
# file is executed directly (e.g. via `streamlit run app.py`).
if __name__ == "__main__":
    detect_faces()