import os
import gradio as gr
import requests
import json
from PIL import Image
import cv2
import numpy as np

# Your existing CSS remains unchanged -- paste it into the placeholder below.
css = ""  # placeholder for your existing CSS string

# I'll keep your helper functions (convert_fun, get_attributes, check_liveness)
# mostly the same, but add better error handling.

def get_attributes(frame):
    if frame is None:
        return None, "No image provided"

    url = "https://recognito.p.rapidapi.com/api/analyze_face"
    try:
        # If frame is a numpy array from the webcam, encode it to JPEG bytes;
        # otherwise treat it as a file path.
        if isinstance(frame, np.ndarray):
            _, buffer = cv2.imencode('.jpg', frame)
            files = {'image': ('image.jpg', buffer.tobytes(), 'image/jpeg')}
        else:
            with open(frame, 'rb') as f:
                files = {'image': ('image.jpg', f.read(), 'image/jpeg')}

        headers = {"X-RapidAPI-Key": os.environ.get("API_KEY")}
        r = requests.post(url=url, files=files, headers=headers, timeout=30)
    except Exception as e:
        return None, f"Error processing image: {str(e)}"

    # Rest of your get_attributes function remains the same
    # ... (keeping your existing logic, which parses `r` into the two values below)
    face_crop, one_line_attribute = None, ""

    return face_crop, one_line_attribute


def check_liveness(frame):
    if frame is None:
        return None, "No image provided", -200

    url = "https://recognito-faceliveness.p.rapidapi.com/api/check_liveness"
    try:
        if isinstance(frame, np.ndarray):
            _, buffer = cv2.imencode('.jpg', frame)
            files = {'image': ('image.jpg', buffer.tobytes(), 'image/jpeg')}
        else:
            with open(frame, 'rb') as f:
                files = {'image': ('image.jpg', f.read(), 'image/jpeg')}

        headers = {"X-RapidAPI-Key": os.environ.get("API_KEY")}
        r = requests.post(url=url, files=files, headers=headers, timeout=30)
    except Exception as e:
        return None, f"Error processing image: {str(e)}", -200

    # Rest of your check_liveness function remains the same
    # ... (keeping your existing logic, which parses `r` into the three values below)
    face_crop, liveness_result, liveness_score = None, "", -200

    return face_crop, liveness_result, liveness_score


def analyze_face(frame):
    if frame is None:
        return [None, "No image provided", "Please provide an image or enable webcam"]

    face_crop_1, liveness_result, liveness_score = check_liveness(frame)
    face_crop_2, attribute = get_attributes(frame)

    face_crop = face_crop_1 if face_crop_1 is not None else face_crop_2
    return [face_crop, liveness_result, attribute]


# Modified interface with webcam support
with gr.Blocks(css=css) as demo:
    gr.Markdown(
        # Your existing header markdown
    )

    with gr.Tabs():
        with gr.Tab("Face Recognition"):
            # Keeping your existing Face Recognition tab unchanged
            pass

        with gr.Tab("Face Liveness, Analysis"):
            with gr.Row():
                with gr.Column(scale=1):
                    # Modified input to accept both uploads and the webcam
                    face_input = gr.Image(
                        label="Image (Upload or Webcam)",
                        sources=["upload", "webcam"],
                        type='filepath',  # uploads and webcam captures both arrive as file paths
                        elem_classes="example-image",
                        streaming=True,  # enable continuous webcam streaming
                    )
                    gr.Examples(
                        ['examples/att_1.jpg', 'examples/att_2.jpg', 'examples/att_3.jpg'],
                        inputs=face_input,
                    )

                with gr.Column(scale=1, elem_classes="block-background"):
                    analyze_face_button = gr.Button("Analyze Face", variant="primary", size="lg")
                    with gr.Row(elem_classes="face-row"):
                        face_output = gr.Image(
                            value="icons/face.jpg",
                            label="Face",
                            scale=0,
                            elem_classes="face-image",
                        )
                    liveness_result = gr.Markdown("")
                    attribute_result = gr.Markdown("")

                    # Event handler for the button (works for uploads and webcam captures)
                    analyze_face_button.click(
                        analyze_face,
                        inputs=face_input,
                        outputs=[face_output, liveness_result, attribute_result],
                    )

                    # Streaming event for the webcam; empty frames are already
                    # handled by the None check inside analyze_face
                    face_input.stream(
                        analyze_face,
                        inputs=face_input,
                        outputs=[face_output, liveness_result, attribute_result],
                    )

    gr.HTML(
        # Your existing footer HTML
    )

demo.launch(server_name="0.0.0.0", server_port=7860, show_api=False)
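
# --- Optional throttling sketch (not part of the original app) ---------------
# The face_input.stream(...) handler above fires on every webcam frame, which
# can hammer the two RapidAPI endpoints. A minimal sketch, assuming a simple
# time-based throttle is acceptable: it wraps analyze_face so the APIs are
# called at most once every `min_interval` seconds and other frames leave the
# outputs untouched. The names `throttled_analyze` and `_last_call` and the
# 2-second interval are illustrative assumptions. To use it, define it above
# the Blocks layout and pass `throttled_analyze` instead of `analyze_face`
# in face_input.stream(...).
import time

_last_call = 0.0

def throttled_analyze(frame, min_interval=2.0):
    """Run analyze_face at most once every `min_interval` seconds."""
    global _last_call
    now = time.time()
    if frame is None or (now - _last_call) < min_interval:
        # Skip this frame: gr.update() with no arguments leaves each output as-is.
        return gr.update(), gr.update(), gr.update()
    _last_call = now
    return analyze_face(frame)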