import os
import time
from datetime import datetime

import gradio as gr
import cv2
import numpy as np

from detect_people import detect_people_yolo
from table_occupancy import estimate_table_occupancy
from face_utils import recognize_faces
from db import get_table_status, log_customer_visit, get_alerts, db_enabled

# ---------- Tunables via env ----------
CONF_THRESH = float(os.getenv("PEOPLE_CONF_THRESH", "0.55"))
NMS_THRESH = float(os.getenv("PEOPLE_NMS_THRESH", "0.45"))
MAX_SIDE = int(os.getenv("PEOPLE_MAX_SIDE", "1280"))  # resize safeguard
FACES_DIR = os.getenv("FACES_DIR", "faces_db")        # faces gallery path
TOPK = int(os.getenv("FACE_TOPK", "3"))               # max matches to show
ENABLE_FACES = os.getenv("ENABLE_FACES", "1") == "1"
# --------------------------------------


def _safe_resize(frame, max_side=1280):
    h, w = frame.shape[:2]
    m = max(h, w)
    if m <= max_side:
        return frame
    scale = max_side / m
    return cv2.resize(frame, (int(w * scale), int(h * scale)))


def process_frame(image):
    """
    image: np.ndarray (H, W, 3) from gr.Image
    Returns: (annotated_frame, text_status)
    """
    if image is None:
        return None, "No frame."

    # gr.Image(type="numpy") delivers RGB; the OpenCV pipeline below works in BGR
    frame = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    frame = _safe_resize(frame, MAX_SIDE)

    # 1) People detection (OpenCV DNN YOLOv4-tiny)
    people, boxes, confs, annotated = detect_people_yolo(
        frame, conf_thresh=CONF_THRESH, nms_thresh=NMS_THRESH, draw=True
    )
    people_count = len(people)

    # 2) Table occupancy (simple heuristic; customize as needed)
    seated = estimate_table_occupancy(people_count)

    # 3) Face recognition (optional & robust)
    face_text = "Disabled"
    if ENABLE_FACES:
        try:
            matches = recognize_faces(frame, FACES_DIR, topk=TOPK)
            if isinstance(matches, str):
                face_text = matches  # a friendly warning string
            elif len(matches) == 0:
                face_text = "No known faces"
            else:
                # pretty-print the top-k names with their match scores
                face_text = ", ".join(
                    f"{m['name']} ({m['score']:.2f})" for m in matches[:TOPK]
                )
        except Exception as e:
            face_text = f"Error: {e}"

    # 4) DB (optional / won't crash if missing)
    db_text = "DB disabled"
    if db_enabled():
        try:
            # log a single event to show it works (wire this into your business logic)
            log_customer_visit(
                face_text if isinstance(face_text, str) else "faces",
                datetime.utcnow(),
                table_id=1,
            )
            alerts = get_alerts()
            db_text = f"Alerts: {alerts}" if alerts else "Alerts: none"
        except Exception as e:
            db_text = f"DB error: {e}"

    status = (
        f"People: {people_count}, Seated: {bool(seated)}, "
        f"Face Match: {face_text}, {db_text}"
    )
    return annotated[:, :, ::-1], status  # convert BGR -> RGB for display


with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("## CCTV Backend – People Count, Table Occupancy & Face Match (CPU-only)")
    with gr.Row():
        inp = gr.Image(type="numpy", label="Frame (image or snapshot)")
    with gr.Row():
        out_img = gr.Image(type="numpy", label="Detections", interactive=False)
        out_txt = gr.Textbox(label="Status", interactive=False)
    btn = gr.Button("Process")
    btn.click(process_frame, inputs=[inp], outputs=[out_img, out_txt])
    # Also auto-run when the user drops an image
    inp.change(process_frame, inputs=[inp], outputs=[out_img, out_txt])

if __name__ == "__main__":
    # Gradio SSR sometimes logs warnings; keep defaults minimal for Spaces
    demo.launch(server_name="0.0.0.0", server_port=7860)
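

# --------------------------------------------------------------------------
# Illustrative sketch only (not part of app.py): one way the imported
# detect_people_yolo helper in detect_people.py *might* be implemented with
# OpenCV DNN and YOLOv4-tiny. The cfg/weights file names, the per-call model
# load, and the (people, boxes, confs, annotated) return shape are assumptions
# inferred from how the function is called above; the real module may differ
# (e.g., it would normally load the network once at import time).
# --------------------------------------------------------------------------
def _detect_people_yolo_sketch(frame, conf_thresh=0.55, nms_thresh=0.45, draw=True,
                               cfg_path="yolov4-tiny.cfg",        # assumed file name
                               weights_path="yolov4-tiny.weights"):  # assumed file name
    net = cv2.dnn.readNetFromDarknet(cfg_path, weights_path)
    net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)  # CPU-only
    net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)

    h, w = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)
    net.setInput(blob)
    outputs = net.forward(net.getUnconnectedOutLayersNames())

    # Collect "person" detections (COCO class 0) above the confidence threshold
    boxes, confs = [], []
    for output in outputs:
        for det in output:
            scores = det[5:]
            class_id = int(np.argmax(scores))
            conf = float(scores[class_id])
            if class_id == 0 and conf >= conf_thresh:
                cx, cy, bw, bh = det[0] * w, det[1] * h, det[2] * w, det[3] * h
                boxes.append([int(cx - bw / 2), int(cy - bh / 2), int(bw), int(bh)])
                confs.append(conf)

    # Non-maximum suppression to drop overlapping boxes
    keep = cv2.dnn.NMSBoxes(boxes, confs, conf_thresh, nms_thresh)
    keep = [int(i) for i in np.array(keep).flatten()] if len(keep) else []

    annotated = frame.copy()
    people = []
    for i in keep:
        x, y, bw, bh = boxes[i]
        people.append((x, y, bw, bh))
        if draw:
            cv2.rectangle(annotated, (x, y), (x + bw, y + bh), (0, 255, 0), 2)
            cv2.putText(annotated, f"person {confs[i]:.2f}", (x, max(y - 5, 10)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)

    return people, [boxes[i] for i in keep], [confs[i] for i in keep], annotated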