File size: 3,769 Bytes
e3c7de3
 
 
 
bb3d0a0
9c63de6
e3c7de3
 
 
 
e721db8
e3c7de3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bb3d0a0
e721db8
e3c7de3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9c63de6
e3c7de3
 
41cc662
e3c7de3
 
e721db8
e3c7de3
 
e721db8
e3c7de3
 
 
e721db8
e3c7de3
 
41cc662
e3c7de3
 
bb3d0a0
9c63de6
e3c7de3
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
import os
import time
from datetime import datetime, timezone

import gradio as gr
import cv2
import numpy as np

from detect_people import detect_people_yolo
from table_occupancy import estimate_table_occupancy
from face_utils import recognize_faces
from db import get_table_status, log_customer_visit, get_alerts, db_enabled

# ---------- Tunables via env ----------
CONF_THRESH = float(os.getenv("PEOPLE_CONF_THRESH", "0.55"))
NMS_THRESH = float(os.getenv("PEOPLE_NMS_THRESH", "0.45"))
MAX_SIDE = int(os.getenv("PEOPLE_MAX_SIDE", "1280"))   # resize safeguard
FACES_DIR = os.getenv("FACES_DIR", "faces_db")         # faces gallery path
TOPK = int(os.getenv("FACE_TOPK", "3"))                # max matches to show
ENABLE_FACES = os.getenv("ENABLE_FACES", "1") == "1"
# --------------------------------------

def _safe_resize(frame, max_side=1280):
    h, w = frame.shape[:2]
    m = max(h, w)
    if m <= max_side:
        return frame
    scale = max_side / m
    return cv2.resize(frame, (int(w*scale), int(h*scale)))

def process_frame(image):
    """
    image: np.ndarray (H, W, 3) from gr.Image
    Returns: (annotated_frame, text_status)
    """
    if image is None:
        return None, "No frame."

    frame = _safe_resize(image, MAX_SIDE)

    # 1) People detection (OpenCV DNN YOLOv4-tiny)
    people, boxes, confs, annotated = detect_people_yolo(
        frame, conf_thresh=CONF_THRESH, nms_thresh=NMS_THRESH, draw=True
    )
    people_count = len(people)

    # 2) Table occupancy (simple heuristic; customize as needed)
    seated = estimate_table_occupancy(people_count)

    # 3) Face recognition (optional & robust)
    face_text = "Disabled"
    if ENABLE_FACES:
        try:
            matches = recognize_faces(frame, FACES_DIR, topk=TOPK)
            if isinstance(matches, str):
                face_text = matches  # a friendly warning string
            else:
                # pretty print topk names with avg distance
                if len(matches) == 0:
                    face_text = "No known faces"
                else:
                    face_text = ", ".join(
                        [f"{m['name']} ({m['score']:.2f})" for m in matches[:TOPK]]
                    )
        except Exception as e:
            face_text = f"Error: {e}"

    # 4) DB (optional / won’t crash if missing)
    db_text = "DB disabled"
    if db_enabled():
        try:
            # log a single event to show it works (you can wire to your business logic)
            log_customer_visit(face_text if isinstance(face_text, str) else "faces", datetime.utcnow(), table_id=1)
            alerts = get_alerts()
            if alerts:
                db_text = f"Alerts: {alerts}"
            else:
                db_text = "Alerts: none"
        except Exception as e:
            db_text = f"DB error: {e}"

    status = f"People: {people_count}, Seated: {bool(seated)}, Face Match: {face_text}, {db_text}"
    return annotated[:, :, ::-1], status  # convert BGR->RGB for display

with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("## CCTV Backend – People Count, Table Occupancy & Face Match (CPU-only)")

    with gr.Row():
        inp = gr.Image(type="numpy", label="Frame (image or snapshot)")

    with gr.Row():
        out_img = gr.Image(type="numpy", label="Detections", interactive=False)
        out_txt = gr.Textbox(label="Status", interactive=False)

    btn = gr.Button("Process")
    btn.click(process_frame, inputs=[inp], outputs=[out_img, out_txt])

    # Also auto-run when user drops an image
    inp.change(process_frame, inputs=[inp], outputs=[out_img, out_txt])

if __name__ == "__main__":
    # Gradio SSR sometimes logs warnings; keep defaults minimal for Spaces
    demo.launch(server_name="0.0.0.0", server_port=7860)