import streamlit as st
import torch
import cv2
import numpy as np

# Title and description of the app
st.title("YOLOv5 Object Detection with Video Input")
st.write("Live object detection from your webcam using YOLOv5!")

# Load the pre-trained YOLOv5 model (COCO dataset)
@st.cache_resource
def load_model():
    return torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)

model = load_model()

# Process a single video frame with YOLOv5 and draw the detections
def process_frame(frame, model):
    # YOLOv5 expects RGB input; OpenCV frames are BGR
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    # Perform inference
    results = model(rgb_frame)

    # Extract detections as a pandas DataFrame
    detections = results.pandas().xyxy[0]

    # Draw bounding boxes and labels on the original (BGR) frame
    for _, row in detections.iterrows():
        x1, y1, x2, y2 = int(row['xmin']), int(row['ymin']), int(row['xmax']), int(row['ymax'])
        label = f"{row['name']} {row['confidence']:.2f}"
        cv2.rectangle(frame, (x1, y1), (x2, y2), (255, 0, 0), 2)
        cv2.putText(frame, label, (x1, y1 - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (36, 255, 12), 2)

    return frame

# Start video capture
run_video = st.checkbox("Start Webcam")

if run_video:
    # Initialize the webcam (0 is the default camera)
    cap = cv2.VideoCapture(0)

    if not cap.isOpened():
        st.error("Error: Could not open the webcam.")
    else:
        # Stream video
        stframe = st.empty()  # Placeholder for displaying video frames

        while run_video:
            ret, frame = cap.read()
            if not ret:
                st.error("Error: Failed to capture video.")
                break

            # Process the frame with YOLOv5
            processed_frame = process_frame(frame, model)

            # Convert BGR to RGB for Streamlit
            processed_frame = cv2.cvtColor(processed_frame, cv2.COLOR_BGR2RGB)

            # Display the frame in Streamlit
            stframe.image(processed_frame, channels="RGB", use_column_width=True)

        cap.release()
else:
    st.write("Enable the checkbox above to start the webcam.")
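
# Usage note: launch the app with the Streamlit CLI
# (assuming this script is saved as app.py -- the filename is illustrative):
#   streamlit run app.py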