import streamlit as st
import cv2
import numpy as np
from PIL import Image
from ultralytics import YOLO
import os
import time
import av  # PyAV, used to hand processed frames back to streamlit-webrtc
from streamlit_webrtc import webrtc_streamer, WebRtcMode
import functools

# --- Page Configuration --- MUST BE THE VERY FIRST STREAMLIT CALL
st.set_page_config(page_title="YOLOv11 Object Detection App", page_icon="🧊", layout="wide")

# --- Load YOLO Model ---
@st.cache_resource
def load_model():
    # Cached so the weights are loaded once, not on every Streamlit rerun.
    return YOLO("yolov8n.pt")  # Or your custom model path

try:
    model = load_model()
except Exception as e:
    st.error(f"Error loading YOLO model: {e}")
    model = None

# --- Functions ---
def detect_objects(image, confidence_threshold=0.5, iou_threshold=0.45):
    if model is None:
        st.warning("YOLO model not loaded.")
        return []
    results = model(image, conf=confidence_threshold, iou=iou_threshold)
    detections = []
    for result in results:
        if result.boxes is not None and len(result.boxes) > 0:
            for box in result.boxes:
                confidence = float(box.conf[0])
                class_id = int(box.cls[0])
                class_name = model.names[class_id]
                xyxy = box.xyxy[0].tolist()
                x1, y1, x2, y2 = map(int, xyxy)
                detections.append(((x1, y1, x2, y2), class_name, confidence))
    return detections

def draw_boxes(image, detections):
    for (x1, y1, x2, y2), class_name, confidence in detections:
        label = f"{class_name}: {confidence:.2f}"
        cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
        cv2.putText(image, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
    return image

def process_video(video_file, confidence_threshold, iou_threshold):
    try:
        video_bytes = video_file.read()
        video_path = "temp_video.mp4"
        with open(video_path, "wb") as temp_file:
            temp_file.write(video_bytes)
        cap = cv2.VideoCapture(video_path)
        frame_placeholder = st.empty()
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            detections = detect_objects(frame, confidence_threshold, iou_threshold)
            frame_with_boxes = draw_boxes(frame.copy(), detections)
            frame_with_boxes_rgb = cv2.cvtColor(frame_with_boxes, cv2.COLOR_BGR2RGB)
            img = Image.fromarray(frame_with_boxes_rgb)
            frame_placeholder.image(img, caption="Processed Video", use_column_width=True)
        cap.release()
        os.remove(video_path)
    except Exception as e:
        st.error(f"Error processing video: {e}")

def process_frame(frame, confidence_threshold, iou_threshold):
    img = frame.to_ndarray(format="bgr24")
    detections = detect_objects(img, confidence_threshold, iou_threshold)
    img_with_boxes = draw_boxes(img.copy(), detections)
    # streamlit-webrtc expects an av.VideoFrame back from the video_frame_callback
    return av.VideoFrame.from_ndarray(img_with_boxes, format="bgr24")

def simulate_fine_tuning(epochs=10):
    st.write("Simulating Fine-tuning...")  # Placeholder message
    time.sleep(2)  # Simulate some processing time
    st.success(f"Fine-tuning completed (simulated for {epochs} epochs).")

# --- Main App ---
def main():
    st.title("YOLOv11 Object Detection App")
    with st.sidebar:
        st.header("Settings")
        app_mode = st.selectbox("Choose the App mode", ["About", "Run on Image", "Run on Video", "Live Camera Feed", "Fine-Tune (Simulated)"])
        confidence_threshold = st.slider("Confidence Threshold", 0.0, 1.0, 0.5, 0.01, help="Min confidence for detected objects.")
        iou_threshold = st.slider("IoU Threshold", 0.0, 1.0, 0.45, 0.01, help="IoU threshold for NMS.")
        if app_mode == "Fine-Tune (Simulated)":
            num_epochs = st.slider("Number of Epochs", 1, 20, 5, 1)

    if app_mode == "About":
        st.subheader("About the App")
        st.write("This app uses YOLOv8n for object detection.")
        st.write("Upload an image or video, or use your camera.")
    elif app_mode == "Run on Image":
        st.subheader("Object Detection on Image")
        uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
        if uploaded_file is not None:
            image = Image.open(uploaded_file).convert("RGB")  # convert() guards against 4-channel PNGs
            st.image(image, caption="Uploaded Image", use_column_width=True)
            image_cv = np.array(image)
            image_cv = cv2.cvtColor(image_cv, cv2.COLOR_RGB2BGR)
            if st.button("Detect Objects"):
                detections = detect_objects(image_cv, confidence_threshold, iou_threshold)
                image_with_boxes = draw_boxes(image_cv.copy(), detections)
                image_with_boxes_rgb = cv2.cvtColor(image_with_boxes, cv2.COLOR_BGR2RGB)
                st.image(image_with_boxes_rgb, caption="Detected Objects", use_column_width=True)
    elif app_mode == "Run on Video":
        st.subheader("Object Detection on Video")
        video_file = st.file_uploader("Choose a video...", type=["mp4", "avi"])
        if video_file is not None:
            process_video(video_file, confidence_threshold, iou_threshold)
    elif app_mode == "Live Camera Feed":
        st.subheader("Live Camera Feed")
        custom_process_frame = functools.partial(process_frame, confidence_threshold=confidence_threshold, iou_threshold=iou_threshold)
        webrtc_streamer(key="live-feed", video_frame_callback=custom_process_frame, mode=WebRtcMode.SENDRECV, media_stream_constraints={"video": True, "audio": False})
    elif app_mode == "Fine-Tune (Simulated)":
        simulate_fine_tuning(epochs=num_epochs)

if __name__ == "__main__":
    main()
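
# --- Notes ---
# Assumed environment, inferred from the imports above (not specified in the original
# listing): streamlit, streamlit-webrtc, ultralytics, opencv-python-headless, numpy,
# pillow, and av. The yolov8n.pt weights are downloaded by ultralytics on first use if
# no local copy exists. To run locally (the file name app.py is an assumption):
#   pip install streamlit streamlit-webrtc ultralytics opencv-python-headless numpy pillow av
#   streamlit run app.py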