import gradio as gr
import numpy as np
import cv2
from ultralytics import YOLO
from PIL import Image
import pandas as pd
# Model labels
model1Labels = {0: 'single_number_plate', 1: 'double_number_plate'}
model2Labels = {
    0: '0', 1: '1', 2: '2', 3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9', 10: 'A', 11: 'B', 12: 'C',
    13: 'D', 14: 'E', 15: 'F', 16: 'G', 17: 'H', 18: 'I', 19: 'J', 20: 'K', 21: 'L', 22: 'M', 23: 'N', 24: 'O',
    25: 'P', 26: 'Q', 27: 'R', 28: 'S', 29: 'T', 30: 'U', 31: 'V', 32: 'W', 33: 'X', 34: 'Y', 35: 'Z'
}
# Load models
model = YOLO("model/LP-detection.pt")   # license-plate detector (single/double line)
model2 = YOLO("model/Charcter-LP.pt")   # character detector for cropped plates
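# Both weight files are expected in a local "model/" directory relative to this
# script; loading will fail at startup if either checkpoint is missing.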
def prediction(image):
    result = model.predict(source=image, conf=0.5)
    boxes = result[0].boxes
    heights = boxes.xywh  # per box: (x_center, y_center, width, height)
    crd = boxes.data      # per box: (xmin, ymin, xmax, ymax, conf, class)
    n = len(crd)
    lp_number = []
    img_lp_final = None
    for i in range(n):
        ht = int(heights[i][3])  # plate height in pixels
        c = int(crd[i][5])       # class: 0 = single-line plate, 1 = double-line plate
        xmin = int(crd[i][0])
        ymin = int(crd[i][1])
        xmax = int(crd[i][2])
        ymax = int(crd[i][3])
        img_lp = image[ymin:ymax, xmin:xmax]
        img_lp_final = img_lp.copy()  # store the cropped plate for display
        cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)

        # Second model: detect individual characters inside the cropped plate
        result2 = model2.predict(source=img_lp, conf=0.25)
        data2 = result2[0].boxes.data
        n2 = len(data2)
        xaxis0, xaxis11, xaxis12 = [], [], []
        label0, label11, label12 = [], [], []
        numberPlate = ""
        if c == 0:  # single-line plate: read all characters left to right
            for j in range(n2):
                xaxis0.append(int(data2[j][2]))
                label0.append(int(data2[j][5]))
            # Sort characters by x-axis for a single line
            sorted_labels = [label0[k] for k in np.argsort(xaxis0)]
            numberPlate = ''.join(model2Labels.get(l, '') for l in sorted_labels)
            lp_number.append(numberPlate)
        elif c == 1:  # double-line plate: split rows at half the plate height
            for j in range(n2):
                x = int(data2[j][0])
                y = int(data2[j][3])
                l = int(data2[j][5])
                if y < ht / 2:  # character box ends in the upper half -> top row
                    xaxis11.append(x)
                    label11.append(l)
                else:           # bottom row
                    xaxis12.append(x)
                    label12.append(l)
            # Sort each row by x-axis, then read the top row before the bottom
            sorted_labels11 = [label11[k] for k in np.argsort(xaxis11)]
            sorted_labels12 = [label12[k] for k in np.argsort(xaxis12)]
            numberPlate = ''.join(model2Labels.get(l, '') for l in sorted_labels11 + sorted_labels12)
            lp_number.append(numberPlate)
    return lp_number, img_lp_final
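# Minimal optional smoke test for prediction(), a sketch that is disabled by
# default; TEST_IMAGE is a hypothetical path, not a file shipped with the app.
# cv2.imread returns a BGR numpy array, which YOLO's predict() accepts directly.
RUN_SMOKE_TEST = False
TEST_IMAGE = "test_car.jpg"  # hypothetical sample image
if RUN_SMOKE_TEST:
    test_img = cv2.imread(TEST_IMAGE)
    plates, plate_crop = prediction(test_img)
    print("Detected plates:", plates)
    if plate_crop is not None:
        cv2.imwrite("plate_crop.jpg", plate_crop)  # save the last cropped plate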
def process_video(video_file):
    # Open the video and read its properties up front: the frame size must be
    # known before the writer is created, and the last cap.read() in the loop
    # returns no frame, so it cannot be queried afterwards.
    cap = cv2.VideoCapture(video_file)
    fps = cap.get(cv2.CAP_PROP_FPS) or 20.0  # fall back if FPS is unavailable
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    license_plate_texts = []
    processed_frames = []
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        # prediction() draws the bounding boxes onto `frame` in place
        license_plate_text, _ = prediction(frame)
        license_plate_texts.append(" ".join(license_plate_text))  # join the list of texts into a single string
        processed_frames.append(frame)  # keep the annotated full frame, not the small crop
    cap.release()

    # Save detected texts to Excel (requires openpyxl)
    df = pd.DataFrame(license_plate_texts, columns=["License Plate"])
    df.to_excel("detected_license_plates.xlsx", index=False)

    # Save processed video with license plates highlighted
    output_video_path = 'processed_video.mp4'
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # codec for MP4
    out = cv2.VideoWriter(output_video_path, fourcc, fps, (width, height))
    for processed_frame in processed_frames:
        out.write(processed_frame)
    out.release()
    return output_video_path, "detected_license_plates.xlsx"
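# Compatibility note: OpenCV's 'mp4v' codec produces a valid MP4, but some
# browsers cannot play it inside Gradio's video player; re-encoding the output
# to H.264 (e.g. via ffmpeg) is a common workaround if playback fails.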
# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# License Plate Recognition (Video Upload)")
    gr.Markdown("Upload a video to get the license plate numbers of the vehicles detected in each frame.")
    with gr.Row():
        video_input = gr.File(label="Upload Video", type="filepath")
        video_output = gr.Video(label="Processed Video")
        excel_output = gr.File(label="Excel File with Detected License Plates")
    video_input.upload(process_video, inputs=video_input, outputs=[video_output, excel_output])

demo.launch()