import numpy as np
import torch
from PIL import Image, ImageDraw
from pathlib import Path
from yolov5.models.experimental import attempt_load
from yolov5.utils.augmentations import letterbox  # in older yolov5 releases: yolov5.utils.datasets
from yolov5.utils.general import non_max_suppression, scale_coords
from yolov5.utils.torch_utils import select_device

# Set the device ('' auto-selects CUDA if available, otherwise CPU)
device = select_device('')

# Load YOLOv5 model
weights_path = 'model/yolov5n6_RGB_D2304-v1_9C.pt'
model = attempt_load(weights_path, map_location=device)  # newer yolov5 releases use device= instead of map_location=
stride = int(model.stride.max())  # model stride (used for letterbox padding)

# Set image size
img_size = 640

# Load the single image (PIL loads as RGB after convert, matching the model's RGB training)
image_path = 'path/to/single/image.jpg'
img0 = Image.open(image_path).convert('RGB')

# Preprocess: letterbox-resize to img_size (keeps aspect ratio, pads to a
# multiple of the model stride), then HWC uint8 -> NCHW float tensor
img = letterbox(np.array(img0), img_size, stride=stride)[0]
img = torch.from_numpy(img).to(device)
img = img.float() / 255.0  # 0-255 to 0.0-1.0
img = img.permute(2, 0, 1).unsqueeze(0)  # HWC to CHW, add batch dimension (NCHW)

# Inference
pred = model(img)[0]

# Non-maximum suppression; a single input image, so take the first (only) result
det = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45)[0]

# Draw bounding boxes on the original image
draw = ImageDraw.Draw(img0)
if det is not None and len(det):
    # Rescale boxes from the letterboxed size back to the original image;
    # scale_coords expects (h, w), while PIL's .size is (w, h)
    det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.size[::-1]).round()
    for *xyxy, conf, cls in det:
        xyxy = [int(x) for x in xyxy]
        label = f'{model.names[int(cls)]} {conf:.2f}'
        draw.rectangle(xyxy, outline='red', width=3)
        draw.text((xyxy[0], xyxy[1]), label, fill='red')

# Save the result (create the output folder first so the save cannot fail)
output_path = 'output/result.jpg'
Path(output_path).parent.mkdir(parents=True, exist_ok=True)
img0.save(output_path)

print(f"Inference completed. Result saved at {output_path}")