import math
import time

import numpy as np
import torch
import torchvision

def check_img_size(img_size, s=32):
    # Verify img_size is a multiple of stride s
    new_size = make_divisible(img_size, int(s))  # ceil gs-multiple
    # if new_size != img_size:
    #     print(f"WARNING: --img-size {img_size:g} must be multiple of max stride {s:g}, updating to {new_size:g}")
    return new_size

def make_divisible(x, divisor):
    # Returns x rounded up to the nearest multiple of divisor
    return math.ceil(x / divisor) * divisor

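
# Hedged usage sketch (the _example_* helpers in this file are illustrative
# additions, not part of the original utilities): check_img_size rounds an
# arbitrary size up to the nearest stride multiple via make_divisible.
def _example_check_img_size():
    assert make_divisible(641, 32) == 672  # ceil(641 / 32) * 32
    assert check_img_size(640) == 640      # already a multiple of 32, unchanged
    assert check_img_size(641) == 672      # rounded up to the next multiple
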
def xyxy2xywh(x):
    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
    y[:, 2] = x[:, 2] - x[:, 0]  # width
    y[:, 3] = x[:, 3] - x[:, 1]  # height
    return y

def xywh2xyxy(x):
    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
    return y

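
# Hedged round-trip sketch: xyxy2xywh and xywh2xyxy are inverses, so converting
# a box both ways should reproduce the input (values are illustrative only).
def _example_box_conversion():
    boxes = np.array([[10.0, 20.0, 50.0, 80.0]])  # one box as [x1, y1, x2, y2]
    xywh = xyxy2xywh(boxes)                       # -> [[30., 50., 40., 60.]]
    assert np.allclose(xywh, [[30.0, 50.0, 40.0, 60.0]])
    assert np.allclose(xywh2xyxy(xywh), boxes)    # round-trip recovers the original
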
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale coords (xyxy) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = resized / original
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2]] -= pad[0]  # x padding
    coords[:, [1, 3]] -= pad[1]  # y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords

def clip_coords(boxes, img_shape):
    # Clip xyxy bounding boxes to image shape (height, width)
    boxes[:, 0].clamp_(0, img_shape[1])  # x1
    boxes[:, 1].clamp_(0, img_shape[0])  # y1
    boxes[:, 2].clamp_(0, img_shape[1])  # x2
    boxes[:, 3].clamp_(0, img_shape[0])  # y2

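
# Hedged sketch of scale_coords: map a detection from a 640x640 letterboxed
# input back to a hypothetical 1280x720 source frame. With these shapes,
# gain = min(640/720, 640/1280) = 0.5 and pad = (0, 140).
def _example_scale_coords():
    det = torch.tensor([[100.0, 200.0, 300.0, 400.0]])  # xyxy in 640x640 space
    restored = scale_coords((640, 640), det.clone(), (720, 1280))
    # x: (100 - 0) / 0.5 = 200 ... y: (200 - 140) / 0.5 = 120 ...
    assert torch.allclose(restored, torch.tensor([[200.0, 120.0, 600.0, 520.0]]))
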
def box_iou(box1, box2):
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in box1 and box2
    """

    def box_area(box):
        # box = 4xn
        return (box[2] - box[0]) * (box[3] - box[1])

    area1 = box_area(box1.T)
    area2 = box_area(box2.T)

    # inter(N, M) = pairwise overlap width * height, clamped at 0 for disjoint boxes
    inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
    return inter / (area1[:, None] + area2 - inter)  # iou = inter / union

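
# Hedged sketch of box_iou on two hand-picked boxes: a 10x10 box overlapped by
# a half-shifted copy of itself has inter = 50, union = 150, so IoU = 1/3.
def _example_box_iou():
    a = torch.tensor([[0.0, 0.0, 10.0, 10.0]])
    b = torch.tensor([[5.0, 0.0, 15.0, 10.0]])
    iou = box_iou(a, b)  # shape (1, 1): pairwise N x M matrix
    assert torch.allclose(iou, torch.tensor([[50.0 / 150.0]]))
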
def non_max_suppression_face(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, labels=()):
    """Performs Non-Maximum Suppression (NMS) on face inference results

    Returns:
        detections with shape: nx16 (x1, y1, x2, y2, conf, 10 landmark coords, cls)
    """
    nc = prediction.shape[2] - 15  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates

    # Settings
    max_wh = 4096  # (pixels) maximum box width and height
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label = nc > 1  # multiple labels per box (adds 0.5ms/img)
    merge = False  # use merge-NMS

    t = time.time()
    output = [torch.zeros((0, 16), device=prediction.device)] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        x = x[xc[xi]]  # confidence

        # Cat apriori labels if autolabelling
        if labels and len(labels[xi]):
            label = labels[xi]
            v = torch.zeros((len(label), nc + 15), device=x.device)
            v[:, :4] = label[:, 1:5]  # box
            v[:, 4] = 1.0  # conf
            v[range(len(label)), label[:, 0].long() + 15] = 1.0  # cls
            x = torch.cat((x, v), 0)

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 15:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx16 (xyxy, conf, landmarks, cls)
        if multi_label:
            i, j = (x[:, 15:] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, j + 15, None], x[i, 5:15], j[:, None].float()), 1)
        else:  # best class only
            conf, j = x[:, 15:].max(1, keepdim=True)
            x = torch.cat((box, conf, x[:, 5:15], j.float()), 1)[conf.view(-1) > conf_thres]

        # Filter by class (the class index sits in column 15, after the landmarks)
        if classes is not None:
            x = x[(x[:, 15:16] == torch.tensor(classes, device=x.device)).any(1)]

        # If none remain process next image
        n = x.shape[0]  # number of boxes
        if not n:
            continue

        # Batched NMS
        c = x[:, 15:16] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
        if merge and (1 < n < 3e3):  # Merge NMS (boxes merged using weighted mean)
            # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
            iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
            weights = iou * scores[None]  # box weights
            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
            if redundant:
                i = i[iou.sum(1) > 1]  # require redundancy

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            break  # time limit exceeded

    return output

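
# Hedged usage sketch for the face NMS (synthetic values only): feed a raw
# prediction tensor of shape (batch, boxes, 16) = (xywh, obj, 10 landmark
# coords, 1 class score) and get back one (n, 16) detection tensor per image.
def _example_nms_face():
    pred = torch.zeros((1, 2, 16))
    pred[0, 0] = torch.tensor([100.0, 100.0, 50.0, 50.0, 0.9] + [100.0] * 10 + [0.8])
    pred[0, 1] = torch.tensor([400.0, 400.0, 50.0, 50.0, 0.1] + [400.0] * 10 + [0.8])
    dets = non_max_suppression_face(pred, conf_thres=0.25, iou_thres=0.45)
    assert dets[0].shape == (1, 16)  # the low-objectness box is filtered out
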
def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, labels=()):
    """Performs Non-Maximum Suppression (NMS) on inference results

    Returns:
        detections with shape: nx6 (x1, y1, x2, y2, conf, cls)
    """
    nc = prediction.shape[2] - 5  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates

    # Settings
    max_wh = 4096  # (pixels) maximum box width and height
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label = nc > 1  # multiple labels per box (adds 0.5ms/img)
    merge = False  # use merge-NMS

    t = time.time()
    output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        x = x[xc[xi]]  # confidence

        # Cat apriori labels if autolabelling
        if labels and len(labels[xi]):
            label_id = labels[xi]
            v = torch.zeros((len(label_id), nc + 5), device=x.device)
            v[:, :4] = label_id[:, 1:5]  # box
            v[:, 4] = 1.0  # conf
            v[range(len(label_id)), label_id[:, 0].long() + 5] = 1.0  # cls
            x = torch.cat((x, v), 0)

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
        else:  # best class only
            conf, j = x[:, 5:].max(1, keepdim=True)
            x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]

        # Filter by class
        if classes is not None:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]

        # Check shape
        n = x.shape[0]  # number of boxes
        if not n:  # no boxes
            continue
        x = x[x[:, 4].argsort(descending=True)]  # sort by confidence

        # Batched NMS
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
        if merge and (1 < n < 3e3):  # Merge NMS (boxes merged using weighted mean)
            # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
            iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
            weights = iou * scores[None]  # box weights
            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
            if redundant:
                i = i[iou.sum(1) > 1]  # require redundancy

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            print(f"WARNING: NMS time limit {time_limit}s exceeded")
            break  # time limit exceeded

    return output

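
# Hedged usage sketch for the generic NMS (synthetic values only): a
# (batch, boxes, 5 + nc) prediction with two heavily overlapping candidates
# collapses to a single nx6 detection.
def _example_nms():
    pred = torch.zeros((1, 2, 6))  # nc = 1 class
    pred[0, 0] = torch.tensor([100.0, 100.0, 50.0, 50.0, 0.9, 0.8])
    pred[0, 1] = torch.tensor([102.0, 101.0, 50.0, 50.0, 0.8, 0.8])
    dets = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45)
    assert dets[0].shape == (1, 6)       # nx6: xyxy, conf, cls
    assert int(dets[0][0, 5]) == 0       # class index of the surviving box
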
def scale_coords_landmarks(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale landmark coords (x1, y1, ..., x5, y5) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = resized / original
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2, 4, 6, 8]] -= pad[0]  # x padding
    coords[:, [1, 3, 5, 7, 9]] -= pad[1]  # y padding
    coords[:, :10] /= gain
    coords[:, 0].clamp_(0, img0_shape[1])  # x1
    coords[:, 1].clamp_(0, img0_shape[0])  # y1
    coords[:, 2].clamp_(0, img0_shape[1])  # x2
    coords[:, 3].clamp_(0, img0_shape[0])  # y2
    coords[:, 4].clamp_(0, img0_shape[1])  # x3
    coords[:, 5].clamp_(0, img0_shape[0])  # y3
    coords[:, 6].clamp_(0, img0_shape[1])  # x4
    coords[:, 7].clamp_(0, img0_shape[0])  # y4
    coords[:, 8].clamp_(0, img0_shape[1])  # x5
    coords[:, 9].clamp_(0, img0_shape[0])  # y5
    return coords

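
# Hedged end-to-end sketch: after the face NMS, columns 5:15 of each detection
# hold the five landmark (x, y) pairs; scale_coords_landmarks maps them back to
# the source frame with the same gain/pad math as scale_coords (shapes and
# values here are illustrative, reusing the 640x640 -> 1280x720 case above).
def _example_scale_landmarks():
    lms = torch.full((1, 10), 320.0)  # five (x, y) pairs at the letterboxed center
    restored = scale_coords_landmarks((640, 640), lms.clone(), (720, 1280))
    # gain = 0.5, pad = (0, 140): x -> 640, y -> 360, the center of 1280x720
    assert torch.allclose(restored, torch.tensor([[640.0, 360.0] * 5]))
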