|
|
|
""" |
|
square_crop.py |
|
|
|
Crop each COCO bounding box to the smallest square that contains it. |
|
|
|
""" |
|
import argparse
import json
import math
import os
from pathlib import Path

from PIL import Image, ImageOps
from tqdm import tqdm
|
|
|
def load_coco(json_path):
    """Load a COCO-format annotation file.

    Parameters
    ----------
    json_path : str or Path
        Path to the COCO JSON file.

    Returns
    -------
    tuple
        ``(id2fname, annotations)`` where ``id2fname`` maps image id to
        file name and ``annotations`` is the raw annotation list.
    """
    with open(json_path, "r") as f:
        coco = json.load(f)
    id2fname = {}
    for img in coco["images"]:
        # Standard COCO uses the key "file_name"; this project's JSON used
        # "filename". Accept both so either export works.
        id2fname[img["id"]] = img["file_name"] if "file_name" in img else img["filename"]
    return id2fname, coco["annotations"]
|
|
|
|
|
def square_from_bbox(x1, y1, x2, y2, img_w, img_h):
    """Compute ``(left, top, side)`` of the smallest square fully containing the bbox.

    The square is centred on the bbox; if it overflows the image it is
    shifted (never resized) back inside. When the square is larger than
    the image along an axis, it is pinned at 0 and overflows the far
    edge — the caller is expected to pad for that case.

    Parameters
    ----------
    x1, y1, x2, y2 : float or int
        Bounding box corners.
    img_w, img_h : int
        Image dimensions in pixels.

    Returns
    -------
    tuple of int
        ``(left, top, side)`` of the crop square.
    """
    # Round the side *up*: the previous int(side) truncation could shave a
    # fraction of a pixel off fractional bboxes, breaking the "fully
    # containing" contract. For integer bboxes the result is unchanged.
    side = math.ceil(max(x2 - x1, y2 - y1))
    cx = (x1 + x2) / 2.0
    cy = (y1 + y2) / 2.0
    left = int(round(cx - side / 2.0))
    top = int(round(cy - side / 2.0))
    # Shift the square back inside the image; max(0, ...) wins when the
    # square is wider/taller than the image itself.
    left = max(0, min(left, img_w - side))
    top = max(0, min(top, img_h - side))
    return left, top, side
|
|
|
|
|
def crop_annotation(img_path, ann, out_dir, pad_color=0, out_size=(640, 640)):
    """Crop one annotation to a square, resize it, and save it to *out_dir*.

    The output file is named ``<image-stem>_ann<annotation-id><suffix>``.

    Parameters
    ----------
    img_path : str or Path
        Path of the source image.
    ann : dict
        Annotation dict; must contain ``"bbox"`` and ``"id"``.
    out_dir : str or Path
        Directory the crop is written to (must exist).
    pad_color : int or tuple, optional
        Fill colour used when the crop window extends past the image.
    out_size : tuple of int, optional
        Final ``(width, height)`` of the saved crop. Defaults to
        ``(640, 640)`` (the previously hard-coded size).
    """
    with Image.open(img_path) as img:
        img_w, img_h = img.size
        # NOTE(review): bbox is unpacked as (x1, y1, x2, y2); standard COCO
        # stores (x, y, width, height) — confirm this matches the JSON.
        x1, y1, x2, y2 = ann["bbox"]
        left, top, side = square_from_bbox(x1, y1, x2, y2, img_w, img_h)

        crop = img.crop((left, top, left + side, top + side))

        # Pad bottom/right if the crop came back smaller than requested.
        if crop.size != (side, side):
            delta_w = side - crop.size[0]
            delta_h = side - crop.size[1]
            crop = ImageOps.expand(crop, (0, 0, delta_w, delta_h), fill=pad_color)

        crop = crop.resize(out_size)

        src = Path(img_path)
        out_name = f"{src.stem}_ann{ann['id']}{src.suffix}"
        crop.save(os.path.join(out_dir, out_name))
|
|
|
|
|
def run_square_crop(input_dir, coco_json_path, cropped_dir):
    """Crop every annotation in a COCO file to a square image.

    Parameters
    ----------
    input_dir : str or Path
        Directory containing the source images.
    coco_json_path : str or Path
        Path to the COCO annotation JSON.
    cropped_dir : str or Path
        Output directory for the crops; created if it does not exist.

    Returns
    -------
    list of str
        Sorted paths of ALL files in *cropped_dir* — note this includes
        any files already present before this run.
    """
    # Fix: previously the output directory had to pre-exist or every
    # crop.save() call would fail.
    os.makedirs(cropped_dir, exist_ok=True)

    id2fname, annotations = load_coco(coco_json_path)

    # Group annotations by image so each image is processed in one pass.
    im2anns = {}
    for ann in annotations:
        im2anns.setdefault(ann["image_id"], []).append(ann)

    for img_id, anns in tqdm(im2anns.items(), desc="Processing images"):
        img_path = os.path.join(input_dir, id2fname[img_id])
        for ann in anns:
            crop_annotation(img_path, ann, cropped_dir)

    return [os.path.join(cropped_dir, fname) for fname in sorted(os.listdir(cropped_dir))]