| instruction | code | response | file |
|---|---|---|---|
Generate missing documentation strings | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import ast
import shutil
import subprocess
import sys
from pathlib import Path
from types import SimpleNamespace
from typing import Any
from ultralytics import __version__
from ultralytics.utils import (
ASSETS,
DEFAULT_CFG,
DEFAULT_CFG_DICT,
DEFAULT_CFG_PATH,
FLOAT_OR_INT,
IS_VSCODE,
LOGGER,
RANK,
ROOT,
RUNS_DIR,
SETTINGS,
SETTINGS_FILE,
STR_OR_PATH,
TESTS_RUNNING,
YAML,
IterableSimpleNamespace,
checks,
colorstr,
deprecation_warn,
vscode_msg,
)
# Define valid solutions
SOLUTION_MAP = {
"count": "ObjectCounter",
"crop": "ObjectCropper",
"blur": "ObjectBlurrer",
"workout": "AIGym",
"heatmap": "Heatmap",
"isegment": "InstanceSegmentation",
"visioneye": "VisionEye",
"speed": "SpeedEstimator",
"queue": "QueueManager",
"analytics": "Analytics",
"inference": "Inference",
"trackzone": "TrackZone",
"help": None,
}
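# Illustrative mapping use: 'yolo solutions heatmap model=yolo26n.pt' resolves to the
# ultralytics.solutions.Heatmap class via SOLUTION_MAP inside handle_yolo_solutions() below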
# Define valid tasks and modes
MODES = frozenset({"train", "val", "predict", "export", "track", "benchmark"})
TASKS = frozenset({"detect", "segment", "classify", "pose", "obb"})
TASK2DATA = {
"detect": "coco8.yaml",
"segment": "coco8-seg.yaml",
"classify": "imagenet10",
"pose": "coco8-pose.yaml",
"obb": "dota8.yaml",
}
TASK2MODEL = {
"detect": "yolo26n.pt",
"segment": "yolo26n-seg.pt",
"classify": "yolo26n-cls.pt",
"pose": "yolo26n-pose.pt",
"obb": "yolo26n-obb.pt",
}
TASK2METRIC = {
"detect": "metrics/mAP50-95(B)",
"segment": "metrics/mAP50-95(M)",
"classify": "metrics/accuracy_top1",
"pose": "metrics/mAP50-95(P)",
"obb": "metrics/mAP50-95(B)",
}
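# Illustrative defaults: a bare 'yolo train task=segment' falls back to model=TASK2MODEL['segment']
# ('yolo26n-seg.pt') and data=TASK2DATA['segment'] ('coco8-seg.yaml') in entrypoint() below when
# those arguments are omitted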
ARGV = sys.argv or ["", ""] # sometimes sys.argv = []
SOLUTIONS_HELP_MSG = f"""
Arguments received: {["yolo", *ARGV[1:]]!s}. Ultralytics 'yolo solutions' usage overview:
yolo solutions SOLUTION ARGS
Where SOLUTION (optional) is one of {list(SOLUTION_MAP.keys())[:-1]}
ARGS (optional) are any number of custom 'arg=value' pairs like 'show_in=True' that override defaults
at https://docs.ultralytics.com/usage/cfg
1. Call object counting solution
yolo solutions count source="path/to/video.mp4" region="[(20, 400), (1080, 400), (1080, 360), (20, 360)]"
2. Call heatmap solution
yolo solutions heatmap colormap=cv2.COLORMAP_PARULA model=yolo26n.pt
3. Call queue management solution
yolo solutions queue region="[(20, 400), (1080, 400), (1080, 360), (20, 360)]" model=yolo26n.pt
4. Call workout monitoring solution for push-ups
yolo solutions workout model=yolo26n-pose.pt kpts=[6, 8, 10]
5. Generate analytical graphs
yolo solutions analytics analytics_type="pie"
6. Track objects within specific zones
yolo solutions trackzone source="path/to/video.mp4" region="[(150, 150), (1130, 150), (1130, 570), (150, 570)]"
7. Streamlit real-time webcam inference GUI
yolo solutions inference
"""
CLI_HELP_MSG = f"""
Arguments received: {["yolo", *ARGV[1:]]!s}. Ultralytics 'yolo' commands use the following syntax:
yolo TASK MODE ARGS
Where TASK (optional) is one of {list(TASKS)}
MODE (required) is one of {list(MODES)}
ARGS (optional) are any number of custom 'arg=value' pairs like 'imgsz=320' that override defaults.
See all ARGS at https://docs.ultralytics.com/usage/cfg or with 'yolo cfg'
1. Train a detection model for 10 epochs with an initial learning_rate of 0.01
yolo train data=coco8.yaml model=yolo26n.pt epochs=10 lr0=0.01
2. Predict a YouTube video using a pretrained segmentation model at image size 320:
yolo predict model=yolo26n-seg.pt source='https://youtu.be/LNwODJXcvt4' imgsz=320
3. Validate a pretrained detection model at batch-size 1 and image size 640:
yolo val model=yolo26n.pt data=coco8.yaml batch=1 imgsz=640
4. Export a YOLO26n classification model to ONNX format at image size 224 by 128 (no TASK required)
yolo export model=yolo26n-cls.pt format=onnx imgsz=224,128
5. Ultralytics solutions usage
yolo solutions count or any of {list(SOLUTION_MAP.keys())[1:-1]} source="path/to/video.mp4"
6. Run special commands:
yolo help
yolo checks
yolo version
yolo settings
yolo copy-cfg
yolo cfg
yolo solutions help
Docs: https://docs.ultralytics.com
Solutions: https://docs.ultralytics.com/solutions/
Community: https://community.ultralytics.com
GitHub: https://github.com/ultralytics/ultralytics
"""
# Define keys for arg type checks
CFG_FLOAT_KEYS = frozenset(
{ # integer or float arguments, i.e. x=2 and x=2.0
"warmup_epochs",
"box",
"cls",
"dfl",
"degrees",
"shear",
"time",
"workspace",
"batch",
}
)
CFG_FRACTION_KEYS = frozenset(
{ # fractional float arguments with 0.0<=values<=1.0
"dropout",
"lr0",
"lrf",
"momentum",
"weight_decay",
"warmup_momentum",
"warmup_bias_lr",
"hsv_h",
"hsv_s",
"hsv_v",
"translate",
"scale",
"perspective",
"flipud",
"fliplr",
"bgr",
"mosaic",
"mixup",
"cutmix",
"copy_paste",
"conf",
"iou",
"fraction",
"multi_scale",
}
)
CFG_INT_KEYS = frozenset(
{ # integer-only arguments
"epochs",
"patience",
"workers",
"seed",
"close_mosaic",
"mask_ratio",
"max_det",
"vid_stride",
"line_width",
"nbs",
"save_period",
}
)
CFG_BOOL_KEYS = frozenset(
{ # boolean-only arguments
"save",
"exist_ok",
"verbose",
"deterministic",
"single_cls",
"rect",
"cos_lr",
"overlap_mask",
"val",
"save_json",
"half",
"dnn",
"plots",
"show",
"save_txt",
"save_conf",
"save_crop",
"save_frames",
"show_labels",
"show_conf",
"visualize",
"augment",
"agnostic_nms",
"retina_masks",
"show_boxes",
"keras",
"optimize",
"int8",
"dynamic",
"simplify",
"nms",
"profile",
"end2end",
}
)
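# Illustrative type handling (see check_cfg below): with hard=False, 'epochs=3.0' is coerced to int 3
# and 'save=1' to bool True, while fraction keys such as 'conf=1.5' always raise a ValueError because
# their values must lie between 0.0 and 1.0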
def cfg2dict(cfg: str | Path | dict | SimpleNamespace) -> dict:
if isinstance(cfg, STR_OR_PATH):
cfg = YAML.load(cfg) # load dict
elif isinstance(cfg, SimpleNamespace):
cfg = vars(cfg) # convert to dict
return cfg
def get_cfg(
cfg: str | Path | dict | SimpleNamespace = DEFAULT_CFG_DICT, overrides: dict | None = None
) -> SimpleNamespace:
cfg = cfg2dict(cfg)
# Merge overrides
if overrides:
overrides = cfg2dict(overrides)
check_dict_alignment(cfg, overrides)
cfg = {**cfg, **overrides} # merge cfg and overrides dicts (prefer overrides)
# Special handling for numeric project/name
for k in "project", "name":
if k in cfg and isinstance(cfg[k], FLOAT_OR_INT):
cfg[k] = str(cfg[k])
if cfg.get("name") == "model": # assign model to 'name' arg
cfg["name"] = str(cfg.get("model", "")).partition(".")[0]
LOGGER.warning(f"'name=model' automatically updated to 'name={cfg['name']}'.")
# Type and Value checks
check_cfg(cfg)
# Return instance
return IterableSimpleNamespace(**cfg)
def check_cfg(cfg: dict, hard: bool = True) -> None:
for k, v in cfg.items():
if v is not None: # None values may be from optional args
if k in CFG_FLOAT_KEYS and not isinstance(v, FLOAT_OR_INT):
if hard:
raise TypeError(
f"'{k}={v}' is of invalid type {type(v).__name__}. "
f"Valid '{k}' types are int (i.e. '{k}=0') or float (i.e. '{k}=0.5')"
)
cfg[k] = float(v)
elif k in CFG_FRACTION_KEYS:
if not isinstance(v, FLOAT_OR_INT):
if hard:
raise TypeError(
f"'{k}={v}' is of invalid type {type(v).__name__}. "
f"Valid '{k}' types are int (i.e. '{k}=0') or float (i.e. '{k}=0.5')"
)
cfg[k] = v = float(v)
if not (0.0 <= v <= 1.0):
raise ValueError(f"'{k}={v}' is an invalid value. Valid '{k}' values are between 0.0 and 1.0.")
elif k in CFG_INT_KEYS and not isinstance(v, int):
if hard:
raise TypeError(
f"'{k}={v}' is of invalid type {type(v).__name__}. '{k}' must be an int (i.e. '{k}=8')"
)
cfg[k] = int(v)
elif k in CFG_BOOL_KEYS and not isinstance(v, bool):
if hard:
raise TypeError(
f"'{k}={v}' is of invalid type {type(v).__name__}. "
f"'{k}' must be a bool (i.e. '{k}=True' or '{k}=False')"
)
cfg[k] = bool(v)
def get_save_dir(args: SimpleNamespace, name: str | None = None) -> Path:
if getattr(args, "save_dir", None):
save_dir = args.save_dir
else:
from ultralytics.utils.files import increment_path
project = args.project or ""
if not Path(project).is_absolute():
project = (ROOT.parent / "tests/tmp/runs" if TESTS_RUNNING else RUNS_DIR) / args.task / project
name = name or args.name or f"{args.mode}"
save_dir = increment_path(Path(project) / name, exist_ok=args.exist_ok if RANK in {-1, 0} else True)
return Path(save_dir).resolve() # resolve to display full path in console
def _handle_deprecation(custom: dict) -> dict:
deprecated_mappings = {
"boxes": ("show_boxes", lambda v: v),
"hide_labels": ("show_labels", lambda v: not bool(v)),
"hide_conf": ("show_conf", lambda v: not bool(v)),
"line_thickness": ("line_width", lambda v: v),
}
removed_keys = {"label_smoothing", "save_hybrid", "crop_fraction"}
for old_key, (new_key, transform) in deprecated_mappings.items():
if old_key not in custom:
continue
deprecation_warn(old_key, new_key)
custom[new_key] = transform(custom.pop(old_key))
for key in removed_keys:
if key not in custom:
continue
deprecation_warn(key)
custom.pop(key)
return custom
def check_dict_alignment(
base: dict, custom: dict, e: Exception | None = None, allowed_custom_keys: set | None = None
) -> None:
custom = _handle_deprecation(custom)
base_keys, custom_keys = (frozenset(x.keys()) for x in (base, custom))
# Allow 'augmentations' as a valid custom parameter for custom Albumentations transforms
if allowed_custom_keys is None:
allowed_custom_keys = {"augmentations", "save_dir"}
if mismatched := [k for k in custom_keys if k not in base_keys and k not in allowed_custom_keys]:
from difflib import get_close_matches
string = ""
for x in mismatched:
matches = get_close_matches(x, base_keys) # key list
matches = [f"{k}={base[k]}" if base.get(k) is not None else k for k in matches]
match_str = f"Similar arguments are i.e. {matches}." if matches else ""
string += f"'{colorstr('red', 'bold', x)}' is not a valid YOLO argument. {match_str}\n"
raise SyntaxError(string + CLI_HELP_MSG) from e
def merge_equals_args(args: list[str]) -> list[str]:
new_args = []
current = ""
depth = 0
i = 0
while i < len(args):
arg = args[i]
# Handle equals sign merging
if arg == "=" and 0 < i < len(args) - 1: # merge ['arg', '=', 'val']
new_args[-1] += f"={args[i + 1]}"
i += 2
continue
elif arg.endswith("=") and i < len(args) - 1 and "=" not in args[i + 1]: # merge ['arg=', 'val']
new_args.append(f"{arg}{args[i + 1]}")
i += 2
continue
elif arg.startswith("=") and i > 0: # merge ['arg', '=val']
new_args[-1] += arg
i += 1
continue
# Handle bracket joining
depth += arg.count("[") - arg.count("]")
current += arg
if depth == 0:
new_args.append(current)
current = ""
i += 1
# Append any remaining current string
if current:
new_args.append(current)
return new_args
def handle_yolo_hub(args: list[str]) -> None:
from ultralytics import hub
if args[0] == "login":
key = args[1] if len(args) > 1 else ""
# Log in to Ultralytics HUB using the provided API key
hub.login(key)
elif args[0] == "logout":
# Log out from Ultralytics HUB
hub.logout()
def handle_yolo_settings(args: list[str]) -> None:
url = "https://docs.ultralytics.com/quickstart/#ultralytics-settings" # help URL
try:
if any(args):
if args[0] == "reset":
SETTINGS_FILE.unlink() # delete the settings file
SETTINGS.reset() # create new settings
LOGGER.info("Settings reset successfully") # inform the user that settings have been reset
else: # save a new setting
new = dict(parse_key_value_pair(a) for a in args)
check_dict_alignment(SETTINGS, new)
SETTINGS.update(new)
for k, v in new.items():
LOGGER.info(f"✅ Updated '{k}={v}'")
LOGGER.info(SETTINGS) # print the current settings
LOGGER.info(f"💡 Learn more about Ultralytics Settings at {url}")
except Exception as e:
LOGGER.warning(f"settings error: '{e}'. Please see {url} for help.")
def handle_yolo_solutions(args: list[str]) -> None:
from ultralytics.solutions.config import SolutionConfig
full_args_dict = vars(SolutionConfig()) # arguments dictionary
overrides = {}
# check dictionary alignment
for arg in merge_equals_args(args):
arg = arg.lstrip("-").rstrip(",")
if "=" in arg:
try:
k, v = parse_key_value_pair(arg)
overrides[k] = v
except (NameError, SyntaxError, ValueError, AssertionError) as e:
check_dict_alignment(full_args_dict, {arg: ""}, e)
elif arg in full_args_dict and isinstance(full_args_dict.get(arg), bool):
overrides[arg] = True
check_dict_alignment(full_args_dict, overrides) # dict alignment
# Get solution name
if not args:
LOGGER.warning("No solution name provided. i.e `yolo solutions count`. Defaulting to 'count'.")
args = ["count"]
if args[0] == "help":
LOGGER.info(SOLUTIONS_HELP_MSG)
return # Early return for 'help' case
elif args[0] in SOLUTION_MAP:
solution_name = args.pop(0) # Extract the solution name directly
else:
LOGGER.warning(
f"❌ '{args[0]}' is not a valid solution. 💡 Defaulting to 'count'.\n"
f"🚀 Available solutions: {', '.join(list(SOLUTION_MAP.keys())[:-1])}\n"
)
solution_name = "count" # Default for invalid solution
if solution_name == "inference":
checks.check_requirements("streamlit>=1.29.0")
LOGGER.info("💡 Loading Ultralytics live inference app...")
subprocess.run(
[ # Run subprocess with Streamlit custom argument
"streamlit",
"run",
str(ROOT / "solutions/streamlit_inference.py"),
"--server.headless",
"true",
overrides.pop("model", "yolo26n.pt"),
]
)
else:
import cv2 # Only needed for cap and vw functionality
from ultralytics import solutions
solution = getattr(solutions, SOLUTION_MAP[solution_name])(is_cli=True, **overrides) # class i.e. ObjectCounter
cap = cv2.VideoCapture(solution.CFG["source"]) # read the video file
if solution_name != "crop":
# extract width, height and fps of the video file, create save directory and initialize video writer
w, h, fps = (
int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS)
)
if solution_name == "analytics": # analytical graphs follow fixed shape for output i.e w=1920, h=1080
w, h = 1280, 720
save_dir = get_save_dir(SimpleNamespace(task="solutions", name="exp", exist_ok=False, project=None))
save_dir.mkdir(parents=True, exist_ok=True) # create the output directory i.e. runs/solutions/exp
vw = cv2.VideoWriter(str(save_dir / f"{solution_name}.avi"), cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))
try: # Process video frames
f_n = 0 # frame number, required for analytical graphs
while cap.isOpened():
success, frame = cap.read()
if not success:
break
results = solution(frame, f_n := f_n + 1) if solution_name == "analytics" else solution(frame)
if solution_name != "crop":
vw.write(results.plot_im)
if solution.CFG["show"] and cv2.waitKey(1) & 0xFF == ord("q"):
break
finally:
cap.release()
def parse_key_value_pair(pair: str = "key=value") -> tuple:
k, v = pair.split("=", 1) # split on first '=' sign
k, v = k.strip(), v.strip() # remove spaces
assert v, f"missing '{k}' value"
return k, smart_value(v)
def smart_value(v: str) -> Any:
v_lower = v.lower()
if v_lower == "none":
return None
elif v_lower == "true":
return True
elif v_lower == "false":
return False
else:
try:
return ast.literal_eval(v)
except Exception:
return v
def entrypoint(debug: str = "") -> None:
args = (debug.split(" ") if debug else ARGV)[1:]
if not args: # no arguments passed
LOGGER.info(CLI_HELP_MSG)
return
special = {
"checks": checks.collect_system_info,
"version": lambda: LOGGER.info(__version__),
"settings": lambda: handle_yolo_settings(args[1:]),
"cfg": lambda: YAML.print(DEFAULT_CFG_PATH),
"hub": lambda: handle_yolo_hub(args[1:]),
"login": lambda: handle_yolo_hub(args),
"logout": lambda: handle_yolo_hub(args),
"copy-cfg": copy_default_cfg,
"solutions": lambda: handle_yolo_solutions(args[1:]),
"help": lambda: LOGGER.info(CLI_HELP_MSG), # help below hub for -h flag precedence
}
full_args_dict = {**DEFAULT_CFG_DICT, **{k: None for k in TASKS}, **{k: None for k in MODES}, **special}
# Define common misuses of special commands, i.e. -h, -help, --help
special.update({k[0]: v for k, v in special.items()}) # single-character shortcuts, i.e. 'v' for 'version'
special.update({k[:-1]: v for k, v in special.items() if len(k) > 1 and k.endswith("s")}) # singular
special = {**special, **{f"-{k}": v for k, v in special.items()}, **{f"--{k}": v for k, v in special.items()}}
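# Illustrative aliases: after these updates, 'yolo check', 'yolo -h' and 'yolo --version'
# resolve to the same handlers as 'yolo checks', 'yolo help' and 'yolo version'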
overrides = {} # basic overrides, i.e. imgsz=320
for a in merge_equals_args(args): # merge spaces around '=' sign
if a.startswith("--"):
LOGGER.warning(f"argument '{a}' does not require leading dashes '--', updating to '{a[2:]}'.")
a = a[2:]
if a.endswith(","):
LOGGER.warning(f"argument '{a}' does not require trailing comma ',', updating to '{a[:-1]}'.")
a = a[:-1]
if "=" in a:
try:
k, v = parse_key_value_pair(a)
if k == "cfg" and v is not None: # custom.yaml passed
LOGGER.info(f"Overriding {DEFAULT_CFG_PATH} with {v}")
overrides = {k: val for k, val in YAML.load(checks.check_yaml(v)).items() if k != "cfg"}
else:
overrides[k] = v
except (NameError, SyntaxError, ValueError, AssertionError) as e:
check_dict_alignment(full_args_dict, {a: ""}, e)
elif a in TASKS:
overrides["task"] = a
elif a in MODES:
overrides["mode"] = a
elif a.lower() in special:
special[a.lower()]()
return
elif a in DEFAULT_CFG_DICT and isinstance(DEFAULT_CFG_DICT[a], bool):
overrides[a] = True # auto-True for default bool args, i.e. 'yolo show' sets show=True
elif a in DEFAULT_CFG_DICT:
raise SyntaxError(
f"'{colorstr('red', 'bold', a)}' is a valid YOLO argument but is missing an '=' sign "
f"to set its value, i.e. try '{a}={DEFAULT_CFG_DICT[a]}'\n{CLI_HELP_MSG}"
)
else:
check_dict_alignment(full_args_dict, {a: ""})
# Check keys
check_dict_alignment(full_args_dict, overrides)
# Mode
mode = overrides.get("mode")
if mode is None:
mode = DEFAULT_CFG.mode or "predict"
LOGGER.warning(f"'mode' argument is missing. Valid modes are {list(MODES)}. Using default 'mode={mode}'.")
elif mode not in MODES:
raise ValueError(f"Invalid 'mode={mode}'. Valid modes are {list(MODES)}.\n{CLI_HELP_MSG}")
# Task
task = overrides.pop("task", None)
if task:
if task not in TASKS:
if task == "track":
LOGGER.warning(
f"invalid 'task=track', setting 'task=detect' and 'mode=track'. Valid tasks are {list(TASKS)}.\n{CLI_HELP_MSG}."
)
task, mode = "detect", "track"
else:
raise ValueError(f"Invalid 'task={task}'. Valid tasks are {list(TASKS)}.\n{CLI_HELP_MSG}")
if "model" not in overrides:
overrides["model"] = TASK2MODEL[task]
# Model
model = overrides.pop("model", DEFAULT_CFG.model)
if model is None:
model = "yolo26n.pt"
LOGGER.warning(f"'model' argument is missing. Using default 'model={model}'.")
overrides["model"] = model
stem = Path(model).stem.lower()
if "rtdetr" in stem: # guess architecture
from ultralytics import RTDETR
model = RTDETR(model) # no task argument
elif "fastsam" in stem:
from ultralytics import FastSAM
model = FastSAM(model)
elif "sam_" in stem or "sam2_" in stem or "sam2.1_" in stem:
from ultralytics import SAM
model = SAM(model)
else:
from ultralytics import YOLO
model = YOLO(model, task=task)
if "yoloe" in stem or "world" in stem:
cls_list = overrides.pop("classes", DEFAULT_CFG.classes)
if cls_list is not None and isinstance(cls_list, str):
model.set_classes(cls_list.split(",")) # convert "person, bus" -> ['person', ' bus'].
# Task Update
if task != model.task:
if task:
LOGGER.warning(
f"conflicting 'task={task}' passed with 'task={model.task}' model. "
f"Ignoring 'task={task}' and updating to 'task={model.task}' to match model."
)
task = model.task
# Mode
if mode in {"predict", "track"} and "source" not in overrides:
overrides["source"] = (
"https://ultralytics.com/images/boats.jpg" if task == "obb" else DEFAULT_CFG.source or ASSETS
)
LOGGER.warning(f"'source' argument is missing. Using default 'source={overrides['source']}'.")
elif mode in {"train", "val"}:
if "data" not in overrides and "resume" not in overrides:
overrides["data"] = DEFAULT_CFG.data or TASK2DATA.get(task or DEFAULT_CFG.task, DEFAULT_CFG.data)
LOGGER.warning(f"'data' argument is missing. Using default 'data={overrides['data']}'.")
elif mode == "export":
if "format" not in overrides:
overrides["format"] = DEFAULT_CFG.format or "torchscript"
LOGGER.warning(f"'format' argument is missing. Using default 'format={overrides['format']}'.")
# Run command in python
getattr(model, mode)(**overrides) # default args from model
# Show help
LOGGER.info(f"💡 Learn more at https://docs.ultralytics.com/modes/{mode}")
# Recommend VS Code extension
if IS_VSCODE and SETTINGS.get("vscode_msg", True):
LOGGER.info(vscode_msg())
# Special modes --------------------------------------------------------------------------------------------------------
def copy_default_cfg() -> None:
new_file = Path.cwd() / DEFAULT_CFG_PATH.name.replace(".yaml", "_copy.yaml")
shutil.copy2(DEFAULT_CFG_PATH, new_file)
LOGGER.info(
f"{DEFAULT_CFG_PATH} copied to {new_file}\n"
f"Example YOLO command with this new custom cfg:\n yolo cfg='{new_file}' imgsz=320 batch=8"
)
if __name__ == "__main__":
# Example: entrypoint(debug='yolo predict model=yolo26n.pt')
entrypoint(debug="") | --- +++ @@ -244,6 +244,32 @@
def cfg2dict(cfg: str | Path | dict | SimpleNamespace) -> dict:
+ """Convert a configuration object to a dictionary.
+
+ Args:
+ cfg (str | Path | dict | SimpleNamespace): Configuration object to be converted. Can be a file path, a string, a
+ dictionary, or a SimpleNamespace object.
+
+ Returns:
+ (dict): Configuration object in dictionary format.
+
+ Examples:
+ Convert a YAML file path to a dictionary:
+ >>> config_dict = cfg2dict("config.yaml")
+
+ Convert a SimpleNamespace to a dictionary:
+ >>> from types import SimpleNamespace
+ >>> config_sn = SimpleNamespace(param1="value1", param2="value2")
+ >>> config_dict = cfg2dict(config_sn)
+
+ Pass through an already existing dictionary:
+ >>> config_dict = cfg2dict({"param1": "value1", "param2": "value2"})
+
+ Notes:
+ - If cfg is a path or string, it's loaded as YAML and converted to a dictionary.
+ - If cfg is a SimpleNamespace object, it's converted to a dictionary using vars().
+ - If cfg is already a dictionary, it's returned unchanged.
+ """
if isinstance(cfg, STR_OR_PATH):
cfg = YAML.load(cfg) # load dict
elif isinstance(cfg, SimpleNamespace):
@@ -254,6 +280,27 @@ def get_cfg(
cfg: str | Path | dict | SimpleNamespace = DEFAULT_CFG_DICT, overrides: dict | None = None
) -> SimpleNamespace:
+ """Load and merge configuration data from a file or dictionary, with optional overrides.
+
+ Args:
+ cfg (str | Path | dict | SimpleNamespace): Configuration data source. Can be a file path, dictionary, or
+ SimpleNamespace object.
+ overrides (dict | None): Dictionary containing key-value pairs to override the base configuration.
+
+ Returns:
+ (SimpleNamespace): Namespace containing the merged configuration arguments.
+
+ Examples:
+ >>> from ultralytics.cfg import get_cfg
+ >>> config = get_cfg() # Load default configuration
+ >>> config_with_overrides = get_cfg("path/to/config.yaml", overrides={"epochs": 50, "batch_size": 16})
+
+ Notes:
+ - If both `cfg` and `overrides` are provided, the values in `overrides` will take precedence.
+ - Special handling ensures alignment and correctness of the configuration, such as converting numeric
+ `project` and `name` to strings and validating configuration keys and values.
+ - The function performs type and value checks on the configuration data.
+ """
cfg = cfg2dict(cfg)
# Merge overrides
@@ -278,6 +325,32 @@
def check_cfg(cfg: dict, hard: bool = True) -> None:
+ """Check configuration argument types and values for the Ultralytics library.
+
+ This function validates the types and values of configuration arguments, ensuring correctness and converting them if
+ necessary. It checks for specific key types defined in global variables such as `CFG_FLOAT_KEYS`,
+ `CFG_FRACTION_KEYS`, `CFG_INT_KEYS`, and `CFG_BOOL_KEYS`.
+
+ Args:
+ cfg (dict): Configuration dictionary to validate.
+ hard (bool): If True, raises exceptions for invalid types and values; if False, attempts to convert them.
+
+ Examples:
+ >>> config = {
+ ... "epochs": 50, # valid integer
+ ... "lr0": 0.01, # valid float
+ ... "momentum": 1.2, # invalid float (out of 0.0-1.0 range)
+ ... "save": "true", # invalid bool
+ ... }
+ >>> check_cfg(config, hard=False)
+ >>> print(config)
+ {'epochs': 50, 'lr0': 0.01, 'momentum': 1.2, 'save': False} # corrected 'save' key
+
+ Notes:
+ - The function modifies the input dictionary in-place.
+ - None values are ignored as they may be from optional arguments.
+ - Fraction keys are checked to be within the range [0.0, 1.0].
+ """
for k, v in cfg.items():
if v is not None: # None values may be from optional args
if k in CFG_FLOAT_KEYS and not isinstance(v, FLOAT_OR_INT):
@@ -313,6 +386,24 @@
def get_save_dir(args: SimpleNamespace, name: str | None = None) -> Path:
+ """Return the directory path for saving outputs, derived from arguments or default settings.
+
+ Args:
+ args (SimpleNamespace): Namespace object containing configurations such as 'project', 'name', 'task', 'mode',
+ and 'save_dir'.
+ name (str | None): Optional name for the output directory. If not provided, it defaults to 'args.name' or the
+ 'args.mode'.
+
+ Returns:
+ (Path): Directory path where outputs should be saved.
+
+ Examples:
+ >>> from types import SimpleNamespace
+ >>> args = SimpleNamespace(project="my_project", task="detect", mode="train", exist_ok=True)
+ >>> save_dir = get_save_dir(args)
+ >>> print(save_dir)
+ runs/detect/my_project/train
+ """
if getattr(args, "save_dir", None):
save_dir = args.save_dir
else:
@@ -328,6 +419,25 @@
def _handle_deprecation(custom: dict) -> dict:
+ """Handle deprecated configuration keys by mapping them to current equivalents with deprecation warnings.
+
+ Args:
+ custom (dict): Configuration dictionary potentially containing deprecated keys.
+
+ Returns:
+ (dict): Updated configuration dictionary with deprecated keys replaced.
+
+ Examples:
+ >>> custom_config = {"boxes": True, "hide_labels": False, "line_thickness": 2}
+ >>> _handle_deprecation(custom_config)
+ >>> print(custom_config)
+ {'show_boxes': True, 'show_labels': True, 'line_width': 2}
+
+ Notes:
+ This function modifies the input dictionary in-place, replacing deprecated keys with their current
+ equivalents. It also handles value conversions where necessary, such as inverting boolean values for
+ 'hide_labels' and 'hide_conf'.
+ """
deprecated_mappings = {
"boxes": ("show_boxes", lambda v: v),
"hide_labels": ("show_labels", lambda v: not bool(v)),
@@ -354,6 +464,31 @@ def check_dict_alignment(
base: dict, custom: dict, e: Exception | None = None, allowed_custom_keys: set | None = None
) -> None:
+ """Check alignment between custom and base configuration dictionaries, handling deprecated keys and providing error
+ messages for mismatched keys.
+
+ Args:
+ base (dict): The base configuration dictionary containing valid keys.
+ custom (dict): The custom configuration dictionary to be checked for alignment.
+ e (Exception | None): Optional error instance passed by the calling function.
+ allowed_custom_keys (set | None): Optional set of additional keys that are allowed in the custom dictionary.
+
+ Raises:
+ SyntaxError: If mismatched keys are found between the custom and base dictionaries.
+
+ Examples:
+ >>> base_cfg = {"epochs": 50, "lr0": 0.01, "batch_size": 16}
+ >>> custom_cfg = {"epoch": 100, "lr": 0.02, "batch_size": 32}
+ >>> try:
+ ... check_dict_alignment(base_cfg, custom_cfg)
+ ... except SyntaxError:
+ ... print("Mismatched keys found")
+
+ Notes:
+ - Suggests corrections for mismatched keys based on similarity to valid keys.
+ - Automatically replaces deprecated keys in the custom configuration with updated equivalents.
+ - Prints detailed error messages for each mismatched key to help users correct their configurations.
+ """
custom = _handle_deprecation(custom)
base_keys, custom_keys = (frozenset(x.keys()) for x in (base, custom))
# Allow 'augmentations' as a valid custom parameter for custom Albumentations transforms
@@ -372,6 +507,26 @@
def merge_equals_args(args: list[str]) -> list[str]:
+ """Merge arguments around isolated '=' in a list of strings and join fragments with brackets.
+
+ This function handles the following cases:
+ 1. ['arg', '=', 'val'] becomes ['arg=val']
+ 2. ['arg=', 'val'] becomes ['arg=val']
+ 3. ['arg', '=val'] becomes ['arg=val']
+ 4. Joins fragments with brackets, e.g., ['imgsz=[3,', '640,', '640]'] becomes ['imgsz=[3,640,640]']
+
+ Args:
+ args (list[str]): A list of strings where each element represents an argument or fragment.
+
+ Returns:
+ (list[str]): A list of strings where the arguments around isolated '=' are merged and fragments with brackets
+ are joined.
+
+ Examples:
+ >>> args = ["arg1", "=", "value", "arg2=", "value2", "arg3", "=value3", "imgsz=[3,", "640,", "640]"]
+ >>> merge_equals_args(args)
+ ['arg1=value', 'arg2=value2', 'arg3=value3', 'imgsz=[3,640,640]']
+ """
new_args = []
current = ""
depth = 0
@@ -411,6 +566,23 @@
def handle_yolo_hub(args: list[str]) -> None:
+ """Handle Ultralytics HUB command-line interface (CLI) commands for authentication.
+
+ This function processes Ultralytics HUB CLI commands such as login and logout. It should be called when executing a
+ script with arguments related to HUB authentication.
+
+ Args:
+ args (list[str]): A list of command line arguments. The first argument should be either 'login' or 'logout'. For
+ 'login', an optional second argument can be the API key.
+
+ Examples:
+ $ yolo login YOUR_API_KEY
+
+ Notes:
+ - The function imports the 'hub' module from ultralytics to perform login and logout operations.
+ - For the 'login' command, if no API key is provided, an empty string is passed to the login function.
+ - The 'logout' command does not require any additional arguments.
+ """
from ultralytics import hub
if args[0] == "login":
@@ -423,6 +595,27 @@
def handle_yolo_settings(args: list[str]) -> None:
+ """Handle YOLO settings command-line interface (CLI) commands.
+
+ This function processes YOLO settings CLI commands such as reset and updating individual settings. It should be
+ called when executing a script with arguments related to YOLO settings management.
+
+ Args:
+ args (list[str]): A list of command line arguments for YOLO settings management.
+
+ Examples:
+ >>> handle_yolo_settings(["reset"]) # Reset YOLO settings
+ >>> handle_yolo_settings(["default_cfg_path=yolo26n.yaml"]) # Update a specific setting
+
+ Notes:
+ - If no arguments are provided, the function will display the current settings.
+ - The 'reset' command will delete the existing settings file and create new default settings.
+ - Other arguments are treated as key-value pairs to update specific settings.
+ - The function will check for alignment between the provided settings and the existing ones.
+ - After processing, the updated settings will be displayed.
+ - For more information on handling YOLO settings, visit:
+ https://docs.ultralytics.com/quickstart/#ultralytics-settings
+ """
url = "https://docs.ultralytics.com/quickstart/#ultralytics-settings" # help URL
try:
if any(args):
@@ -444,6 +637,33 @@
def handle_yolo_solutions(args: list[str]) -> None:
+ """Process YOLO solutions arguments and run the specified computer vision solutions pipeline.
+
+ Args:
+ args (list[str]): Command-line arguments for configuring and running the Ultralytics YOLO solutions.
+
+ Examples:
+ Run people counting solution with default settings:
+ >>> handle_yolo_solutions(["count"])
+
+ Run analytics with custom configuration:
+ >>> handle_yolo_solutions(["analytics", "conf=0.25", "source=path/to/video.mp4"])
+
+ Run inference with custom configuration, requires Streamlit version 1.29.0 or higher.
+ >>> handle_yolo_solutions(["inference", "model=yolo26n.pt"])
+
+ Notes:
+ - Arguments can be provided in the format 'key=value' or as boolean flags
+ - Available solutions are defined in SOLUTION_MAP with their respective classes and methods
+ - If an invalid solution is provided, defaults to 'count' solution
+ - Output videos are saved in the 'runs/solutions/exp' directory as '{solution_name}.avi'
+ - For 'analytics' solution, frame numbers are tracked for generating analytical graphs
+ - Video processing can be interrupted by pressing 'q'
+ - Processes video frames sequentially and saves output in .avi format
+ - If no source is specified, downloads and uses a default sample video
+ - The inference solution will be launched using the 'streamlit run' command.
+ - The Streamlit app file is located in the Ultralytics package directory.
+ """
from ultralytics.solutions.config import SolutionConfig
full_args_dict = vars(SolutionConfig()) # arguments dictionary
@@ -526,6 +746,32 @@
def parse_key_value_pair(pair: str = "key=value") -> tuple:
+ """Parse a key-value pair string into separate key and value components.
+
+ Args:
+ pair (str): A string containing a key-value pair in the format "key=value".
+
+ Returns:
+ key (str): The parsed key.
+ value (Any): The parsed value, converted to an appropriate Python type by smart_value().
+
+ Raises:
+ AssertionError: If the value is missing or empty.
+
+ Examples:
+ >>> key, value = parse_key_value_pair("model=yolo26n.pt")
+ >>> print(f"Key: {key}, Value: {value}")
+ Key: model, Value: yolo26n.pt
+
+ >>> key, value = parse_key_value_pair("epochs=100")
+ >>> print(f"Key: {key}, Value: {value}")
+ Key: epochs, Value: 100
+
+ Notes:
+ - The function splits the input string on the first '=' character.
+ - Leading and trailing whitespace is removed from both key and value.
+ - An assertion error is raised if the value is empty after stripping.
+ """
k, v = pair.split("=", 1) # split on first '=' sign
k, v = k.strip(), v.strip() # remove spaces
assert v, f"missing '{k}' value"
@@ -533,6 +779,35 @@
def smart_value(v: str) -> Any:
+ """Convert a string representation of a value to its appropriate Python type.
+
+ This function attempts to convert a given string into a Python object of the most appropriate type. It handles
+ conversions to None, bool, int, float, and other types that can be evaluated safely.
+
+ Args:
+ v (str): The string representation of the value to be converted.
+
+ Returns:
+ (Any): The converted value. The type can be None, bool, int, float, or the original string if no conversion is
+ applicable.
+
+ Examples:
+ >>> smart_value("42")
+ 42
+ >>> smart_value("3.14")
+ 3.14
+ >>> smart_value("True")
+ True
+ >>> smart_value("None")
+ None
+ >>> smart_value("some_string")
+ 'some_string'
+
+ Notes:
+ - The function uses a case-insensitive comparison for boolean and None values.
+ - For other types, it attempts to use Python's ast.literal_eval() function for safe evaluation.
+ - If no conversion is possible, the original string is returned.
+ """
v_lower = v.lower()
if v_lower == "none":
return None
@@ -548,6 +823,29 @@
def entrypoint(debug: str = "") -> None:
+ """Ultralytics entrypoint function for parsing and executing command-line arguments.
+
+ This function serves as the main entry point for the Ultralytics CLI, parsing command-line arguments and executing
+ the corresponding tasks such as training, validation, prediction, exporting models, and more.
+
+ Args:
+ debug (str): Space-separated string of command-line arguments for debugging purposes.
+
+ Examples:
+ Train a detection model for 10 epochs with an initial learning_rate of 0.01:
+ >>> entrypoint("train data=coco8.yaml model=yolo26n.pt epochs=10 lr0=0.01")
+
+ Predict a YouTube video using a pretrained segmentation model at image size 320:
+ >>> entrypoint("predict model=yolo26n-seg.pt source='https://youtu.be/LNwODJXcvt4' imgsz=320")
+
+ Validate a pretrained detection model at batch-size 1 and image size 640:
+ >>> entrypoint("val model=yolo26n.pt data=coco8.yaml batch=1 imgsz=640")
+
+ Notes:
+ - If no arguments are passed, the function will display the usage help message.
+ - For a list of all available commands and their arguments, see the provided help messages and the
+ Ultralytics documentation at https://docs.ultralytics.com.
+ """
args = (debug.split(" ") if debug else ARGV)[1:]
if not args: # no arguments passed
LOGGER.info(CLI_HELP_MSG)
@@ -697,6 +995,25 @@
# Special modes --------------------------------------------------------------------------------------------------------
def copy_default_cfg() -> None:
+ """Copy the default configuration file and create a new one with '_copy' appended to its name.
+
+ This function duplicates the existing default configuration file (DEFAULT_CFG_PATH) and saves it with '_copy'
+ appended to its name in the current working directory. It provides a convenient way to create a custom configuration
+ file based on the default settings.
+
+ Examples:
+ >>> copy_default_cfg()
+ # Output: default.yaml copied to /path/to/current/directory/default_copy.yaml
+ # Example YOLO command with this new custom cfg:
+ # yolo cfg='/path/to/current/directory/default_copy.yaml' imgsz=320 batch=8
+
+ Notes:
+ - The new configuration file is created in the current working directory.
+ - After copying, the function prints a message with the new file's location and an example
+ YOLO command demonstrating how to use the new configuration file.
+ - This function is useful for users who want to modify the default configuration without
+ altering the original file.
+ """
new_file = Path.cwd() / DEFAULT_CFG_PATH.name.replace(".yaml", "_copy.yaml")
shutil.copy2(DEFAULT_CFG_PATH, new_file)
LOGGER.info(
@@ -707,4 +1024,4 @@
if __name__ == "__main__":
# Example: entrypoint(debug='yolo predict model=yolo26n.pt')
- entrypoint(debug="")
+ entrypoint(debug="")
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/cfg/__init__.py |
Write reusable docstrings | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
from __future__ import annotations
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn.init import trunc_normal_
from ultralytics.nn.modules import MLP
from ultralytics.utils import LOGGER
from .blocks import SAM2TwoWayTransformer, TwoWayTransformer
from .decoders import MaskDecoder, SAM2MaskDecoder
from .encoders import ImageEncoderViT, PromptEncoder
from .utils import get_1d_sine_pe, select_closest_cond_frames
# a large negative value as a placeholder score for missing objects
NO_OBJ_SCORE = -1024.0
class SAMModel(nn.Module):
mask_threshold: float = 0.0
def __init__(
self,
image_encoder: ImageEncoderViT,
prompt_encoder: PromptEncoder,
mask_decoder: MaskDecoder,
pixel_mean: list[float] = (123.675, 116.28, 103.53),
pixel_std: list[float] = (58.395, 57.12, 57.375),
) -> None:
super().__init__()
self.image_encoder = image_encoder
self.prompt_encoder = prompt_encoder
self.mask_decoder = mask_decoder
self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)
def set_imgsz(self, imgsz):
if hasattr(self.image_encoder, "set_imgsz"):
self.image_encoder.set_imgsz(imgsz)
self.prompt_encoder.input_image_size = imgsz
self.prompt_encoder.image_embedding_size = [x // 16 for x in imgsz] # 16 is fixed as patch size of ViT model
self.image_encoder.img_size = imgsz[0]
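# e.g. (illustrative) set_imgsz([1024, 1024]) gives image_embedding_size=[64, 64] with the fixed ViT patch size of 16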
class SAM2Model(torch.nn.Module):
mask_threshold: float = 0.0
def __init__(
self,
image_encoder,
memory_attention,
memory_encoder,
num_maskmem=7,
image_size=512,
backbone_stride=16,
sigmoid_scale_for_mem_enc=1.0,
sigmoid_bias_for_mem_enc=0.0,
binarize_mask_from_pts_for_mem_enc=False,
use_mask_input_as_output_without_sam=False,
max_cond_frames_in_attn=-1,
directly_add_no_mem_embed=False,
use_high_res_features_in_sam=False,
multimask_output_in_sam=False,
multimask_min_pt_num=1,
multimask_max_pt_num=1,
multimask_output_for_tracking=False,
use_multimask_token_for_obj_ptr: bool = False,
iou_prediction_use_sigmoid=False,
memory_temporal_stride_for_eval=1,
non_overlap_masks_for_mem_enc=False,
use_obj_ptrs_in_encoder=False,
max_obj_ptrs_in_encoder=16,
add_tpos_enc_to_obj_ptrs=True,
proj_tpos_enc_in_obj_ptrs=False,
use_signed_tpos_enc_to_obj_ptrs=False,
only_obj_ptrs_in_the_past_for_eval=False,
pred_obj_scores: bool = False,
pred_obj_scores_mlp: bool = False,
fixed_no_obj_ptr: bool = False,
soft_no_obj_ptr: bool = False,
use_mlp_for_obj_ptr_proj: bool = False,
no_obj_embed_spatial: bool = False,
sam_mask_decoder_extra_args=None,
compile_image_encoder: bool = False,
):
super().__init__()
# Part 1: the image backbone
self.image_encoder = image_encoder
# Use level 0, 1, 2 for high-res setting, or just level 2 for the default setting
self.use_high_res_features_in_sam = use_high_res_features_in_sam
self.num_feature_levels = 3 if use_high_res_features_in_sam else 1
self.use_obj_ptrs_in_encoder = use_obj_ptrs_in_encoder
self.max_obj_ptrs_in_encoder = max_obj_ptrs_in_encoder
if use_obj_ptrs_in_encoder:
# A conv layer to downsample the mask prompt to stride 4 (the same stride as
# low-res SAM mask logits) and to change its scales from 0~1 to SAM logit scale,
# so that it can be fed into the SAM mask decoder to generate a pointer.
self.mask_downsample = torch.nn.Conv2d(1, 1, kernel_size=4, stride=4)
self.add_tpos_enc_to_obj_ptrs = add_tpos_enc_to_obj_ptrs
if proj_tpos_enc_in_obj_ptrs:
assert add_tpos_enc_to_obj_ptrs # these options need to be used together
self.proj_tpos_enc_in_obj_ptrs = proj_tpos_enc_in_obj_ptrs
self.use_signed_tpos_enc_to_obj_ptrs = use_signed_tpos_enc_to_obj_ptrs
self.only_obj_ptrs_in_the_past_for_eval = only_obj_ptrs_in_the_past_for_eval
# Part 2: memory attention to condition current frame's visual features
# with memories (and obj ptrs) from past frames
self.memory_attention = memory_attention
self.hidden_dim = memory_attention.d_model
# Part 3: memory encoder for the previous frame's outputs
self.memory_encoder = memory_encoder
self.mem_dim = self.hidden_dim
if hasattr(self.memory_encoder, "out_proj") and hasattr(self.memory_encoder.out_proj, "weight"):
# if there is compression of memories along channel dim
self.mem_dim = self.memory_encoder.out_proj.weight.shape[0]
self.num_maskmem = num_maskmem # Number of memories accessible
# Temporal encoding of the memories
self.maskmem_tpos_enc = torch.nn.Parameter(torch.zeros(num_maskmem, 1, 1, self.mem_dim))
trunc_normal_(self.maskmem_tpos_enc, std=0.02)
# a single token to indicate no memory embedding from previous frames
self.no_mem_embed = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim))
self.no_mem_pos_enc = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim))
trunc_normal_(self.no_mem_embed, std=0.02)
trunc_normal_(self.no_mem_pos_enc, std=0.02)
self.directly_add_no_mem_embed = directly_add_no_mem_embed
# Apply sigmoid to the output raw mask logits (to turn them from
# range (-inf, +inf) to range (0, 1)) before feeding them into the memory encoder
self.sigmoid_scale_for_mem_enc = sigmoid_scale_for_mem_enc
self.sigmoid_bias_for_mem_enc = sigmoid_bias_for_mem_enc
self.binarize_mask_from_pts_for_mem_enc = binarize_mask_from_pts_for_mem_enc
self.non_overlap_masks_for_mem_enc = non_overlap_masks_for_mem_enc
self.memory_temporal_stride_for_eval = memory_temporal_stride_for_eval
# On frames with mask input, whether to directly output the input mask without
# using a SAM prompt encoder + mask decoder
self.use_mask_input_as_output_without_sam = use_mask_input_as_output_without_sam
self.multimask_output_in_sam = multimask_output_in_sam
self.multimask_min_pt_num = multimask_min_pt_num
self.multimask_max_pt_num = multimask_max_pt_num
self.multimask_output_for_tracking = multimask_output_for_tracking
self.use_multimask_token_for_obj_ptr = use_multimask_token_for_obj_ptr
self.iou_prediction_use_sigmoid = iou_prediction_use_sigmoid
# Part 4: SAM-style prompt encoder (for both mask and point inputs)
# and SAM-style mask decoder for the final mask output
self.image_size = image_size
self.backbone_stride = backbone_stride
self.sam_mask_decoder_extra_args = sam_mask_decoder_extra_args
self.pred_obj_scores = pred_obj_scores
self.pred_obj_scores_mlp = pred_obj_scores_mlp
self.fixed_no_obj_ptr = fixed_no_obj_ptr
self.soft_no_obj_ptr = soft_no_obj_ptr
if self.fixed_no_obj_ptr:
assert self.pred_obj_scores
assert self.use_obj_ptrs_in_encoder
if self.pred_obj_scores and self.use_obj_ptrs_in_encoder:
self.no_obj_ptr = torch.nn.Parameter(torch.zeros(1, self.hidden_dim))
trunc_normal_(self.no_obj_ptr, std=0.02)
self.use_mlp_for_obj_ptr_proj = use_mlp_for_obj_ptr_proj
self.no_obj_embed_spatial = None
if no_obj_embed_spatial:
self.no_obj_embed_spatial = torch.nn.Parameter(torch.zeros(1, self.mem_dim))
trunc_normal_(self.no_obj_embed_spatial, std=0.02)
self._build_sam_heads()
self.max_cond_frames_in_attn = max_cond_frames_in_attn
self.add_all_frames_to_correct_as_cond = True
# Model compilation
if compile_image_encoder:
# Compile the forward function (not the full module) to allow loading checkpoints.
LOGGER.info("Image encoder compilation is enabled. First forward pass will be slow.")
self.image_encoder.forward = torch.compile(
self.image_encoder.forward,
mode="max-autotune",
fullgraph=True,
dynamic=False,
)
@property
def device(self):
return next(self.parameters()).device
def forward(self, *args, **kwargs):
raise NotImplementedError(
"Please use the corresponding methods in SAM2VideoPredictor for inference."
"See notebooks/video_predictor_example.ipynb for an example."
)
def _build_sam_heads(self):
self.sam_prompt_embed_dim = self.hidden_dim
self.sam_image_embedding_size = self.image_size // self.backbone_stride
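# e.g. (illustrative) the default image_size=512 with backbone_stride=16 gives a 32x32 SAM image embedding grid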
# Build PromptEncoder and MaskDecoder from SAM (hyperparameters like `mask_in_chans=16` are from SAM code)
self.sam_prompt_encoder = PromptEncoder(
embed_dim=self.sam_prompt_embed_dim,
image_embedding_size=(
self.sam_image_embedding_size,
self.sam_image_embedding_size,
),
input_image_size=(self.image_size, self.image_size),
mask_in_chans=16,
)
self.sam_mask_decoder = SAM2MaskDecoder(
num_multimask_outputs=3,
transformer=SAM2TwoWayTransformer(
depth=2,
embedding_dim=self.sam_prompt_embed_dim,
mlp_dim=2048,
num_heads=8,
),
transformer_dim=self.sam_prompt_embed_dim,
iou_head_depth=3,
iou_head_hidden_dim=256,
use_high_res_features=self.use_high_res_features_in_sam,
iou_prediction_use_sigmoid=self.iou_prediction_use_sigmoid,
pred_obj_scores=self.pred_obj_scores,
pred_obj_scores_mlp=self.pred_obj_scores_mlp,
use_multimask_token_for_obj_ptr=self.use_multimask_token_for_obj_ptr,
**(self.sam_mask_decoder_extra_args or {}),
)
if self.use_obj_ptrs_in_encoder:
# a linear projection on SAM output tokens to turn them into object pointers
self.obj_ptr_proj = torch.nn.Linear(self.hidden_dim, self.hidden_dim)
if self.use_mlp_for_obj_ptr_proj:
self.obj_ptr_proj = MLP(self.hidden_dim, self.hidden_dim, self.hidden_dim, 3)
else:
self.obj_ptr_proj = torch.nn.Identity()
if self.proj_tpos_enc_in_obj_ptrs:
# a linear projection on temporal positional encoding in object pointers to
# avoid potential interference with spatial positional encoding
self.obj_ptr_tpos_proj = torch.nn.Linear(self.hidden_dim, self.mem_dim)
else:
self.obj_ptr_tpos_proj = torch.nn.Identity()
def _forward_sam_heads(
self,
backbone_features,
point_inputs=None,
mask_inputs=None,
high_res_features=None,
multimask_output=False,
):
B = backbone_features.shape[0]
device = backbone_features.device
assert backbone_features.size(1) == self.sam_prompt_embed_dim
assert backbone_features.size(2) == self.sam_image_embedding_size
assert backbone_features.size(3) == self.sam_image_embedding_size
# a) Handle point prompts
if point_inputs is not None:
sam_point_coords = point_inputs["point_coords"]
sam_point_labels = point_inputs["point_labels"]
assert sam_point_coords.shape[0] == B and sam_point_labels.shape[0] == B
else:
# If no points are provided, pad with an empty point (with label -1)
sam_point_coords = torch.zeros(B, 1, 2, device=device, dtype=backbone_features.dtype)
sam_point_labels = -torch.ones(B, 1, dtype=torch.int32, device=device)
# b) Handle mask prompts
if mask_inputs is not None:
# If mask_inputs is provided, downsize it into low-res mask input if needed
# and feed it as a dense mask prompt into the SAM mask encoder
assert len(mask_inputs.shape) == 4 and mask_inputs.shape[:2] == (B, 1)
if mask_inputs.shape[-2:] != self.sam_prompt_encoder.mask_input_size:
sam_mask_prompt = F.interpolate(
mask_inputs.to(backbone_features.dtype),
size=self.sam_prompt_encoder.mask_input_size,
align_corners=False,
mode="bilinear",
antialias=True, # use antialias for downsampling
)
else:
sam_mask_prompt = mask_inputs
else:
# Otherwise, simply feed None (and SAM's prompt encoder will add
# a learned `no_mask_embed` to indicate no mask input in this case).
sam_mask_prompt = None
sparse_embeddings, dense_embeddings = self.sam_prompt_encoder(
points=(sam_point_coords, sam_point_labels),
boxes=None,
masks=sam_mask_prompt,
)
low_res_multimasks, ious, sam_output_tokens, object_score_logits = self.sam_mask_decoder(
image_embeddings=backbone_features,
image_pe=self.sam_prompt_encoder.get_dense_pe(),
sparse_prompt_embeddings=sparse_embeddings,
dense_prompt_embeddings=dense_embeddings,
multimask_output=multimask_output,
repeat_image=False, # the image is already batched
high_res_features=high_res_features,
)
if self.pred_obj_scores:
is_obj_appearing = object_score_logits > 0
# Spatial memory mask is a *hard* choice between obj and no obj, consistent with actual mask prediction
low_res_multimasks = torch.where(is_obj_appearing[:, None, None], low_res_multimasks, NO_OBJ_SCORE)
# convert masks from possibly bfloat16 (or float16) to float32
# (older PyTorch versions before 2.1 don't support `interpolate` on bf16)
high_res_multimasks = F.interpolate(
low_res_multimasks,
size=(self.image_size, self.image_size),
mode="bilinear",
align_corners=False,
)
sam_output_token = sam_output_tokens[:, 0]
if multimask_output:
# take the best mask prediction (with the highest IoU estimation)
best_iou_inds = torch.argmax(ious, dim=-1)
batch_inds = torch.arange(B, device=device)
low_res_masks = low_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1)
high_res_masks = high_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1)
if sam_output_tokens.size(1) > 1:
sam_output_token = sam_output_tokens[batch_inds, best_iou_inds]
else:
low_res_masks, high_res_masks = low_res_multimasks, high_res_multimasks
# Extract object pointer from the SAM output token (with occlusion handling)
obj_ptr = self.obj_ptr_proj(sam_output_token)
if self.pred_obj_scores:
# Allow *soft* no obj ptr, unlike for masks
if self.soft_no_obj_ptr:
lambda_is_obj_appearing = object_score_logits.sigmoid()
else:
lambda_is_obj_appearing = is_obj_appearing.to(obj_ptr.dtype)
if self.fixed_no_obj_ptr:
obj_ptr = lambda_is_obj_appearing * obj_ptr
obj_ptr = obj_ptr + (1 - lambda_is_obj_appearing) * self.no_obj_ptr
return (
low_res_multimasks,
high_res_multimasks,
ious,
low_res_masks,
high_res_masks,
obj_ptr,
object_score_logits,
)
def _use_mask_as_output(self, mask_inputs, backbone_features=None, high_res_features=None):
# Use -10/+10 as logits for neg/pos pixels (very close to 0/1 in prob after sigmoid).
out_scale, out_bias = 20.0, -10.0 # sigmoid(-10.0)=4.5398e-05
mask_inputs_float = mask_inputs.float()
high_res_masks = mask_inputs_float * out_scale + out_bias
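# e.g. a positive mask pixel (1.0) maps to logit 1.0 * 20.0 - 10.0 = +10 (sigmoid ~0.99995),
# while a background pixel (0.0) maps to logit -10 (sigmoid ~4.54e-05)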
low_res_masks = F.interpolate(
high_res_masks,
size=(high_res_masks.size(-2) // 4, high_res_masks.size(-1) // 4),
align_corners=False,
mode="bilinear",
antialias=True, # use antialias for downsampling
)
# a dummy IoU prediction of all 1's under mask input
ious = mask_inputs.new_ones(mask_inputs.shape[0], 1).float()
if not self.use_obj_ptrs_in_encoder or backbone_features is None or high_res_features is None:
# all zeros as a dummy object pointer (of shape [B, C])
obj_ptr = torch.zeros(mask_inputs.shape[0], self.hidden_dim, device=mask_inputs.device)
else:
# produce an object pointer using the SAM decoder from the mask input
_, _, _, _, _, obj_ptr, _ = self._forward_sam_heads(
backbone_features=backbone_features,
mask_inputs=self.mask_downsample(mask_inputs_float.to(backbone_features.dtype)),
high_res_features=high_res_features,
)
# In this method, we are treating mask_input as output, e.g. using it directly to create spatial mem;
# Below, we follow the same design axiom to use mask_input to decide if obj appears or not instead of relying
# on the object_scores from the SAM decoder.
is_obj_appearing = torch.any(mask_inputs.flatten(1).float() > 0.0, dim=1)
is_obj_appearing = is_obj_appearing[..., None]
lambda_is_obj_appearing = is_obj_appearing.float()
object_score_logits = out_scale * lambda_is_obj_appearing + out_bias
if self.pred_obj_scores:
if self.fixed_no_obj_ptr:
obj_ptr = lambda_is_obj_appearing * obj_ptr
obj_ptr = obj_ptr + (1 - lambda_is_obj_appearing) * self.no_obj_ptr
return (
low_res_masks,
high_res_masks,
ious,
low_res_masks,
high_res_masks,
obj_ptr,
object_score_logits,
)
def forward_image(self, img_batch: torch.Tensor):
backbone_out = self.image_encoder(img_batch)
if self.use_high_res_features_in_sam:
# precompute projected level 0 and level 1 features in SAM decoder
# to avoid running it again on every SAM click
backbone_out["backbone_fpn"][0] = self.sam_mask_decoder.conv_s0(backbone_out["backbone_fpn"][0])
backbone_out["backbone_fpn"][1] = self.sam_mask_decoder.conv_s1(backbone_out["backbone_fpn"][1])
return backbone_out
def _prepare_backbone_features(self, backbone_out, batch=1):
if batch > 1: # expand features if there's more than one prompt
backbone_out = {
**backbone_out,
"backbone_fpn": [feat.expand(batch, -1, -1, -1) for feat in backbone_out["backbone_fpn"]],
"vision_pos_enc": [pos.expand(batch, -1, -1, -1) for pos in backbone_out["vision_pos_enc"]],
}
assert len(backbone_out["backbone_fpn"]) == len(backbone_out["vision_pos_enc"])
assert len(backbone_out["backbone_fpn"]) >= self.num_feature_levels
feature_maps = backbone_out["backbone_fpn"][-self.num_feature_levels :]
vision_pos_embeds = backbone_out["vision_pos_enc"][-self.num_feature_levels :]
feat_sizes = [(x.shape[-2], x.shape[-1]) for x in vision_pos_embeds]
# flatten NxCxHxW to HWxNxC
vision_feats = [x.flatten(2).permute(2, 0, 1) for x in feature_maps]
vision_pos_embeds = [x.flatten(2).permute(2, 0, 1) for x in vision_pos_embeds]
return backbone_out, vision_feats, vision_pos_embeds, feat_sizes
def _prepare_memory_conditioned_features(
self,
frame_idx,
is_init_cond_frame,
current_vision_feats,
current_vision_pos_embeds,
feat_sizes,
output_dict,
num_frames,
track_in_reverse=False, # tracking in reverse time order (for demo usage)
):
B = current_vision_feats[-1].size(1) # batch size on this frame
C = self.hidden_dim
H, W = feat_sizes[-1] # top-level (lowest-resolution) feature size
device = current_vision_feats[-1].device
# The case of `self.num_maskmem == 0` below is primarily used for reproducing SAM on images.
# In this case, we skip the fusion with any memory.
if self.num_maskmem == 0: # Disable memory and skip fusion
return current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W)
num_obj_ptr_tokens = 0
tpos_sign_mul = -1 if track_in_reverse else 1
# Step 1: condition the visual features of the current frame on previous memories
if not is_init_cond_frame:
# Retrieve the memories encoded with the maskmem backbone
to_cat_memory, to_cat_memory_pos_embed = [], []
# Add conditioning frame's output first (all cond frames have t_pos=0 for
# when getting temporal positional embedding below)
assert len(output_dict["cond_frame_outputs"]) > 0
# Select a maximum number of temporally closest cond frames for cross attention
cond_outputs = output_dict["cond_frame_outputs"]
selected_cond_outputs, unselected_cond_outputs = select_closest_cond_frames(
frame_idx, cond_outputs, self.max_cond_frames_in_attn
)
t_pos_and_prevs = [(0, out) for out in selected_cond_outputs.values()]
# Add last (self.num_maskmem - 1) frames before current frame for non-conditioning memory
# the earliest one has t_pos=1 and the latest one has t_pos=self.num_maskmem-1
# We also allow taking the memory frame non-consecutively (with r>1), in which case
# we take (self.num_maskmem - 2) frames among every r-th frames plus the last frame.
r = 1 if self.training else self.memory_temporal_stride_for_eval
for t_pos in range(1, self.num_maskmem):
t_rel = self.num_maskmem - t_pos # how many frames before current frame
if t_rel == 1:
# for t_rel == 1, we take the last frame (regardless of r)
prev_frame_idx = frame_idx + t_rel if track_in_reverse else frame_idx - t_rel
elif not track_in_reverse:
# first find the nearest frame among every r-th frames before this frame
# for r=1, this would be (frame_idx - 2)
prev_frame_idx = ((frame_idx - 2) // r) * r
# then seek further among every r-th frames
prev_frame_idx = prev_frame_idx - (t_rel - 2) * r
else:
# first find the nearest frame among every r-th frames after this frame
# for r=1, this would be (frame_idx + 2)
prev_frame_idx = -(-(frame_idx + 2) // r) * r
# then seek further among every r-th frames
prev_frame_idx = prev_frame_idx + (t_rel - 2) * r
out = output_dict["non_cond_frame_outputs"].get(prev_frame_idx, None)
if out is None:
# If an unselected conditioning frame is among the last (self.num_maskmem - 1)
# frames, we still attend to it as if it's a non-conditioning frame.
out = unselected_cond_outputs.get(prev_frame_idx, None)
t_pos_and_prevs.append((t_pos, out))
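# Illustrative selection (assumed values): with num_maskmem=7, stride r=2 and frame_idx=20 tracking forward,
# t_rel=1 picks frame 19 and t_rel=2..6 pick frames 18, 16, 14, 12 and 10 via the rules above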
for t_pos, prev in t_pos_and_prevs:
if prev is None:
continue # skip padding frames
# "maskmem_features" might have been offloaded to CPU in demo use cases,
# so we load it back to inference device (it's a no-op if it's already on device).
feats = prev["maskmem_features"].to(device=device, non_blocking=device.type == "cuda")
to_cat_memory.append(feats.flatten(2).permute(2, 0, 1))
# Spatial positional encoding (it might have been offloaded to CPU in eval)
maskmem_enc = prev["maskmem_pos_enc"][-1].to(device=device)
maskmem_enc = maskmem_enc.flatten(2).permute(2, 0, 1)
# Temporal positional encoding
maskmem_enc = maskmem_enc + self.maskmem_tpos_enc[self.num_maskmem - t_pos - 1]
to_cat_memory_pos_embed.append(maskmem_enc)
# Construct the list of past object pointers
if self.use_obj_ptrs_in_encoder:
max_obj_ptrs_in_encoder = min(num_frames, self.max_obj_ptrs_in_encoder)
# First add those object pointers from selected conditioning frames
# (optionally, only include object pointers in the past during evaluation)
if not self.training and self.only_obj_ptrs_in_the_past_for_eval:
ptr_cond_outputs = {
t: out
for t, out in selected_cond_outputs.items()
if (t >= frame_idx if track_in_reverse else t <= frame_idx)
}
else:
ptr_cond_outputs = selected_cond_outputs
pos_and_ptrs = [
# Temporal pos encoding contains how far away each pointer is from current frame
(
(
(frame_idx - t) * tpos_sign_mul
if self.use_signed_tpos_enc_to_obj_ptrs
else abs(frame_idx - t)
),
out["obj_ptr"],
)
for t, out in ptr_cond_outputs.items()
]
# Add up to (max_obj_ptrs_in_encoder - 1) non-conditioning frames before current frame
for t_diff in range(1, max_obj_ptrs_in_encoder):
t = frame_idx + t_diff if track_in_reverse else frame_idx - t_diff
if t < 0 or (num_frames is not None and t >= num_frames):
break
out = output_dict["non_cond_frame_outputs"].get(t, unselected_cond_outputs.get(t, None))
if out is not None:
pos_and_ptrs.append((t_diff, out["obj_ptr"]))
# If we have at least one object pointer, add them to the cross attention
if pos_and_ptrs:
pos_list, ptrs_list = zip(*pos_and_ptrs)
# stack object pointers along dim=0 into [ptr_seq_len, B, C] shape
obj_ptrs = torch.stack(ptrs_list, dim=0)
# a temporal positional embedding based on how far each object pointer is from
# the current frame (sine embedding normalized by the max pointer num).
if self.add_tpos_enc_to_obj_ptrs:
t_diff_max = max_obj_ptrs_in_encoder - 1
tpos_dim = C if self.proj_tpos_enc_in_obj_ptrs else self.mem_dim
obj_pos = torch.tensor(pos_list, device=device, dtype=current_vision_feats[-1].dtype)
obj_pos = get_1d_sine_pe(obj_pos / t_diff_max, dim=tpos_dim)
obj_pos = self.obj_ptr_tpos_proj(obj_pos)
obj_pos = obj_pos.unsqueeze(1).expand(-1, B, self.mem_dim)
else:
obj_pos = obj_ptrs.new_zeros(len(pos_list), B, self.mem_dim)
if self.mem_dim < C:
# split a pointer into (C // self.mem_dim) tokens for self.mem_dim < C
obj_ptrs = obj_ptrs.reshape(-1, B, C // self.mem_dim, self.mem_dim)
obj_ptrs = obj_ptrs.permute(0, 2, 1, 3).flatten(0, 1)
obj_pos = obj_pos.repeat_interleave(C // self.mem_dim, dim=0)
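# e.g. (typical dims, for illustration) with C=256 and mem_dim=64 each pointer becomes
# 4 tokens: obj_ptrs (N, B, 256) -> (4N, B, 64), and obj_pos is repeated to match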
to_cat_memory.append(obj_ptrs)
to_cat_memory_pos_embed.append(obj_pos)
num_obj_ptr_tokens = obj_ptrs.shape[0]
else:
num_obj_ptr_tokens = 0
else:
# for initial conditioning frames, encode them without using any previous memory
if self.directly_add_no_mem_embed:
# directly add no-mem embedding (instead of using the transformer encoder)
pix_feat_with_mem = current_vision_feats[-1] + self.no_mem_embed
pix_feat_with_mem = pix_feat_with_mem.permute(1, 2, 0).view(B, C, H, W)
return pix_feat_with_mem
# Use a dummy token on the first frame (to avoid empty memory input to transformer encoder)
to_cat_memory = [self.no_mem_embed.expand(1, B, self.mem_dim)]
to_cat_memory_pos_embed = [self.no_mem_pos_enc.expand(1, B, self.mem_dim)]
# Step 2: Concatenate the memories and forward through the transformer encoder
memory = torch.cat(to_cat_memory, dim=0)
memory_pos_embed = torch.cat(to_cat_memory_pos_embed, dim=0)
pix_feat_with_mem = self.memory_attention(
curr=current_vision_feats,
curr_pos=current_vision_pos_embeds,
memory=memory,
memory_pos=memory_pos_embed,
num_obj_ptr_tokens=num_obj_ptr_tokens,
)
# Reshape output (HW)BC => BCHW
pix_feat_with_mem = pix_feat_with_mem.permute(1, 2, 0).view(B, C, H, W)
return pix_feat_with_mem
def _encode_new_memory(
self,
current_vision_feats,
feat_sizes,
pred_masks_high_res,
object_score_logits,
is_mask_from_pts,
):
B = current_vision_feats[-1].size(1) # batch size on this frame
C = self.hidden_dim
H, W = feat_sizes[-1] # top-level (lowest-resolution) feature size
# top-level feature, (HW)BC => BCHW
pix_feat = current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W)
if self.non_overlap_masks_for_mem_enc and not self.training:
# optionally, apply non-overlapping constraints to the masks (it's applied
# in the batch dimension and should only be used during eval, where all
# the objects come from the same video under batch size 1).
pred_masks_high_res = self._apply_non_overlapping_constraints(pred_masks_high_res)
# scale the raw mask logits with a temperature before applying sigmoid
binarize = self.binarize_mask_from_pts_for_mem_enc and is_mask_from_pts
if binarize and not self.training:
mask_for_mem = (pred_masks_high_res > 0).to(pix_feat.dtype)
else:
# apply sigmoid on the raw mask logits to turn them into range (0, 1)
mask_for_mem = torch.sigmoid(pred_masks_high_res)
# apply scale and bias terms to the sigmoid probabilities
if self.sigmoid_scale_for_mem_enc != 1.0:
mask_for_mem = mask_for_mem * self.sigmoid_scale_for_mem_enc
if self.sigmoid_bias_for_mem_enc != 0.0:
mask_for_mem = mask_for_mem + self.sigmoid_bias_for_mem_enc
maskmem_out = self.memory_encoder(pix_feat, mask_for_mem, skip_mask_sigmoid=True) # sigmoid already applied
maskmem_features = maskmem_out["vision_features"]
# add a no-object embedding to the spatial memory to indicate that the frame
# is predicted to be occluded (i.e. no object is appearing in the frame)
if self.no_obj_embed_spatial is not None:
is_obj_appearing = (object_score_logits > 0).float()
maskmem_features += (1 - is_obj_appearing[..., None, None]) * self.no_obj_embed_spatial[
..., None, None
].expand(*maskmem_features.shape)
return maskmem_features, maskmem_out["vision_pos_enc"]
def _track_step(
self,
frame_idx,
is_init_cond_frame,
current_vision_feats,
current_vision_pos_embeds,
feat_sizes,
point_inputs,
mask_inputs,
output_dict,
num_frames,
track_in_reverse,
prev_sam_mask_logits,
):
# High-resolution feature maps for the SAM head, reshape (HW)BC => BCHW
if len(current_vision_feats) > 1:
high_res_features = [
x.permute(1, 2, 0).view(x.size(1), x.size(2), *s)
for x, s in zip(current_vision_feats[:-1], feat_sizes[:-1])
]
else:
high_res_features = None
if mask_inputs is not None and self.use_mask_input_as_output_without_sam:
# When use_mask_input_as_output_without_sam=True, we directly output the mask input
# (see it as a GT mask) without using a SAM prompt encoder + mask decoder.
pix_feat = current_vision_feats[-1].permute(1, 2, 0)
pix_feat = pix_feat.view(-1, self.hidden_dim, *feat_sizes[-1])
sam_outputs = self._use_mask_as_output(mask_inputs, pix_feat, high_res_features)
else:
# Fuse visual features with previous memory features in the memory bank
pix_feat = self._prepare_memory_conditioned_features(
frame_idx=frame_idx,
is_init_cond_frame=is_init_cond_frame,
current_vision_feats=current_vision_feats[-1:],
current_vision_pos_embeds=current_vision_pos_embeds[-1:],
feat_sizes=feat_sizes[-1:],
output_dict=output_dict,
num_frames=num_frames,
track_in_reverse=track_in_reverse,
)
# apply SAM-style segmentation head
# here we might feed previously predicted low-res SAM mask logits into the SAM mask decoder,
# e.g. in demo where such logits come from earlier interaction instead of correction sampling
# (in this case, any `mask_inputs` shouldn't reach here as they are sent to _use_mask_as_output instead)
if prev_sam_mask_logits is not None:
assert point_inputs is not None and mask_inputs is None
mask_inputs = prev_sam_mask_logits
multimask_output = self._use_multimask(is_init_cond_frame, point_inputs)
sam_outputs = self._forward_sam_heads(
backbone_features=pix_feat,
point_inputs=point_inputs,
mask_inputs=mask_inputs,
high_res_features=high_res_features,
multimask_output=multimask_output,
)
return sam_outputs, high_res_features, pix_feat
def _encode_memory_in_output(
self,
current_vision_feats,
feat_sizes,
point_inputs,
run_mem_encoder,
high_res_masks,
object_score_logits,
current_out,
):
if run_mem_encoder and self.num_maskmem > 0:
maskmem_features, maskmem_pos_enc = self._encode_new_memory(
current_vision_feats=current_vision_feats,
feat_sizes=feat_sizes,
pred_masks_high_res=high_res_masks,
object_score_logits=object_score_logits,
is_mask_from_pts=(point_inputs is not None),
)
current_out["maskmem_features"] = maskmem_features
current_out["maskmem_pos_enc"] = maskmem_pos_enc
else:
current_out["maskmem_features"] = None
current_out["maskmem_pos_enc"] = None
def track_step(
self,
frame_idx,
is_init_cond_frame,
current_vision_feats,
current_vision_pos_embeds,
feat_sizes,
point_inputs,
mask_inputs,
output_dict,
num_frames,
track_in_reverse=False, # tracking in reverse time order (for demo usage)
# Whether to run the memory encoder on the predicted masks. Sometimes we might want
# to skip the memory encoder with `run_mem_encoder=False`. For example,
# in demo we might call `track_step` multiple times for each user click,
# and only encode the memory when the user finalizes their clicks. And in ablation
# settings like SAM training on static images, we don't need the memory encoder.
run_mem_encoder=True,
# The previously predicted SAM mask logits (which can be fed together with new clicks in demo).
prev_sam_mask_logits=None,
):
sam_outputs, _, _ = self._track_step(
frame_idx,
is_init_cond_frame,
current_vision_feats,
current_vision_pos_embeds,
feat_sizes,
point_inputs,
mask_inputs,
output_dict,
num_frames,
track_in_reverse,
prev_sam_mask_logits,
)
_, _, _, low_res_masks, high_res_masks, obj_ptr, object_score_logits = sam_outputs
current_out = {
"pred_masks": low_res_masks,
"pred_masks_high_res": high_res_masks,
"obj_ptr": obj_ptr,
}
if not self.training:
# Only add this in inference (to avoid unused param in activation checkpointing;
# it's mainly used in the demo to encode spatial memories w/ consolidated masks)
current_out["object_score_logits"] = object_score_logits
# Run memory encoder on the predicted mask to encode it into a new memory feature (for use in future frames)
self._encode_memory_in_output(
current_vision_feats,
feat_sizes,
point_inputs,
run_mem_encoder,
high_res_masks,
object_score_logits,
current_out,
)
return current_out
def _use_multimask(self, is_init_cond_frame, point_inputs):
num_pts = 0 if point_inputs is None else point_inputs["point_labels"].size(1)
return (
self.multimask_output_in_sam
and (is_init_cond_frame or self.multimask_output_for_tracking)
and (self.multimask_min_pt_num <= num_pts <= self.multimask_max_pt_num)
)
@staticmethod
def _apply_non_overlapping_constraints(pred_masks):
batch_size = pred_masks.shape[0]
if batch_size == 1:
return pred_masks
device = pred_masks.device
# "max_obj_inds": object index of the object with the highest score at each location
max_obj_inds = torch.argmax(pred_masks, dim=0, keepdim=True)
# "batch_obj_inds": object index of each object slice (along dim 0) in `pred_masks`
batch_obj_inds = torch.arange(batch_size, device=device)[:, None, None, None]
keep = max_obj_inds == batch_obj_inds
# clamp overlapping regions' scores to at most -10.0 so that the foreground regions
# don't overlap (here sigmoid(-10.0)=4.5398e-05)
pred_masks = torch.where(keep, pred_masks, torch.clamp(pred_masks, max=-10.0))
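# Illustrative sketch with hypothetical logits for two objects at two pixels:
# pred_masks [[5.0, -3.0], [2.0, 4.0]] -> [[5.0, -10.0], [-10.0, 4.0]],
# i.e. only the highest-scoring object keeps its logits at each location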
return pred_masks
def set_binarize(self, binarize=False):
self.binarize_mask_from_pts_for_mem_enc = binarize
def set_imgsz(self, imgsz):
if hasattr(self.image_encoder, "set_imgsz"):
self.image_encoder.set_imgsz(imgsz)
self.image_size = imgsz[0]
self.sam_prompt_encoder.input_image_size = imgsz
self.sam_prompt_encoder.image_embedding_size = [
x // self.backbone_stride for x in imgsz
] # image embedding size at the backbone stride (16 for the default ViT patch size)
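# e.g. (hypothetical input) imgsz=(1024, 1024) with backbone_stride=16 gives
# image_embedding_size=[64, 64] and mask_input_size=[256, 256] below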
self.sam_prompt_encoder.mask_input_size = [
x // self.backbone_stride * 4 for x in imgsz
] # mask input size is 4x the image embedding size
self.sam_image_embedding_size = self.image_size // self.backbone_stride # update image embedding size
class SAM3Model(SAM2Model):
def __init__(
self,
image_encoder,
memory_attention,
memory_encoder,
num_maskmem=7,
image_size=1008,
backbone_stride=14,
sigmoid_scale_for_mem_enc=1,
sigmoid_bias_for_mem_enc=0,
binarize_mask_from_pts_for_mem_enc=False,
use_mask_input_as_output_without_sam=False,
max_cond_frames_in_attn=-1,
directly_add_no_mem_embed=False,
use_high_res_features_in_sam=False,
multimask_output_in_sam=False,
multimask_min_pt_num=1,
multimask_max_pt_num=1,
multimask_output_for_tracking=False,
use_multimask_token_for_obj_ptr: bool = False,
iou_prediction_use_sigmoid=False,
memory_temporal_stride_for_eval=1,
non_overlap_masks_for_mem_enc=False,
use_obj_ptrs_in_encoder=False,
max_obj_ptrs_in_encoder=16,
add_tpos_enc_to_obj_ptrs=True,
proj_tpos_enc_in_obj_ptrs=False,
use_signed_tpos_enc_to_obj_ptrs=False,
only_obj_ptrs_in_the_past_for_eval=False,
pred_obj_scores: bool = False,
pred_obj_scores_mlp: bool = False,
fixed_no_obj_ptr: bool = False,
soft_no_obj_ptr: bool = False,
use_mlp_for_obj_ptr_proj: bool = False,
no_obj_embed_spatial: bool = False,
sam_mask_decoder_extra_args=None,
compile_image_encoder: bool = False,
):
super().__init__(
image_encoder,
memory_attention,
memory_encoder,
num_maskmem,
image_size,
backbone_stride,
sigmoid_scale_for_mem_enc,
sigmoid_bias_for_mem_enc,
binarize_mask_from_pts_for_mem_enc,
use_mask_input_as_output_without_sam,
max_cond_frames_in_attn,
directly_add_no_mem_embed,
use_high_res_features_in_sam,
multimask_output_in_sam,
multimask_min_pt_num,
multimask_max_pt_num,
multimask_output_for_tracking,
use_multimask_token_for_obj_ptr,
iou_prediction_use_sigmoid,
memory_temporal_stride_for_eval,
non_overlap_masks_for_mem_enc,
use_obj_ptrs_in_encoder,
max_obj_ptrs_in_encoder,
add_tpos_enc_to_obj_ptrs,
proj_tpos_enc_in_obj_ptrs,
use_signed_tpos_enc_to_obj_ptrs,
only_obj_ptrs_in_the_past_for_eval,
pred_obj_scores,
pred_obj_scores_mlp,
fixed_no_obj_ptr,
soft_no_obj_ptr,
use_mlp_for_obj_ptr_proj,
no_obj_embed_spatial,
sam_mask_decoder_extra_args,
compile_image_encoder,
)
self.sam_mask_decoder = SAM2MaskDecoder(
num_multimask_outputs=3,
transformer=TwoWayTransformer(
depth=2,
embedding_dim=self.sam_prompt_embed_dim,
mlp_dim=2048,
num_heads=8,
),
transformer_dim=self.sam_prompt_embed_dim,
iou_head_depth=3,
iou_head_hidden_dim=256,
use_high_res_features=self.use_high_res_features_in_sam,
iou_prediction_use_sigmoid=self.iou_prediction_use_sigmoid,
pred_obj_scores=self.pred_obj_scores,
pred_obj_scores_mlp=self.pred_obj_scores_mlp,
use_multimask_token_for_obj_ptr=self.use_multimask_token_for_obj_ptr,
**(self.sam_mask_decoder_extra_args or {}),
)
def forward_image(self, img_batch: torch.Tensor):
backbone_out = self.image_encoder.forward_image_sam2(img_batch)
if self.use_high_res_features_in_sam:
# precompute projected level 0 and level 1 features in SAM decoder
# to avoid running it again on every SAM click
backbone_out["backbone_fpn"][0] = self.sam_mask_decoder.conv_s0(backbone_out["backbone_fpn"][0])
backbone_out["backbone_fpn"][1] = self.sam_mask_decoder.conv_s1(backbone_out["backbone_fpn"][1])
return backbone_out
def set_imgsz(self, imgsz: tuple[int, int]):
super().set_imgsz(imgsz)
self.memory_encoder.mask_downsampler.interpol_size = [size // 14 * 16 for size in imgsz]
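# NOTE: assumption — this rescales from the SAM3 backbone stride of 14 (the default
# backbone_stride above) to the mask downsampler's stride-16 grid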
@staticmethod
def _suppress_shrinked_masks(pred_masks, new_pred_masks, shrink_threshold=0.3):
area_before = (pred_masks > 0).sum(dim=(-1, -2))
area_after = (new_pred_masks > 0).sum(dim=(-1, -2))
area_before = torch.clamp(area_before, min=1.0)
area_ratio = area_after / area_before
keep = area_ratio >= shrink_threshold
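# e.g. a mask shrinking from 100 to 20 positive pixels has area_ratio=0.2 < 0.3
# and is fully suppressed below (clamped to at most -10.0)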
keep_mask = keep[..., None, None].expand_as(pred_masks)
pred_masks_after = torch.where(keep_mask, pred_masks, torch.clamp(pred_masks, max=-10.0))
return pred_masks_after
def _suppress_object_pw_area_shrinkage(self, pred_masks):
# Apply pixel-wise non-overlapping constraint based on mask scores
pixel_level_non_overlapping_masks = self._apply_non_overlapping_constraints(pred_masks)
# Fully suppress masks with high shrinkage (probably noisy) based on the pixel wise non-overlapping constraints
# NOTE: The output of this function can be a no op if none of the masks shrink by a large factor.
pred_masks = self._suppress_shrinked_masks(pred_masks, pixel_level_non_overlapping_masks)
return pred_masks | --- +++ @@ -23,6 +23,32 @@
class SAMModel(nn.Module):
+ """Segment Anything Model (SAM) for object segmentation tasks.
+
+ This class combines image encoders, prompt encoders, and mask decoders to predict object masks from images and input
+ prompts.
+
+ Attributes:
+ mask_threshold (float): Threshold value for mask prediction.
+ image_encoder (ImageEncoderViT): Backbone for encoding images into embeddings.
+ prompt_encoder (PromptEncoder): Encoder for various types of input prompts.
+ mask_decoder (MaskDecoder): Predicts object masks from image and prompt embeddings.
+ pixel_mean (torch.Tensor): Mean values for normalizing pixels in the input image.
+ pixel_std (torch.Tensor): Standard deviation values for normalizing pixels in the input image.
+
+ Methods:
+ set_imgsz: Set image size to make model compatible with different image sizes.
+
+ Examples:
+ >>> image_encoder = ImageEncoderViT(...)
+ >>> prompt_encoder = PromptEncoder(...)
+ >>> mask_decoder = MaskDecoder(...)
+ >>> sam_model = SAMModel(image_encoder, prompt_encoder, mask_decoder)
+ >>> # Further usage depends on SAMPredictor class
+
+ Notes:
+ All forward() operations are implemented in the SAMPredictor class.
+ """
mask_threshold: float = 0.0
@@ -34,6 +60,18 @@ pixel_mean: list[float] = (123.675, 116.28, 103.53),
pixel_std: list[float] = (58.395, 57.12, 57.375),
) -> None:
+ """Initialize the SAMModel class to predict object masks from an image and input prompts.
+
+ Args:
+ image_encoder (ImageEncoderViT): The backbone used to encode the image into image embeddings.
+ prompt_encoder (PromptEncoder): Encodes various types of input prompts.
+ mask_decoder (MaskDecoder): Predicts masks from the image embeddings and encoded prompts.
+ pixel_mean (list[float]): Mean values for normalizing pixels in the input image.
+ pixel_std (list[float]): Standard deviation values for normalizing pixels in the input image.
+
+ Notes:
+ All forward() operations moved to SAMPredictor.
+ """
super().__init__()
self.image_encoder = image_encoder
self.prompt_encoder = prompt_encoder
@@ -42,6 +80,7 @@ self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)
def set_imgsz(self, imgsz):
+ """Set image size to make model compatible with different image sizes."""
if hasattr(self.image_encoder, "set_imgsz"):
self.image_encoder.set_imgsz(imgsz)
self.prompt_encoder.input_image_size = imgsz
@@ -50,6 +89,74 @@
class SAM2Model(torch.nn.Module):
+ """SAM2Model class for Segment Anything Model 2 with memory-based video object segmentation capabilities.
+
+ This class extends the functionality of SAM to handle video sequences, incorporating memory mechanisms for temporal
+ consistency and efficient tracking of objects across frames.
+
+ Attributes:
+ mask_threshold (float): Threshold value for mask prediction.
+ image_encoder (ImageEncoderViT): Visual encoder for extracting image features.
+ memory_attention (nn.Module): Module for attending to memory features.
+ memory_encoder (nn.Module): Encoder for generating memory representations.
+ num_maskmem (int): Number of accessible memory frames.
+ image_size (int): Size of input images.
+ backbone_stride (int): Stride of the backbone network output.
+ sam_prompt_embed_dim (int): Dimension of SAM prompt embeddings.
+ sam_image_embedding_size (int): Size of SAM image embeddings.
+ sam_prompt_encoder (PromptEncoder): Encoder for processing input prompts.
+ sam_mask_decoder (SAM2MaskDecoder): Decoder for generating object masks.
+ obj_ptr_proj (nn.Module): Projection layer for object pointers.
+ obj_ptr_tpos_proj (nn.Module): Projection for temporal positional encoding in object pointers.
+ hidden_dim (int): Hidden dimension of the model.
+ mem_dim (int): Memory dimension for encoding features.
+ use_high_res_features_in_sam (bool): Whether to use high-resolution feature maps in the SAM mask decoder.
+ use_obj_ptrs_in_encoder (bool): Whether to cross-attend to object pointers from other frames in the encoder.
+ max_obj_ptrs_in_encoder (int): Maximum number of object pointers from other frames in encoder cross-attention.
+ add_tpos_enc_to_obj_ptrs (bool): Whether to add temporal positional encoding to object pointers.
+ proj_tpos_enc_in_obj_ptrs (bool): Whether to add an extra linear projection layer for temporal positional
+ encoding in object pointers.
+ use_signed_tpos_enc_to_obj_ptrs (bool): Whether to use signed distance in temporal positional encoding.
+ only_obj_ptrs_in_the_past_for_eval (bool): Whether to only attend to object pointers in the past during
+ evaluation.
+ pred_obj_scores (bool): Whether to predict if there is an object in the frame.
+ pred_obj_scores_mlp (bool): Whether to use an MLP to predict object scores.
+ fixed_no_obj_ptr (bool): Whether to have a fixed no-object pointer when there is no object present.
+ soft_no_obj_ptr (bool): Whether to mix in no-object pointer softly for easier recovery and error mitigation.
+ use_mlp_for_obj_ptr_proj (bool): Whether to use MLP for object pointer projection.
+ no_obj_embed_spatial (torch.Tensor | None): No-object embedding for spatial frames.
+ max_cond_frames_in_attn (int): Maximum number of conditioning frames to participate in memory attention.
+ directly_add_no_mem_embed (bool): Whether to directly add no-memory embedding to image feature on the first
+ frame.
+ multimask_output_in_sam (bool): Whether to output multiple masks for the first click on initial conditioning
+ frames.
+ multimask_min_pt_num (int): Minimum number of clicks to use multimask output in SAM.
+ multimask_max_pt_num (int): Maximum number of clicks to use multimask output in SAM.
+ multimask_output_for_tracking (bool): Whether to use multimask output for tracking.
+ use_multimask_token_for_obj_ptr (bool): Whether to use multimask tokens for object pointers.
+ iou_prediction_use_sigmoid (bool): Whether to use sigmoid to restrict IoU prediction to [0-1].
+ memory_temporal_stride_for_eval (int): Memory bank's temporal stride during evaluation.
+ non_overlap_masks_for_mem_enc (bool): Whether to apply non-overlapping constraints on object masks in memory
+ encoder during evaluation.
+ sigmoid_scale_for_mem_enc (float): Scale factor for mask sigmoid probability.
+ sigmoid_bias_for_mem_enc (float): Bias factor for mask sigmoid probability.
+ binarize_mask_from_pts_for_mem_enc (bool): Whether to binarize sigmoid mask logits on interacted frames with
+ clicks during evaluation.
+ use_mask_input_as_output_without_sam (bool): Whether to directly output the input mask without using SAM prompt
+ encoder and mask decoder on frames with mask input.
+
+ Methods:
+ forward_image: Process image batch through encoder to extract multi-level features.
+ track_step: Perform a single tracking step, updating object masks and memory features.
+ set_binarize: Set mask binarization behavior for VideoPredictor.
+ set_imgsz: Set image size to make model compatible with different image sizes.
+
+ Examples:
+ >>> model = SAM2Model(image_encoder, memory_attention, memory_encoder)
+ >>> image_batch = torch.rand(1, 3, 512, 512)
+ >>> features = model.forward_image(image_batch)
+ >>> _, vision_feats, vision_pos_embeds, feat_sizes = model._prepare_backbone_features(features)
+ >>> track_results = model.track_step(0, True, vision_feats, vision_pos_embeds, feat_sizes, None, None, {}, 1)
+ """
mask_threshold: float = 0.0
@@ -91,6 +198,55 @@ sam_mask_decoder_extra_args=None,
compile_image_encoder: bool = False,
):
+ """Initialize the SAM2Model for video object segmentation with memory-based tracking.
+
+ Args:
+ image_encoder (nn.Module): Visual encoder for extracting image features.
+ memory_attention (nn.Module): Module for attending to memory features.
+ memory_encoder (nn.Module): Encoder for generating memory representations.
+ num_maskmem (int): Number of accessible memory frames.
+ image_size (int): Size of input images.
+ backbone_stride (int): Stride of the image backbone output.
+ sigmoid_scale_for_mem_enc (float): Scale factor for mask sigmoid probability.
+ sigmoid_bias_for_mem_enc (float): Bias factor for mask sigmoid probability.
+ binarize_mask_from_pts_for_mem_enc (bool): Whether to binarize sigmoid mask logits on interacted frames with
+ clicks during evaluation.
+ use_mask_input_as_output_without_sam (bool): Whether to directly output the input mask without using SAM
+ prompt encoder and mask decoder on frames with mask input.
+ max_cond_frames_in_attn (int): Maximum number of conditioning frames to participate in memory attention.
+ directly_add_no_mem_embed (bool): Whether to directly add no-memory embedding to image feature on the first
+ frame.
+ use_high_res_features_in_sam (bool): Whether to use high-resolution feature maps in the SAM mask decoder.
+ multimask_output_in_sam (bool): Whether to output multiple masks for the first click on initial conditioning
+ frames.
+ multimask_min_pt_num (int): Minimum number of clicks to use multimask output in SAM.
+ multimask_max_pt_num (int): Maximum number of clicks to use multimask output in SAM.
+ multimask_output_for_tracking (bool): Whether to use multimask output for tracking.
+ use_multimask_token_for_obj_ptr (bool): Whether to use multimask tokens for object pointers.
+ iou_prediction_use_sigmoid (bool): Whether to use sigmoid to restrict IoU prediction to [0-1].
+ memory_temporal_stride_for_eval (int): Memory bank's temporal stride during evaluation.
+ non_overlap_masks_for_mem_enc (bool): Whether to apply non-overlapping constraints on object masks in memory
+ encoder during evaluation.
+ use_obj_ptrs_in_encoder (bool): Whether to cross-attend to object pointers from other frames in the encoder.
+ max_obj_ptrs_in_encoder (int): Maximum number of object pointers from other frames in encoder
+ cross-attention.
+ add_tpos_enc_to_obj_ptrs (bool): Whether to add temporal positional encoding to object pointers in the
+ encoder.
+ proj_tpos_enc_in_obj_ptrs (bool): Whether to add an extra linear projection layer for temporal positional
+ encoding in object pointers.
+ use_signed_tpos_enc_to_obj_ptrs (bool): Whether to use signed distance in the temporal positional encoding
+ in the object pointers.
+ only_obj_ptrs_in_the_past_for_eval (bool): Whether to only attend to object pointers in the past during
+ evaluation.
+ pred_obj_scores (bool): Whether to predict if there is an object in the frame.
+ pred_obj_scores_mlp (bool): Whether to use an MLP to predict object scores.
+ fixed_no_obj_ptr (bool): Whether to have a fixed no-object pointer when there is no object present.
+ soft_no_obj_ptr (bool): Whether to mix in no-object pointer softly for easier recovery and error mitigation.
+ use_mlp_for_obj_ptr_proj (bool): Whether to use MLP for object pointer projection.
+ no_obj_embed_spatial (bool): Whether to add no-object embedding to spatial frames.
+ sam_mask_decoder_extra_args (dict | None): Extra arguments for constructing the SAM mask decoder.
+ compile_image_encoder (bool): Whether to compile the image encoder for faster inference.
+ """
super().__init__()
# Part 1: the image backbone
@@ -188,15 +344,18 @@
@property
def device(self):
+ """Return the device on which the model's parameters are stored."""
return next(self.parameters()).device
def forward(self, *args, **kwargs):
+ """Process image and prompt inputs to generate object masks and scores in video sequences."""
raise NotImplementedError(
"Please use the corresponding methods in SAM2VideoPredictor for inference."
"See notebooks/video_predictor_example.ipynb for an example."
)
def _build_sam_heads(self):
+ """Build SAM-style prompt encoder and mask decoder for image segmentation tasks."""
self.sam_prompt_embed_dim = self.hidden_dim
self.sam_image_embedding_size = self.image_size // self.backbone_stride
@@ -250,6 +409,47 @@ high_res_features=None,
multimask_output=False,
):
+ """Forward pass through SAM prompt encoders and mask heads.
+
+ This method processes image features and optional point/mask inputs to generate object masks and scores.
+
+ Args:
+ backbone_features (torch.Tensor): Image features with shape (B, C, H, W).
+ point_inputs (dict[str, torch.Tensor] | None): Dictionary containing point prompts with keys 'point_coords'
+ (Tensor of shape (B, P, 2) with float32 dtype, containing absolute pixel-unit coordinates in (x, y)
+ format for P input points) and 'point_labels' (Tensor of shape (B, P) with int32 dtype, where 1 means
+ positive clicks, 0 means negative clicks, and -1 means padding).
+ mask_inputs (torch.Tensor | None): Mask of shape (B, 1, H*16, W*16), float or bool, with the same spatial
+ size as the image.
+ high_res_features (list[torch.Tensor] | None): List of two feature maps with shapes (B, C, 4*H, 4*W) and (B,
+ C, 2*H, 2*W) respectively, used as high-resolution feature maps for SAM decoder.
+ multimask_output (bool): If True, output 3 candidate masks and their IoU estimates; if False, output only 1
+ mask and its IoU estimate.
+
+ Returns:
+ low_res_multimasks (torch.Tensor): Tensor of shape (B, M, H*4, W*4) with SAM output mask logits.
+ high_res_multimasks (torch.Tensor): Tensor of shape (B, M, H*16, W*16) with upsampled mask logits.
+ ious (torch.Tensor): Tensor of shape (B, M) with estimated IoU for each output mask.
+ low_res_masks (torch.Tensor): Tensor of shape (B, 1, H*4, W*4) with the best low-resolution mask.
+ high_res_masks (torch.Tensor): Tensor of shape (B, 1, H*16, W*16) with the best high-resolution mask.
+ obj_ptr (torch.Tensor): Tensor of shape (B, C) with object pointer vector for the output mask.
+ object_score_logits (torch.Tensor): Tensor of shape (B, 1) with object score logits.
+
+ Examples:
+ >>> backbone_features = torch.rand(1, 256, 32, 32)
+ >>> point_inputs = {"point_coords": torch.rand(1, 2, 2), "point_labels": torch.tensor([[1, 0]])}
+ >>> mask_inputs = torch.rand(1, 1, 512, 512)
+ >>> results = model._forward_sam_heads(backbone_features, point_inputs, mask_inputs)
+ >>> (
+ ... low_res_multimasks,
+ ... high_res_multimasks,
+ ... ious,
+ ... low_res_masks,
+ ... high_res_masks,
+ ... obj_ptr,
+ ... object_score_logits,
+ ... ) = results
+ """
B = backbone_features.shape[0]
device = backbone_features.device
assert backbone_features.size(1) == self.sam_prompt_embed_dim
@@ -350,6 +550,7 @@ )
def _use_mask_as_output(self, mask_inputs, backbone_features=None, high_res_features=None):
+ """Process mask inputs directly as output, bypassing SAM encoder/decoder."""
# Use -10/+10 as logits for neg/pos pixels (very close to 0/1 in prob after sigmoid).
out_scale, out_bias = 20.0, -10.0 # sigmoid(-10.0)=4.5398e-05
mask_inputs_float = mask_inputs.float()
@@ -396,6 +597,7 @@ )
def forward_image(self, img_batch: torch.Tensor):
+ """Process image batch through encoder to extract multi-level features for SAM model."""
backbone_out = self.image_encoder(img_batch)
if self.use_high_res_features_in_sam:
# precompute projected level 0 and level 1 features in SAM decoder
@@ -405,6 +607,7 @@ return backbone_out
def _prepare_backbone_features(self, backbone_out, batch=1):
+ """Prepare and flatten visual features from the image backbone output for further processing."""
if batch > 1: # expand features if there's more than one prompt
backbone_out = {
**backbone_out,
@@ -434,6 +637,7 @@ num_frames,
track_in_reverse=False, # tracking in reverse time order (for demo usage)
):
+ """Prepare memory-conditioned features by fusing current frame's visual features with previous memories."""
B = current_vision_feats[-1].size(1) # batch size on this frame
C = self.hidden_dim
H, W = feat_sizes[-1] # top-level (lowest-resolution) feature size
@@ -594,6 +798,7 @@ object_score_logits,
is_mask_from_pts,
):
+ """Encode frame features and masks into a new memory representation for video segmentation."""
B = current_vision_feats[-1].size(1) # batch size on this frame
C = self.hidden_dim
H, W = feat_sizes[-1] # top-level (lowest-resolution) feature size
@@ -642,6 +847,7 @@ track_in_reverse,
prev_sam_mask_logits,
):
+ """Perform a single tracking step, updating object masks and memory features based on current frame inputs."""
# High-resolution feature maps for the SAM head, reshape (HW)BC => BCHW
if len(current_vision_feats) > 1:
high_res_features = [
@@ -695,6 +901,7 @@ object_score_logits,
current_out,
):
+ """Run memory encoder on predicted mask to encode it into a new memory feature for future frames."""
if run_mem_encoder and self.num_maskmem > 0:
maskmem_features, maskmem_pos_enc = self._encode_new_memory(
current_vision_feats=current_vision_feats,
@@ -730,6 +937,7 @@ # The previously predicted SAM mask logits (which can be fed together with new clicks in demo).
prev_sam_mask_logits=None,
):
+ """Perform a single tracking step, updating object masks and memory features based on current frame inputs."""
sam_outputs, _, _ = self._track_step(
frame_idx,
is_init_cond_frame,
@@ -769,6 +977,7 @@ return current_out
def _use_multimask(self, is_init_cond_frame, point_inputs):
+ """Determine whether to use multiple mask outputs in the SAM head based on configuration and inputs."""
num_pts = 0 if point_inputs is None else point_inputs["point_labels"].size(1)
return (
self.multimask_output_in_sam
@@ -778,6 +987,7 @@
@staticmethod
def _apply_non_overlapping_constraints(pred_masks):
+ """Apply non-overlapping constraints to masks, keeping the highest scoring object per location."""
batch_size = pred_masks.shape[0]
if batch_size == 1:
return pred_masks
@@ -794,9 +1004,11 @@ return pred_masks
def set_binarize(self, binarize=False):
+ """Set binarize for VideoPredictor."""
self.binarize_mask_from_pts_for_mem_enc = binarize
def set_imgsz(self, imgsz):
+ """Set image size to make model compatible with different image sizes."""
if hasattr(self.image_encoder, "set_imgsz"):
self.image_encoder.set_imgsz(imgsz)
self.image_size = imgsz[0]
@@ -811,6 +1023,7 @@
class SAM3Model(SAM2Model):
+ """SAM3Model class for Segment Anything Model 3 with memory-based video object segmentation capabilities."""
def __init__(
self,
@@ -850,6 +1063,7 @@ sam_mask_decoder_extra_args=None,
compile_image_encoder: bool = False,
):
+ """SAM3Model class for Segment Anything Model 3 with memory-based video object segmentation capabilities."""
super().__init__(
image_encoder,
memory_attention,
@@ -907,6 +1121,7 @@ )
def forward_image(self, img_batch: torch.Tensor):
+ """Process image batch through encoder to extract multi-level features for SAM model."""
backbone_out = self.image_encoder.forward_image_sam2(img_batch)
if self.use_high_res_features_in_sam:
# precompute projected level 0 and level 1 features in SAM decoder
@@ -916,11 +1131,13 @@ return backbone_out
def set_imgsz(self, imgsz: tuple[int, int]):
+ """Set the image size for the model and mask downsampler."""
super().set_imgsz(imgsz)
self.memory_encoder.mask_downsampler.interpol_size = [size // 14 * 16 for size in imgsz]
@staticmethod
def _suppress_shrinked_masks(pred_masks, new_pred_masks, shrink_threshold=0.3):
+ """Suppress masks that shrink in area after applying pixelwise non-overlapping constraints."""
area_before = (pred_masks > 0).sum(dim=(-1, -2))
area_after = (new_pred_masks > 0).sum(dim=(-1, -2))
area_before = torch.clamp(area_before, min=1.0)
@@ -931,9 +1148,12 @@ return pred_masks_after
def _suppress_object_pw_area_shrinkage(self, pred_masks):
+ """This function suppresses masks that shrink in area after applying pixelwise non-overlapping constraints. Note
+ that the final output can still be overlapping.
+ """
# Apply pixel-wise non-overlapping constraint based on mask scores
pixel_level_non_overlapping_masks = self._apply_non_overlapping_constraints(pred_masks)
# Fully suppress masks with high shrinkage (probably noisy) based on the pixel wise non-overlapping constraints
# NOTE: The output of this function can be a no op if none of the masks shrink by a large factor.
pred_masks = self._suppress_shrinked_masks(pred_masks, pixel_level_non_overlapping_masks)
- return pred_masks
+ return pred_masks
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/sam/modules/sam.py |
Write reusable docstrings | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import json
from collections import defaultdict
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from typing import Any
import cv2
import numpy as np
import torch
from PIL import Image
from torch.utils.data import ConcatDataset
from ultralytics.utils import LOCAL_RANK, LOGGER, NUM_THREADS, TQDM, colorstr
from ultralytics.utils.instance import Instances
from ultralytics.utils.ops import resample_segments, segments2boxes
from ultralytics.utils.torch_utils import TORCHVISION_0_18
from .augment import (
Compose,
Format,
LetterBox,
RandomLoadText,
classify_augmentations,
classify_transforms,
v8_transforms,
)
from .base import BaseDataset
from .converter import merge_multi_segment
from .utils import (
HELP_URL,
check_file_speeds,
get_hash,
img2label_paths,
load_dataset_cache_file,
save_dataset_cache_file,
verify_image,
verify_image_label,
)
# Ultralytics dataset *.cache version, >= 1.0.0 for Ultralytics YOLO models
DATASET_CACHE_VERSION = "1.0.3"
class YOLODataset(BaseDataset):
def __init__(self, *args, data: dict | None = None, task: str = "detect", **kwargs):
self.use_segments = task == "segment"
self.use_keypoints = task == "pose"
self.use_obb = task == "obb"
self.data = data
assert not (self.use_segments and self.use_keypoints), "Cannot use both segments and keypoints."
super().__init__(*args, channels=self.data.get("channels", 3), **kwargs)
def cache_labels(self, path: Path = Path("./labels.cache")) -> dict:
x = {"labels": []}
nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages
desc = f"{self.prefix}Scanning {path.parent / path.stem}..."
total = len(self.im_files)
nkpt, ndim = self.data.get("kpt_shape", (0, 0))
if self.use_keypoints and (nkpt <= 0 or ndim not in {2, 3}):
raise ValueError(
"'kpt_shape' in data.yaml missing or incorrect. Should be a list with [number of "
"keypoints, number of dims (2 for x,y or 3 for x,y,visible)], i.e. 'kpt_shape: [17, 3]'"
)
with ThreadPool(NUM_THREADS) as pool:
results = pool.imap(
func=verify_image_label,
iterable=zip(
self.im_files,
self.label_files,
repeat(self.prefix),
repeat(self.use_keypoints),
repeat(len(self.data["names"])),
repeat(nkpt),
repeat(ndim),
repeat(self.single_cls),
),
)
pbar = TQDM(results, desc=desc, total=total)
for im_file, lb, shape, segments, keypoint, nm_f, nf_f, ne_f, nc_f, msg in pbar:
nm += nm_f
nf += nf_f
ne += ne_f
nc += nc_f
if im_file:
x["labels"].append(
{
"im_file": im_file,
"shape": shape,
"cls": lb[:, 0:1], # n, 1
"bboxes": lb[:, 1:], # n, 4
"segments": segments,
"keypoints": keypoint,
"normalized": True,
"bbox_format": "xywh",
}
)
if msg:
msgs.append(msg)
pbar.desc = f"{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt"
pbar.close()
if msgs:
LOGGER.info("\n".join(msgs))
if nf == 0:
LOGGER.warning(f"{self.prefix}No labels found in {path}. {HELP_URL}")
x["hash"] = get_hash(self.label_files + self.im_files)
x["results"] = nf, nm, ne, nc, len(self.im_files)
x["msgs"] = msgs # warnings
save_dataset_cache_file(self.prefix, path, x, DATASET_CACHE_VERSION)
return x
def get_labels(self) -> list[dict]:
self.label_files = img2label_paths(self.im_files)
cache_path = Path(self.label_files[0]).parent.with_suffix(".cache")
try:
cache, exists = load_dataset_cache_file(cache_path), True # attempt to load a *.cache file
assert cache["version"] == DATASET_CACHE_VERSION # matches current version
assert cache["hash"] == get_hash(self.label_files + self.im_files) # identical hash
except (FileNotFoundError, AssertionError, AttributeError, ModuleNotFoundError):
cache, exists = self.cache_labels(cache_path), False # run cache ops
# Display cache
nf, nm, ne, nc, n = cache.pop("results") # found, missing, empty, corrupt, total
if exists and LOCAL_RANK in {-1, 0}:
d = f"Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt"
TQDM(None, desc=self.prefix + d, total=n, initial=n) # display results
if cache["msgs"]:
LOGGER.info("\n".join(cache["msgs"])) # display warnings
# Read cache
[cache.pop(k) for k in ("hash", "version", "msgs")] # remove items
labels = cache["labels"]
if not labels:
raise RuntimeError(
f"No valid images found in {cache_path}. Images with incorrectly formatted labels are ignored. {HELP_URL}"
)
self.im_files = [lb["im_file"] for lb in labels] # update im_files
# Check if the dataset is all boxes or all segments
lengths = ((len(lb["cls"]), len(lb["bboxes"]), len(lb["segments"])) for lb in labels)
len_cls, len_boxes, len_segments = (sum(x) for x in zip(*lengths))
if len_segments and len_boxes != len_segments:
LOGGER.warning(
f"Box and segment counts should be equal, but got len(segments) = {len_segments}, "
f"len(boxes) = {len_boxes}. To resolve this only boxes will be used and all segments will be removed. "
"To avoid this please supply either a detect or segment dataset, not a detect-segment mixed dataset."
)
for lb in labels:
lb["segments"] = []
if len_cls == 0:
LOGGER.warning(f"Labels are missing or empty in {cache_path}, training may not work correctly. {HELP_URL}")
return labels
def build_transforms(self, hyp: dict | None = None) -> Compose:
if self.augment:
hyp.mosaic = hyp.mosaic if self.augment and not self.rect else 0.0
hyp.mixup = hyp.mixup if self.augment and not self.rect else 0.0
hyp.cutmix = hyp.cutmix if self.augment and not self.rect else 0.0
transforms = v8_transforms(self, self.imgsz, hyp)
else:
transforms = Compose([LetterBox(new_shape=(self.imgsz, self.imgsz), scaleup=False)])
transforms.append(
Format(
bbox_format="xywh",
normalize=True,
return_mask=self.use_segments,
return_keypoint=self.use_keypoints,
return_obb=self.use_obb,
batch_idx=True,
mask_ratio=hyp.mask_ratio,
mask_overlap=hyp.overlap_mask,
bgr=hyp.bgr if self.augment else 0.0, # only affect training.
)
)
return transforms
def close_mosaic(self, hyp: dict) -> None:
hyp.mosaic = 0.0
hyp.copy_paste = 0.0
hyp.mixup = 0.0
hyp.cutmix = 0.0
self.transforms = self.build_transforms(hyp)
def update_labels_info(self, label: dict) -> dict:
bboxes = label.pop("bboxes")
segments = label.pop("segments", [])
keypoints = label.pop("keypoints", None)
bbox_format = label.pop("bbox_format")
normalized = label.pop("normalized")
# NOTE: do NOT resample oriented boxes
segment_resamples = 100 if self.use_obb else 1000
if len(segments) > 0:
# make sure segments interpolate correctly if original length is greater than segment_resamples
max_len = max(len(s) for s in segments)
segment_resamples = (max_len + 1) if segment_resamples < max_len else segment_resamples
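# e.g. polygons of 12, 37 and 801 points with segment_resamples=1000 are all
# resampled to (1000, 2) so they can be stacked into one (num_samples, 1000, 2) array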
# list[np.array(segment_resamples, 2)] * num_samples
segments = np.stack(resample_segments(segments, n=segment_resamples), axis=0)
else:
segments = np.zeros((0, segment_resamples, 2), dtype=np.float32)
label["instances"] = Instances(bboxes, segments, keypoints, bbox_format=bbox_format, normalized=normalized)
return label
@staticmethod
def collate_fn(batch: list[dict]) -> dict:
new_batch = {}
batch = [dict(sorted(b.items())) for b in batch] # make sure the keys are in the same order
keys = batch[0].keys()
values = list(zip(*[list(b.values()) for b in batch]))
for i, k in enumerate(keys):
value = values[i]
if k in {"img", "text_feats", "sem_masks"}:
value = torch.stack(value, 0)
elif k == "visuals":
value = torch.nn.utils.rnn.pad_sequence(value, batch_first=True)
if k in {"masks", "keypoints", "bboxes", "cls", "segments", "obb"}:
value = torch.cat(value, 0)
new_batch[k] = value
new_batch["batch_idx"] = list(new_batch["batch_idx"])
for i in range(len(new_batch["batch_idx"])):
new_batch["batch_idx"][i] += i # add target image index for build_targets()
new_batch["batch_idx"] = torch.cat(new_batch["batch_idx"], 0)
return new_batch
class YOLOMultiModalDataset(YOLODataset):
def __init__(self, *args, data: dict | None = None, task: str = "detect", **kwargs):
super().__init__(*args, data=data, task=task, **kwargs)
def update_labels_info(self, label: dict) -> dict:
labels = super().update_labels_info(label)
# NOTE: some categories are concatenated with their synonyms by "/".
# NOTE: `RandomLoadText` randomly selects one of them if there are multiple words.
labels["texts"] = [v.split("/") for _, v in self.data["names"].items()]
return labels
def build_transforms(self, hyp: dict | None = None) -> Compose:
transforms = super().build_transforms(hyp)
if self.augment:
# NOTE: hard-coded the args for now.
# NOTE: this implementation differs from the official YOLOE: negative-text selection
# here is restricted to a single dataset, while the official implementation
# pre-saves negative embeddings from all datasets at once.
transform = RandomLoadText(
max_samples=min(self.data["nc"], 80),
padding=True,
padding_value=self._get_neg_texts(self.category_freq),
)
transforms.insert(-1, transform)
return transforms
@property
def category_names(self):
names = self.data["names"].values()
return {n.strip() for name in names for n in name.split("/")} # category names
@property
def category_freq(self):
texts = [v.split("/") for v in self.data["names"].values()]
category_freq = defaultdict(int)
for label in self.labels:
for c in label["cls"].squeeze(-1): # to check
text = texts[int(c)]
for t in text:
t = t.strip()
category_freq[t] += 1
return category_freq
@staticmethod
def _get_neg_texts(category_freq: dict, threshold: int = 100) -> list[str]:
threshold = min(max(category_freq.values()), threshold)  # never exceed the most frequent category's count
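# e.g. category_freq={"person": 500, "kite": 30} keeps ["person"] with threshold=100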
return [k for k, v in category_freq.items() if v >= threshold]
class GroundingDataset(YOLODataset):
def __init__(self, *args, task: str = "detect", json_file: str = "", max_samples: int = 80, **kwargs):
assert task in {"detect", "segment"}, "GroundingDataset currently only supports `detect` and `segment` tasks"
self.json_file = json_file
self.max_samples = max_samples
super().__init__(*args, task=task, data={"channels": 3}, **kwargs)
def get_img_files(self, img_path: str) -> list:
return []
def verify_labels(self, labels: list[dict[str, Any]]) -> None:
expected_counts = {
"final_mixed_train_no_coco_segm": 3662412,
"final_mixed_train_no_coco": 3681235,
"final_flickr_separateGT_train_segm": 638214,
"final_flickr_separateGT_train": 640704,
}
instance_count = sum(label["bboxes"].shape[0] for label in labels)
for data_name, count in expected_counts.items():
if data_name in self.json_file:
assert instance_count == count, f"'{self.json_file}' has {instance_count} instances, expected {count}."
return
LOGGER.warning(f"Skipping instance count verification for unrecognized dataset '{self.json_file}'")
def cache_labels(self, path: Path = Path("./labels.cache")) -> dict[str, Any]:
x = {"labels": []}
LOGGER.info("Loading annotation file...")
with open(self.json_file) as f:
annotations = json.load(f)
images = {f"{x['id']:d}": x for x in annotations["images"]}
img_to_anns = defaultdict(list)
for ann in annotations["annotations"]:
img_to_anns[ann["image_id"]].append(ann)
for img_id, anns in TQDM(img_to_anns.items(), desc=f"Reading annotations {self.json_file}"):
img = images[f"{img_id:d}"]
h, w, f = img["height"], img["width"], img["file_name"]
im_file = Path(self.img_path) / f
if not im_file.exists():
continue
self.im_files.append(str(im_file))
bboxes = []
segments = []
cat2id = {}
texts = []
for ann in anns:
if ann["iscrowd"]:
continue
box = np.array(ann["bbox"], dtype=np.float32)
box[:2] += box[2:] / 2
box[[0, 2]] /= float(w)
box[[1, 3]] /= float(h)
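# e.g. (hypothetical) COCO bbox [10, 20, 30, 40] in a 100x200 image becomes
# normalized cxcywh [0.25, 0.2, 0.3, 0.2] after the two steps above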
if box[2] <= 0 or box[3] <= 0:
continue
caption = img["caption"]
cat_name = " ".join([caption[t[0] : t[1]] for t in ann["tokens_positive"]]).lower().strip()
if not cat_name:
continue
if cat_name not in cat2id:
cat2id[cat_name] = len(cat2id)
texts.append([cat_name])
cls = cat2id[cat_name] # class
box = [cls, *box.tolist()]
if box not in bboxes:
bboxes.append(box)
if ann.get("segmentation") is not None:
if len(ann["segmentation"]) == 0:
segments.append(box)
continue
elif len(ann["segmentation"]) > 1:
s = merge_multi_segment(ann["segmentation"])
s = (np.concatenate(s, axis=0) / np.array([w, h], dtype=np.float32)).reshape(-1).tolist()
else:
s = [j for i in ann["segmentation"] for j in i] # all segments concatenated
s = (
(np.array(s, dtype=np.float32).reshape(-1, 2) / np.array([w, h], dtype=np.float32))
.reshape(-1)
.tolist()
)
s = [cls, *s]
segments.append(s)
lb = np.array(bboxes, dtype=np.float32) if len(bboxes) else np.zeros((0, 5), dtype=np.float32)
if segments:
classes = np.array([x[0] for x in segments], dtype=np.float32)
segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in segments] # (cls, xy1...)
lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh)
lb = np.array(lb, dtype=np.float32)
x["labels"].append(
{
"im_file": im_file,
"shape": (h, w),
"cls": lb[:, 0:1], # n, 1
"bboxes": lb[:, 1:], # n, 4
"segments": segments,
"normalized": True,
"bbox_format": "xywh",
"texts": texts,
}
)
x["hash"] = get_hash(self.json_file)
save_dataset_cache_file(self.prefix, path, x, DATASET_CACHE_VERSION)
return x
def get_labels(self) -> list[dict]:
cache_path = Path(self.json_file).with_suffix(".cache")
try:
cache = load_dataset_cache_file(cache_path)  # attempt to load a *.cache file
assert cache["version"] == DATASET_CACHE_VERSION # matches current version
assert cache["hash"] == get_hash(self.json_file) # identical hash
except (FileNotFoundError, AssertionError, AttributeError, ModuleNotFoundError):
cache = self.cache_labels(cache_path)  # run cache ops
[cache.pop(k) for k in ("hash", "version")] # remove items
labels = cache["labels"]
self.verify_labels(labels)
self.im_files = [str(label["im_file"]) for label in labels]
if LOCAL_RANK in {-1, 0}:
LOGGER.info(f"Load {self.json_file} from cache file {cache_path}")
return labels
def build_transforms(self, hyp: dict | None = None) -> Compose:
transforms = super().build_transforms(hyp)
if self.augment:
# NOTE: hard-coded the args for now.
# NOTE: this implementation differs from the official YOLOE: negative-text selection
# here is restricted to a single dataset, while the official implementation
# pre-saves negative embeddings from all datasets at once.
transform = RandomLoadText(
max_samples=min(self.max_samples, 80),
padding=True,
padding_value=self._get_neg_texts(self.category_freq),
)
transforms.insert(-1, transform)
return transforms
@property
def category_names(self):
return {t.strip() for label in self.labels for text in label["texts"] for t in text}
@property
def category_freq(self):
category_freq = defaultdict(int)
for label in self.labels:
for text in label["texts"]:
for t in text:
t = t.strip()
category_freq[t] += 1
return category_freq
@staticmethod
def _get_neg_texts(category_freq: dict, threshold: int = 100) -> list[str]:
threshold = min(max(category_freq.values()), threshold)  # never exceed the most frequent category's count
return [k for k, v in category_freq.items() if v >= threshold]
class YOLOConcatDataset(ConcatDataset):
@staticmethod
def collate_fn(batch: list[dict]) -> dict:
return YOLODataset.collate_fn(batch)
def close_mosaic(self, hyp: dict) -> None:
for dataset in self.datasets:
if not hasattr(dataset, "close_mosaic"):
continue
dataset.close_mosaic(hyp)
# TODO: support semantic segmentation
class SemanticDataset(BaseDataset):
def __init__(self):
super().__init__()
class ClassificationDataset:
def __init__(self, root: str, args, augment: bool = False, prefix: str = ""):
import torchvision # scope for faster 'import ultralytics'
# Base class assigned as attribute rather than used as base class to allow for scoping slow torchvision import
if TORCHVISION_0_18: # 'allow_empty' argument first introduced in torchvision 0.18
self.base = torchvision.datasets.ImageFolder(root=root, allow_empty=True)
else:
self.base = torchvision.datasets.ImageFolder(root=root)
self.samples = self.base.samples
self.root = self.base.root
# Initialize attributes
if augment and args.fraction < 1.0: # reduce training fraction
self.samples = self.samples[: round(len(self.samples) * args.fraction)]
self.prefix = colorstr(f"{prefix}: ") if prefix else ""
self.cache_ram = args.cache is True or str(args.cache).lower() == "ram" # cache images into RAM
if self.cache_ram:
LOGGER.warning(
"Classification `cache_ram` training has known memory leak in "
"https://github.com/ultralytics/ultralytics/issues/9824, setting `cache_ram=False`."
)
self.cache_ram = False
self.cache_disk = str(args.cache).lower() == "disk" # cache images on hard drive as uncompressed *.npy files
self.samples = self.verify_images() # filter out bad images
self.samples = [[*list(x), Path(x[0]).with_suffix(".npy"), None] for x in self.samples] # file, index, npy, im
scale = (1.0 - args.scale, 1.0) # (0.08, 1.0)
self.torch_transforms = (
classify_augmentations(
size=args.imgsz,
scale=scale,
hflip=args.fliplr,
vflip=args.flipud,
erasing=args.erasing,
auto_augment=args.auto_augment,
hsv_h=args.hsv_h,
hsv_s=args.hsv_s,
hsv_v=args.hsv_v,
)
if augment
else classify_transforms(size=args.imgsz)
)
def __getitem__(self, i: int) -> dict:
f, j, fn, im = self.samples[i] # filename, index, filename.with_suffix('.npy'), image
if self.cache_ram:
if im is None: # Warning: two separate if statements required here, do not combine this with previous line
im = self.samples[i][3] = cv2.imread(f)
elif self.cache_disk:
if not fn.exists(): # load npy
np.save(fn.as_posix(), cv2.imread(f), allow_pickle=False)
im = np.load(fn)
else: # read image
im = cv2.imread(f) # BGR
# Convert NumPy array to PIL image
im = Image.fromarray(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))
sample = self.torch_transforms(im)
return {"img": sample, "cls": j}
def __len__(self) -> int:
return len(self.samples)
def verify_images(self) -> list[tuple]:
desc = f"{self.prefix}Scanning {self.root}..."
path = Path(self.root).with_suffix(".cache") # *.cache file path
try:
check_file_speeds([file for (file, _) in self.samples[:5]], prefix=self.prefix) # check image read speeds
cache = load_dataset_cache_file(path) # attempt to load a *.cache file
assert cache["version"] == DATASET_CACHE_VERSION # matches current version
assert cache["hash"] == get_hash([x[0] for x in self.samples]) # identical hash
nf, nc, n, samples = cache.pop("results") # found, missing, empty, corrupt, total
if LOCAL_RANK in {-1, 0}:
d = f"{desc} {nf} images, {nc} corrupt"
TQDM(None, desc=d, total=n, initial=n)
if cache["msgs"]:
LOGGER.info("\n".join(cache["msgs"])) # display warnings
return samples
# NOTE: ModuleNotFoundError to prevent numpy version conflicts when loading cache files created with different numpy versions
except (FileNotFoundError, AssertionError, AttributeError, ModuleNotFoundError):
# Run scan if *.cache retrieval failed
nf, nc, msgs, samples, x = 0, 0, [], [], {}
with ThreadPool(NUM_THREADS) as pool:
results = pool.imap(func=verify_image, iterable=zip(self.samples, repeat(self.prefix)))
pbar = TQDM(results, desc=desc, total=len(self.samples))
for sample, nf_f, nc_f, msg in pbar:
if nf_f:
samples.append(sample)
if msg:
msgs.append(msg)
nf += nf_f
nc += nc_f
pbar.desc = f"{desc} {nf} images, {nc} corrupt"
pbar.close()
if msgs:
LOGGER.info("\n".join(msgs))
x["hash"] = get_hash([x[0] for x in self.samples])
x["results"] = nf, nc, len(samples), samples
x["msgs"] = msgs # warnings
save_dataset_cache_file(self.prefix, path, x, DATASET_CACHE_VERSION)
return samples | --- +++ @@ -47,8 +47,39 @@
class YOLODataset(BaseDataset):
+ """Dataset class for loading object detection and/or segmentation labels in YOLO format.
+
+ This class supports loading data for object detection, segmentation, pose estimation, and oriented bounding box
+ (OBB) tasks using the YOLO format.
+
+ Attributes:
+ use_segments (bool): Indicates if segmentation masks should be used.
+ use_keypoints (bool): Indicates if keypoints should be used for pose estimation.
+ use_obb (bool): Indicates if oriented bounding boxes should be used.
+ data (dict): Dataset configuration dictionary.
+
+ Methods:
+ cache_labels: Cache dataset labels, check images and read shapes.
+ get_labels: Return list of label dictionaries for YOLO training.
+ build_transforms: Build and append transforms to the list.
+ close_mosaic: Disable mosaic, copy_paste, mixup and cutmix augmentations and build transformations.
+ update_labels_info: Update label format for different tasks.
+ collate_fn: Collate data samples into batches.
+
+ Examples:
+ >>> dataset = YOLODataset(img_path="path/to/images", data={"names": {0: "person"}}, task="detect")
+ >>> dataset.get_labels()
+ """
def __init__(self, *args, data: dict | None = None, task: str = "detect", **kwargs):
+ """Initialize the YOLODataset.
+
+ Args:
+ data (dict, optional): Dataset configuration dictionary.
+ task (str): Task type, one of 'detect', 'segment', 'pose', or 'obb'.
+ *args (Any): Additional positional arguments for the parent class.
+ **kwargs (Any): Additional keyword arguments for the parent class.
+ """
self.use_segments = task == "segment"
self.use_keypoints = task == "pose"
self.use_obb = task == "obb"
@@ -57,6 +88,14 @@ super().__init__(*args, channels=self.data.get("channels", 3), **kwargs)
def cache_labels(self, path: Path = Path("./labels.cache")) -> dict:
+ """Cache dataset labels, check images and read shapes.
+
+ Args:
+ path (Path): Path where to save the cache file.
+
+ Returns:
+ (dict): Dictionary containing cached labels and related information.
+ """
x = {"labels": []}
nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages
desc = f"{self.prefix}Scanning {path.parent / path.stem}..."
@@ -116,6 +155,13 @@ return x
def get_labels(self) -> list[dict]:
+ """Return list of label dictionaries for YOLO training.
+
+ This method loads labels from disk or cache, verifies their integrity, and prepares them for training.
+
+ Returns:
+ (list[dict]): List of label dictionaries, each containing information about an image and its annotations.
+ """
self.label_files = img2label_paths(self.im_files)
cache_path = Path(self.label_files[0]).parent.with_suffix(".cache")
try:
@@ -158,6 +204,14 @@ return labels
def build_transforms(self, hyp: dict | None = None) -> Compose:
+ """Build and append transforms to the list.
+
+ Args:
+ hyp (dict, optional): Hyperparameters for transforms.
+
+ Returns:
+ (Compose): Composed transforms.
+ """
if self.augment:
hyp.mosaic = hyp.mosaic if self.augment and not self.rect else 0.0
hyp.mixup = hyp.mixup if self.augment and not self.rect else 0.0
@@ -181,6 +235,11 @@ return transforms
def close_mosaic(self, hyp: dict) -> None:
+ """Disable mosaic, copy_paste, mixup and cutmix augmentations by setting their probabilities to 0.0.
+
+ Args:
+ hyp (dict): Hyperparameters for transforms.
+ """
hyp.mosaic = 0.0
hyp.copy_paste = 0.0
hyp.mixup = 0.0
@@ -188,6 +247,18 @@ self.transforms = self.build_transforms(hyp)
def update_labels_info(self, label: dict) -> dict:
+ """Update label format for different tasks.
+
+ Args:
+ label (dict): Label dictionary containing bboxes, segments, keypoints, etc.
+
+ Returns:
+ (dict): Updated label dictionary with instances.
+
+ Notes:
+ cls is no longer stored with bboxes; classification and semantic segmentation need an independent cls label.
+ This format can also support classification and semantic segmentation by adding or removing dict keys.
+ """
bboxes = label.pop("bboxes")
segments = label.pop("segments", [])
keypoints = label.pop("keypoints", None)
@@ -209,6 +280,14 @@
@staticmethod
def collate_fn(batch: list[dict]) -> dict:
+ """Collate data samples into batches.
+
+ Args:
+ batch (list[dict]): List of dictionaries containing sample data.
+
+ Returns:
+ (dict): Collated batch with stacked tensors.
+ """
new_batch = {}
batch = [dict(sorted(b.items())) for b in batch] # make sure the keys are in the same order
keys = batch[0].keys()
@@ -230,11 +309,41 @@
class YOLOMultiModalDataset(YOLODataset):
+ """Dataset class for loading object detection and/or segmentation labels in YOLO format with multi-modal support.
+
+ This class extends YOLODataset to add text information for multi-modal model training, enabling models to process
+ both image and text data.
+
+ Methods:
+ update_labels_info: Add text information for multi-modal model training.
+ build_transforms: Enhance data transformations with text augmentation.
+
+ Examples:
+ >>> dataset = YOLOMultiModalDataset(img_path="path/to/images", data={"names": {0: "person"}}, task="detect")
+ >>> batch = next(iter(dataset))
+ >>> print(batch.keys()) # Should include 'texts'
+ """
def __init__(self, *args, data: dict | None = None, task: str = "detect", **kwargs):
+ """Initialize a YOLOMultiModalDataset.
+
+ Args:
+ data (dict, optional): Dataset configuration dictionary.
+ task (str): Task type, one of 'detect', 'segment', 'pose', or 'obb'.
+ *args (Any): Additional positional arguments for the parent class.
+ **kwargs (Any): Additional keyword arguments for the parent class.
+ """
super().__init__(*args, data=data, task=task, **kwargs)
def update_labels_info(self, label: dict) -> dict:
+ """Add text information for multi-modal model training.
+
+ Args:
+ label (dict): Label dictionary containing bboxes, segments, keypoints, etc.
+
+ Returns:
+ (dict): Updated label dictionary with instances and texts.
+ """
labels = super().update_labels_info(label)
# NOTE: some categories are concatenated with their synonyms by `/`,
# NOTE: and `RandomLoadText` would randomly select one of them if there are multiple words.
@@ -243,6 +352,14 @@ return labels
def build_transforms(self, hyp: dict | None = None) -> Compose:
+ """Enhance data transformations with optional text augmentation for multi-modal training.
+
+ Args:
+ hyp (dict, optional): Hyperparameters for transforms.
+
+ Returns:
+ (Compose): Composed transforms including text augmentation if applicable.
+ """
transforms = super().build_transforms(hyp)
if self.augment:
# NOTE: hard-coded the args for now.
@@ -259,11 +376,17 @@
@property
def category_names(self):
+ """Return category names for the dataset.
+
+ Returns:
+ (set[str]): Set of class names.
+ """
names = self.data["names"].values()
return {n.strip() for name in names for n in name.split("/")} # category names
@property
def category_freq(self):
+ """Return frequency of each category in the dataset."""
texts = [v.split("/") for v in self.data["names"].values()]
category_freq = defaultdict(int)
for label in self.labels:
@@ -276,22 +399,75 @@
@staticmethod
def _get_neg_texts(category_freq: dict, threshold: int = 100) -> list[str]:
+ """Get negative text samples based on frequency threshold."""
threshold = min(max(category_freq.values()), 100)
return [k for k, v in category_freq.items() if v >= threshold]
class GroundingDataset(YOLODataset):
+ """Dataset class for object detection tasks using annotations from a JSON file in grounding format.
+
+ This dataset is designed for grounding tasks where annotations are provided in a JSON file rather than the standard
+ YOLO format text files.
+
+ Attributes:
+ json_file (str): Path to the JSON file containing annotations.
+
+ Methods:
+ get_img_files: Return empty list as image files are read in get_labels.
+ get_labels: Load annotations from a JSON file and prepare them for training.
+ build_transforms: Configure augmentations for training with optional text loading.
+
+ Examples:
+ >>> dataset = GroundingDataset(img_path="path/to/images", json_file="annotations.json", task="detect")
+ >>> len(dataset) # Number of valid images with annotations
+ """
def __init__(self, *args, task: str = "detect", json_file: str = "", max_samples: int = 80, **kwargs):
+ """Initialize a GroundingDataset for object detection.
+
+ Args:
+ json_file (str): Path to the JSON file containing annotations.
+ task (str): Must be 'detect' or 'segment' for GroundingDataset.
+ max_samples (int): Maximum number of samples to load for text augmentation.
+ *args (Any): Additional positional arguments for the parent class.
+ **kwargs (Any): Additional keyword arguments for the parent class.
+ """
assert task in {"detect", "segment"}, "GroundingDataset currently only supports `detect` and `segment` tasks"
self.json_file = json_file
self.max_samples = max_samples
super().__init__(*args, task=task, data={"channels": 3}, **kwargs)
def get_img_files(self, img_path: str) -> list:
+ """The image files would be read in `get_labels` function, return empty list here.
+
+ Args:
+ img_path (str): Path to the directory containing images.
+
+ Returns:
+ (list): Empty list as image files are read in get_labels.
+ """
return []
def verify_labels(self, labels: list[dict[str, Any]]) -> None:
+ """Verify the number of instances in the dataset matches expected counts.
+
+ This method checks if the total number of bounding box instances in the provided labels matches the expected
+ count for known datasets. It performs validation against a predefined set of datasets with known instance
+ counts.
+
+ Args:
+ labels (list[dict[str, Any]]): List of label dictionaries, where each dictionary contains dataset
+ annotations. Each label dict must have a 'bboxes' key with a numpy array or tensor containing bounding
+ box coordinates.
+
+ Raises:
+ AssertionError: If the actual instance count doesn't match the expected count for a recognized dataset.
+
+ Notes:
+ For unrecognized datasets (those not in the predefined expected_counts),
+ a warning is logged and verification is skipped.
+ """
expected_counts = {
"final_mixed_train_no_coco_segm": 3662412,
"final_mixed_train_no_coco": 3681235,
@@ -307,6 +483,14 @@ LOGGER.warning(f"Skipping instance count verification for unrecognized dataset '{self.json_file}'")
def cache_labels(self, path: Path = Path("./labels.cache")) -> dict[str, Any]:
+ """Load annotations from a JSON file, filter, and normalize bounding boxes for each image.
+
+ Args:
+ path (Path): Path where to save the cache file.
+
+ Returns:
+ (dict[str, Any]): Dictionary containing cached labels and related information.
+ """
x = {"labels": []}
LOGGER.info("Loading annotation file...")
with open(self.json_file) as f:
@@ -389,6 +573,11 @@ return x
def get_labels(self) -> list[dict]:
+ """Load labels from cache or generate them from JSON file.
+
+ Returns:
+ (list[dict]): List of label dictionaries, each containing information about an image and its annotations.
+ """
cache_path = Path(self.json_file).with_suffix(".cache")
try:
cache, _ = load_dataset_cache_file(cache_path), True # attempt to load a *.cache file
@@ -405,6 +594,14 @@ return labels
def build_transforms(self, hyp: dict | None = None) -> Compose:
+ """Configure augmentations for training with optional text loading.
+
+ Args:
+ hyp (dict, optional): Hyperparameters for transforms.
+
+ Returns:
+ (Compose): Composed transforms including text augmentation if applicable.
+ """
transforms = super().build_transforms(hyp)
if self.augment:
# NOTE: hard-coded the args for now.
@@ -421,10 +618,12 @@
@property
def category_names(self):
+ """Return unique category names from the dataset."""
return {t.strip() for label in self.labels for text in label["texts"] for t in text}
@property
def category_freq(self):
+ """Return frequency of each category in the dataset."""
category_freq = defaultdict(int)
for label in self.labels:
for text in label["texts"]:
@@ -435,17 +634,44 @@
@staticmethod
def _get_neg_texts(category_freq: dict, threshold: int = 100) -> list[str]:
+ """Get negative text samples based on frequency threshold."""
threshold = min(max(category_freq.values()), 100)
return [k for k, v in category_freq.items() if v >= threshold]
class YOLOConcatDataset(ConcatDataset):
+ """Dataset as a concatenation of multiple datasets.
+
+ This class is useful to assemble different existing datasets for YOLO training, ensuring they use the same collation
+ function.
+
+ Methods:
+ collate_fn: Static method that collates data samples into batches using YOLODataset's collation function.
+
+ Examples:
+ >>> dataset1 = YOLODataset(...)
+ >>> dataset2 = YOLODataset(...)
+ >>> combined_dataset = YOLOConcatDataset([dataset1, dataset2])
+ """
@staticmethod
def collate_fn(batch: list[dict]) -> dict:
+ """Collate data samples into batches.
+
+ Args:
+ batch (list[dict]): List of dictionaries containing sample data.
+
+ Returns:
+ (dict): Collated batch with stacked tensors.
+ """
return YOLODataset.collate_fn(batch)
def close_mosaic(self, hyp: dict) -> None:
+ """Disable mosaic, copy_paste, mixup and cutmix augmentations by setting their probabilities to 0.0.
+
+ Args:
+ hyp (dict): Hyperparameters for transforms.
+ """
for dataset in self.datasets:
if not hasattr(dataset, "close_mosaic"):
continue
@@ -454,14 +680,45 @@
# TODO: support semantic segmentation
class SemanticDataset(BaseDataset):
+ """Semantic Segmentation Dataset."""
def __init__(self):
+ """Initialize a SemanticDataset object."""
super().__init__()
class ClassificationDataset:
+ """Dataset class for image classification tasks wrapping torchvision ImageFolder functionality.
+
+ This class offers functionalities like image augmentation, caching, and verification. It's designed to efficiently
+ handle large datasets for training deep learning models, with optional image transformations and caching mechanisms
+ to speed up training.
+
+ Attributes:
+ cache_ram (bool): Indicates if caching in RAM is enabled.
+ cache_disk (bool): Indicates if caching on disk is enabled.
+ samples (list): A list of lists, each containing the path to an image, its class index, path to its .npy cache
+ file (if caching on disk), and optionally the loaded image array (if caching in RAM).
+ torch_transforms (callable): PyTorch transforms to be applied to the images.
+ root (str): Root directory of the dataset.
+ prefix (str): Prefix for logging and cache filenames.
+
+ Methods:
+ __getitem__: Return transformed image and class index for the given sample index.
+ __len__: Return the total number of samples in the dataset.
+ verify_images: Verify all images in dataset.
+ """
def __init__(self, root: str, args, augment: bool = False, prefix: str = ""):
+ """Initialize YOLO classification dataset with root directory, arguments, augmentations, and cache settings.
+
+ Args:
+ root (str): Path to the dataset directory where images are stored in a class-specific folder structure.
+ args (Namespace): Configuration containing dataset-related settings such as image size, augmentation
+ parameters, and cache settings.
+ augment (bool, optional): Whether to apply augmentations to the dataset.
+ prefix (str, optional): Prefix for logging and cache filenames, aiding in dataset identification.
+ """
import torchvision # scope for faster 'import ultralytics'
# Base class assigned as attribute rather than used as base class to allow for scoping slow torchvision import
@@ -504,6 +761,14 @@ )
def __getitem__(self, i: int) -> dict:
+ """Return transformed image and class index for the given sample index.
+
+ Args:
+ i (int): Index of the sample to retrieve.
+
+ Returns:
+ (dict): Dictionary containing the image and its class index.
+ """
f, j, fn, im = self.samples[i] # filename, index, filename.with_suffix('.npy'), image
if self.cache_ram:
if im is None: # Warning: two separate if statements required here, do not combine this with previous line
@@ -520,9 +785,15 @@ return {"img": sample, "cls": j}
def __len__(self) -> int:
+ """Return the total number of samples in the dataset."""
return len(self.samples)
def verify_images(self) -> list[tuple]:
+ """Verify all images in dataset.
+
+ Returns:
+ (list[tuple]): List of valid samples after verification.
+ """
desc = f"{self.prefix}Scanning {self.root}..."
path = Path(self.root).with_suffix(".cache") # *.cache file path
@@ -561,4 +832,4 @@ x["results"] = nf, nc, len(samples), samples
x["msgs"] = msgs # warnings
save_dataset_cache_file(self.prefix, path, x, DATASET_CACHE_VERSION)
- return samples
+ return samples
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/data/dataset.py |
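The three-way image cache in ClassificationDataset.__getitem__ above (a RAM slot in the samples list, a .npy file on disk, or a fresh read) is easiest to see in isolation. A minimal sketch of the same branching as a standalone helper — load_cached is a hypothetical name, and OpenCV plus NumPy are assumed installed:

from pathlib import Path

import cv2
import numpy as np

def load_cached(f: str, fn: Path, im, cache_ram: bool, cache_disk: bool):
    """Return a BGR image, preferring the RAM cache, then the .npy disk cache, then a fresh read."""
    if cache_ram:
        if im is None:  # first access: decode once; the caller stores the array back in its samples list
            im = cv2.imread(f)
        return im
    if cache_disk:
        if not fn.exists():  # first access: decode once and persist as .npy
            np.save(fn.as_posix(), cv2.imread(f), allow_pickle=False)
        return np.load(fn)  # later accesses skip image decoding entirely
    return cv2.imread(f)  # no caching: decode on every access

The write-back step is what self.samples[i][3] = cv2.imread(f) does in the dataset class, which is why the RAM branch needs two separate if statements.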
Add inline docstrings for readability | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import copy
import torch
from torch import nn
from .blocks import RoPEAttention
class MemoryAttentionLayer(nn.Module):
def __init__(
self,
d_model: int = 256,
dim_feedforward: int = 2048,
dropout: float = 0.1,
pos_enc_at_attn: bool = False,
pos_enc_at_cross_attn_keys: bool = True,
pos_enc_at_cross_attn_queries: bool = False,
self_attn: nn.Module | None = None,
cross_attn: nn.Module | None = None,
):
super().__init__()
self.d_model = d_model
self.dim_feedforward = dim_feedforward
self.dropout_value = dropout
self.self_attn = self_attn or RoPEAttention(embedding_dim=256, num_heads=1, downsample_rate=1)
self.cross_attn_image = cross_attn or RoPEAttention(
rope_k_repeat=True,
embedding_dim=256,
num_heads=1,
downsample_rate=1,
kv_in_dim=64,
)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = nn.ReLU()
# Where to add pos enc
self.pos_enc_at_attn = pos_enc_at_attn
self.pos_enc_at_cross_attn_queries = pos_enc_at_cross_attn_queries
self.pos_enc_at_cross_attn_keys = pos_enc_at_cross_attn_keys
def _forward_sa(self, tgt: torch.Tensor, query_pos: torch.Tensor | None) -> torch.Tensor:
tgt2 = self.norm1(tgt)
q = k = tgt2 + query_pos if self.pos_enc_at_attn else tgt2
tgt2 = self.self_attn(q, k, v=tgt2)
tgt = tgt + self.dropout1(tgt2)
return tgt
def _forward_ca(
self,
tgt: torch.Tensor,
memory: torch.Tensor,
query_pos: torch.Tensor | None,
pos: torch.Tensor | None,
num_k_exclude_rope: int = 0,
) -> torch.Tensor:
kwds = {}
if num_k_exclude_rope > 0:
assert isinstance(self.cross_attn_image, RoPEAttention)
kwds = {"num_k_exclude_rope": num_k_exclude_rope}
# Cross-Attention
tgt2 = self.norm2(tgt)
tgt2 = self.cross_attn_image(
q=tgt2 + query_pos if self.pos_enc_at_cross_attn_queries else tgt2,
k=memory + pos if self.pos_enc_at_cross_attn_keys else memory,
v=memory,
**kwds,
)
tgt = tgt + self.dropout2(tgt2)
return tgt
def forward(
self,
tgt: torch.Tensor,
memory: torch.Tensor,
pos: torch.Tensor | None = None,
query_pos: torch.Tensor | None = None,
num_k_exclude_rope: int = 0,
) -> torch.Tensor:
tgt = self._forward_sa(tgt, query_pos)
tgt = self._forward_ca(tgt, memory, query_pos, pos, num_k_exclude_rope)
# MLP
tgt2 = self.norm3(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
tgt = tgt + self.dropout3(tgt2)
return tgt
class MemoryAttention(nn.Module):
def __init__(
self,
d_model: int,
pos_enc_at_input: bool,
layer: nn.Module,
num_layers: int,
batch_first: bool = True, # Do layers expect batch first input?
):
super().__init__()
self.d_model = d_model
self.layers = nn.ModuleList([copy.deepcopy(layer) for _ in range(num_layers)])
self.num_layers = num_layers
self.norm = nn.LayerNorm(d_model)
self.pos_enc_at_input = pos_enc_at_input
self.batch_first = batch_first
def forward(
self,
curr: torch.Tensor, # self-attention inputs
memory: torch.Tensor, # cross-attention inputs
curr_pos: torch.Tensor | None = None, # pos_enc for self-attention inputs
memory_pos: torch.Tensor | None = None, # pos_enc for cross-attention inputs
num_obj_ptr_tokens: int = 0, # number of object pointer *tokens*
) -> torch.Tensor:
if isinstance(curr, list):
assert isinstance(curr_pos, list)
assert len(curr) == len(curr_pos) == 1
curr, curr_pos = curr[0], curr_pos[0]
assert curr.shape[1] == memory.shape[1], "Batch size must be the same for curr and memory"
output = curr
if self.pos_enc_at_input and curr_pos is not None:
output = output + 0.1 * curr_pos
if self.batch_first:
# Convert to batch first
output = output.transpose(0, 1)
curr_pos = curr_pos.transpose(0, 1)
memory = memory.transpose(0, 1)
memory_pos = memory_pos.transpose(0, 1)
for layer in self.layers:
kwds = {}
if isinstance(layer.cross_attn_image, RoPEAttention):
kwds = {"num_k_exclude_rope": num_obj_ptr_tokens}
output = layer(
tgt=output,
memory=memory,
pos=memory_pos,
query_pos=curr_pos,
**kwds,
)
normed_output = self.norm(output)
if self.batch_first:
# Convert back to seq first
normed_output = normed_output.transpose(0, 1)
curr_pos = curr_pos.transpose(0, 1)
return normed_output | --- +++ @@ -11,6 +11,45 @@
class MemoryAttentionLayer(nn.Module):
+ """Implements a memory attention layer with self-attention and cross-attention mechanisms for neural networks.
+
+ This class combines self-attention, cross-attention, and feedforward components to process input tensors and
+ generate memory-based attention outputs.
+
+ Attributes:
+ d_model (int): Dimensionality of the model.
+ dim_feedforward (int): Dimensionality of the feedforward network.
+ dropout_value (float): Dropout rate for regularization.
+ self_attn (RoPEAttention): Self-attention mechanism using RoPE (Rotary Position Embedding).
+ cross_attn_image (RoPEAttention): Cross-attention mechanism for image processing.
+ linear1 (nn.Linear): First linear layer of the feedforward network.
+ linear2 (nn.Linear): Second linear layer of the feedforward network.
+ norm1 (nn.LayerNorm): Layer normalization for self-attention output.
+ norm2 (nn.LayerNorm): Layer normalization for cross-attention output.
+ norm3 (nn.LayerNorm): Layer normalization for feedforward network output.
+ dropout1 (nn.Dropout): Dropout layer after self-attention.
+ dropout2 (nn.Dropout): Dropout layer after cross-attention.
+ dropout3 (nn.Dropout): Dropout layer after feedforward network.
+ activation (nn.ReLU): Activation function for the feedforward network.
+ pos_enc_at_attn (bool): Flag to add positional encoding at attention.
+ pos_enc_at_cross_attn_queries (bool): Flag to add positional encoding to cross-attention queries.
+ pos_enc_at_cross_attn_keys (bool): Flag to add positional encoding to cross-attention keys.
+
+ Methods:
+ forward: Performs the full memory attention operation on input tensors.
+ _forward_sa: Performs self-attention on input tensor.
+ _forward_ca: Performs cross-attention between target and memory tensors.
+
+ Examples:
+ >>> layer = MemoryAttentionLayer(d_model=256, dim_feedforward=2048, dropout=0.1)
+ >>> tgt = torch.randn(1, 100, 256)
+ >>> memory = torch.randn(1, 100, 64)
+ >>> pos = torch.randn(1, 100, 256)
+ >>> query_pos = torch.randn(1, 100, 256)
+ >>> output = layer(tgt, memory, pos, query_pos)
+ >>> print(output.shape)
+ torch.Size([1, 100, 256])
+ """
def __init__(
self,
@@ -23,6 +62,18 @@ self_attn: nn.Module | None = None,
cross_attn: nn.Module | None = None,
):
+ """Initialize a memory attention layer with self-attention, cross-attention, and feedforward components.
+
+ Args:
+ d_model (int): Dimensionality of the model.
+ dim_feedforward (int): Dimensionality of the feedforward network.
+ dropout (float): Dropout rate for regularization.
+ pos_enc_at_attn (bool): Whether to add positional encoding at attention.
+ pos_enc_at_cross_attn_keys (bool): Whether to add positional encoding to cross-attention keys.
+ pos_enc_at_cross_attn_queries (bool): Whether to add positional encoding to cross-attention queries.
+ self_attn (nn.Module | None): Custom self-attention module. If None, a default RoPEAttention is used.
+ cross_attn (nn.Module | None): Custom cross-attention module. If None, a default RoPEAttention is used.
+ """
super().__init__()
self.d_model = d_model
self.dim_feedforward = dim_feedforward
@@ -56,6 +107,7 @@ self.pos_enc_at_cross_attn_keys = pos_enc_at_cross_attn_keys
def _forward_sa(self, tgt: torch.Tensor, query_pos: torch.Tensor | None) -> torch.Tensor:
+ """Perform self-attention on input tensor using positional encoding and RoPE attention mechanism."""
tgt2 = self.norm1(tgt)
q = k = tgt2 + query_pos if self.pos_enc_at_attn else tgt2
tgt2 = self.self_attn(q, k, v=tgt2)
@@ -70,6 +122,7 @@ pos: torch.Tensor | None,
num_k_exclude_rope: int = 0,
) -> torch.Tensor:
+ """Perform cross-attention between target and memory tensors using RoPEAttention mechanism."""
kwds = {}
if num_k_exclude_rope > 0:
assert isinstance(self.cross_attn_image, RoPEAttention)
@@ -94,6 +147,18 @@ query_pos: torch.Tensor | None = None,
num_k_exclude_rope: int = 0,
) -> torch.Tensor:
+ """Process input tensors through self-attention, cross-attention, and feedforward network layers.
+
+ Args:
+ tgt (torch.Tensor): Target tensor for self-attention with shape (N, L, D).
+ memory (torch.Tensor): Memory tensor for cross-attention with shape (N, S, D).
+ pos (torch.Tensor | None): Positional encoding for memory tensor.
+ query_pos (torch.Tensor | None): Positional encoding for target tensor.
+ num_k_exclude_rope (int): Number of keys to exclude from rotary position embedding.
+
+ Returns:
+ (torch.Tensor): Processed tensor after attention and feedforward layers with shape (N, L, D).
+ """
tgt = self._forward_sa(tgt, query_pos)
tgt = self._forward_ca(tgt, memory, query_pos, pos, num_k_exclude_rope)
# MLP
@@ -104,6 +169,34 @@
class MemoryAttention(nn.Module):
+ """Memory attention module for processing sequential data with self and cross-attention mechanisms.
+
+ This class implements a multi-layer attention mechanism that combines self-attention and cross-attention for
+ processing sequential data, particularly useful in transformer-like architectures.
+
+ Attributes:
+ d_model (int): The dimension of the model's hidden state.
+ layers (nn.ModuleList): A list of MemoryAttentionLayer modules.
+ num_layers (int): The number of attention layers.
+ norm (nn.LayerNorm): Layer normalization applied to the output.
+ pos_enc_at_input (bool): Whether to apply positional encoding at the input.
+ batch_first (bool): Whether the input tensors are in batch-first format.
+
+ Methods:
+ forward: Processes input tensors through the attention layers.
+
+ Examples:
+ >>> d_model = 256
+ >>> layer = MemoryAttentionLayer(d_model)
+ >>> attention = MemoryAttention(d_model, pos_enc_at_input=True, layer=layer, num_layers=3)
+ >>> curr = torch.randn(10, 32, d_model) # (seq_len, batch_size, d_model)
+ >>> memory = torch.randn(20, 32, d_model) # (mem_len, batch_size, d_model)
+ >>> curr_pos = torch.randn(10, 32, d_model)
+ >>> memory_pos = torch.randn(20, 32, d_model)
+ >>> output = attention(curr, memory, curr_pos, memory_pos)
+ >>> print(output.shape)
+ torch.Size([10, 32, 256])
+ """
def __init__(
self,
@@ -113,6 +206,18 @@ num_layers: int,
batch_first: bool = True, # Do layers expect batch first input?
):
+ """Initialize MemoryAttention with specified layers and normalization for sequential data processing.
+
+ This class implements a multi-layer attention mechanism that combines self-attention and cross-attention for
+ processing sequential data, particularly useful in transformer-like architectures.
+
+ Args:
+ d_model (int): The dimension of the model's hidden state.
+ pos_enc_at_input (bool): Whether to apply positional encoding at the input.
+ layer (nn.Module): The attention layer to be used in the module.
+ num_layers (int): The number of attention layers.
+ batch_first (bool): Whether the input tensors are in batch-first format.
+ """
super().__init__()
self.d_model = d_model
self.layers = nn.ModuleList([copy.deepcopy(layer) for _ in range(num_layers)])
@@ -129,6 +234,30 @@ memory_pos: torch.Tensor | None = None, # pos_enc for cross-attention inputs
num_obj_ptr_tokens: int = 0, # number of object pointer *tokens*
) -> torch.Tensor:
+ """Process inputs through attention layers, applying self and cross-attention with positional encoding.
+
+ Args:
+ curr (torch.Tensor): Self-attention input tensor, representing the current state.
+ memory (torch.Tensor): Cross-attention input tensor, representing memory information.
+ curr_pos (torch.Tensor | None): Positional encoding for self-attention inputs.
+ memory_pos (torch.Tensor | None): Positional encoding for cross-attention inputs.
+ num_obj_ptr_tokens (int): Number of object pointer tokens to exclude from rotary position embedding.
+
+ Returns:
+ (torch.Tensor): Processed output tensor after applying attention layers and normalization.
+
+ Examples:
+ >>> d_model = 256
+ >>> layer = MemoryAttentionLayer(d_model)
+ >>> attention = MemoryAttention(d_model, pos_enc_at_input=True, layer=layer, num_layers=3)
+ >>> curr = torch.randn(10, 32, d_model) # (seq_len, batch_size, d_model)
+ >>> memory = torch.randn(20, 32, d_model) # (mem_len, batch_size, d_model)
+ >>> curr_pos = torch.randn(10, 32, d_model)
+ >>> memory_pos = torch.randn(20, 32, d_model)
+ >>> output = attention(curr, memory, curr_pos, memory_pos)
+ >>> print(output.shape)
+ torch.Size([10, 32, 256])
+ """
if isinstance(curr, list):
assert isinstance(curr_pos, list)
assert len(curr) == len(curr_pos) == 1
@@ -166,4 +295,4 @@ normed_output = normed_output.transpose(0, 1)
curr_pos = curr_pos.transpose(0, 1)
- return normed_output
+ return normed_output
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/sam/modules/memory_attention.py |
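Each sublayer of MemoryAttentionLayer above (_forward_sa, _forward_ca, and the final MLP) applies the same pre-norm residual pattern: LayerNorm, sublayer, dropout, residual add. A minimal sketch of that pattern on its own — PreNormResidual is a hypothetical wrapper, and PyTorch is assumed installed:

import torch
from torch import nn

class PreNormResidual(nn.Module):
    def __init__(self, d_model: int, sublayer: nn.Module, dropout: float = 0.1):
        super().__init__()
        self.norm = nn.LayerNorm(d_model)  # pre-norm: normalize before the sublayer
        self.sublayer = sublayer
        self.dropout = nn.Dropout(dropout)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x + self.dropout(self.sublayer(self.norm(x)))  # residual add preserves shape

block = PreNormResidual(256, nn.Linear(256, 256))
print(block(torch.randn(2, 10, 256)).shape)  # torch.Size([2, 10, 256])

Pre-norm keeps the residual path free of normalization, which generally stabilizes training of deep attention stacks.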
Document all endpoints with docstrings | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import json
import time
from pathlib import Path
import numpy as np
import torch
import torch.distributed as dist
from ultralytics.cfg import get_cfg, get_save_dir
from ultralytics.data.utils import check_cls_dataset, check_det_dataset
from ultralytics.nn.autobackend import AutoBackend
from ultralytics.utils import LOGGER, RANK, TQDM, callbacks, colorstr, emojis
from ultralytics.utils.checks import check_imgsz
from ultralytics.utils.ops import Profile
from ultralytics.utils.torch_utils import attempt_compile, select_device, smart_inference_mode, unwrap_model
class BaseValidator:
def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks: dict | None = None):
import torchvision # noqa (import here so torchvision import time not recorded in postprocess time)
self.args = get_cfg(overrides=args)
self.dataloader = dataloader
self.stride = None
self.data = None
self.device = None
self.batch_i = None
self.training = True
self.names = None
self.seen = None
self.stats = None
self.confusion_matrix = None
self.nc = None
self.iouv = None
self.jdict = None
self.speed = {"preprocess": 0.0, "inference": 0.0, "loss": 0.0, "postprocess": 0.0}
self.save_dir = save_dir or get_save_dir(self.args)
(self.save_dir / "labels" if self.args.save_txt else self.save_dir).mkdir(parents=True, exist_ok=True)
if self.args.conf is None:
self.args.conf = 0.01 if self.args.task == "obb" else 0.001 # reduce OBB val memory usage
self.args.imgsz = check_imgsz(self.args.imgsz, max_dim=1)
self.plots = {}
self.callbacks = _callbacks or callbacks.get_default_callbacks()
@smart_inference_mode()
def __call__(self, trainer=None, model=None):
self.training = trainer is not None
augment = self.args.augment and (not self.training)
if self.training:
self.device = trainer.device
self.data = trainer.data
# Force FP16 val during training
self.args.half = self.device.type != "cpu" and trainer.amp
model = trainer.ema.ema or trainer.model
if trainer.args.compile and hasattr(model, "_orig_mod"):
model = model._orig_mod # validate non-compiled original model to avoid issues
model = model.half() if self.args.half else model.float()
self.loss = torch.zeros_like(trainer.loss_items, device=trainer.device)
self.args.plots &= trainer.stopper.possible_stop or (trainer.epoch == trainer.epochs - 1)
model.eval()
else:
if str(self.args.model).endswith(".yaml") and model is None:
LOGGER.warning("validating an untrained model YAML will result in 0 mAP.")
callbacks.add_integration_callbacks(self)
if hasattr(model, "end2end"):
if self.args.end2end is not None:
model.end2end = self.args.end2end
if model.end2end:
model.set_head_attr(max_det=self.args.max_det, agnostic_nms=self.args.agnostic_nms)
model = AutoBackend(
model=model or self.args.model,
device=select_device(self.args.device) if RANK == -1 else torch.device("cuda", RANK),
dnn=self.args.dnn,
data=self.args.data,
fp16=self.args.half,
)
self.device = model.device # update device
self.args.half = model.fp16 # update half
stride, fmt = model.stride, model.format
pt = fmt == "pt"
imgsz = check_imgsz(self.args.imgsz, stride=stride)
if fmt not in {"pt", "torchscript"} and not getattr(model, "dynamic", False):
self.args.batch = model.metadata.get("batch", 1) # export.py models default to batch-size 1
LOGGER.info(f"Setting batch={self.args.batch} input of shape ({self.args.batch}, 3, {imgsz}, {imgsz})")
if str(self.args.data).rsplit(".", 1)[-1] in {"yaml", "yml"}:
self.data = check_det_dataset(self.args.data)
elif self.args.task == "classify":
self.data = check_cls_dataset(self.args.data, split=self.args.split)
else:
raise FileNotFoundError(emojis(f"Dataset '{self.args.data}' for task={self.args.task} not found ❌"))
if self.device.type in {"cpu", "mps"}:
self.args.workers = 0 # faster CPU val as time dominated by inference, not dataloading
if not (pt or (getattr(model, "dynamic", False) and fmt != "imx")):
self.args.rect = False
self.stride = model.stride # used in get_dataloader() for padding
self.dataloader = self.dataloader or self.get_dataloader(self.data.get(self.args.split), self.args.batch)
model.eval()
if self.args.compile:
model = attempt_compile(model, device=self.device)
model.warmup(imgsz=(1 if pt else self.args.batch, self.data["channels"], imgsz, imgsz)) # warmup
self.run_callbacks("on_val_start")
dt = (
Profile(device=self.device),
Profile(device=self.device),
Profile(device=self.device),
Profile(device=self.device),
)
bar = TQDM(self.dataloader, desc=self.get_desc(), total=len(self.dataloader))
self.init_metrics(unwrap_model(model))
self.jdict = [] # empty before each val
for batch_i, batch in enumerate(bar):
self.run_callbacks("on_val_batch_start")
self.batch_i = batch_i
# Preprocess
with dt[0]:
batch = self.preprocess(batch)
# Inference
with dt[1]:
preds = model(batch["img"], augment=augment)
# Loss
with dt[2]:
if self.training:
self.loss += model.loss(batch, preds)[1]
# Postprocess
with dt[3]:
preds = self.postprocess(preds)
self.update_metrics(preds, batch)
if self.args.plots and batch_i < 3 and RANK in {-1, 0}:
self.plot_val_samples(batch, batch_i)
self.plot_predictions(batch, preds, batch_i)
self.run_callbacks("on_val_batch_end")
stats = {}
self.gather_stats()
if RANK in {-1, 0}:
stats = self.get_stats()
self.speed = dict(zip(self.speed.keys(), (x.t / len(self.dataloader.dataset) * 1e3 for x in dt)))
self.finalize_metrics()
self.print_results()
self.run_callbacks("on_val_end")
if self.training:
model.float()
# Reduce loss across all GPUs
loss = self.loss.clone().detach()
if trainer.world_size > 1:
dist.reduce(loss, dst=0, op=dist.ReduceOp.AVG)
if RANK > 0:
return
results = {**stats, **trainer.label_loss_items(loss.cpu() / len(self.dataloader), prefix="val")}
return {k: round(float(v), 5) for k, v in results.items()} # return results as 5 decimal place floats
else:
if RANK > 0:
return stats
LOGGER.info(
"Speed: {:.1f}ms preprocess, {:.1f}ms inference, {:.1f}ms loss, {:.1f}ms postprocess per image".format(
*tuple(self.speed.values())
)
)
if self.args.save_json and self.jdict:
with open(str(self.save_dir / "predictions.json"), "w", encoding="utf-8") as f:
LOGGER.info(f"Saving {f.name}...")
json.dump(self.jdict, f) # flatten and save
stats = self.eval_json(stats) # update stats
if self.args.plots or self.args.save_json:
LOGGER.info(f"Results saved to {colorstr('bold', self.save_dir)}")
return stats
def match_predictions(
self, pred_classes: torch.Tensor, true_classes: torch.Tensor, iou: torch.Tensor, use_scipy: bool = False
) -> torch.Tensor:
# Dx10 matrix, where D - detections, 10 - IoU thresholds
correct = np.zeros((pred_classes.shape[0], self.iouv.shape[0])).astype(bool)
# LxD matrix where L - labels (rows), D - detections (columns)
correct_class = true_classes[:, None] == pred_classes
iou = iou * correct_class # zero out the wrong classes
iou = iou.cpu().numpy()
for i, threshold in enumerate(self.iouv.cpu().tolist()):
if use_scipy:
import scipy # scope import to avoid importing for all commands
cost_matrix = iou * (iou >= threshold)
if cost_matrix.any():
labels_idx, detections_idx = scipy.optimize.linear_sum_assignment(cost_matrix, maximize=True)
valid = cost_matrix[labels_idx, detections_idx] > 0
if valid.any():
correct[detections_idx[valid], i] = True
else:
matches = np.nonzero(iou >= threshold) # IoU > threshold and classes match
matches = np.array(matches).T
if matches.shape[0]:
if matches.shape[0] > 1:
matches = matches[iou[matches[:, 0], matches[:, 1]].argsort()[::-1]]
matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
correct[matches[:, 1].astype(int), i] = True
return torch.tensor(correct, dtype=torch.bool, device=pred_classes.device)
def add_callback(self, event: str, callback):
self.callbacks[event].append(callback)
def run_callbacks(self, event: str):
for callback in self.callbacks.get(event, []):
callback(self)
def get_dataloader(self, dataset_path, batch_size):
raise NotImplementedError("get_dataloader function not implemented for this validator")
def build_dataset(self, img_path):
raise NotImplementedError("build_dataset function not implemented in validator")
def preprocess(self, batch):
return batch
def postprocess(self, preds):
return preds
def init_metrics(self, model):
pass
def update_metrics(self, preds, batch):
pass
def finalize_metrics(self):
pass
def get_stats(self):
return {}
def gather_stats(self):
pass
def print_results(self):
pass
def get_desc(self):
pass
@property
def metric_keys(self):
return []
def on_plot(self, name, data=None):
plot_type = data.get("type") if data else None
if plot_type and any((v.get("data") or {}).get("type") == plot_type for v in self.plots.values()):
return # Skip duplicate plot types
self.plots[Path(name)] = {"data": data, "timestamp": time.time()}
def plot_val_samples(self, batch, ni):
pass
def plot_predictions(self, batch, preds, ni):
pass
def pred_to_json(self, preds, batch):
pass
def eval_json(self, stats):
pass | --- +++ @@ -1,4 +1,27 @@ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""
+Check a model's accuracy on a test or val split of a dataset.
+
+Usage:
+ $ yolo mode=val model=yolo26n.pt data=coco8.yaml imgsz=640
+
+Usage - formats:
+ $ yolo mode=val model=yolo26n.pt # PyTorch
+ yolo26n.torchscript # TorchScript
+ yolo26n.onnx # ONNX Runtime or OpenCV DNN with dnn=True
+ yolo26n_openvino_model # OpenVINO
+ yolo26n.engine # TensorRT
+ yolo26n.mlpackage # CoreML (macOS-only)
+ yolo26n_saved_model # TensorFlow SavedModel
+ yolo26n.pb # TensorFlow GraphDef
+ yolo26n.tflite # TensorFlow Lite
+ yolo26n_edgetpu.tflite # TensorFlow Edge TPU
+ yolo26n_paddle_model # PaddlePaddle
+ yolo26n.mnn # MNN
+ yolo26n_ncnn_model # NCNN
+ yolo26n_imx_model # Sony IMX
+ yolo26n_rknn_model # Rockchip RKNN
+"""
from __future__ import annotations
@@ -20,8 +43,65 @@
class BaseValidator:
+ """A base class for creating validators.
+
+ This class provides the foundation for validation processes, including model evaluation, metric computation, and
+ result visualization.
+
+ Attributes:
+ args (SimpleNamespace): Configuration for the validator.
+ dataloader (DataLoader): DataLoader to use for validation.
+ model (nn.Module): Model to validate.
+ data (dict): Data dictionary containing dataset information.
+ device (torch.device): Device to use for validation.
+ batch_i (int): Current batch index.
+ training (bool): Whether the model is in training mode.
+ names (dict): Class names mapping.
+ seen (int): Number of images seen so far during validation.
+ stats (dict): Statistics collected during validation.
+ confusion_matrix: Confusion matrix for classification evaluation.
+ nc (int): Number of classes.
+ iouv (torch.Tensor): IoU thresholds from 0.50 to 0.95 in steps of 0.05.
+ jdict (list): List to store JSON validation results.
+ speed (dict): Dictionary with keys 'preprocess', 'inference', 'loss', 'postprocess' and their respective batch
+ processing times in milliseconds.
+ save_dir (Path): Directory to save results.
+ plots (dict): Dictionary to store plots for visualization.
+ callbacks (dict): Dictionary to store various callback functions.
+ stride (int): Model stride for padding calculations.
+ loss (torch.Tensor): Accumulated loss during training validation.
+
+ Methods:
+ __call__: Execute validation process, running inference on dataloader and computing performance metrics.
+ match_predictions: Match predictions to ground truth objects using IoU.
+ add_callback: Append the given callback to the specified event.
+ run_callbacks: Run all callbacks associated with a specified event.
+ get_dataloader: Get data loader from dataset path and batch size.
+ build_dataset: Build dataset from image path.
+ preprocess: Preprocess an input batch.
+ postprocess: Postprocess the predictions.
+ init_metrics: Initialize performance metrics for the YOLO model.
+ update_metrics: Update metrics based on predictions and batch.
+ finalize_metrics: Finalize and return all metrics.
+ get_stats: Return statistics about the model's performance.
+ print_results: Print the results of the model's predictions.
+ get_desc: Get description of the YOLO model.
+ on_plot: Register plots for visualization.
+ plot_val_samples: Plot validation samples during training.
+ plot_predictions: Plot YOLO model predictions on batch images.
+ pred_to_json: Convert predictions to JSON format.
+ eval_json: Evaluate and return JSON format of prediction statistics.
+ """
def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks: dict | None = None):
+ """Initialize a BaseValidator instance.
+
+ Args:
+ dataloader (torch.utils.data.DataLoader, optional): DataLoader to be used for validation.
+ save_dir (Path, optional): Directory to save results.
+ args (SimpleNamespace, optional): Configuration for the validator.
+ _callbacks (dict, optional): Dictionary to store various callback functions.
+ """
import torchvision # noqa (import here so torchvision import time not recorded in postprocess time)
self.args = get_cfg(overrides=args)
@@ -51,6 +131,15 @@
@smart_inference_mode()
def __call__(self, trainer=None, model=None):
+ """Execute validation process, running inference on dataloader and computing performance metrics.
+
+ Args:
+ trainer (object, optional): Trainer object that contains the model to validate.
+ model (nn.Module, optional): Model to validate if not using a trainer.
+
+ Returns:
+ (dict): Dictionary containing validation statistics.
+ """
self.training = trainer is not None
augment = self.args.augment and (not self.training)
if self.training:
@@ -185,6 +274,17 @@ def match_predictions(
self, pred_classes: torch.Tensor, true_classes: torch.Tensor, iou: torch.Tensor, use_scipy: bool = False
) -> torch.Tensor:
+ """Match predictions to ground truth objects using IoU.
+
+ Args:
+ pred_classes (torch.Tensor): Predicted class indices of shape (N,).
+ true_classes (torch.Tensor): Target class indices of shape (M,).
+ iou (torch.Tensor): An NxM tensor containing the pairwise IoU values for predictions and ground truth.
+ use_scipy (bool, optional): Whether to use scipy for matching (more precise).
+
+ Returns:
+ (torch.Tensor): Correct tensor of shape (N, 10) for 10 IoU thresholds.
+ """
# Dx10 matrix, where D - detections, 10 - IoU thresholds
correct = np.zeros((pred_classes.shape[0], self.iouv.shape[0])).astype(bool)
# LxD matrix where L - labels (rows), D - detections (columns)
@@ -213,63 +313,82 @@ return torch.tensor(correct, dtype=torch.bool, device=pred_classes.device)
def add_callback(self, event: str, callback):
+ """Append the given callback to the specified event."""
self.callbacks[event].append(callback)
def run_callbacks(self, event: str):
+ """Run all callbacks associated with a specified event."""
for callback in self.callbacks.get(event, []):
callback(self)
def get_dataloader(self, dataset_path, batch_size):
+ """Get data loader from dataset path and batch size."""
raise NotImplementedError("get_dataloader function not implemented for this validator")
def build_dataset(self, img_path):
+ """Build dataset from image path."""
raise NotImplementedError("build_dataset function not implemented in validator")
def preprocess(self, batch):
+ """Preprocess an input batch."""
return batch
def postprocess(self, preds):
+ """Postprocess the predictions."""
return preds
def init_metrics(self, model):
+ """Initialize performance metrics for the YOLO model."""
pass
def update_metrics(self, preds, batch):
+ """Update metrics based on predictions and batch."""
pass
def finalize_metrics(self):
+ """Finalize and return all metrics."""
pass
def get_stats(self):
+ """Return statistics about the model's performance."""
return {}
def gather_stats(self):
+ """Gather statistics from all the GPUs during DDP training to GPU 0."""
pass
def print_results(self):
+ """Print the results of the model's predictions."""
pass
def get_desc(self):
+ """Get description of the YOLO model."""
pass
@property
def metric_keys(self):
+ """Return the metric keys used in YOLO training/validation."""
return []
def on_plot(self, name, data=None):
+ """Register plots for visualization, deduplicating by type."""
plot_type = data.get("type") if data else None
if plot_type and any((v.get("data") or {}).get("type") == plot_type for v in self.plots.values()):
return # Skip duplicate plot types
self.plots[Path(name)] = {"data": data, "timestamp": time.time()}
def plot_val_samples(self, batch, ni):
+ """Plot validation samples during training."""
pass
def plot_predictions(self, batch, preds, ni):
+ """Plot YOLO model predictions on batch images."""
pass
def pred_to_json(self, preds, batch):
+ """Convert predictions to JSON format."""
pass
def eval_json(self, stats):
- pass+ """Evaluate and return JSON format of prediction statistics."""
+ pass
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/engine/validator.py |
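The non-scipy branch of BaseValidator.match_predictions above greedily keeps the highest-IoU (label, detection) pairs and then deduplicates so each detection and each label is used at most once. A tiny numeric sketch of that dedup with made-up IoU values, assuming NumPy is installed:

import numpy as np

iou = np.array([[0.9, 0.6],   # label 0 vs detections 0 and 1
                [0.3, 0.8]])  # label 1 vs detections 0 and 1
matches = np.array(np.nonzero(iou >= 0.5)).T  # candidate (label, detection) pairs
matches = matches[iou[matches[:, 0], matches[:, 1]].argsort()[::-1]]  # best IoU first
matches = matches[np.unique(matches[:, 1], return_index=True)[1]]  # one label per detection
matches = matches[np.unique(matches[:, 0], return_index=True)[1]]  # one detection per label
print(matches)  # [[0 0], [1 1]]: the weaker (0, 1) candidate with IoU 0.6 is discarded

Because the array is sorted by IoU before np.unique, the first occurrence kept for each detection (and each label) is always its best match.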
Provide docstrings following PEP 257 | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from pathlib import Path
from typing import Any
import numpy as np
import torch
from ultralytics.models.yolo.detect import DetectionValidator
from ultralytics.utils import LOGGER, ops
from ultralytics.utils.metrics import OBBMetrics, batch_probiou
from ultralytics.utils.nms import TorchNMS
from ultralytics.utils.plotting import plot_images
class OBBValidator(DetectionValidator):
def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks: dict | None = None) -> None:
super().__init__(dataloader, save_dir, args, _callbacks)
self.args.task = "obb"
self.metrics = OBBMetrics()
def init_metrics(self, model: torch.nn.Module) -> None:
super().init_metrics(model)
val = self.data.get(self.args.split, "") # validation path
self.is_dota = isinstance(val, str) and "DOTA" in val # check if dataset is DOTA format
self.confusion_matrix.task = "obb" # set confusion matrix task to 'obb'
def _process_batch(self, preds: dict[str, torch.Tensor], batch: dict[str, torch.Tensor]) -> dict[str, np.ndarray]:
if batch["cls"].shape[0] == 0 or preds["cls"].shape[0] == 0:
return {"tp": np.zeros((preds["cls"].shape[0], self.niou), dtype=bool)}
iou = batch_probiou(batch["bboxes"], preds["bboxes"])
return {"tp": self.match_predictions(preds["cls"], batch["cls"], iou).cpu().numpy()}
def postprocess(self, preds: torch.Tensor) -> list[dict[str, torch.Tensor]]:
preds = super().postprocess(preds)
for pred in preds:
pred["bboxes"] = torch.cat([pred["bboxes"], pred.pop("extra")], dim=-1) # concatenate angle
return preds
def _prepare_batch(self, si: int, batch: dict[str, Any]) -> dict[str, Any]:
idx = batch["batch_idx"] == si
cls = batch["cls"][idx].squeeze(-1)
bbox = batch["bboxes"][idx]
ori_shape = batch["ori_shape"][si]
imgsz = batch["img"].shape[2:]
ratio_pad = batch["ratio_pad"][si]
if cls.shape[0]:
bbox[..., :4].mul_(torch.tensor(imgsz, device=self.device)[[1, 0, 1, 0]]) # target boxes
return {
"cls": cls,
"bboxes": bbox,
"ori_shape": ori_shape,
"imgsz": imgsz,
"ratio_pad": ratio_pad,
"im_file": batch["im_file"][si],
}
def plot_predictions(self, batch: dict[str, Any], preds: list[dict[str, torch.Tensor]], ni: int) -> None:
if not preds:
return
for i, pred in enumerate(preds):
pred["batch_idx"] = torch.ones_like(pred["conf"]) * i
keys = preds[0].keys()
batched_preds = {k: torch.cat([x[k] for x in preds], dim=0) for k in keys}
plot_images(
images=batch["img"],
labels=batched_preds,
paths=batch["im_file"],
fname=self.save_dir / f"val_batch{ni}_pred.jpg",
names=self.names,
on_plot=self.on_plot,
)
def pred_to_json(self, predn: dict[str, torch.Tensor], pbatch: dict[str, Any]) -> None:
path = Path(pbatch["im_file"])
stem = path.stem
image_id = int(stem) if stem.isnumeric() else stem
rbox = predn["bboxes"]
poly = ops.xywhr2xyxyxyxy(rbox).view(-1, 8)
for r, b, s, c in zip(rbox.tolist(), poly.tolist(), predn["conf"].tolist(), predn["cls"].tolist()):
self.jdict.append(
{
"image_id": image_id,
"file_name": path.name,
"category_id": self.class_map[int(c)],
"score": round(s, 5),
"rbox": [round(x, 3) for x in r],
"poly": [round(x, 3) for x in b],
}
)
def save_one_txt(self, predn: dict[str, torch.Tensor], save_conf: bool, shape: tuple[int, int], file: Path) -> None:
import numpy as np
from ultralytics.engine.results import Results
Results(
np.zeros((shape[0], shape[1]), dtype=np.uint8),
path=None,
names=self.names,
obb=torch.cat([predn["bboxes"], predn["conf"].unsqueeze(-1), predn["cls"].unsqueeze(-1)], dim=1),
).save_txt(file, save_conf=save_conf)
def scale_preds(self, predn: dict[str, torch.Tensor], pbatch: dict[str, Any]) -> dict[str, torch.Tensor]:
return {
**predn,
"bboxes": ops.scale_boxes(
pbatch["imgsz"], predn["bboxes"].clone(), pbatch["ori_shape"], ratio_pad=pbatch["ratio_pad"], xywh=True
),
}
def eval_json(self, stats: dict[str, Any]) -> dict[str, Any]:
if self.args.save_json and self.is_dota and len(self.jdict):
import json
import re
from collections import defaultdict
pred_json = self.save_dir / "predictions.json" # predictions
pred_txt = self.save_dir / "predictions_txt" # predictions
pred_txt.mkdir(parents=True, exist_ok=True)
data = json.load(open(pred_json))
# Save split results
LOGGER.info(f"Saving predictions with DOTA format to {pred_txt}...")
for d in data:
image_id = d["image_id"]
score = d["score"]
classname = self.names[d["category_id"] - 1].replace(" ", "-")
p = d["poly"]
with open(f"{pred_txt / f'Task1_{classname}'}.txt", "a", encoding="utf-8") as f:
f.writelines(f"{image_id} {score} {p[0]} {p[1]} {p[2]} {p[3]} {p[4]} {p[5]} {p[6]} {p[7]}\n")
# Save merged results; this could produce a slightly lower mAP than the official merging script
# because of the probiou calculation.
pred_merged_txt = self.save_dir / "predictions_merged_txt" # predictions
pred_merged_txt.mkdir(parents=True, exist_ok=True)
merged_results = defaultdict(list)
LOGGER.info(f"Saving merged predictions with DOTA format to {pred_merged_txt}...")
for d in data:
image_id = d["image_id"].split("__", 1)[0]
pattern = re.compile(r"\d+___\d+")
x, y = (int(c) for c in re.findall(pattern, d["image_id"])[0].split("___"))
bbox, score, cls = d["rbox"], d["score"], d["category_id"] - 1
bbox[0] += x
bbox[1] += y
bbox.extend([score, cls])
merged_results[image_id].append(bbox)
for image_id, bbox in merged_results.items():
bbox = torch.tensor(bbox)
max_wh = torch.max(bbox[:, :2]).item() * 2
c = bbox[:, 6:7] * max_wh # classes
scores = bbox[:, 5] # scores
b = bbox[:, :5].clone()
b[:, :2] += c
# An IoU threshold of 0.3 gives results close to those of the official merging script, sometimes slightly better.
i = TorchNMS.fast_nms(b, scores, 0.3, iou_func=batch_probiou)
bbox = bbox[i]
b = ops.xywhr2xyxyxyxy(bbox[:, :5]).view(-1, 8)
for x in torch.cat([b, bbox[:, 5:7]], dim=-1).tolist():
classname = self.names[int(x[-1])].replace(" ", "-")
p = [round(i, 3) for i in x[:-2]] # poly
score = round(x[-2], 3)
with open(f"{pred_merged_txt / f'Task1_{classname}'}.txt", "a", encoding="utf-8") as f:
f.writelines(f"{image_id} {score} {p[0]} {p[1]} {p[2]} {p[3]} {p[4]} {p[5]} {p[6]} {p[7]}\n")
return stats | --- +++ @@ -16,31 +16,114 @@
class OBBValidator(DetectionValidator):
+ """A class extending the DetectionValidator class for validation based on an Oriented Bounding Box (OBB) model.
+
+ This validator specializes in evaluating models that predict rotated bounding boxes, commonly used for aerial and
+ satellite imagery where objects can appear at various orientations.
+
+ Attributes:
+ args (dict): Configuration arguments for the validator.
+ metrics (OBBMetrics): Metrics object for evaluating OBB model performance.
+ is_dota (bool): Flag indicating whether the validation dataset is in DOTA format.
+
+ Methods:
+ init_metrics: Initialize evaluation metrics for YOLO.
+ _process_batch: Process batch of detections and ground truth boxes to compute IoU matrix.
+ _prepare_batch: Prepare batch data for OBB validation.
+ _prepare_pred: Prepare predictions for evaluation against ground truth.
+ plot_predictions: Plot predicted bounding boxes on input images.
+ pred_to_json: Serialize YOLO predictions to COCO json format.
+ save_one_txt: Save YOLO detections to a txt file in normalized coordinates.
+ eval_json: Evaluate YOLO output in JSON format and return performance statistics.
+
+ Examples:
+ >>> from ultralytics.models.yolo.obb import OBBValidator
+ >>> args = dict(model="yolo26n-obb.pt", data="dota8.yaml")
+ >>> validator = OBBValidator(args=args)
+ >>> validator(model=args["model"])
+ """
def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks: dict | None = None) -> None:
+ """Initialize OBBValidator and set task to 'obb', metrics to OBBMetrics.
+
+ This constructor initializes an OBBValidator instance for validating Oriented Bounding Box (OBB) models. It
+ extends the DetectionValidator class and configures it specifically for the OBB task.
+
+ Args:
+ dataloader (torch.utils.data.DataLoader, optional): DataLoader to be used for validation.
+ save_dir (str | Path, optional): Directory to save results.
+ args (dict, optional): Arguments containing validation parameters.
+ _callbacks (dict, optional): Dictionary of callback functions to be called during validation.
+ """
super().__init__(dataloader, save_dir, args, _callbacks)
self.args.task = "obb"
self.metrics = OBBMetrics()
def init_metrics(self, model: torch.nn.Module) -> None:
+ """Initialize evaluation metrics for YOLO obb validation.
+
+ Args:
+ model (torch.nn.Module): Model to validate.
+ """
super().init_metrics(model)
val = self.data.get(self.args.split, "") # validation path
self.is_dota = isinstance(val, str) and "DOTA" in val # check if dataset is DOTA format
self.confusion_matrix.task = "obb" # set confusion matrix task to 'obb'
def _process_batch(self, preds: dict[str, torch.Tensor], batch: dict[str, torch.Tensor]) -> dict[str, np.ndarray]:
+ """Compute the correct prediction matrix for a batch of detections and ground truth bounding boxes.
+
+ Args:
+ preds (dict[str, torch.Tensor]): Prediction dictionary containing 'cls' and 'bboxes' keys with detected
+ class labels and bounding boxes.
+ batch (dict[str, torch.Tensor]): Batch dictionary containing 'cls' and 'bboxes' keys with ground truth class
+ labels and bounding boxes.
+
+ Returns:
+ (dict[str, np.ndarray]): Dictionary containing 'tp' key with the correct prediction matrix as a numpy array
+ with shape (N, 10), which includes 10 IoU levels for each detection, indicating the accuracy of
+ predictions compared to the ground truth.
+
+ Examples:
+ >>> preds = {"cls": torch.randint(0, 5, (100,)), "bboxes": torch.rand(100, 5)}
+ >>> batch = {"cls": torch.randint(0, 5, (50,)), "bboxes": torch.rand(50, 5)}
+ >>> correct_matrix = validator._process_batch(preds, batch)
+ """
if batch["cls"].shape[0] == 0 or preds["cls"].shape[0] == 0:
return {"tp": np.zeros((preds["cls"].shape[0], self.niou), dtype=bool)}
iou = batch_probiou(batch["bboxes"], preds["bboxes"])
return {"tp": self.match_predictions(preds["cls"], batch["cls"], iou).cpu().numpy()}
def postprocess(self, preds: torch.Tensor) -> list[dict[str, torch.Tensor]]:
+ """Postprocess OBB predictions.
+
+ Args:
+ preds (torch.Tensor): Raw predictions from the model.
+
+ Returns:
+ (list[dict[str, torch.Tensor]]): Processed predictions with angle information concatenated to bboxes.
+ """
preds = super().postprocess(preds)
for pred in preds:
pred["bboxes"] = torch.cat([pred["bboxes"], pred.pop("extra")], dim=-1) # concatenate angle
return preds
def _prepare_batch(self, si: int, batch: dict[str, Any]) -> dict[str, Any]:
+ """Prepare batch data for OBB validation with proper scaling and formatting.
+
+ Args:
+ si (int): Sample index within the batch.
+ batch (dict[str, Any]): Dictionary containing batch data with keys:
+ - batch_idx: Tensor of batch indices
+ - cls: Tensor of class labels
+ - bboxes: Tensor of bounding boxes
+ - ori_shape: Original image shapes
+ - img: Batch of images
+ - ratio_pad: Ratio and padding information
+
+ Returns:
+ (dict[str, Any]): Prepared batch data with scaled bounding boxes and metadata.
+ """
idx = batch["batch_idx"] == si
cls = batch["cls"][idx].squeeze(-1)
bbox = batch["bboxes"][idx]
@@ -59,6 +142,19 @@ }
def plot_predictions(self, batch: dict[str, Any], preds: list[dict[str, torch.Tensor]], ni: int) -> None:
+ """Plot predicted bounding boxes on input images and save the result.
+
+ Args:
+ batch (dict[str, Any]): Batch data containing images, file paths, and other metadata.
+ preds (list[dict[str, torch.Tensor]]): List of prediction dictionaries for each image in the batch.
+ ni (int): Batch index used for naming the output file.
+
+ Examples:
+ >>> validator = OBBValidator()
+ >>> batch = {"img": images, "im_file": paths}
+ >>> preds = [{"bboxes": torch.rand(10, 5), "cls": torch.zeros(10), "conf": torch.rand(10)}]
+ >>> validator.plot_predictions(batch, preds, 0)
+ """
if not preds:
return
for i, pred in enumerate(preds):
@@ -75,6 +171,18 @@ )
def pred_to_json(self, predn: dict[str, torch.Tensor], pbatch: dict[str, Any]) -> None:
+ """Convert YOLO predictions to COCO JSON format with rotated bounding box information.
+
+ Args:
+ predn (dict[str, torch.Tensor]): Prediction dictionary containing 'bboxes', 'conf', and 'cls' keys with
+ bounding box coordinates, confidence scores, and class predictions.
+ pbatch (dict[str, Any]): Batch dictionary containing 'imgsz', 'ori_shape', 'ratio_pad', and 'im_file'.
+
+ Notes:
+ This method processes rotated bounding box predictions and converts them to both rbox format
+ (x, y, w, h, angle) and polygon format (x1, y1, x2, y2, x3, y3, x4, y4) before adding them
+ to the JSON dictionary.
+ """
path = Path(pbatch["im_file"])
stem = path.stem
image_id = int(stem) if stem.isnumeric() else stem
@@ -93,6 +201,24 @@ )
def save_one_txt(self, predn: dict[str, torch.Tensor], save_conf: bool, shape: tuple[int, int], file: Path) -> None:
+ """Save YOLO OBB detections to a text file in normalized coordinates.
+
+ Args:
+ predn (dict[str, torch.Tensor]): Prediction dictionary containing 'bboxes', 'conf', and 'cls' keys with
+ bounding box coordinates (including angle), confidence scores, and class predictions.
+ save_conf (bool): Whether to save confidence scores in the text file.
+ shape (tuple[int, int]): Original image shape in format (height, width).
+ file (Path): Output file path to save detections.
+
+ Examples:
+ >>> validator = OBBValidator()
+ >>> predn = {
+ ... "bboxes": torch.tensor([[100, 100, 50, 30, 45]]),
+ ... "conf": torch.tensor([0.9]),
+ ... "cls": torch.tensor([0]),
+ ... }
+ >>> validator.save_one_txt(predn, True, (640, 480), Path("detection.txt"))
+ """
import numpy as np
from ultralytics.engine.results import Results
@@ -105,6 +231,7 @@ ).save_txt(file, save_conf=save_conf)
def scale_preds(self, predn: dict[str, torch.Tensor], pbatch: dict[str, Any]) -> dict[str, torch.Tensor]:
+ """Scales predictions to the original image size."""
return {
**predn,
"bboxes": ops.scale_boxes(
@@ -113,6 +240,14 @@ }
def eval_json(self, stats: dict[str, Any]) -> dict[str, Any]:
+ """Evaluate YOLO output in JSON format and save predictions in DOTA format.
+
+ Args:
+ stats (dict[str, Any]): Performance statistics dictionary.
+
+ Returns:
+ (dict[str, Any]): Updated performance statistics.
+ """
if self.args.save_json and self.is_dota and len(self.jdict):
import json
import re
@@ -167,4 +302,4 @@ with open(f"{pred_merged_txt / f'Task1_{classname}'}.txt", "a", encoding="utf-8") as f:
f.writelines(f"{image_id} {score} {p[0]} {p[1]} {p[2]} {p[3]} {p[4]} {p[5]} {p[6]} {p[7]}\n")
- return stats
+ return stats
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/yolo/obb/val.py |
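A minimal standalone sketch of the (N, 10) true-positive matrix described in the _process_batch docstring above, using plain axis-aligned IoU values and a greedy matcher as illustrative assumptions (tp_matrix is not the ultralytics implementation, which uses rotated batch_probiou):
import numpy as np

def tp_matrix(iou, pred_cls, gt_cls, thresholds=np.linspace(0.5, 0.95, 10)):
    # iou: (num_gt, num_pred) overlaps; zero out pairs whose classes disagree
    same_cls = gt_cls[:, None] == pred_cls[None, :]
    iou = np.where(same_cls, iou, 0.0)
    tp = np.zeros((iou.shape[1], len(thresholds)), dtype=bool)  # (num_pred, 10)
    for t, thr in enumerate(thresholds):
        used = set()
        for p in np.argsort(-iou.max(axis=0)):  # strongest predictions claim GTs first
            g = int(iou[:, p].argmax())
            if iou[g, p] >= thr and g not in used:
                tp[p, t] = True
                used.add(g)
    return tp

rng = np.random.default_rng(0)
print(tp_matrix(rng.random((5, 8)), np.zeros(8, int), np.zeros(5, int)).shape)  # (8, 10)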
Write docstrings describing functionality | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from ultralytics.models.yolo.segment import SegmentationValidator
class FastSAMValidator(SegmentationValidator):
def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks: dict | None = None):
super().__init__(dataloader, save_dir, args, _callbacks)
self.args.task = "segment"
self.args.plots = False # disable ConfusionMatrix and other plots to avoid errors | --- +++ @@ -6,8 +6,35 @@
class FastSAMValidator(SegmentationValidator):
+ """Custom validation class for FastSAM (Segment Anything Model) segmentation in the Ultralytics YOLO framework.
+
+ Extends the SegmentationValidator class, customizing the validation process specifically for FastSAM. This class
+ sets the task to 'segment' and uses the SegmentMetrics for evaluation. Additionally, plotting features are disabled
+ to avoid errors during validation.
+
+ Attributes:
+ dataloader (torch.utils.data.DataLoader): The data loader object used for validation.
+ save_dir (Path): The directory where validation results will be saved.
+ args (SimpleNamespace): Additional arguments for customization of the validation process.
+ _callbacks (dict): Dictionary of callback functions to be invoked during validation.
+ metrics (SegmentMetrics): Segmentation metrics calculator for evaluation.
+
+ Methods:
+ __init__: Initialize the FastSAMValidator with custom settings for FastSAM.
+ """
def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks: dict | None = None):
+ """Initialize the FastSAMValidator class, setting the task to 'segment' and metrics to SegmentMetrics.
+
+ Args:
+ dataloader (torch.utils.data.DataLoader, optional): DataLoader to be used for validation.
+ save_dir (Path, optional): Directory to save results.
+ args (SimpleNamespace, optional): Configuration for the validator.
+ _callbacks (dict, optional): Dictionary of callback functions to be invoked during validation.
+
+ Notes:
+ Plots for ConfusionMatrix and other related metrics are disabled in this class to avoid errors.
+ """
super().__init__(dataloader, save_dir, args, _callbacks)
self.args.task = "segment"
- self.args.plots = False # disable ConfusionMatrix and other plots to avoid errors
+ self.args.plots = False # disable ConfusionMatrix and other plots to avoid errors
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/fastsam/val.py |
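The FastSAMValidator response above only adjusts configuration in __init__. A minimal, framework-free sketch of that override pattern follows; BaseValidator and SegmentOnlyValidator are hypothetical stand-ins, not ultralytics classes:
from types import SimpleNamespace

class BaseValidator:
    def __init__(self, args=None):
        self.args = args or SimpleNamespace(task="detect", plots=True)

class SegmentOnlyValidator(BaseValidator):
    def __init__(self, args=None):
        super().__init__(args)
        self.args.task = "segment"  # force the segmentation task, as FastSAMValidator does
        self.args.plots = False     # disable plotting to avoid confusion-matrix errors

v = SegmentOnlyValidator()
print(v.args.task, v.args.plots)  # segment False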
Create docstrings for each class method | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
# Based on https://github.com/IDEA-Research/GroundingDINO
from __future__ import annotations
import torch
from torch import nn
from ultralytics.nn.modules.utils import _get_clones
from .model_misc import get_valid_ratio
class TransformerEncoderLayer(nn.Module):
def __init__(
self,
d_model: int,
dim_feedforward: int,
dropout: float,
pos_enc_at_attn: bool,
pos_enc_at_cross_attn_keys: bool,
pos_enc_at_cross_attn_queries: bool,
pre_norm: bool,
self_attention: nn.Module = None,
cross_attention: nn.Module = None,
):
super().__init__()
self.d_model = d_model
self.dim_feedforward = dim_feedforward
self.dropout_value = dropout
self.self_attn = self_attention or nn.MultiheadAttention(num_heads=8, dropout=0.1, embed_dim=256)
self.cross_attn_image = cross_attention or nn.MultiheadAttention(num_heads=8, dropout=0.1, embed_dim=256)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = nn.ReLU()
self.pre_norm = pre_norm
self.pos_enc_at_attn = pos_enc_at_attn
self.pos_enc_at_cross_attn_queries = pos_enc_at_cross_attn_queries
self.pos_enc_at_cross_attn_keys = pos_enc_at_cross_attn_keys
self.layer_idx = None
def forward_post(
self,
tgt: torch.Tensor,
memory: torch.Tensor,
tgt_mask: torch.Tensor = None,
memory_mask: torch.Tensor = None,
tgt_key_padding_mask: torch.Tensor = None,
memory_key_padding_mask: torch.Tensor = None,
pos: torch.Tensor = None,
query_pos: torch.Tensor = None,
**kwargs,
) -> torch.Tensor:
q = k = tgt + query_pos if self.pos_enc_at_attn else tgt
# Self attention
tgt2 = self.self_attn(
q, k, value=tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask, need_weights=False
)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
# Cross attention to image
tgt2 = self.cross_attn_image(
query=tgt + query_pos if self.pos_enc_at_cross_attn_queries else tgt,
key=memory + pos if self.pos_enc_at_cross_attn_keys else memory,
value=memory,
attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask,
need_weights=False,
)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
# FFN
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
def forward_pre(
self,
tgt: torch.Tensor,
memory: torch.Tensor,
dac: bool = False,
tgt_mask: torch.Tensor = None,
memory_mask: torch.Tensor = None,
tgt_key_padding_mask: torch.Tensor = None,
memory_key_padding_mask: torch.Tensor = None,
pos: torch.Tensor = None,
query_pos: torch.Tensor = None,
) -> torch.Tensor:
if dac:
# we only apply self attention to the first half of the queries
assert tgt.shape[0] % 2 == 0
other_tgt = tgt[tgt.shape[0] // 2 :]
tgt = tgt[: tgt.shape[0] // 2]
tgt2 = self.norm1(tgt).contiguous()
q = k = tgt2 + query_pos if self.pos_enc_at_attn else tgt2
tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
if dac:
# Recombine
tgt = torch.cat((tgt, other_tgt), dim=0)
tgt2 = self.norm2(tgt)
memory = memory.to(tgt2.dtype).contiguous()
tgt2 = self.cross_attn_image(
query=tgt2 + query_pos if self.pos_enc_at_cross_attn_queries else tgt2,
key=memory + pos if self.pos_enc_at_cross_attn_keys else memory,
value=memory,
attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask,
)[0]
tgt = tgt + self.dropout2(tgt2)
tgt2 = self.norm3(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
tgt = tgt + self.dropout3(tgt2)
return tgt
def forward(
self,
tgt: torch.Tensor,
memory: torch.Tensor,
dac: bool = False,
tgt_mask: torch.Tensor = None,
memory_mask: torch.Tensor = None,
tgt_key_padding_mask: torch.Tensor = None,
memory_key_padding_mask: torch.Tensor = None,
pos: torch.Tensor = None,
query_pos: torch.Tensor = None,
) -> torch.Tensor:
fwd_fn = self.forward_pre if self.pre_norm else self.forward_post
return fwd_fn(
tgt,
memory,
dac=dac,
tgt_mask=tgt_mask,
memory_mask=memory_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask,
pos=pos,
query_pos=query_pos,
# attn_bias=attn_bias,
# **kwds,
)
class TransformerEncoder(nn.Module):
def __init__(
self,
layer: nn.Module,
num_layers: int,
d_model: int,
num_feature_levels: int,
frozen: bool = False,
use_act_checkpoint: bool = False,
):
super().__init__()
self.layers = _get_clones(layer, num_layers)
self.num_layers = num_layers
self.num_feature_levels = num_feature_levels
self.level_embed = None
if num_feature_levels > 1:
self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model))
if frozen:
for p in self.parameters():
p.requires_grad_(False)
self.use_act_checkpoint = use_act_checkpoint
# assign layer index to each layer so that some layers can decide what to do
# based on which layer index they are (e.g. cross attention to memory bank only
# in selected layers)
for layer_idx, layer in enumerate(self.layers):
layer.layer_idx = layer_idx
def _prepare_multilevel_features(self, srcs, masks, pos_embeds):
assert len(srcs) == self.num_feature_levels, "mismatch between expected and received # of feature levels"
src_flatten = []
mask_flatten = []
lvl_pos_embed_flatten = []
spatial_shapes = []
has_mask = masks is not None and masks[0] is not None
for lvl, (src, mask, pos_embed) in enumerate(zip(srcs, masks, pos_embeds)):
_, _, h, w = src.shape
spatial_shape = (h, w)
spatial_shapes.append(spatial_shape)
src = src.flatten(2).transpose(1, 2) # bs, hw, c
if has_mask:
mask = mask.flatten(1)
pos_embed = pos_embed.flatten(2).transpose(1, 2) # bs, hw, c
if self.level_embed is not None:
lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, 1, -1)
else:
lvl_pos_embed = pos_embed
lvl_pos_embed_flatten.append(lvl_pos_embed)
src_flatten.append(src)
if has_mask:
mask_flatten.append(mask)
src_flatten = torch.cat(src_flatten, 1) # bs, \sum{hxw}, c
mask_flatten = torch.cat(mask_flatten, 1) if has_mask else None # bs, \sum{hxw}
lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) # bs, \sum{hxw}, c
spatial_shapes = torch.tensor(spatial_shapes, dtype=torch.long, device=src_flatten.device)
level_start_index = torch.cat(
(
spatial_shapes.new_zeros((1,)),
spatial_shapes.prod(1).cumsum(0)[:-1],
)
)
if has_mask:
valid_ratios = torch.stack([get_valid_ratio(m) for m in masks], 1)
else:
valid_ratios = torch.ones(
(src_flatten.shape[0], self.num_feature_levels, 2),
device=src_flatten.device,
dtype=src_flatten.dtype,
)
return (
src_flatten,
mask_flatten,
lvl_pos_embed_flatten,
level_start_index,
valid_ratios,
spatial_shapes,
)
def forward(
self,
src: list[torch.Tensor],
src_key_padding_masks: list[torch.Tensor] | None = None,
pos: list[torch.Tensor] | None = None,
prompt: torch.Tensor = None,
prompt_key_padding_mask: torch.Tensor = None,
encoder_extra_kwargs: dict | None = None,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
assert len(src) == self.num_feature_levels, "must be equal to num_feature_levels"
if src_key_padding_masks is not None:
assert len(src_key_padding_masks) == self.num_feature_levels
if pos is not None:
assert len(pos) == self.num_feature_levels
# Flatten multilevel feats and add level pos embeds
(
src_flatten,
key_padding_masks_flatten,
lvl_pos_embed_flatten,
level_start_index,
valid_ratios,
spatial_shapes,
) = self._prepare_multilevel_features(src, src_key_padding_masks, pos)
output = src_flatten
for layer in self.layers:
layer_kwargs = {}
assert isinstance(layer, TransformerEncoderLayer)
layer_kwargs["memory"] = prompt
layer_kwargs["memory_key_padding_mask"] = prompt_key_padding_mask
layer_kwargs["query_pos"] = lvl_pos_embed_flatten
layer_kwargs["tgt"] = output
layer_kwargs["tgt_key_padding_mask"] = key_padding_masks_flatten
if self.training:
assert self.use_act_checkpoint, "activation ckpt not enabled in encoder"
if encoder_extra_kwargs is not None:
layer_kwargs.update(encoder_extra_kwargs)
output = layer(**layer_kwargs)
# return as seq first
return (
output.transpose(0, 1),
(key_padding_masks_flatten.transpose(0, 1) if key_padding_masks_flatten is not None else None),
lvl_pos_embed_flatten.transpose(0, 1),
level_start_index,
spatial_shapes,
valid_ratios,
)
class TransformerEncoderFusion(TransformerEncoder):
def __init__(
self,
layer: nn.Module,
num_layers: int,
d_model: int,
num_feature_levels: int,
add_pooled_text_to_img_feat: bool = True,
pool_text_with_mask: bool = False,
compile_mode: str | None = None,
**kwargs,
):
super().__init__(
layer,
num_layers,
d_model,
num_feature_levels,
**kwargs,
)
self.add_pooled_text_to_img_feat = add_pooled_text_to_img_feat
if self.add_pooled_text_to_img_feat:
self.text_pooling_proj = nn.Linear(d_model, d_model)
self.pool_text_with_mask = pool_text_with_mask
if compile_mode is not None:
self.forward = torch.compile(self.forward, mode=compile_mode, fullgraph=True)
def forward(
self,
src: list[torch.Tensor],
prompt: torch.Tensor,
src_key_padding_mask: list[torch.Tensor] | None = None,
src_pos: list[torch.Tensor] | None = None,
prompt_key_padding_mask: torch.Tensor = None,
feat_sizes: list[int] | None = None,
encoder_extra_kwargs: dict | None = None,
):
# Restore spatial shapes of vision
bs = src[0].shape[1] # seq first
if feat_sizes is not None:
assert len(feat_sizes) == len(src)
if src_key_padding_mask is None:
src_key_padding_mask = [None] * len(src)
for i, (h, w) in enumerate(feat_sizes):
src[i] = src[i].reshape(h, w, bs, -1).permute(2, 3, 0, 1)
src_pos[i] = src_pos[i].reshape(h, w, bs, -1).permute(2, 3, 0, 1)
src_key_padding_mask[i] = (
src_key_padding_mask[i].reshape(h, w, bs).permute(2, 0, 1)
if src_key_padding_mask[i] is not None
else None
)
else:
assert all(x.dim() == 4 for x in src), "expected list of (bs, c, h, w) tensors"
if self.add_pooled_text_to_img_feat:
# Fusion: Add mean pooled text to image features
pooled_text = pool_text_feat(prompt, prompt_key_padding_mask, self.pool_text_with_mask)
pooled_text = self.text_pooling_proj(pooled_text)[..., None, None] # prompt is seq first
src = [x.add_(pooled_text) for x in src]
(
out,
key_padding_masks_flatten,
lvl_pos_embed_flatten,
level_start_index,
spatial_shapes,
valid_ratios,
) = super().forward(
src,
src_key_padding_masks=src_key_padding_mask,
pos=src_pos,
prompt=prompt.transpose(0, 1),
prompt_key_padding_mask=prompt_key_padding_mask,
encoder_extra_kwargs=encoder_extra_kwargs,
)
return {
"memory": out,
"padding_mask": key_padding_masks_flatten,
"pos_embed": lvl_pos_embed_flatten,
"memory_text": prompt,
"level_start_index": level_start_index,
"spatial_shapes": spatial_shapes,
"valid_ratios": valid_ratios,
}
def pool_text_feat(prompt, prompt_mask, pool_with_mask):
# prompt has shape (seq, bs, dim)
if not pool_with_mask:
return prompt.mean(dim=0)
# prompt_mask has shape (bs, seq), where False is valid and True is padding
assert prompt_mask.dim() == 2
# is_valid has shape (seq, bs, 1), where 1 is valid and 0 is padding
is_valid = (~prompt_mask).float().permute(1, 0)[..., None]
# num_valid has shape (bs, 1)
num_valid = torch.clamp(torch.sum(is_valid, dim=0), min=1.0)
# mean pool over all the valid tokens
pooled_text = (prompt * is_valid).sum(dim=0) / num_valid
return pooled_text | --- +++ @@ -13,6 +13,15 @@
class TransformerEncoderLayer(nn.Module):
+ """Transformer encoder layer that performs self-attention followed by cross-attention.
+
+ This layer was previously called TransformerDecoderLayer but was renamed to better reflect its role in the
+ architecture. It processes input sequences through self-attention and then cross-attention with another input
+ (typically image features).
+
+ The layer supports both pre-norm and post-norm configurations, as well as positional encoding at different stages of
+ the attention mechanism.
+ """
def __init__(
self,
@@ -26,6 +35,19 @@ self_attention: nn.Module = None,
cross_attention: nn.Module = None,
):
+ """Initialize a transformer encoder layer.
+
+ Args:
+ d_model: Model dimension/hidden size
+ dim_feedforward: Dimension of the feedforward network
+ dropout: Dropout probability
+ pos_enc_at_attn: Whether to add positional encodings at self-attention
+ pos_enc_at_cross_attn_keys: Whether to add positional encodings to keys in cross-attention
+ pos_enc_at_cross_attn_queries: Whether to add positional encodings to queries in cross-attention
+ pre_norm: Whether to use pre-norm (True) or post-norm (False) architecture
+ self_attention: Self-attention module
+ cross_attention: Cross-attention module for attending to image features
+ """
super().__init__()
self.d_model = d_model
self.dim_feedforward = dim_feedforward
@@ -66,6 +88,24 @@ query_pos: torch.Tensor = None,
**kwargs,
) -> torch.Tensor:
+ """Forward pass for post-norm architecture.
+
+ In post-norm architecture, normalization is applied after attention and feedforward operations.
+
+ Args:
+ tgt (torch.Tensor): Input tensor to be processed.
+ memory (torch.Tensor): Memory tensor for cross-attention.
+ tgt_mask (torch.Tensor): Mask for self-attention.
+ memory_mask (torch.Tensor): Mask for cross-attention.
+ tgt_key_padding_mask (torch.Tensor): Key padding mask for self-attention.
+ memory_key_padding_mask (torch.Tensor): Key padding mask for cross-attention.
+ pos (torch.Tensor): Positional encoding for memory.
+ query_pos (torch.Tensor): Positional encoding for query.
+ **kwargs (Any): Additional keyword arguments.
+
+ Returns:
+ Processed tensor
+ """
q = k = tgt + query_pos if self.pos_enc_at_attn else tgt
# Self attention
@@ -105,6 +145,24 @@ pos: torch.Tensor = None,
query_pos: torch.Tensor = None,
) -> torch.Tensor:
+ """Forward pass for pre-norm architecture.
+
+ In pre-norm architecture, normalization is applied before attention and feedforward operations.
+
+ Args:
+ tgt: Input tensor to be processed
+ memory: Memory tensor for cross-attention
+ dac: Whether to use Divide-and-Conquer attention
+ tgt_mask: Mask for self-attention
+ memory_mask: Mask for cross-attention
+ tgt_key_padding_mask: Key padding mask for self-attention
+ memory_key_padding_mask: Key padding mask for cross-attention
+ pos: Positional encoding for memory
+ query_pos: Positional encoding for query
+
+ Returns:
+ Processed tensor
+ """
if dac:
# we only apply self attention to the first half of the queries
assert tgt.shape[0] % 2 == 0
@@ -144,6 +202,22 @@ pos: torch.Tensor = None,
query_pos: torch.Tensor = None,
) -> torch.Tensor:
+ """Forward pass for the transformer encoder layer.
+
+ Args:
+ tgt: Input tensor to be processed
+ memory: Memory tensor (e.g., image features) for cross-attention
+ dac: Whether to use Divide-and-Conquer attention (only apply self-attention to first half)
+ tgt_mask: Mask for self-attention
+ memory_mask: Mask for cross-attention
+ tgt_key_padding_mask: Key padding mask for self-attention
+ memory_key_padding_mask: Key padding mask for cross-attention
+ pos: Positional encoding for memory
+ query_pos: Positional encoding for query
+
+ Returns:
+ Processed tensor after self-attention, cross-attention, and feedforward network
+ """
fwd_fn = self.forward_pre if self.pre_norm else self.forward_post
return fwd_fn(
tgt,
@@ -161,6 +235,20 @@
class TransformerEncoder(nn.Module):
+ """Transformer encoder that processes multi-level features.
+
+ This encoder takes multi-level features (e.g., from a backbone network) and processes them through a stack of
+ transformer encoder layers. It supports features from multiple levels (e.g., different resolutions) and can apply
+ activation checkpointing for memory efficiency during training.
+
+ Args:
+ layer: The encoder layer to be stacked multiple times
+ num_layers: Number of encoder layers to stack
+ d_model: Model dimension/hidden size
+ num_feature_levels: Number of feature levels to process
+ frozen: Whether to freeze the parameters of this module
+ use_act_checkpoint: Whether to use activation checkpointing during training
+ """
def __init__(
self,
@@ -171,6 +259,7 @@ frozen: bool = False,
use_act_checkpoint: bool = False,
):
+ """Initialize the transformer encoder."""
super().__init__()
self.layers = _get_clones(layer, num_layers)
self.num_layers = num_layers
@@ -193,6 +282,7 @@ layer.layer_idx = layer_idx
def _prepare_multilevel_features(self, srcs, masks, pos_embeds):
+ """Prepare multi-level features for transformer encoder."""
assert len(srcs) == self.num_feature_levels, "mismatch between expected and received # of feature levels"
src_flatten = []
@@ -254,6 +344,27 @@ prompt_key_padding_mask: torch.Tensor = None,
encoder_extra_kwargs: dict | None = None,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
+ """Process multi-level features through the transformer encoder.
+
+ Args:
+ src: List of multi-level features, each with shape (batch_size, channels, height, width)
+ src_key_padding_masks: List of padding masks for each feature level, each with shape (batch_size, height,
+ width)
+ pos: List of positional embeddings for each feature level, each with shape (batch_size, channels, height,
+ width)
+ prompt: Optional text/prompt features to attend to, with shape (seq_len, batch_size, d_model)
+ prompt_key_padding_mask: Optional padding mask for prompt, with shape (batch_size, seq_len)
+ encoder_extra_kwargs: Optional additional arguments to pass to each encoder layer
+
+ Returns:
+ A tuple containing:
+ - output: Processed features with shape (seq_len, batch_size, d_model)
+ - key_padding_masks_flatten: Flattened padding masks
+ - lvl_pos_embed_flatten: Flattened positional embeddings
+ - level_start_index: Starting indices for each feature level
+ - spatial_shapes: Spatial dimensions of each feature level
+ - valid_ratios: Valid ratios for each feature level
+ """
assert len(src) == self.num_feature_levels, "must be equal to num_feature_levels"
if src_key_padding_masks is not None:
assert len(src_key_padding_masks) == self.num_feature_levels
@@ -297,6 +408,21 @@
class TransformerEncoderFusion(TransformerEncoder):
+ """Transformer encoder that fuses text and image features.
+
+ This encoder extends TransformerEncoder to handle both text and image features, with the ability to add pooled text
+ features to image features for better cross-modal fusion. It supports torch.compile for performance optimization.
+
+ Args:
+ layer (nn.Module): The encoder layer to be stacked multiple times.
+ num_layers (int): Number of encoder layers to stack.
+ d_model (int): Model dimension/hidden size.
+ num_feature_levels (int): Number of feature levels to process.
+ add_pooled_text_to_img_feat (bool): Whether to add pooled text features to image features.
+ pool_text_with_mask (bool): Whether to use the mask when pooling text features.
+ compile_mode (str | None): Mode for torch.compile, or None to disable compilation.
+ **kwargs (Any): Additional arguments to pass to the parent class.
+ """
def __init__(
self,
@@ -309,6 +435,7 @@ compile_mode: str | None = None,
**kwargs,
):
+ """Initialize the transformer encoder with text-image fusion."""
super().__init__(
layer,
num_layers,
@@ -333,6 +460,7 @@ feat_sizes: list[int] | None = None,
encoder_extra_kwargs: dict | None = None,
):
+ """Forward pass for the transformer encoder with text-image fusion."""
# Restore spatial shapes of vision
bs = src[0].shape[1] # seq first
if feat_sizes is not None:
@@ -384,6 +512,7 @@
def pool_text_feat(prompt, prompt_mask, pool_with_mask):
+ """Mean-pool the prompt embeddings over the valid tokens only."""
# prompt has shape (seq, bs, dim)
if not pool_with_mask:
return prompt.mean(dim=0)
@@ -397,4 +526,4 @@
# mean pool over all the valid tokens
pooled_text = (prompt * is_valid).sum(dim=0) / num_valid
- return pooled_text
+ return pooled_text
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/sam/sam3/encoder.py |
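The encoder docstrings above describe flattening several (bs, c, h, w) feature levels into one token sequence together with per-level start indices. A standalone sketch of that reshaping, with illustrative shapes only:
import torch

feats = [torch.rand(2, 256, 32, 32), torch.rand(2, 256, 16, 16), torch.rand(2, 256, 8, 8)]
spatial_shapes = torch.tensor([f.shape[-2:] for f in feats])             # (levels, 2)
flat = torch.cat([f.flatten(2).transpose(1, 2) for f in feats], dim=1)   # (bs, sum(h*w), c)
level_start_index = torch.cat(
    (spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1])
)
print(flat.shape, level_start_index.tolist())  # torch.Size([2, 1344, 256]) [0, 1024, 1280]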
Replace inline comments with docstrings | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
from __future__ import annotations
from copy import deepcopy
import torch
from ultralytics.nn.modules.utils import inverse_sigmoid
from ultralytics.utils.ops import xywh2xyxy
from ..modules.sam import SAM2Model
from .geometry_encoders import Prompt
from .vl_combiner import SAM3VLBackbone
def _update_out(out, out_name, out_value, auxiliary=True, update_aux=True):
out[out_name] = out_value[-1] if auxiliary else out_value
if auxiliary and update_aux:
if "aux_outputs" not in out:
out["aux_outputs"] = [{} for _ in range(len(out_value) - 1)]
assert len(out["aux_outputs"]) == len(out_value) - 1
for aux_output, aux_value in zip(out["aux_outputs"], out_value[:-1]):
aux_output[out_name] = aux_value
class SAM3SemanticModel(torch.nn.Module):
def __init__(
self,
backbone: SAM3VLBackbone,
transformer,
input_geometry_encoder,
segmentation_head=None,
num_feature_levels=1,
o2m_mask_predict=True,
dot_prod_scoring=None,
use_instance_query: bool = True,
multimask_output: bool = True,
use_act_checkpoint_seg_head: bool = True,
matcher=None,
use_dot_prod_scoring=True,
supervise_joint_box_scores: bool = False, # only relevant if using presence token/score
detach_presence_in_joint_score: bool = False, # only relevant if using presence token/score
separate_scorer_for_instance: bool = False,
num_interactive_steps_val: int = 0,
):
super().__init__()
self.backbone = backbone
self.geometry_encoder = input_geometry_encoder
self.transformer = transformer
self.hidden_dim = transformer.d_model
self.num_feature_levels = num_feature_levels
self.segmentation_head = segmentation_head
self.o2m_mask_predict = o2m_mask_predict
self.dot_prod_scoring = dot_prod_scoring
self.use_act_checkpoint_seg_head = use_act_checkpoint_seg_head
self.matcher = matcher
self.num_interactive_steps_val = num_interactive_steps_val
self.use_dot_prod_scoring = use_dot_prod_scoring
if self.use_dot_prod_scoring:
assert dot_prod_scoring is not None
self.dot_prod_scoring = dot_prod_scoring
self.instance_dot_prod_scoring = None
if separate_scorer_for_instance:
self.instance_dot_prod_scoring = deepcopy(dot_prod_scoring)
else:
self.class_embed = torch.nn.Linear(self.hidden_dim, 1)
self.instance_class_embed = None
if separate_scorer_for_instance:
self.instance_class_embed = deepcopy(self.class_embed)
self.supervise_joint_box_scores = supervise_joint_box_scores
self.detach_presence_in_joint_score = detach_presence_in_joint_score
# verify the number of queries for O2O and O2M
num_o2o_static = self.transformer.decoder.num_queries
num_o2m_static = self.transformer.decoder.num_o2m_queries
assert num_o2m_static == (num_o2o_static if self.transformer.decoder.dac else 0)
self.dac = self.transformer.decoder.dac
self.use_instance_query = use_instance_query
self.multimask_output = multimask_output
self.text_embeddings = {}
self.names = []
def _encode_prompt(
self,
img_feats,
img_pos_embeds,
vis_feat_sizes,
geometric_prompt,
visual_prompt_embed=None,
visual_prompt_mask=None,
prev_mask_pred=None,
):
if prev_mask_pred is not None:
img_feats = [img_feats[-1] + prev_mask_pred]
# Encode geometry
geo_feats, geo_masks = self.geometry_encoder(
geo_prompt=geometric_prompt,
img_feats=img_feats,
img_sizes=vis_feat_sizes,
img_pos_embeds=img_pos_embeds,
)
if visual_prompt_embed is None:
visual_prompt_embed = torch.zeros((0, *geo_feats.shape[1:]), device=geo_feats.device)
visual_prompt_mask = torch.zeros(
(*geo_masks.shape[:-1], 0),
device=geo_masks.device,
dtype=geo_masks.dtype,
)
prompt = torch.cat([geo_feats, visual_prompt_embed], dim=0)
prompt_mask = torch.cat([geo_masks, visual_prompt_mask], dim=1)
return prompt, prompt_mask
def _run_encoder(
self,
img_feats,
img_pos_embeds,
vis_feat_sizes,
prompt,
prompt_mask,
encoder_extra_kwargs: dict | None = None,
):
# Run the encoder
# make a copy of the image feature lists since the encoder may modify these lists in-place
memory = self.transformer.encoder(
src=img_feats.copy(),
src_key_padding_mask=None,
src_pos=img_pos_embeds.copy(),
prompt=prompt,
prompt_key_padding_mask=prompt_mask,
feat_sizes=vis_feat_sizes,
encoder_extra_kwargs=encoder_extra_kwargs,
)
encoder_out = {
# encoded image features
"encoder_hidden_states": memory["memory"],
"pos_embed": memory["pos_embed"],
"padding_mask": memory["padding_mask"],
"spatial_shapes": memory["spatial_shapes"],
"valid_ratios": memory["valid_ratios"],
"vis_feat_sizes": vis_feat_sizes,
# encoded text features (or other prompts)
"prompt_before_enc": prompt,
"prompt_after_enc": memory.get("memory_text", prompt),
"prompt_mask": prompt_mask,
}
return encoder_out
def _run_decoder(
self,
pos_embed,
memory,
src_mask,
out,
prompt,
prompt_mask,
encoder_out,
):
bs = memory.shape[1]
query_embed = self.transformer.decoder.query_embed.weight
tgt = query_embed.unsqueeze(1).repeat(1, bs, 1)
hs, reference_boxes, dec_presence_out, _ = self.transformer.decoder(
tgt=tgt,
memory=memory,
memory_key_padding_mask=src_mask,
pos=pos_embed,
reference_boxes=None,
spatial_shapes=encoder_out["spatial_shapes"],
valid_ratios=encoder_out["valid_ratios"],
tgt_mask=None,
memory_text=prompt,
text_attention_mask=prompt_mask,
apply_dac=False,
)
hs = hs.transpose(1, 2) # seq-first to batch-first
reference_boxes = reference_boxes.transpose(1, 2) # seq-first to batch-first
if dec_presence_out is not None:
# seq-first to batch-first
dec_presence_out = dec_presence_out.transpose(1, 2)
self._update_scores_and_boxes(
out,
hs,
reference_boxes,
prompt,
prompt_mask,
dec_presence_out=dec_presence_out,
)
return out, hs
def _update_scores_and_boxes(
self,
out,
hs,
reference_boxes,
prompt,
prompt_mask,
dec_presence_out=None,
is_instance_prompt=False,
):
num_o2o = hs.size(2)
# score prediction
if self.use_dot_prod_scoring:
dot_prod_scoring_head = self.dot_prod_scoring
if is_instance_prompt and self.instance_dot_prod_scoring is not None:
dot_prod_scoring_head = self.instance_dot_prod_scoring
outputs_class = dot_prod_scoring_head(hs, prompt, prompt_mask)
else:
class_embed_head = self.class_embed
if is_instance_prompt and self.instance_class_embed is not None:
class_embed_head = self.instance_class_embed
outputs_class = class_embed_head(hs)
# box prediction
box_head = self.transformer.decoder.bbox_embed
if is_instance_prompt and self.transformer.decoder.instance_bbox_embed is not None:
box_head = self.transformer.decoder.instance_bbox_embed
anchor_box_offsets = box_head(hs)
reference_boxes_inv_sig = inverse_sigmoid(reference_boxes)
outputs_coord = (reference_boxes_inv_sig + anchor_box_offsets).sigmoid()
outputs_boxes_xyxy = xywh2xyxy(outputs_coord)
if dec_presence_out is not None:
_update_out(out, "presence_logit_dec", dec_presence_out, update_aux=False)
if self.supervise_joint_box_scores:
assert dec_presence_out is not None
prob_dec_presence_out = dec_presence_out.clone().sigmoid()
if self.detach_presence_in_joint_score:
prob_dec_presence_out = prob_dec_presence_out.detach()
outputs_class = inverse_sigmoid(outputs_class.sigmoid() * prob_dec_presence_out.unsqueeze(2)).clamp(
min=-10.0, max=10.0
)
_update_out(out, "pred_logits", outputs_class[:, :, :num_o2o], update_aux=False)
_update_out(out, "pred_boxes", outputs_coord[:, :, :num_o2o], update_aux=False)
_update_out(out, "pred_boxes_xyxy", outputs_boxes_xyxy[:, :, :num_o2o], update_aux=False)
def _run_segmentation_heads(
self,
out,
backbone_out,
encoder_hidden_states,
prompt,
prompt_mask,
hs,
):
if self.segmentation_head is not None:
num_o2o = hs.size(2)
obj_queries = hs if self.o2m_mask_predict else hs[:, :, :num_o2o]
seg_head_outputs = self.segmentation_head(
backbone_feats=backbone_out["backbone_fpn"],
obj_queries=obj_queries,
encoder_hidden_states=encoder_hidden_states,
prompt=prompt,
prompt_mask=prompt_mask,
)
for k, v in seg_head_outputs.items():
if k in self.segmentation_head.instance_keys:
_update_out(out, k, v[:, :num_o2o], auxiliary=False)
else:
out[k] = v
else:
backbone_out.pop("backbone_fpn", None)
def forward_grounding(
self, backbone_out: dict[str, torch.Tensor], text_ids: torch.Tensor, geometric_prompt: Prompt = None
):
backbone_out, img_feats, img_pos_embeds, vis_feat_sizes = SAM2Model._prepare_backbone_features(
self, backbone_out, batch=len(text_ids)
)
backbone_out.update({k: v for k, v in self.text_embeddings.items()})
with torch.profiler.record_function("SAM3Image._encode_prompt"):
prompt, prompt_mask = self._encode_prompt(img_feats, img_pos_embeds, vis_feat_sizes, geometric_prompt)
# index text features (note that regardless of early or late fusion, the batch size of
# `txt_feats` is always the number of *prompts* in the encoder)
txt_feats = backbone_out["language_features"][:, text_ids]
txt_masks = backbone_out["language_mask"][text_ids]
# encode text
prompt = torch.cat([txt_feats, prompt], dim=0)
prompt_mask = torch.cat([txt_masks, prompt_mask], dim=1)
# Run the encoder
with torch.profiler.record_function("SAM3Image._run_encoder"):
encoder_out = self._run_encoder(img_feats, img_pos_embeds, vis_feat_sizes, prompt, prompt_mask)
out = {"backbone_out": backbone_out}
# Run the decoder
with torch.profiler.record_function("SAM3Image._run_decoder"):
out, hs = self._run_decoder(
memory=encoder_out["encoder_hidden_states"],
pos_embed=encoder_out["pos_embed"],
src_mask=encoder_out["padding_mask"],
out=out,
prompt=prompt,
prompt_mask=prompt_mask,
encoder_out=encoder_out,
)
# Run segmentation heads
with torch.profiler.record_function("SAM3Image._run_segmentation_heads"):
self._run_segmentation_heads(
out=out,
backbone_out=backbone_out,
encoder_hidden_states=encoder_out["encoder_hidden_states"],
prompt=prompt,
prompt_mask=prompt_mask,
hs=hs,
)
return out
def set_classes(self, text: list[str]):
self.text_embeddings = self.backbone.forward_text(text)
self.names = text
def set_imgsz(self, imgsz: tuple[int, int]):
self.backbone.set_imgsz(imgsz) | --- +++ @@ -17,6 +17,7 @@
def _update_out(out, out_name, out_value, auxiliary=True, update_aux=True):
+ """Helper function to update output dictionary with main and auxiliary outputs."""
out[out_name] = out_value[-1] if auxiliary else out_value
if auxiliary and update_aux:
if "aux_outputs" not in out:
@@ -27,6 +28,7 @@
class SAM3SemanticModel(torch.nn.Module):
+ """SAM3 model for semantic segmentation with vision-language backbone."""
def __init__(
self,
@@ -47,6 +49,7 @@ separate_scorer_for_instance: bool = False,
num_interactive_steps_val: int = 0,
):
+ """Initialize the SAM3SemanticModel."""
super().__init__()
self.backbone = backbone
self.geometry_encoder = input_geometry_encoder
@@ -101,6 +104,7 @@ visual_prompt_mask=None,
prev_mask_pred=None,
):
+ """Encode the geometric and visual prompts."""
if prev_mask_pred is not None:
img_feats = [img_feats[-1] + prev_mask_pred]
# Encode geometry
@@ -130,6 +134,7 @@ prompt_mask,
encoder_extra_kwargs: dict | None = None,
):
+ """Run the transformer encoder."""
# Run the encoder
# make a copy of the image feature lists since the encoder may modify these lists in-place
memory = self.transformer.encoder(
@@ -166,6 +171,7 @@ prompt_mask,
encoder_out,
):
+ """Run the transformer decoder."""
bs = memory.shape[1]
query_embed = self.transformer.decoder.query_embed.weight
tgt = query_embed.unsqueeze(1).repeat(1, bs, 1)
@@ -208,6 +214,7 @@ dec_presence_out=None,
is_instance_prompt=False,
):
+ """Update output dict with class scores and box predictions."""
num_o2o = hs.size(2)
# score prediction
if self.use_dot_prod_scoring:
@@ -256,6 +263,7 @@ prompt_mask,
hs,
):
+ """Run segmentation heads and get masks."""
if self.segmentation_head is not None:
num_o2o = hs.size(2)
obj_queries = hs if self.o2m_mask_predict else hs[:, :, :num_o2o]
@@ -277,6 +285,7 @@ def forward_grounding(
self, backbone_out: dict[str, torch.Tensor], text_ids: torch.Tensor, geometric_prompt: Prompt = None
):
+ """Forward pass for grounding (detection + segmentation) given input images and text."""
backbone_out, img_feats, img_pos_embeds, vis_feat_sizes = SAM2Model._prepare_backbone_features(
self, backbone_out, batch=len(text_ids)
)
@@ -321,8 +330,10 @@ return out
def set_classes(self, text: list[str]):
+ """Set the text embeddings for the given class names."""
self.text_embeddings = self.backbone.forward_text(text)
self.names = text
def set_imgsz(self, imgsz: tuple[int, int]):
- self.backbone.set_imgsz(imgsz)
+ """Set the image size for the model."""
+ self.backbone.set_imgsz(imgsz)
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/sam/sam3/sam3_image.py |
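The _update_out helper documented above splits per-decoder-layer outputs into a main prediction plus auxiliary (deep-supervision) entries. A toy, self-contained version of that bookkeeping; update_out here is a simplified stand-in, not the SAM3 code:
import torch

def update_out(out, name, per_layer_values):
    out[name] = per_layer_values[-1]                               # last layer is the main output
    out.setdefault("aux_outputs", [{} for _ in per_layer_values[:-1]])
    for aux, value in zip(out["aux_outputs"], per_layer_values[:-1]):
        aux[name] = value                                          # earlier layers become aux outputs

out = {}
update_out(out, "pred_logits", [torch.zeros(2, 10, 1) for _ in range(3)])
print(out["pred_logits"].shape, len(out["aux_outputs"]))  # torch.Size([2, 10, 1]) 2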
Generate docstrings with parameter types | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
from __future__ import annotations
import math
import numpy as np
import torch
from torch import Tensor, nn
class DotProductScoring(torch.nn.Module):
def __init__(
self,
d_model,
d_proj,
prompt_mlp=None,
clamp_logits=True,
clamp_max_val=12.0,
):
super().__init__()
self.d_proj = d_proj
assert isinstance(prompt_mlp, torch.nn.Module) or prompt_mlp is None
self.prompt_mlp = prompt_mlp # an optional MLP projection for prompt
self.prompt_proj = torch.nn.Linear(d_model, d_proj)
self.hs_proj = torch.nn.Linear(d_model, d_proj)
self.scale = float(1.0 / np.sqrt(d_proj))
self.clamp_logits = clamp_logits
if self.clamp_logits:
self.clamp_max_val = clamp_max_val
@staticmethod
def mean_pool_text(prompt, prompt_mask):
# is_valid has shape (seq, bs, 1), where 1 is valid and 0 is padding
is_valid = (~prompt_mask).to(prompt.dtype).permute(1, 0)[..., None]
# num_valid has shape (bs, 1)
num_valid = torch.clamp(torch.sum(is_valid, dim=0), min=1.0)
# mean pool over all the valid tokens -- pooled_prompt has shape (bs, proj_dim)
pooled_prompt = (prompt * is_valid).sum(dim=0) / num_valid
return pooled_prompt
def forward(self, hs, prompt, prompt_mask):
# hs has shape (num_layer, bs, num_query, d_model)
# prompt has shape (seq, bs, d_model)
# prompt_mask has shape (bs, seq), where 1 is valid and 0 is padding
assert hs.dim() == 4 and prompt.dim() == 3 and prompt_mask.dim() == 2
# apply MLP on prompt if specified
if self.prompt_mlp is not None:
prompt = self.prompt_mlp(prompt.to(hs.dtype))
# first, get the mean-pooled version of the prompt
pooled_prompt = self.mean_pool_text(prompt, prompt_mask)
# then, project pooled_prompt and hs to d_proj dimensions
proj_pooled_prompt = self.prompt_proj(pooled_prompt) # (bs, d_proj)
proj_hs = self.hs_proj(hs) # (num_layer, bs, num_query, d_proj)
# finally, get dot-product scores of shape (num_layer, bs, num_query, 1)
scores = torch.matmul(proj_hs, proj_pooled_prompt.unsqueeze(-1))
scores *= self.scale
# clamp scores to a max value to avoid numerical issues in loss or matcher
if self.clamp_logits:
scores.clamp_(min=-self.clamp_max_val, max=self.clamp_max_val)
return scores
class LayerScale(nn.Module):
def __init__(
self,
dim: int,
init_values: float | Tensor = 1e-5,
inplace: bool = False,
) -> None:
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x: Tensor) -> Tensor:
return x.mul_(self.gamma) if self.inplace else x * self.gamma
class TransformerWrapper(nn.Module):
def __init__(
self,
encoder,
decoder,
d_model: int,
two_stage_type="none", # ["none"] only for now
pos_enc_at_input_dec=True,
):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.num_queries = decoder.num_queries if decoder is not None else None
self.pos_enc_at_input_dec = pos_enc_at_input_dec
# for two stage
assert two_stage_type in ["none"], f"unknown param {two_stage_type} of two_stage_type"
self.two_stage_type = two_stage_type
self._reset_parameters()
self.d_model = d_model
def _reset_parameters(self):
for n, p in self.named_parameters():
if p.dim() > 1:
if "box_embed" not in n and "query_embed" not in n and "reference_points" not in n:
nn.init.xavier_uniform_(p)
def get_valid_ratio(mask):
_, H, W = mask.shape
valid_H = torch.sum(~mask[:, :, 0], 1)
valid_W = torch.sum(~mask[:, 0, :], 1)
valid_ratio_h = valid_H.float() / H
valid_ratio_w = valid_W.float() / W
valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1)
return valid_ratio
def gen_sineembed_for_position(pos_tensor: torch.Tensor, num_feats: int = 256):
assert num_feats % 2 == 0
num_feats = num_feats // 2
# n_query, bs, _ = pos_tensor.size()
# sineembed_tensor = torch.zeros(n_query, bs, 256)
scale = 2 * math.pi
dim_t = torch.arange(num_feats, dtype=pos_tensor.dtype, device=pos_tensor.device)
dim_t = 10000 ** (2 * (torch.div(dim_t, 2, rounding_mode="floor")) / num_feats)
x_embed = pos_tensor[:, :, 0] * scale
y_embed = pos_tensor[:, :, 1] * scale
pos_x = x_embed[:, :, None] / dim_t
pos_y = y_embed[:, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=3).flatten(2)
pos_y = torch.stack((pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), dim=3).flatten(2)
if pos_tensor.size(-1) == 2:
pos = torch.cat((pos_y, pos_x), dim=2)
elif pos_tensor.size(-1) == 4:
w_embed = pos_tensor[:, :, 2] * scale
pos_w = w_embed[:, :, None] / dim_t
pos_w = torch.stack((pos_w[:, :, 0::2].sin(), pos_w[:, :, 1::2].cos()), dim=3).flatten(2)
h_embed = pos_tensor[:, :, 3] * scale
pos_h = h_embed[:, :, None] / dim_t
pos_h = torch.stack((pos_h[:, :, 0::2].sin(), pos_h[:, :, 1::2].cos()), dim=3).flatten(2)
pos = torch.cat((pos_y, pos_x, pos_w, pos_h), dim=2)
else:
raise ValueError(f"Unknown pos_tensor shape(-1):{pos_tensor.size(-1)}")
return pos | --- +++ @@ -2,6 +2,7 @@
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
+"""Various utility models."""
from __future__ import annotations
@@ -13,6 +14,7 @@
class DotProductScoring(torch.nn.Module):
+ """A module that computes dot-product scores between query features and pooled prompt embeddings."""
def __init__(
self,
@@ -22,6 +24,7 @@ clamp_logits=True,
clamp_max_val=12.0,
):
+ """Initialize the DotProductScoring module."""
super().__init__()
self.d_proj = d_proj
assert isinstance(prompt_mlp, torch.nn.Module) or prompt_mlp is None
@@ -35,6 +38,7 @@
@staticmethod
def mean_pool_text(prompt, prompt_mask):
+ """Mean-pool the prompt embeddings over the valid tokens only."""
# is_valid has shape (seq, bs, 1), where 1 is valid and 0 is padding
is_valid = (~prompt_mask).to(prompt.dtype).permute(1, 0)[..., None]
# num_valid has shape (bs, 1)
@@ -44,6 +48,7 @@ return pooled_prompt
def forward(self, hs, prompt, prompt_mask):
+ """Compute dot-product scores between hs and prompt."""
# hs has shape (num_layer, bs, num_query, d_model)
# prompt has shape (seq, bs, d_model)
# prompt_mask has shape (bs, seq), where 1 is valid and 0 is padding
@@ -72,6 +77,7 @@
class LayerScale(nn.Module):
+ """LayerScale module for per-channel scaling of layer outputs."""
def __init__(
self,
@@ -79,15 +85,18 @@ init_values: float | Tensor = 1e-5,
inplace: bool = False,
) -> None:
+ """Initialize the LayerScale module."""
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x: Tensor) -> Tensor:
+ """Apply LayerScale to the input tensor."""
return x.mul_(self.gamma) if self.inplace else x * self.gamma
class TransformerWrapper(nn.Module):
+ """A wrapper for the transformer consisting of an encoder and a decoder."""
def __init__(
self,
@@ -97,6 +106,7 @@ two_stage_type="none", # ["none"] only for now
pos_enc_at_input_dec=True,
):
+ """Initialize the TransformerWrapper."""
super().__init__()
self.encoder = encoder
self.decoder = decoder
@@ -111,6 +121,7 @@ self.d_model = d_model
def _reset_parameters(self):
+ """Initialize the parameters of the model."""
for n, p in self.named_parameters():
if p.dim() > 1:
if "box_embed" not in n and "query_embed" not in n and "reference_points" not in n:
@@ -118,6 +129,7 @@
def get_valid_ratio(mask):
+ """Compute the valid ratio of height and width from the mask."""
_, H, W = mask.shape
valid_H = torch.sum(~mask[:, :, 0], 1)
valid_W = torch.sum(~mask[:, 0, :], 1)
@@ -128,6 +140,35 @@
def gen_sineembed_for_position(pos_tensor: torch.Tensor, num_feats: int = 256):
+ """Generate sinusoidal position embeddings for 2D or 4D coordinate tensors.
+
+ This function creates sinusoidal embeddings using sine and cosine functions at different frequencies, similar to the
+ positional encoding used in Transformer models. It supports both 2D position tensors (x, y) and 4D tensors (x, y, w,
+ h) for bounding box coordinates.
+
+ Args:
+ pos_tensor (torch.Tensor): Input position tensor of shape (n_query, bs, 2) for 2D coordinates or (n_query, bs,
+ 4) for 4D coordinates (bounding boxes).
+ num_feats (int): Number of feature dimensions for the output embedding. Must be even. Defaults to 256.
+
+ Returns:
+ (torch.Tensor): Sinusoidal position embeddings of shape (n_query, bs, num_feats) for 2D input or (n_query, bs,
+ num_feats * 2) for 4D input.
+
+ Raises:
+ AssertionError: If num_feats is not even.
+ ValueError: If pos_tensor.size(-1) is not 2 or 4.
+
+ Examples:
+ >>> pos_2d = torch.rand(100, 8, 2) # 100 queries, batch size 8, 2D coordinates
+ >>> embeddings_2d = gen_sineembed_for_position(pos_2d, num_feats=256)
+ >>> embeddings_2d.shape
+ torch.Size([100, 8, 256])
+ >>> pos_4d = torch.rand(50, 4, 4) # 50 queries, batch size 4, 4D coordinates
+ >>> embeddings_4d = gen_sineembed_for_position(pos_4d, num_feats=128)
+ >>> embeddings_4d.shape
+ torch.Size([50, 4, 256])
+ """
assert num_feats % 2 == 0
num_feats = num_feats // 2
# n_query, bs, _ = pos_tensor.size()
@@ -155,4 +196,4 @@ pos = torch.cat((pos_y, pos_x, pos_w, pos_h), dim=2)
else:
raise ValueError(f"Unknown pos_tensor shape(-1):{pos_tensor.size(-1)}")
- return pos
+ return pos
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/sam/sam3/model_misc.py |
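The gen_sineembed_for_position docstring above spells out the 2-D sinusoidal embedding case. A compact standalone sketch of just that case (sine_embed_2d is an illustrative name; the y-then-x output layout follows the function above):
import math
import torch

def sine_embed_2d(pos: torch.Tensor, num_feats: int = 256) -> torch.Tensor:
    half = num_feats // 2
    dim_t = torch.arange(half, dtype=pos.dtype, device=pos.device)
    dim_t = 10000 ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / half)

    def interleave(v):  # alternate sin on even and cos on odd frequency slots
        return torch.stack((v[..., 0::2].sin(), v[..., 1::2].cos()), dim=-1).flatten(-2)

    xy = pos * 2 * math.pi                                  # scale normalized coords to [0, 2*pi]
    x, y = xy[..., 0, None] / dim_t, xy[..., 1, None] / dim_t
    return torch.cat((interleave(y), interleave(x)), dim=-1)  # (n_query, bs, num_feats)

print(sine_embed_2d(torch.rand(100, 8, 2)).shape)  # torch.Size([100, 8, 256])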
Add inline docstrings for readability | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import itertools
from pathlib import Path
from typing import Any
import torch
from ultralytics.data import build_yolo_dataset
from ultralytics.models.yolo.detect import DetectionTrainer
from ultralytics.nn.tasks import WorldModel
from ultralytics.utils import DEFAULT_CFG, LOGGER, RANK
from ultralytics.utils.torch_utils import unwrap_model
def on_pretrain_routine_end(trainer) -> None:
if RANK in {-1, 0}:
# Set class names for evaluation
names = [name.split("/", 1)[0] for name in list(trainer.test_loader.dataset.data["names"].values())]
unwrap_model(trainer.ema.ema).set_classes(names, cache_clip_model=False)
class WorldTrainer(DetectionTrainer):
def __init__(self, cfg=DEFAULT_CFG, overrides: dict[str, Any] | None = None, _callbacks: dict | None = None):
if overrides is None:
overrides = {}
assert not overrides.get("compile"), f"Training with 'model={overrides['model']}' requires 'compile=False'"
super().__init__(cfg, overrides, _callbacks)
self.text_embeddings = None
def get_model(self, cfg=None, weights: str | None = None, verbose: bool = True) -> WorldModel:
# NOTE: This `nc` here is the max number of different text samples in one image, rather than the actual `nc`.
# NOTE: Following the official config, nc hard-coded to 80 for now.
model = WorldModel(
cfg["yaml_file"] if isinstance(cfg, dict) else cfg,
ch=self.data["channels"],
nc=min(self.data["nc"], 80),
verbose=verbose and RANK == -1,
)
if weights:
model.load(weights)
self.add_callback("on_pretrain_routine_end", on_pretrain_routine_end)
return model
def build_dataset(self, img_path: str, mode: str = "train", batch: int | None = None):
gs = max(int(unwrap_model(self.model).stride.max() if self.model else 0), 32)
dataset = build_yolo_dataset(
self.args, img_path, batch, self.data, mode=mode, rect=mode == "val", stride=gs, multi_modal=mode == "train"
)
if mode == "train":
self.set_text_embeddings([dataset], batch) # cache text embeddings to accelerate training
return dataset
def set_text_embeddings(self, datasets: list[Any], batch: int | None) -> None:
text_embeddings = {}
for dataset in datasets:
if not hasattr(dataset, "category_names"):
continue
text_embeddings.update(
self.generate_text_embeddings(
list(dataset.category_names), batch, cache_dir=Path(dataset.img_path).parent
)
)
self.text_embeddings = text_embeddings
def generate_text_embeddings(self, texts: list[str], batch: int, cache_dir: Path) -> dict[str, torch.Tensor]:
model = "clip:ViT-B/32"
cache_path = cache_dir / f"text_embeddings_{model.replace(':', '_').replace('/', '_')}.pt"
if cache_path.exists():
LOGGER.info(f"Reading existed cache from '{cache_path}'")
txt_map = torch.load(cache_path, map_location=self.device)
if sorted(txt_map.keys()) == sorted(texts):
return txt_map
LOGGER.info(f"Caching text embeddings to '{cache_path}'")
assert self.model is not None
txt_feats = unwrap_model(self.model).get_text_pe(texts, batch, cache_clip_model=False)
txt_map = dict(zip(texts, txt_feats.squeeze(0)))
torch.save(txt_map, cache_path)
return txt_map
def preprocess_batch(self, batch: dict[str, Any]) -> dict[str, Any]:
batch = DetectionTrainer.preprocess_batch(self, batch)
# Add text features
texts = list(itertools.chain(*batch["texts"]))
txt_feats = torch.stack([self.text_embeddings[text] for text in texts]).to(
self.device, non_blocking=self.device.type == "cuda"
)
batch["txt_feats"] = txt_feats.reshape(len(batch["texts"]), -1, txt_feats.shape[-1])
return batch | --- +++ @@ -16,6 +16,7 @@
def on_pretrain_routine_end(trainer) -> None:
+ """Set up model classes and text encoder at the end of the pretrain routine."""
if RANK in {-1, 0}:
# Set class names for evaluation
names = [name.split("/", 1)[0] for name in list(trainer.test_loader.dataset.data["names"].values())]
@@ -23,8 +24,42 @@
class WorldTrainer(DetectionTrainer):
+ """A trainer class for fine-tuning YOLO World models on close-set datasets.
+
+ This trainer extends the DetectionTrainer to support training YOLO World models, which combine visual and textual
+ features for improved object detection and understanding. It handles text embedding generation and caching to
+ accelerate training with multi-modal data.
+
+ Attributes:
+ text_embeddings (dict[str, torch.Tensor] | None): Cached text embeddings for category names to accelerate
+ training.
+ model (WorldModel): The YOLO World model being trained.
+ data (dict[str, Any]): Dataset configuration containing class information.
+ args (Any): Training arguments and configuration.
+
+ Methods:
+ get_model: Return WorldModel initialized with specified config and weights.
+ build_dataset: Build YOLO Dataset for training or validation.
+ set_text_embeddings: Set text embeddings for datasets to accelerate training.
+ generate_text_embeddings: Generate text embeddings for a list of text samples.
+ preprocess_batch: Preprocess a batch of images and text for YOLOWorld training.
+
+ Examples:
+ Initialize and train a YOLO World model
+ >>> from ultralytics.models.yolo.world import WorldTrainer
+ >>> args = dict(model="yolov8s-world.pt", data="coco8.yaml", epochs=3)
+ >>> trainer = WorldTrainer(overrides=args)
+ >>> trainer.train()
+ """
def __init__(self, cfg=DEFAULT_CFG, overrides: dict[str, Any] | None = None, _callbacks: dict | None = None):
+ """Initialize a WorldTrainer object with given arguments.
+
+ Args:
+ cfg (dict[str, Any]): Configuration for the trainer.
+ overrides (dict[str, Any], optional): Configuration overrides.
+ _callbacks (dict, optional): Dictionary of callback functions.
+ """
if overrides is None:
overrides = {}
assert not overrides.get("compile"), f"Training with 'model={overrides['model']}' requires 'compile=False'"
@@ -32,6 +67,16 @@ self.text_embeddings = None
def get_model(self, cfg=None, weights: str | None = None, verbose: bool = True) -> WorldModel:
+ """Return WorldModel initialized with specified config and weights.
+
+ Args:
+ cfg (dict[str, Any] | str, optional): Model configuration.
+ weights (str, optional): Path to pretrained weights.
+ verbose (bool): Whether to display model info.
+
+ Returns:
+ (WorldModel): Initialized WorldModel.
+ """
# NOTE: This `nc` here is the max number of different text samples in one image, rather than the actual `nc`.
# NOTE: Following the official config, nc hard-coded to 80 for now.
model = WorldModel(
@@ -47,6 +92,16 @@ return model
def build_dataset(self, img_path: str, mode: str = "train", batch: int | None = None):
+ """Build YOLO Dataset for training or validation.
+
+ Args:
+ img_path (str): Path to the folder containing images.
+ mode (str): `train` mode or `val` mode, users are able to customize different augmentations for each mode.
+ batch (int, optional): Size of batches, this is for `rect`.
+
+ Returns:
+ (Any): YOLO dataset configured for training or validation.
+ """
gs = max(int(unwrap_model(self.model).stride.max() if self.model else 0), 32)
dataset = build_yolo_dataset(
self.args, img_path, batch, self.data, mode=mode, rect=mode == "val", stride=gs, multi_modal=mode == "train"
@@ -56,6 +111,19 @@ return dataset
def set_text_embeddings(self, datasets: list[Any], batch: int | None) -> None:
+ """Set text embeddings for datasets to accelerate training by caching category names.
+
+ This method collects unique category names from all datasets, then generates and caches text embeddings for
+ these categories to improve training efficiency.
+
+ Args:
+ datasets (list[Any]): List of datasets from which to extract category names.
+ batch (int | None): Batch size used for processing.
+
+ Notes:
+ This method collects category names from datasets that have the 'category_names' attribute,
+ then uses the first dataset's image path to determine where to cache the generated text embeddings.
+ """
text_embeddings = {}
for dataset in datasets:
if not hasattr(dataset, "category_names"):
@@ -68,6 +136,16 @@ self.text_embeddings = text_embeddings
def generate_text_embeddings(self, texts: list[str], batch: int, cache_dir: Path) -> dict[str, torch.Tensor]:
+ """Generate text embeddings for a list of text samples.
+
+ Args:
+ texts (list[str]): List of text samples to encode.
+ batch (int): Batch size for processing.
+ cache_dir (Path): Directory to save/load cached embeddings.
+
+ Returns:
+ (dict[str, torch.Tensor]): Dictionary mapping text samples to their embeddings.
+ """
model = "clip:ViT-B/32"
cache_path = cache_dir / f"text_embeddings_{model.replace(':', '_').replace('/', '_')}.pt"
if cache_path.exists():
@@ -83,6 +161,7 @@ return txt_map
def preprocess_batch(self, batch: dict[str, Any]) -> dict[str, Any]:
+ """Preprocess a batch of images and text for YOLOWorld training."""
batch = DetectionTrainer.preprocess_batch(self, batch)
# Add text features
@@ -91,4 +170,4 @@ self.device, non_blocking=self.device.type == "cuda"
)
batch["txt_feats"] = txt_feats.reshape(len(batch["texts"]), -1, txt_feats.shape[-1])
- return batch
+ return batch
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/yolo/world/train.py |
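The generate_text_embeddings docstring above amounts to a compute-or-load cache keyed by the text list. A generic sketch of that pattern with a stand-in encoder; cached_text_embeddings, encode_fn, and the cache filename are hypothetical, not the ultralytics API:
from pathlib import Path
import torch

def cached_text_embeddings(texts, cache_path: Path, encode_fn):
    if cache_path.exists():
        cached = torch.load(cache_path)
        if sorted(cached) == sorted(texts):  # reuse only if the cached vocabulary matches
            return cached
    feats = {t: encode_fn(t) for t in texts}  # encode and cache for the next run
    torch.save(feats, cache_path)
    return feats

emb = cached_text_embeddings(["person", "bus"], Path("txt_cache.pt"),
                             encode_fn=lambda t: torch.randn(512))
print(sorted(emb), emb["bus"].shape)  # ['bus', 'person'] torch.Size([512])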
Document all endpoints with docstrings | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import torch
import torch.nn as nn
import torch.nn.functional as F
from ultralytics.nn.modules import LayerNorm2d
from .blocks import (
Block,
CXBlock,
Fuser,
MaskDownSampler,
MultiScaleBlock,
PatchEmbed,
PositionEmbeddingRandom,
PositionEmbeddingSine,
)
class ImageEncoderViT(nn.Module):
def __init__(
self,
img_size: int = 1024,
patch_size: int = 16,
in_chans: int = 3,
embed_dim: int = 768,
depth: int = 12,
num_heads: int = 12,
mlp_ratio: float = 4.0,
out_chans: int = 256,
qkv_bias: bool = True,
norm_layer: type[nn.Module] = nn.LayerNorm,
act_layer: type[nn.Module] = nn.GELU,
use_abs_pos: bool = True,
use_rel_pos: bool = False,
rel_pos_zero_init: bool = True,
window_size: int = 0,
global_attn_indexes: tuple[int, ...] = (),
) -> None:
super().__init__()
self.img_size = img_size
self.patch_embed = PatchEmbed(
kernel_size=(patch_size, patch_size),
stride=(patch_size, patch_size),
in_chans=in_chans,
embed_dim=embed_dim,
)
self.pos_embed: nn.Parameter | None = None
if use_abs_pos:
# Initialize absolute positional embedding with pretrain image size
self.pos_embed = nn.Parameter(torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim))
self.blocks = nn.ModuleList()
for i in range(depth):
block = Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
norm_layer=norm_layer,
act_layer=act_layer,
use_rel_pos=use_rel_pos,
rel_pos_zero_init=rel_pos_zero_init,
window_size=window_size if i not in global_attn_indexes else 0,
input_size=(img_size // patch_size, img_size // patch_size),
)
self.blocks.append(block)
self.neck = nn.Sequential(
nn.Conv2d(
embed_dim,
out_chans,
kernel_size=1,
bias=False,
),
LayerNorm2d(out_chans),
nn.Conv2d(
out_chans,
out_chans,
kernel_size=3,
padding=1,
bias=False,
),
LayerNorm2d(out_chans),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.patch_embed(x)
if self.pos_embed is not None:
pos_embed = (
F.interpolate(self.pos_embed.permute(0, 3, 1, 2), scale_factor=self.img_size / 1024).permute(0, 2, 3, 1)
if self.img_size != 1024
else self.pos_embed
)
x = x + pos_embed
for blk in self.blocks:
x = blk(x)
return self.neck(x.permute(0, 3, 1, 2))
class PromptEncoder(nn.Module):
def __init__(
self,
embed_dim: int,
image_embedding_size: tuple[int, int],
input_image_size: tuple[int, int],
mask_in_chans: int,
activation: type[nn.Module] = nn.GELU,
) -> None:
super().__init__()
self.embed_dim = embed_dim
self.input_image_size = input_image_size
self.image_embedding_size = image_embedding_size
self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)
self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners
point_embeddings = [nn.Embedding(1, embed_dim) for _ in range(self.num_point_embeddings)]
self.point_embeddings = nn.ModuleList(point_embeddings)
self.not_a_point_embed = nn.Embedding(1, embed_dim)
self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1])
self.mask_downscaling = nn.Sequential(
nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),
LayerNorm2d(mask_in_chans // 4),
activation(),
nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),
LayerNorm2d(mask_in_chans),
activation(),
nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),
)
self.no_mask_embed = nn.Embedding(1, embed_dim)
def get_dense_pe(self) -> torch.Tensor:
return self.pe_layer(self.image_embedding_size).unsqueeze(0)
def _embed_points(self, points: torch.Tensor, labels: torch.Tensor, pad: bool) -> torch.Tensor:
points = points + 0.5 # Shift to center of pixel
if pad:
padding_point = torch.zeros((points.shape[0], 1, 2), dtype=points.dtype, device=points.device)
padding_label = -torch.ones((labels.shape[0], 1), dtype=labels.dtype, device=labels.device)
points = torch.cat([points, padding_point], dim=1)
labels = torch.cat([labels, padding_label], dim=1)
point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size)
point_embedding[labels == -1] = 0.0
point_embedding[labels == -1] += self.not_a_point_embed.weight
point_embedding[labels == 0] += self.point_embeddings[0].weight
point_embedding[labels == 1] += self.point_embeddings[1].weight
point_embedding[labels == 2] += self.point_embeddings[2].weight
point_embedding[labels == 3] += self.point_embeddings[3].weight
return point_embedding
def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
boxes = boxes + 0.5 # Shift to center of pixel
coords = boxes.reshape(-1, 2, 2)
corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size)
corner_embedding[:, 0, :] += self.point_embeddings[2].weight
corner_embedding[:, 1, :] += self.point_embeddings[3].weight
return corner_embedding
def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:
return self.mask_downscaling(masks)
@staticmethod
def _get_batch_size(
points: tuple[torch.Tensor, torch.Tensor] | None,
boxes: torch.Tensor | None,
masks: torch.Tensor | None,
) -> int:
if points is not None:
return points[0].shape[0]
elif boxes is not None:
return boxes.shape[0]
elif masks is not None:
return masks.shape[0]
else:
return 1
def forward(
self,
points: tuple[torch.Tensor, torch.Tensor] | None,
boxes: torch.Tensor | None,
masks: torch.Tensor | None,
) -> tuple[torch.Tensor, torch.Tensor]:
bs = self._get_batch_size(points, boxes, masks)
sparse_embeddings = torch.empty(
(bs, 0, self.embed_dim),
dtype=self.point_embeddings[0].weight.dtype,
device=self.point_embeddings[0].weight.device,
)
if points is not None:
coords, labels = points
point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))
sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)
if boxes is not None:
box_embeddings = self._embed_boxes(boxes)
sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)
if masks is not None:
dense_embeddings = self._embed_masks(masks)
else:
dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]
)
return sparse_embeddings, dense_embeddings
class MemoryEncoder(nn.Module):
def __init__(
self,
out_dim,
in_dim=256, # in_dim of pix_feats
interpol_size: tuple[int, int] | None = None,
):
super().__init__()
self.mask_downsampler = MaskDownSampler(kernel_size=3, stride=2, padding=1, interpol_size=interpol_size)
self.pix_feat_proj = nn.Conv2d(in_dim, in_dim, kernel_size=1)
self.fuser = Fuser(CXBlock(dim=256), num_layers=2)
self.position_encoding = PositionEmbeddingSine(num_pos_feats=64)
self.out_proj = nn.Identity()
if out_dim != in_dim:
self.out_proj = nn.Conv2d(in_dim, out_dim, kernel_size=1)
def forward(
self,
pix_feat: torch.Tensor,
masks: torch.Tensor,
skip_mask_sigmoid: bool = False,
) -> dict:
if not skip_mask_sigmoid:
masks = F.sigmoid(masks)
masks = self.mask_downsampler(masks)
# Fuse pix_feats and downsampled masks, in case the visual features are on CPU, cast them to CUDA
pix_feat = pix_feat.to(masks.device)
x = self.pix_feat_proj(pix_feat)
x = x + masks
x = self.fuser(x)
x = self.out_proj(x)
pos = self.position_encoding(x).to(x.dtype)
return {"vision_features": x, "vision_pos_enc": [pos]}
class ImageEncoder(nn.Module):
def __init__(
self,
trunk: nn.Module,
neck: nn.Module,
scalp: int = 0,
):
super().__init__()
self.trunk = trunk
self.neck = neck
self.scalp = scalp
assert self.trunk.channel_list == self.neck.backbone_channel_list, (
f"Channel dims of trunk {self.trunk.channel_list} and neck {self.neck.backbone_channel_list} do not match."
)
def forward(self, sample: torch.Tensor):
features, pos = self.neck(self.trunk(sample))
if self.scalp > 0:
# Discard the lowest resolution features
features, pos = features[: -self.scalp], pos[: -self.scalp]
src = features[-1]
return {
"vision_features": src,
"vision_pos_enc": pos,
"backbone_fpn": features,
}
class FpnNeck(nn.Module):
def __init__(
self,
d_model: int,
backbone_channel_list: list[int],
kernel_size: int = 1,
stride: int = 1,
padding: int = 0,
fpn_interp_model: str = "bilinear",
fuse_type: str = "sum",
fpn_top_down_levels: list[int] | None = None,
):
super().__init__()
self.position_encoding = PositionEmbeddingSine(num_pos_feats=256)
self.convs = nn.ModuleList()
self.backbone_channel_list = backbone_channel_list
for dim in backbone_channel_list:
current = nn.Sequential()
current.add_module(
"conv",
nn.Conv2d(
in_channels=dim,
out_channels=d_model,
kernel_size=kernel_size,
stride=stride,
padding=padding,
),
)
self.convs.append(current)
self.fpn_interp_model = fpn_interp_model
assert fuse_type in {"sum", "avg"}
self.fuse_type = fuse_type
# Levels to have top-down features in its outputs
# e.g. if fpn_top_down_levels is [2, 3], then only outputs of level 2 and 3
# have top-down propagation, while outputs of level 0 and level 1 have only
# lateral features from the same backbone level
if fpn_top_down_levels is None:
# Default is to have top-down features on all levels
fpn_top_down_levels = range(len(self.convs))
self.fpn_top_down_levels = list(fpn_top_down_levels)
def forward(self, xs: list[torch.Tensor]):
out = [None] * len(self.convs)
pos = [None] * len(self.convs)
assert len(xs) == len(self.convs)
# FPN forward pass
# see https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/fpn.py
prev_features = None
# Forward in top-down order (from low to high resolution)
n = len(self.convs) - 1
for i in range(n, -1, -1):
x = xs[i]
lateral_features = self.convs[n - i](x)
if i in self.fpn_top_down_levels and prev_features is not None:
top_down_features = F.interpolate(
prev_features.to(dtype=x.dtype),
scale_factor=2.0,
mode=self.fpn_interp_model,
align_corners=(None if self.fpn_interp_model == "nearest" else False),
antialias=False,
)
prev_features = lateral_features + top_down_features
if self.fuse_type == "avg":
prev_features /= 2
else:
prev_features = lateral_features
x_out = prev_features
out[i] = x_out
pos[i] = self.position_encoding(x_out).to(x_out.dtype)
return out, pos
class Hiera(nn.Module):
def __init__(
self,
embed_dim: int = 96, # initial embed dim
num_heads: int = 1, # initial number of heads
drop_path_rate: float = 0.0, # stochastic depth
q_pool: int = 3, # number of q_pool stages
q_stride: tuple[int, int] = (2, 2), # downsample stride bet. stages
stages: tuple[int, ...] = (2, 3, 16, 3), # blocks per stage
dim_mul: float = 2.0, # dim_mul factor at stage shift
head_mul: float = 2.0, # head_mul factor at stage shift
window_pos_embed_bkg_spatial_size: tuple[int, int] = (14, 14),
# window size per stage, when not using global att.
window_spec: tuple[int, ...] = (
8,
4,
14,
7,
),
# global attn in these blocks
global_att_blocks: tuple[int, ...] = (
12,
16,
20,
),
return_interm_layers=True, # return feats from every stage
):
super().__init__()
assert len(stages) == len(window_spec)
self.window_spec = window_spec
depth = sum(stages)
self.q_stride = q_stride
self.stage_ends = [sum(stages[:i]) - 1 for i in range(1, len(stages) + 1)]
assert 0 <= q_pool <= len(self.stage_ends[:-1])
self.q_pool_blocks = [x + 1 for x in self.stage_ends[:-1]][:q_pool]
self.return_interm_layers = return_interm_layers
self.patch_embed = PatchEmbed(
embed_dim=embed_dim,
kernel_size=(7, 7),
stride=(4, 4),
padding=(3, 3),
)
# Which blocks have global attention?
self.global_att_blocks = global_att_blocks
# Windowed positional embedding (https://arxiv.org/abs/2311.05613)
self.window_pos_embed_bkg_spatial_size = window_pos_embed_bkg_spatial_size
self.pos_embed = nn.Parameter(torch.zeros(1, embed_dim, *self.window_pos_embed_bkg_spatial_size))
self.pos_embed_window = nn.Parameter(torch.zeros(1, embed_dim, self.window_spec[0], self.window_spec[0]))
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
cur_stage = 1
self.blocks = nn.ModuleList()
for i in range(depth):
dim_out = embed_dim
# Lags by a block, so first block of next stage uses an initial window size
# of previous stage and final window size of current stage
window_size = self.window_spec[cur_stage - 1]
if self.global_att_blocks is not None:
window_size = 0 if i in self.global_att_blocks else window_size
if i - 1 in self.stage_ends:
dim_out = int(embed_dim * dim_mul)
num_heads = int(num_heads * head_mul)
cur_stage += 1
block = MultiScaleBlock(
dim=embed_dim,
dim_out=dim_out,
num_heads=num_heads,
drop_path=dpr[i],
q_stride=self.q_stride if i in self.q_pool_blocks else None,
window_size=window_size,
)
embed_dim = dim_out
self.blocks.append(block)
self.channel_list = (
[self.blocks[i].dim_out for i in self.stage_ends[::-1]]
if return_interm_layers
else [self.blocks[-1].dim_out]
)
def _get_pos_embed(self, hw: tuple[int, int]) -> torch.Tensor:
h, w = hw
window_embed = self.pos_embed_window
pos_embed = F.interpolate(self.pos_embed, size=(h, w), mode="bicubic")
pos_embed = pos_embed + window_embed.tile([x // y for x, y in zip(pos_embed.shape, window_embed.shape)])
pos_embed = pos_embed.permute(0, 2, 3, 1)
return pos_embed
def forward(self, x: torch.Tensor) -> list[torch.Tensor]:
x = self.patch_embed(x)
# x: (B, H, W, C)
# Add positional embedding
x = x + self._get_pos_embed(x.shape[1:3])
outputs = []
for i, blk in enumerate(self.blocks):
x = blk(x)
if (i == self.stage_ends[-1]) or (i in self.stage_ends and self.return_interm_layers):
feats = x.permute(0, 3, 1, 2)
outputs.append(feats)
return outputs | --- +++ @@ -21,6 +21,28 @@
class ImageEncoderViT(nn.Module):
+ """An image encoder using Vision Transformer (ViT) architecture for encoding images into a compact latent space.
+
+ This class processes images by splitting them into patches, applying transformer blocks, and generating a final
+ encoded representation through a neck module.
+
+ Attributes:
+ img_size (int): Dimension of input images, assumed to be square.
+ patch_embed (PatchEmbed): Module for patch embedding.
+ pos_embed (nn.Parameter | None): Absolute positional embedding for patches.
+ blocks (nn.ModuleList): List of transformer blocks for processing patch embeddings.
+ neck (nn.Sequential): Neck module to further process the output.
+
+ Methods:
+ forward: Process input through patch embedding, positional embedding, blocks, and neck.
+
+ Examples:
+ >>> import torch
+ >>> encoder = ImageEncoderViT(img_size=1024, patch_size=16, embed_dim=768, depth=12, num_heads=12)
+ >>> input_image = torch.randn(1, 3, 1024, 1024)
+ >>> output = encoder(input_image)
+ >>> print(output.shape)
+ """
def __init__(
self,
@@ -41,6 +63,26 @@ window_size: int = 0,
global_attn_indexes: tuple[int, ...] = (),
) -> None:
+ """Initialize an ImageEncoderViT instance for encoding images using Vision Transformer architecture.
+
+ Args:
+ img_size (int): Input image size, assumed to be square.
+ patch_size (int): Size of image patches.
+ in_chans (int): Number of input image channels.
+ embed_dim (int): Dimension of patch embeddings.
+ depth (int): Number of transformer blocks.
+ num_heads (int): Number of attention heads in each block.
+ mlp_ratio (float): Ratio of MLP hidden dimension to embedding dimension.
+ out_chans (int): Number of output channels from the neck module.
+ qkv_bias (bool): If True, adds learnable bias to query, key, value projections.
+ norm_layer (type[nn.Module]): Type of normalization layer to use.
+ act_layer (type[nn.Module]): Type of activation layer to use.
+ use_abs_pos (bool): If True, uses absolute positional embeddings.
+ use_rel_pos (bool): If True, adds relative positional embeddings to attention maps.
+ rel_pos_zero_init (bool): If True, initializes relative positional parameters to zero.
+ window_size (int): Size of attention window for windowed attention blocks.
+ global_attn_indexes (tuple[int, ...]): Indices of blocks that use global attention.
+ """
super().__init__()
self.img_size = img_size
@@ -91,6 +133,7 @@ )
def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Process input through patch embedding, positional embedding, transformer blocks, and neck module."""
x = self.patch_embed(x)
if self.pos_embed is not None:
pos_embed = (
@@ -105,6 +148,33 @@
class PromptEncoder(nn.Module):
+ """Encode different types of prompts for input to SAM's mask decoder, producing sparse and dense embeddings.
+
+ Attributes:
+ embed_dim (int): Dimension of the embeddings.
+ input_image_size (tuple[int, int]): Size of the input image as (H, W).
+ image_embedding_size (tuple[int, int]): Spatial size of the image embedding as (H, W).
+ pe_layer (PositionEmbeddingRandom): Module for random position embedding.
+ num_point_embeddings (int): Number of point embeddings for different types of points.
+ point_embeddings (nn.ModuleList): List of point embeddings.
+ not_a_point_embed (nn.Embedding): Embedding for points that are not part of any label.
+ mask_input_size (tuple[int, int]): Size of the input mask.
+ mask_downscaling (nn.Sequential): Neural network for downscaling the mask.
+ no_mask_embed (nn.Embedding): Embedding for cases where no mask is provided.
+
+ Methods:
+ get_dense_pe: Return the positional encoding used to encode point prompts.
+ forward: Embed different types of prompts, returning both sparse and dense embeddings.
+
+ Examples:
+ >>> prompt_encoder = PromptEncoder(256, (64, 64), (1024, 1024), 16)
+ >>> points = (torch.rand(1, 5, 2), torch.randint(0, 4, (1, 5)))
+ >>> boxes = torch.rand(1, 2, 2)
+ >>> masks = torch.rand(1, 1, 256, 256)
+ >>> sparse_embeddings, dense_embeddings = prompt_encoder(points, boxes, masks)
+ >>> print(sparse_embeddings.shape, dense_embeddings.shape)
+ torch.Size([1, 7, 256]) torch.Size([1, 256, 64, 64])
+ """
def __init__(
self,
@@ -114,6 +184,15 @@ mask_in_chans: int,
activation: type[nn.Module] = nn.GELU,
) -> None:
+ """Initialize the PromptEncoder module for encoding various types of prompts.
+
+ Args:
+ embed_dim (int): The dimension of the embeddings.
+ image_embedding_size (tuple[int, int]): The spatial size of the image embedding as (H, W).
+ input_image_size (tuple[int, int]): The padded size of the input image as (H, W).
+ mask_in_chans (int): The number of hidden channels used for encoding input masks.
+ activation (type[nn.Module]): The activation function to use when encoding input masks.
+ """
super().__init__()
self.embed_dim = embed_dim
self.input_image_size = input_image_size
@@ -138,9 +217,25 @@ self.no_mask_embed = nn.Embedding(1, embed_dim)
def get_dense_pe(self) -> torch.Tensor:
+ """Return the dense positional encoding used for encoding point prompts.
+
+ Generate a positional encoding for a dense set of points matching the shape of the image
+ encoding. The encoding is used to provide spatial information to the model when processing point prompts.
+
+ Returns:
+ (torch.Tensor): Positional encoding tensor with shape (1, embed_dim, H, W), where H and W are the height and
+ width of the image embedding size, respectively.
+
+ Examples:
+ >>> prompt_encoder = PromptEncoder(256, (64, 64), (1024, 1024), 16)
+ >>> dense_pe = prompt_encoder.get_dense_pe()
+ >>> print(dense_pe.shape)
+ torch.Size([1, 256, 64, 64])
+ """
return self.pe_layer(self.image_embedding_size).unsqueeze(0)
def _embed_points(self, points: torch.Tensor, labels: torch.Tensor, pad: bool) -> torch.Tensor:
+ """Embed point prompts by applying positional encoding and label-specific embeddings."""
points = points + 0.5 # Shift to center of pixel
if pad:
padding_point = torch.zeros((points.shape[0], 1, 2), dtype=points.dtype, device=points.device)
@@ -157,6 +252,7 @@ return point_embedding
def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
+ """Embed box prompts by applying positional encoding and adding corner embeddings."""
boxes = boxes + 0.5 # Shift to center of pixel
coords = boxes.reshape(-1, 2, 2)
corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size)
@@ -165,6 +261,7 @@ return corner_embedding
def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:
+ """Embed mask inputs by downscaling and processing through convolutional layers."""
return self.mask_downscaling(masks)
@staticmethod
@@ -173,6 +270,7 @@ boxes: torch.Tensor | None,
masks: torch.Tensor | None,
) -> int:
+ """Get the batch size of the output given the batch size of the input prompts."""
if points is not None:
return points[0].shape[0]
elif boxes is not None:
@@ -188,6 +286,27 @@ boxes: torch.Tensor | None,
masks: torch.Tensor | None,
) -> tuple[torch.Tensor, torch.Tensor]:
+ """Embed different types of prompts, returning both sparse and dense embeddings.
+
+ Args:
+ points (tuple[torch.Tensor, torch.Tensor] | None): Point coordinates and labels to embed. The first tensor
+ contains coordinates of shape (B, N, 2), and the second tensor contains labels of shape (B, N).
+ boxes (torch.Tensor | None): Boxes to embed with shape (B, 2, 2), i.e. two corner points per box.
+ masks (torch.Tensor | None): Masks to embed with shape (B, 1, H, W).
+
+ Returns:
+ sparse_embeddings (torch.Tensor): Sparse embeddings for points and boxes with shape (B, N, embed_dim).
+ dense_embeddings (torch.Tensor): Dense embeddings for masks of shape (B, embed_dim, embed_H, embed_W).
+
+ Examples:
+ >>> encoder = PromptEncoder(256, (64, 64), (1024, 1024), 16)
+ >>> points = (torch.rand(1, 5, 2), torch.randint(0, 4, (1, 5)))
+ >>> boxes = torch.rand(1, 2, 2)
+ >>> masks = torch.rand(1, 1, 256, 256)
+ >>> sparse_emb, dense_emb = encoder(points, boxes, masks)
+ >>> print(sparse_emb.shape, dense_emb.shape)
+ torch.Size([1, 7, 256]) torch.Size([1, 256, 64, 64])
+ """
bs = self._get_batch_size(points, boxes, masks)
sparse_embeddings = torch.empty(
(bs, 0, self.embed_dim),
@@ -213,6 +332,30 @@
class MemoryEncoder(nn.Module):
+ """Encode pixel features and masks into a memory representation for efficient image segmentation.
+
+ This class processes pixel-level features and masks, fusing them to generate encoded memory representations suitable
+ for downstream tasks in image segmentation models like SAM (Segment Anything Model).
+
+ Attributes:
+ mask_downsampler (MaskDownSampler): Module for downsampling input masks.
+ pix_feat_proj (nn.Conv2d): Convolutional layer for projecting pixel features.
+ fuser (Fuser): Module for fusing pixel features and masks.
+ position_encoding (PositionEmbeddingSine): Module for adding positional encoding to features.
+ out_proj (nn.Module): Output projection layer, either nn.Identity or nn.Conv2d.
+
+ Methods:
+ forward: Process input pixel features and masks to generate encoded memory representations.
+
+ Examples:
+ >>> import torch
+ >>> encoder = MemoryEncoder(out_dim=256, in_dim=256)
+ >>> pix_feat = torch.randn(1, 256, 64, 64)
+ >>> masks = torch.randn(1, 1, 1024, 1024)
+ >>> out = encoder(pix_feat, masks)
+ >>> print(out["vision_features"].shape, out["vision_pos_enc"][0].shape)
+ torch.Size([1, 256, 64, 64]) torch.Size([1, 128, 64, 64])
+ """
def __init__(
self,
@@ -220,6 +363,17 @@ in_dim=256, # in_dim of pix_feats
interpol_size: tuple[int, int] | None = None,
):
+ """Initialize the MemoryEncoder for encoding pixel features and masks into memory representations.
+
+ This encoder processes pixel-level features and masks, fusing them to generate encoded memory representations
+ suitable for downstream tasks in image segmentation models like SAM (Segment Anything Model).
+
+ Args:
+ out_dim (int): Output dimension of the encoded features.
+ in_dim (int): Input dimension of the pixel features.
+ interpol_size (tuple[int, int] | None): Size to interpolate masks to. If None, uses the size of pixel
+ features.
+ """
super().__init__()
self.mask_downsampler = MaskDownSampler(kernel_size=3, stride=2, padding=1, interpol_size=interpol_size)
@@ -237,6 +391,7 @@ masks: torch.Tensor,
skip_mask_sigmoid: bool = False,
) -> dict:
+ """Process pixel features and masks to generate encoded memory representations for segmentation."""
if not skip_mask_sigmoid:
masks = F.sigmoid(masks)
masks = self.mask_downsampler(masks)
@@ -255,6 +410,28 @@
class ImageEncoder(nn.Module):
+ """Encode images using a trunk-neck architecture, producing multiscale features and positional encodings.
+
+ This class combines a trunk network for feature extraction with a neck network for feature refinement and positional
+ encoding generation. It can optionally discard the lowest resolution features.
+
+ Attributes:
+ trunk (nn.Module): The trunk network for initial feature extraction.
+ neck (nn.Module): The neck network for feature refinement and positional encoding generation.
+ scalp (int): Number of lowest resolution feature levels to discard.
+
+ Methods:
+ forward: Process the input image through the trunk and neck networks.
+
+ Examples:
+ >>> trunk = SomeTrunkNetwork()
+ >>> neck = SomeNeckNetwork()
+ >>> encoder = ImageEncoder(trunk, neck, scalp=1)
+ >>> image = torch.randn(1, 3, 224, 224)
+ >>> output = encoder(image)
+ >>> print(output.keys())
+ dict_keys(['vision_features', 'vision_pos_enc', 'backbone_fpn'])
+ """
def __init__(
self,
@@ -262,6 +439,16 @@ neck: nn.Module,
scalp: int = 0,
):
+ """Initialize the ImageEncoder with trunk and neck networks for feature extraction and refinement.
+
+ This encoder combines a trunk network for feature extraction with a neck network for feature refinement and
+ positional encoding generation. It can optionally discard the lowest resolution features.
+
+ Args:
+ trunk (nn.Module): The trunk network for initial feature extraction.
+ neck (nn.Module): The neck network for feature refinement and positional encoding generation.
+ scalp (int): Number of lowest resolution feature levels to discard.
+ """
super().__init__()
self.trunk = trunk
self.neck = neck
@@ -271,6 +458,7 @@ )
def forward(self, sample: torch.Tensor):
+ """Encode input through trunk and neck networks, returning multiscale features and positional encodings."""
features, pos = self.neck(self.trunk(sample))
if self.scalp > 0:
# Discard the lowest resolution features
@@ -285,6 +473,30 @@
class FpnNeck(nn.Module):
+ """A Feature Pyramid Network (FPN) neck variant for multiscale feature fusion in object detection models.
+
+ This FPN variant removes the output convolution and uses bicubic interpolation for feature resizing, similar to ViT
+ positional embedding interpolation.
+
+ Attributes:
+ position_encoding (PositionEmbeddingSine): Sinusoidal positional encoding module.
+ convs (nn.ModuleList): List of convolutional layers for each backbone level.
+ backbone_channel_list (list[int]): List of channel dimensions from the backbone.
+ fpn_interp_model (str): Interpolation mode for FPN feature resizing.
+ fuse_type (str): Type of feature fusion, either 'sum' or 'avg'.
+ fpn_top_down_levels (list[int]): Levels to have top-down features in outputs.
+
+ Methods:
+ forward: Perform forward pass through the FPN neck.
+
+ Examples:
+ >>> backbone_channels = [64, 128, 256, 512]
+ >>> fpn_neck = FpnNeck(256, backbone_channels)
+ >>> inputs = [torch.rand(1, c, 32, 32) for c in backbone_channels]
+ >>> outputs, positions = fpn_neck(inputs)
+ >>> print(len(outputs), len(positions))
+ 4 4
+ """
def __init__(
self,
@@ -297,6 +509,21 @@ fuse_type: str = "sum",
fpn_top_down_levels: list[int] | None = None,
):
+ """Initialize a modified Feature Pyramid Network (FPN) neck.
+
+ This FPN variant removes the output convolution and uses bicubic interpolation for feature resizing, similar to
+ ViT positional embedding interpolation.
+
+ Args:
+ d_model (int): Dimension of the model.
+ backbone_channel_list (list[int]): List of channel dimensions from the backbone.
+ kernel_size (int): Kernel size for the convolutional layers.
+ stride (int): Stride for the convolutional layers.
+ padding (int): Padding for the convolutional layers.
+ fpn_interp_model (str): Interpolation mode for FPN feature resizing.
+ fuse_type (str): Type of feature fusion, either 'sum' or 'avg'.
+ fpn_top_down_levels (list[int] | None): Levels to have top-down features in outputs.
+ """
super().__init__()
self.position_encoding = PositionEmbeddingSine(num_pos_feats=256)
self.convs = nn.ModuleList()
@@ -329,6 +556,26 @@ self.fpn_top_down_levels = list(fpn_top_down_levels)
def forward(self, xs: list[torch.Tensor]):
+ """Perform forward pass through the Feature Pyramid Network (FPN) neck.
+
+ This method processes a list of input tensors from the backbone through the FPN, applying lateral connections
+ and top-down feature fusion. It generates output feature maps and corresponding positional encodings.
+
+ Args:
+ xs (list[torch.Tensor]): List of input tensors from the backbone, each with shape (B, C, H, W).
+
+ Returns:
+ out (list[torch.Tensor]): List of output feature maps after FPN processing, each with shape (B, d_model, H,
+ W).
+ pos (list[torch.Tensor]): List of positional encodings corresponding to each output feature map.
+
+ Examples:
+ >>> fpn_neck = FpnNeck(d_model=256, backbone_channel_list=[64, 128, 256, 512])
+ >>> inputs = [torch.rand(1, c, 32, 32) for c in [64, 128, 256, 512]]
+ >>> outputs, positions = fpn_neck(inputs)
+ >>> print(len(outputs), len(positions))
+ 4 4
+ """
out = [None] * len(self.convs)
pos = [None] * len(self.convs)
assert len(xs) == len(self.convs)
@@ -361,6 +608,37 @@
class Hiera(nn.Module):
+ """Hierarchical vision transformer for efficient multiscale feature extraction in image processing tasks.
+
+ This class implements a Hiera model, which is a hierarchical vision transformer architecture designed for efficient
+ multiscale feature extraction. It uses a series of transformer blocks organized into stages, with optional pooling
+ and global attention mechanisms.
+
+ Attributes:
+ window_spec (tuple[int, ...]): Window sizes for each stage.
+ q_stride (tuple[int, int]): Downsampling stride between stages.
+ stage_ends (list[int]): Indices of the last block in each stage.
+ q_pool_blocks (list[int]): Indices of blocks where pooling is applied.
+ return_interm_layers (bool): Whether to return intermediate layer outputs.
+ patch_embed (PatchEmbed): Module for patch embedding.
+ global_att_blocks (tuple[int, ...]): Indices of blocks with global attention.
+ window_pos_embed_bkg_spatial_size (tuple[int, int]): Spatial size for window positional embedding background.
+ pos_embed (nn.Parameter): Positional embedding for the background.
+ pos_embed_window (nn.Parameter): Positional embedding for the window.
+ blocks (nn.ModuleList): List of MultiScaleBlock modules.
+ channel_list (list[int]): List of output channel dimensions for each stage.
+
+ Methods:
+ _get_pos_embed: Generate positional embeddings by interpolating and combining window and background embeddings.
+ forward: Perform the forward pass through the Hiera model.
+
+ Examples:
+ >>> model = Hiera(embed_dim=96, num_heads=1, stages=(2, 3, 16, 3))
+ >>> input_tensor = torch.randn(1, 3, 224, 224)
+ >>> output_features = model(input_tensor)
+ >>> for feat in output_features:
+ ... print(feat.shape)
+ """
def __init__(
self,
@@ -388,6 +666,27 @@ ),
return_interm_layers=True, # return feats from every stage
):
+ """Initialize a Hiera model, a hierarchical vision transformer for efficient multiscale feature extraction.
+
+ Hiera is a hierarchical vision transformer architecture designed for efficient multiscale feature extraction in
+ image processing tasks. It uses a series of transformer blocks organized into stages, with optional pooling and
+ global attention mechanisms.
+
+ Args:
+ embed_dim (int): Initial embedding dimension for the model.
+ num_heads (int): Initial number of attention heads.
+ drop_path_rate (float): Stochastic depth rate.
+ q_pool (int): Number of query pooling stages.
+ q_stride (tuple[int, int]): Downsampling stride between stages.
+ stages (tuple[int, ...]): Number of blocks per stage.
+ dim_mul (float): Dimension multiplier factor at stage transitions.
+ head_mul (float): Head multiplier factor at stage transitions.
+ window_pos_embed_bkg_spatial_size (tuple[int, int]): Spatial size for window positional embedding
+ background.
+ window_spec (tuple[int, ...]): Window sizes for each stage when not using global attention.
+ global_att_blocks (tuple[int, ...]): Indices of blocks that use global attention.
+ return_interm_layers (bool): Whether to return intermediate layer outputs.
+ """
super().__init__()
assert len(stages) == len(window_spec)
@@ -452,6 +751,7 @@ )
def _get_pos_embed(self, hw: tuple[int, int]) -> torch.Tensor:
+ """Generate positional embeddings by interpolating and combining window and background embeddings."""
h, w = hw
window_embed = self.pos_embed_window
pos_embed = F.interpolate(self.pos_embed, size=(h, w), mode="bicubic")
@@ -460,6 +760,24 @@ return pos_embed
def forward(self, x: torch.Tensor) -> list[torch.Tensor]:
+ """Perform forward pass through Hiera model, extracting multiscale features from input images.
+
+ Args:
+ x (torch.Tensor): Input tensor with shape (B, C, H, W) representing a batch of images.
+
+ Returns:
+ (list[torch.Tensor]): List of feature maps at different scales, each with shape (B, C_i, H_i, W_i), where
+ C_i is the channel dimension and H_i, W_i are the spatial dimensions at scale i. The list is ordered
+ from highest resolution (fine features) to lowest resolution (coarse features) if return_interm_layers
+ is True, otherwise contains only the final output.
+
+ Examples:
+ >>> model = Hiera(embed_dim=96, num_heads=1, stages=(2, 3, 16, 3))
+ >>> input_tensor = torch.randn(1, 3, 224, 224)
+ >>> output_features = model(input_tensor)
+ >>> for feat in output_features:
+ ... print(feat.shape)
+ """
x = self.patch_embed(x)
# x: (B, H, W, C)
@@ -473,4 +791,4 @@ feats = x.permute(0, 3, 1, 2)
outputs.append(feats)
- return outputs
+ return outputs
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/sam/modules/encoders.py |
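The `FpnNeck.forward` docstring in the row above walks through lateral projection plus top-down fusion. The short sketch below replays that loop outside the class so the shape bookkeeping is visible; it is a simplified illustration (no positional encodings, 'sum' fusion applied on every level), not the module itself.

import torch
import torch.nn as nn
import torch.nn.functional as F

d_model, channels = 256, [64, 128, 256, 512]  # finest -> coarsest backbone channels
laterals = nn.ModuleList(nn.Conv2d(c, d_model, kernel_size=1) for c in channels)
xs = [torch.rand(1, c, 64 // 2**i, 64 // 2**i) for i, c in enumerate(channels)]  # 64, 32, 16, 8

prev, outs = None, [None] * len(xs)
for i in range(len(xs) - 1, -1, -1):  # top-down: start at the coarsest level
    lateral = laterals[i](xs[i])  # project backbone channels to d_model
    if prev is not None:  # upsample the coarser result 2x and fuse by summation
        lateral = lateral + F.interpolate(prev, scale_factor=2.0, mode="bilinear", align_corners=False)
    prev = lateral
    outs[i] = prev

print([tuple(o.shape) for o in outs])  # every level now carries d_model channels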
Generate docstrings for script automation | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import math
from collections.abc import Generator
from itertools import product
from typing import Any
import numpy as np
import torch
def is_box_near_crop_edge(
boxes: torch.Tensor, crop_box: list[int], orig_box: list[int], atol: float = 20.0
) -> torch.Tensor:
crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)
orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)
boxes = uncrop_boxes_xyxy(boxes, crop_box).float()
near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)
near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)
near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)
return torch.any(near_crop_edge, dim=1)
def batch_iterator(batch_size: int, *args) -> Generator[list[Any]]:
assert args and all(len(a) == len(args[0]) for a in args), "Batched iteration must have same-size inputs."
n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)
for b in range(n_batches):
yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]
def calculate_stability_score(masks: torch.Tensor, mask_threshold: float, threshold_offset: float) -> torch.Tensor:
intersections = (masks > (mask_threshold + threshold_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32)
unions = (masks > (mask_threshold - threshold_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32)
return intersections / unions
def build_point_grid(n_per_side: int) -> np.ndarray:
offset = 1 / (2 * n_per_side)
points_one_side = np.linspace(offset, 1 - offset, n_per_side)
points_x = np.tile(points_one_side[None, :], (n_per_side, 1))
points_y = np.tile(points_one_side[:, None], (1, n_per_side))
return np.stack([points_x, points_y], axis=-1).reshape(-1, 2)
def build_all_layer_point_grids(n_per_side: int, n_layers: int, scale_per_layer: int) -> list[np.ndarray]:
return [build_point_grid(int(n_per_side / (scale_per_layer**i))) for i in range(n_layers + 1)]
def generate_crop_boxes(
im_size: tuple[int, ...], n_layers: int, overlap_ratio: float
) -> tuple[list[list[int]], list[int]]:
crop_boxes, layer_idxs = [], []
im_h, im_w = im_size
short_side = min(im_h, im_w)
# Original image
crop_boxes.append([0, 0, im_w, im_h])
layer_idxs.append(0)
def crop_len(orig_len, n_crops, overlap):
return math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops)
for i_layer in range(n_layers):
n_crops_per_side = 2 ** (i_layer + 1)
overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))
crop_w = crop_len(im_w, n_crops_per_side, overlap)
crop_h = crop_len(im_h, n_crops_per_side, overlap)
crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]
crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]
# Crops in XYWH format
for x0, y0 in product(crop_box_x0, crop_box_y0):
box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]
crop_boxes.append(box)
layer_idxs.append(i_layer + 1)
return crop_boxes, layer_idxs
def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: list[int]) -> torch.Tensor:
x0, y0, _, _ = crop_box
offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)
# Check if boxes has a channel dimension
if len(boxes.shape) == 3:
offset = offset.unsqueeze(1)
return boxes + offset
def uncrop_points(points: torch.Tensor, crop_box: list[int]) -> torch.Tensor:
x0, y0, _, _ = crop_box
offset = torch.tensor([[x0, y0]], device=points.device)
# Check if points has a channel dimension
if len(points.shape) == 3:
offset = offset.unsqueeze(1)
return points + offset
def uncrop_masks(masks: torch.Tensor, crop_box: list[int], orig_h: int, orig_w: int) -> torch.Tensor:
x0, y0, x1, y1 = crop_box
if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:
return masks
# Coordinate transform masks
pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)
pad = (x0, pad_x - x0, y0, pad_y - y0)
return torch.nn.functional.pad(masks, pad, value=0)
def remove_small_regions(mask: np.ndarray, area_thresh: float, mode: str) -> tuple[np.ndarray, bool]:
import cv2 # type: ignore
assert mode in {"holes", "islands"}, f"Provided mode {mode} is invalid"
correct_holes = mode == "holes"
working_mask = (correct_holes ^ mask).astype(np.uint8)
n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)
sizes = stats[:, -1][1:] # Row 0 is background label
small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]
if not small_regions:
return mask, False
fill_labels = [0, *small_regions]
if not correct_holes:
# If every region is below threshold, keep largest
fill_labels = [i for i in range(n_labels) if i not in fill_labels] or [int(np.argmax(sizes)) + 1]
mask = np.isin(regions, fill_labels)
return mask, True
def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:
# torch.max below raises an error on empty inputs, just skip in this case
if torch.numel(masks) == 0:
return torch.zeros(*masks.shape[:-2], 4, device=masks.device)
# Normalize shape to CxHxW
shape = masks.shape
h, w = shape[-2:]
masks = masks.flatten(0, -3) if len(shape) > 2 else masks.unsqueeze(0)
# Get top and bottom edges
in_height, _ = torch.max(masks, dim=-1)
in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]
bottom_edges, _ = torch.max(in_height_coords, dim=-1)
in_height_coords = in_height_coords + h * (~in_height)
top_edges, _ = torch.min(in_height_coords, dim=-1)
# Get left and right edges
in_width, _ = torch.max(masks, dim=-2)
in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]
right_edges, _ = torch.max(in_width_coords, dim=-1)
in_width_coords = in_width_coords + w * (~in_width)
left_edges, _ = torch.min(in_width_coords, dim=-1)
# If the mask is empty the right edge will be to the left of the left edge.
# Replace these boxes with [0, 0, 0, 0]
empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)
out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)
out = out * (~empty_filter).unsqueeze(-1)
# Return to original shape
return out.reshape(*shape[:-2], 4) if len(shape) > 2 else out[0] | --- +++ @@ -14,6 +14,23 @@ def is_box_near_crop_edge(
boxes: torch.Tensor, crop_box: list[int], orig_box: list[int], atol: float = 20.0
) -> torch.Tensor:
+ """Determine if bounding boxes are near the edge of a cropped image region using a specified tolerance.
+
+ Args:
+ boxes (torch.Tensor): Bounding boxes in XYXY format.
+ crop_box (list[int]): Crop box coordinates in [x0, y0, x1, y1] format.
+ orig_box (list[int]): Original image box coordinates in [x0, y0, x1, y1] format.
+ atol (float, optional): Absolute tolerance for edge proximity detection.
+
+ Returns:
+ (torch.Tensor): Boolean tensor indicating which boxes are near crop edges.
+
+ Examples:
+ >>> boxes = torch.tensor([[10, 10, 50, 50], [100, 100, 150, 150]])
+ >>> crop_box = [0, 0, 200, 200]
+ >>> orig_box = [0, 0, 300, 300]
+ >>> near_edge = is_box_near_crop_edge(boxes, crop_box, orig_box, atol=20.0)
+ """
crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)
orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)
boxes = uncrop_boxes_xyxy(boxes, crop_box).float()
@@ -24,6 +41,27 @@
def batch_iterator(batch_size: int, *args) -> Generator[list[Any]]:
+ """Yield batches of data from input arguments with specified batch size for efficient processing.
+
+ This function takes a batch size and any number of iterables, then yields batches of elements from those
+ iterables. All input iterables must have the same length.
+
+ Args:
+ batch_size (int): Size of each batch to yield.
+ *args (Any): Variable length input iterables to batch. All iterables must have the same length.
+
+ Yields:
+ (list[Any]): A list of batched elements from each input iterable.
+
+ Examples:
+ >>> data = [1, 2, 3, 4, 5]
+ >>> labels = ["a", "b", "c", "d", "e"]
+ >>> for batch in batch_iterator(2, data, labels):
+ ... print(batch)
+ [[1, 2], ['a', 'b']]
+ [[3, 4], ['c', 'd']]
+ [[5], ['e']]
+ """
assert args and all(len(a) == len(args[0]) for a in args), "Batched iteration must have same-size inputs."
n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)
for b in range(n_batches):
@@ -31,12 +69,36 @@
def calculate_stability_score(masks: torch.Tensor, mask_threshold: float, threshold_offset: float) -> torch.Tensor:
+ """Compute the stability score for a batch of masks.
+
+ The stability score is the IoU between binary masks obtained by thresholding the predicted mask logits at high and
+ low values.
+
+ Args:
+ masks (torch.Tensor): Batch of predicted mask logits.
+ mask_threshold (float): Threshold value for creating binary masks.
+ threshold_offset (float): Offset applied to the threshold for creating high and low binary masks.
+
+ Returns:
+ (torch.Tensor): Stability scores for each mask in the batch.
+
+ Examples:
+ >>> masks = torch.rand(10, 256, 256) # Batch of 10 masks
+ >>> mask_threshold = 0.5
+ >>> threshold_offset = 0.1
+ >>> stability_scores = calculate_stability_score(masks, mask_threshold, threshold_offset)
+
+ Notes:
+ - One mask is always contained inside the other.
+ - Memory is saved by preventing unnecessary cast to torch.int64.
+ """
intersections = (masks > (mask_threshold + threshold_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32)
unions = (masks > (mask_threshold - threshold_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32)
return intersections / unions
def build_point_grid(n_per_side: int) -> np.ndarray:
+ """Generate a 2D grid of evenly spaced points in the range [0,1]x[0,1] for image segmentation tasks."""
offset = 1 / (2 * n_per_side)
points_one_side = np.linspace(offset, 1 - offset, n_per_side)
points_x = np.tile(points_one_side[None, :], (n_per_side, 1))
@@ -45,12 +107,30 @@
def build_all_layer_point_grids(n_per_side: int, n_layers: int, scale_per_layer: int) -> list[np.ndarray]:
+ """Generate point grids for multiple crop layers with varying scales and densities."""
return [build_point_grid(int(n_per_side / (scale_per_layer**i))) for i in range(n_layers + 1)]
def generate_crop_boxes(
im_size: tuple[int, ...], n_layers: int, overlap_ratio: float
) -> tuple[list[list[int]], list[int]]:
+ """Generate crop boxes of varying sizes for multiscale image processing, with layered overlapping regions.
+
+ Args:
+ im_size (tuple[int, ...]): Height and width of the input image.
+ n_layers (int): Number of layers to generate crop boxes for.
+ overlap_ratio (float): Ratio of overlap between adjacent crop boxes.
+
+ Returns:
+ crop_boxes (list[list[int]]): List of crop boxes in [x0, y0, x1, y1] format.
+ layer_idxs (list[int]): List of layer indices corresponding to each crop box.
+
+ Examples:
+ >>> im_size = (800, 1200) # Height, width
+ >>> n_layers = 3
+ >>> overlap_ratio = 0.25
+ >>> crop_boxes, layer_idxs = generate_crop_boxes(im_size, n_layers, overlap_ratio)
+ """
crop_boxes, layer_idxs = [], []
im_h, im_w = im_size
short_side = min(im_h, im_w)
@@ -60,6 +140,7 @@ layer_idxs.append(0)
def crop_len(orig_len, n_crops, overlap):
+ """Calculate the length of each crop given the original length, number of crops, and overlap."""
return math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops)
for i_layer in range(n_layers):
@@ -82,6 +163,7 @@
def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: list[int]) -> torch.Tensor:
+ """Uncrop bounding boxes by adding the crop box offset to their coordinates."""
x0, y0, _, _ = crop_box
offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)
# Check if boxes has a channel dimension
@@ -91,6 +173,7 @@
def uncrop_points(points: torch.Tensor, crop_box: list[int]) -> torch.Tensor:
+ """Uncrop points by adding the crop box offset to their coordinates."""
x0, y0, _, _ = crop_box
offset = torch.tensor([[x0, y0]], device=points.device)
# Check if points has a channel dimension
@@ -100,6 +183,7 @@
def uncrop_masks(masks: torch.Tensor, crop_box: list[int], orig_h: int, orig_w: int) -> torch.Tensor:
+ """Uncrop masks by padding them to the original image size, handling coordinate transformations."""
x0, y0, x1, y1 = crop_box
if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:
return masks
@@ -110,6 +194,24 @@
def remove_small_regions(mask: np.ndarray, area_thresh: float, mode: str) -> tuple[np.ndarray, bool]:
+ """Remove small disconnected regions or holes in a mask based on area threshold and mode.
+
+ Args:
+ mask (np.ndarray): Binary mask to process.
+ area_thresh (float): Area threshold below which regions will be removed.
+ mode (str): Processing mode, either 'holes' to fill small holes or 'islands' to remove small disconnected
+ regions.
+
+ Returns:
+ processed_mask (np.ndarray): Processed binary mask with small regions removed.
+ modified (bool): Whether any regions were modified.
+
+ Examples:
+ >>> mask = np.zeros((100, 100), dtype=np.bool_)
+ >>> mask[40:60, 40:60] = True # Create a square
+ >>> mask[45:55, 45:55] = False # Create a hole
+ >>> processed_mask, modified = remove_small_regions(mask, 50, "holes")
+ """
import cv2 # type: ignore
assert mode in {"holes", "islands"}, f"Provided mode {mode} is invalid"
@@ -129,6 +231,18 @@
def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:
+ """Calculate bounding boxes in XYXY format around binary masks.
+
+ Args:
+ masks (torch.Tensor): Binary masks with shape (B, H, W) or (B, C, H, W).
+
+ Returns:
+ (torch.Tensor): Bounding boxes in XYXY format with shape (B, 4) or (B, C, 4).
+
+ Notes:
+ - Handles empty masks by returning zero boxes.
+ - Preserves input tensor dimensions in the output.
+ """
# torch.max below raises an error on empty inputs, just skip in this case
if torch.numel(masks) == 0:
return torch.zeros(*masks.shape[:-2], 4, device=masks.device)
@@ -158,4 +272,4 @@ out = out * (~empty_filter).unsqueeze(-1)
# Return to original shape
- return out.reshape(*shape[:-2], 4) if len(shape) > 2 else out[0]
+ return out.reshape(*shape[:-2], 4) if len(shape) > 2 else out[0]
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/sam/amg.py |
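To make the layered crop geometry in `generate_crop_boxes` concrete, here is the layer-1 arithmetic replayed with fixed numbers (a 200x300 image and overlap_ratio=0.25); the values in the comments follow directly from the `crop_len` helper above and were checked by hand.

import math
from itertools import product

im_h, im_w, overlap_ratio = 200, 300, 0.25
n = 2  # crops per side at layer 1
overlap = int(overlap_ratio * min(im_h, im_w) * (2 / n))  # 0.25 * 200 * 1 = 50
crop_w = math.ceil((overlap * (n - 1) + im_w) / n)  # ceil(350 / 2) = 175
crop_h = math.ceil((overlap * (n - 1) + im_h) / n)  # ceil(250 / 2) = 125
x0s = [int((crop_w - overlap) * i) for i in range(n)]  # [0, 125]
y0s = [int((crop_h - overlap) * i) for i in range(n)]  # [0, 75]
boxes = [[x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)] for x0, y0 in product(x0s, y0s)]
print(boxes)  # adjacent crops overlap by 50 px along each axis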
Add docstrings to my Python code | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from pathlib import Path
from typing import Any
import numpy as np
import torch
import torch.nn.functional as F
from ultralytics.models.yolo.detect import DetectionValidator
from ultralytics.utils import LOGGER, ops
from ultralytics.utils.checks import check_requirements
from ultralytics.utils.metrics import SegmentMetrics, mask_iou
class SegmentationValidator(DetectionValidator):
def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks: dict | None = None) -> None:
super().__init__(dataloader, save_dir, args, _callbacks)
self.process = None
self.args.task = "segment"
self.metrics = SegmentMetrics()
def preprocess(self, batch: dict[str, Any]) -> dict[str, Any]:
batch = super().preprocess(batch)
batch["masks"] = batch["masks"].float()
return batch
def init_metrics(self, model: torch.nn.Module) -> None:
super().init_metrics(model)
if self.args.save_json:
check_requirements("faster-coco-eval>=1.6.7")
# More accurate vs faster
self.process = ops.process_mask_native if self.args.save_json or self.args.save_txt else ops.process_mask
def get_desc(self) -> str:
return ("%22s" + "%11s" * 10) % (
"Class",
"Images",
"Instances",
"Box(P",
"R",
"mAP50",
"mAP50-95)",
"Mask(P",
"R",
"mAP50",
"mAP50-95)",
)
def postprocess(self, preds: list[torch.Tensor]) -> list[dict[str, torch.Tensor]]:
proto = preds[0][1] if isinstance(preds[0], tuple) else preds[1]
preds = super().postprocess(preds[0])
imgsz = [4 * x for x in proto.shape[2:]] # get image size from proto
for i, pred in enumerate(preds):
coefficient = pred.pop("extra")
pred["masks"] = (
self.process(proto[i], coefficient, pred["bboxes"], shape=imgsz)
if coefficient.shape[0]
else torch.zeros(
(0, *(imgsz if self.process is ops.process_mask_native else proto.shape[2:])),
dtype=torch.uint8,
device=pred["bboxes"].device,
)
)
return preds
def _prepare_batch(self, si: int, batch: dict[str, Any]) -> dict[str, Any]:
prepared_batch = super()._prepare_batch(si, batch)
nl = prepared_batch["cls"].shape[0]
if self.args.overlap_mask:
masks = batch["masks"][si]
index = torch.arange(1, nl + 1, device=masks.device).view(nl, 1, 1)
masks = (masks == index).float()
else:
masks = batch["masks"][batch["batch_idx"] == si]
if nl:
mask_size = [s if self.process is ops.process_mask_native else s // 4 for s in prepared_batch["imgsz"]]
if masks.shape[1:] != mask_size:
masks = F.interpolate(masks[None], mask_size, mode="bilinear", align_corners=False)[0]
masks = masks.gt_(0.5)
prepared_batch["masks"] = masks
return prepared_batch
def _process_batch(self, preds: dict[str, torch.Tensor], batch: dict[str, Any]) -> dict[str, np.ndarray]:
tp = super()._process_batch(preds, batch)
gt_cls = batch["cls"]
if gt_cls.shape[0] == 0 or preds["cls"].shape[0] == 0:
tp_m = np.zeros((preds["cls"].shape[0], self.niou), dtype=bool)
else:
iou = mask_iou(batch["masks"].flatten(1), preds["masks"].flatten(1).float()) # float, uint8
tp_m = self.match_predictions(preds["cls"], gt_cls, iou).cpu().numpy()
tp.update({"tp_m": tp_m}) # update tp with mask IoU
return tp
def plot_predictions(self, batch: dict[str, Any], preds: list[dict[str, torch.Tensor]], ni: int) -> None:
for p in preds:
masks = p["masks"]
if masks.shape[0] > self.args.max_det:
LOGGER.warning(f"Limiting validation plots to 'max_det={self.args.max_det}' items.")
p["masks"] = torch.as_tensor(masks[: self.args.max_det], dtype=torch.uint8).cpu()
super().plot_predictions(batch, preds, ni, max_det=self.args.max_det) # plot bboxes
def save_one_txt(self, predn: dict[str, torch.Tensor], save_conf: bool, shape: tuple[int, int], file: Path) -> None:
from ultralytics.engine.results import Results
Results(
np.zeros((shape[0], shape[1]), dtype=np.uint8),
path=None,
names=self.names,
boxes=torch.cat([predn["bboxes"], predn["conf"].unsqueeze(-1), predn["cls"].unsqueeze(-1)], dim=1),
masks=torch.as_tensor(predn["masks"], dtype=torch.uint8),
).save_txt(file, save_conf=save_conf)
def pred_to_json(self, predn: dict[str, torch.Tensor], pbatch: dict[str, Any]) -> None:
def to_string(counts: list[int]) -> str:
result = []
for i in range(len(counts)):
x = int(counts[i])
# Delta-encode counts after the third entry (i > 2) against the value two positions back
if i > 2:
x -= int(counts[i - 2])
# Variable-length encode the value
while True:
c = x & 0x1F # Take 5 bits
x >>= 5
# If the sign bit (0x10) is set, continue if x != -1;
# otherwise, continue if x != 0
more = (x != -1) if (c & 0x10) else (x != 0)
if more:
c |= 0x20 # Set continuation bit
c += 48 # Shift to ASCII
result.append(chr(c))
if not more:
break
return "".join(result)
def multi_encode(pixels: torch.Tensor) -> list[int]:
transitions = pixels[:, 1:] != pixels[:, :-1]
row_idx, col_idx = torch.where(transitions)
col_idx = col_idx + 1
# Compute run lengths
counts = []
for i in range(pixels.shape[0]):
positions = col_idx[row_idx == i]
if len(positions):
count = torch.diff(positions).tolist()
count.insert(0, positions[0].item())
count.append(len(pixels[i]) - positions[-1].item())
else:
count = [len(pixels[i])]
# Ensure starting with background (0) count
if pixels[i][0].item() == 1:
count = [0, *count]
counts.append(count)
return counts
pred_masks = predn["masks"].transpose(2, 1).contiguous().view(len(predn["masks"]), -1) # N, H*W
h, w = predn["masks"].shape[1:3]
counts = multi_encode(pred_masks)
rles = []
for c in counts:
rles.append({"size": [h, w], "counts": to_string(c)})
super().pred_to_json(predn, pbatch)
for i, r in enumerate(rles):
self.jdict[-len(rles) + i]["segmentation"] = r # segmentation
def scale_preds(self, predn: dict[str, torch.Tensor], pbatch: dict[str, Any]) -> dict[str, torch.Tensor]:
return {
**super().scale_preds(predn, pbatch),
"masks": ops.scale_masks(predn["masks"][None], pbatch["ori_shape"], ratio_pad=pbatch["ratio_pad"])[
0
].byte(),
}
def eval_json(self, stats: dict[str, Any]) -> dict[str, Any]:
pred_json = self.save_dir / "predictions.json" # predictions
anno_json = (
self.data["path"]
/ "annotations"
/ ("instances_val2017.json" if self.is_coco else f"lvis_v1_{self.args.split}.json")
) # annotations
return super().coco_evaluate(stats, pred_json, anno_json, ["bbox", "segm"], suffix=["Box", "Mask"]) | --- +++ @@ -16,19 +16,58 @@
class SegmentationValidator(DetectionValidator):
+ """A class extending the DetectionValidator class for validation based on a segmentation model.
+
+ This validator handles the evaluation of segmentation models, processing both bounding box and mask predictions to
+ compute metrics such as mAP for both detection and segmentation tasks.
+
+ Attributes:
+ plot_masks (list): List to store masks for plotting.
+ process (callable): Function to process masks based on save_json and save_txt flags.
+ args (SimpleNamespace): Arguments for the validator.
+ metrics (SegmentMetrics): Metrics calculator for segmentation tasks.
+ stats (dict): Dictionary to store statistics during validation.
+
+ Examples:
+ >>> from ultralytics.models.yolo.segment import SegmentationValidator
+ >>> args = dict(model="yolo26n-seg.pt", data="coco8-seg.yaml")
+ >>> validator = SegmentationValidator(args=args)
+ >>> validator()
+ """
def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks: dict | None = None) -> None:
+ """Initialize SegmentationValidator and set task to 'segment', metrics to SegmentMetrics.
+
+ Args:
+ dataloader (torch.utils.data.DataLoader, optional): DataLoader to use for validation.
+ save_dir (Path, optional): Directory to save results.
+ args (dict, optional): Arguments for the validator.
+ _callbacks (dict, optional): Dictionary of callback functions.
+ """
super().__init__(dataloader, save_dir, args, _callbacks)
self.process = None
self.args.task = "segment"
self.metrics = SegmentMetrics()
def preprocess(self, batch: dict[str, Any]) -> dict[str, Any]:
+ """Preprocess batch of images for YOLO segmentation validation.
+
+ Args:
+ batch (dict[str, Any]): Batch containing images and annotations.
+
+ Returns:
+ (dict[str, Any]): Preprocessed batch.
+ """
batch = super().preprocess(batch)
batch["masks"] = batch["masks"].float()
return batch
def init_metrics(self, model: torch.nn.Module) -> None:
+ """Initialize metrics and select mask processing function based on save_json flag.
+
+ Args:
+ model (torch.nn.Module): Model to validate.
+ """
super().init_metrics(model)
if self.args.save_json:
check_requirements("faster-coco-eval>=1.6.7")
@@ -36,6 +75,7 @@ self.process = ops.process_mask_native if self.args.save_json or self.args.save_txt else ops.process_mask
def get_desc(self) -> str:
+ """Return a formatted description of evaluation metrics."""
return ("%22s" + "%11s" * 10) % (
"Class",
"Images",
@@ -51,6 +91,14 @@ )
def postprocess(self, preds: list[torch.Tensor]) -> list[dict[str, torch.Tensor]]:
+ """Post-process YOLO predictions and return output detections with proto.
+
+ Args:
+ preds (list[torch.Tensor]): Raw predictions from the model.
+
+ Returns:
+ (list[dict[str, torch.Tensor]]): Processed detection predictions with masks.
+ """
proto = preds[0][1] if isinstance(preds[0], tuple) else preds[1]
preds = super().postprocess(preds[0])
imgsz = [4 * x for x in proto.shape[2:]] # get image size from proto
@@ -68,6 +116,15 @@ return preds
def _prepare_batch(self, si: int, batch: dict[str, Any]) -> dict[str, Any]:
+ """Prepare a batch for validation by processing images and targets.
+
+ Args:
+ si (int): Sample index within the batch.
+ batch (dict[str, Any]): Batch data containing images and annotations.
+
+ Returns:
+ (dict[str, Any]): Prepared batch with processed annotations.
+ """
prepared_batch = super()._prepare_batch(si, batch)
nl = prepared_batch["cls"].shape[0]
if self.args.overlap_mask:
@@ -85,6 +142,24 @@ return prepared_batch
def _process_batch(self, preds: dict[str, torch.Tensor], batch: dict[str, Any]) -> dict[str, np.ndarray]:
+ """Compute correct prediction matrix for a batch based on bounding boxes and optional masks.
+
+ Args:
+ preds (dict[str, torch.Tensor]): Dictionary containing predictions with keys like 'cls' and 'masks'.
+ batch (dict[str, Any]): Dictionary containing batch data with keys like 'cls' and 'masks'.
+
+ Returns:
+ (dict[str, np.ndarray]): A dictionary containing correct prediction matrices including 'tp_m' for mask IoU.
+
+ Examples:
+ >>> preds = {"cls": torch.tensor([1, 0]), "masks": torch.rand(2, 640, 640), "bboxes": torch.rand(2, 4)}
+ >>> batch = {"cls": torch.tensor([1, 0]), "masks": torch.rand(2, 640, 640), "bboxes": torch.rand(2, 4)}
+ >>> correct_preds = validator._process_batch(preds, batch)
+
+ Notes:
+ - This method computes IoU between predicted and ground truth masks.
+ - Overlapping masks are handled based on the overlap_mask argument setting.
+ """
tp = super()._process_batch(preds, batch)
gt_cls = batch["cls"]
if gt_cls.shape[0] == 0 or preds["cls"].shape[0] == 0:
@@ -96,6 +171,13 @@ return tp
def plot_predictions(self, batch: dict[str, Any], preds: list[dict[str, torch.Tensor]], ni: int) -> None:
+ """Plot batch predictions with masks and bounding boxes.
+
+ Args:
+ batch (dict[str, Any]): Batch containing images and annotations.
+ preds (list[dict[str, torch.Tensor]]): List of predictions from the model.
+ ni (int): Batch index.
+ """
for p in preds:
masks = p["masks"]
if masks.shape[0] > self.args.max_det:
@@ -104,6 +186,14 @@ super().plot_predictions(batch, preds, ni, max_det=self.args.max_det) # plot bboxes
def save_one_txt(self, predn: dict[str, torch.Tensor], save_conf: bool, shape: tuple[int, int], file: Path) -> None:
+ """Save YOLO detections to a txt file in normalized coordinates in a specific format.
+
+ Args:
+ predn (dict[str, torch.Tensor]): Prediction dictionary containing 'bboxes', 'conf', 'cls', and 'masks' keys.
+ save_conf (bool): Whether to save confidence scores.
+ shape (tuple[int, int]): Shape of the original image.
+ file (Path): File path to save the detections.
+ """
from ultralytics.engine.results import Results
Results(
@@ -115,8 +205,20 @@ ).save_txt(file, save_conf=save_conf)
def pred_to_json(self, predn: dict[str, torch.Tensor], pbatch: dict[str, Any]) -> None:
+ """Save one JSON result for COCO evaluation.
+
+ Args:
+ predn (dict[str, torch.Tensor]): Predictions containing bboxes, masks, confidence scores, and classes.
+ pbatch (dict[str, Any]): Batch dictionary containing 'imgsz', 'ori_shape', 'ratio_pad', and 'im_file'.
+ """
def to_string(counts: list[int]) -> str:
+ """Converts the RLE object into a compact string representation. Each count is delta-encoded and
+ variable-length encoded as a string.
+
+ Args:
+ counts (list[int]): List of RLE counts.
+ """
result = []
for i in range(len(counts)):
@@ -144,6 +246,15 @@ return "".join(result)
def multi_encode(pixels: torch.Tensor) -> list[int]:
+ """Convert multiple binary masks using Run-Length Encoding (RLE).
+
+ Args:
+ pixels (torch.Tensor): A 2D tensor of shape [N, H*W], where each row represents a flattened
+ binary mask.
+
+ Returns:
+ (list[list[int]]): A list of RLE counts for each mask.
+ """
transitions = pixels[:, 1:] != pixels[:, :-1]
row_idx, col_idx = torch.where(transitions)
col_idx = col_idx + 1
@@ -177,6 +288,7 @@ self.jdict[-len(rles) + i]["segmentation"] = r # segmentation
def scale_preds(self, predn: dict[str, torch.Tensor], pbatch: dict[str, Any]) -> dict[str, torch.Tensor]:
+ """Scales predictions to the original image size."""
return {
**super().scale_preds(predn, pbatch),
"masks": ops.scale_masks(predn["masks"][None], pbatch["ori_shape"], ratio_pad=pbatch["ratio_pad"])[
@@ -185,10 +297,11 @@ }
def eval_json(self, stats: dict[str, Any]) -> dict[str, Any]:
+ """Return COCO-style instance segmentation evaluation metrics."""
pred_json = self.save_dir / "predictions.json" # predictions
anno_json = (
self.data["path"]
/ "annotations"
/ ("instances_val2017.json" if self.is_coco else f"lvis_v1_{self.args.split}.json")
) # annotations
- return super().coco_evaluate(stats, pred_json, anno_json, ["bbox", "segm"], suffix=["Box", "Mask"])
+ return super().coco_evaluate(stats, pred_json, anno_json, ["bbox", "segm"], suffix=["Box", "Mask"])
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/yolo/segment/val.py |
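The `multi_encode`/`to_string` pair documented in the diff above implements COCO-style compressed RLE. As a minimal sketch (not the library API; the helper name `rle_counts` is hypothetical), the run lengths can be recovered from value transitions in a flattened binary mask as shown below; the delta/variable-length string packing performed by `to_string` is intentionally omitted.

import torch

def rle_counts(mask: torch.Tensor) -> list[int]:
    """Return RLE counts for a flattened binary mask, starting with a zero-run (COCO convention)."""
    pixels = mask.flatten()
    # Indices where the value changes, shifted by one so they mark run starts
    transitions = torch.nonzero(pixels[1:] != pixels[:-1]).flatten() + 1
    boundaries = torch.cat([torch.tensor([0]), transitions, torch.tensor([pixels.numel()])])
    counts = (boundaries[1:] - boundaries[:-1]).tolist()
    if pixels[0] == 1:  # COCO RLE counts zeros first, so prepend an empty zero-run
        counts = [0] + counts
    return counts

mask = torch.tensor([[0, 0, 1], [1, 1, 0]])
print(rle_counts(mask))  # [2, 3, 1]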
Improve documentation using docstrings | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
from __future__ import annotations
from copy import deepcopy
import torch
import torch.nn as nn
class Sam3DualViTDetNeck(nn.Module):
def __init__(
self,
trunk: nn.Module,
position_encoding: nn.Module,
d_model: int,
scale_factors=(4.0, 2.0, 1.0, 0.5),
add_sam2_neck: bool = False,
):
super().__init__()
self.trunk = trunk
self.position_encoding = position_encoding
self.convs = nn.ModuleList()
self.scale_factors = scale_factors
use_bias = True
dim: int = self.trunk.channel_list[-1]
for _, scale in enumerate(scale_factors):
current = nn.Sequential()
if scale == 4.0:
current.add_module(
"dconv_2x2_0",
nn.ConvTranspose2d(dim, dim // 2, kernel_size=2, stride=2),
)
current.add_module(
"gelu",
nn.GELU(),
)
current.add_module(
"dconv_2x2_1",
nn.ConvTranspose2d(dim // 2, dim // 4, kernel_size=2, stride=2),
)
out_dim = dim // 4
elif scale == 2.0:
current.add_module(
"dconv_2x2",
nn.ConvTranspose2d(dim, dim // 2, kernel_size=2, stride=2),
)
out_dim = dim // 2
elif scale == 1.0:
out_dim = dim
elif scale == 0.5:
current.add_module(
"maxpool_2x2",
nn.MaxPool2d(kernel_size=2, stride=2),
)
out_dim = dim
else:
raise NotImplementedError(f"scale_factor={scale} is not supported yet.")
current.add_module(
"conv_1x1",
nn.Conv2d(
in_channels=out_dim,
out_channels=d_model,
kernel_size=1,
bias=use_bias,
),
)
current.add_module(
"conv_3x3",
nn.Conv2d(
in_channels=d_model,
out_channels=d_model,
kernel_size=3,
padding=1,
bias=use_bias,
),
)
self.convs.append(current)
self.sam2_convs = None
if add_sam2_neck:
# Assumes sam2 neck is just a clone of the original neck
self.sam2_convs = deepcopy(self.convs)
def forward(
self, tensor_list: list[torch.Tensor]
) -> tuple[list[torch.Tensor], list[torch.Tensor], list[torch.Tensor] | None, list[torch.Tensor] | None]:
xs = self.trunk(tensor_list)
x = xs[-1] # simpleFPN
sam3_out, sam3_pos = self.sam_forward_feature_levels(x, self.convs)
if self.sam2_convs is None:
return sam3_out, sam3_pos, None, None
sam2_out, sam2_pos = self.sam_forward_feature_levels(x, self.sam2_convs)
return sam3_out, sam3_pos, sam2_out, sam2_pos
def sam_forward_feature_levels(
self, x: torch.Tensor, convs: nn.ModuleList
) -> tuple[list[torch.Tensor], list[torch.Tensor]]:
outs, poss = [], []
for conv in convs:
feat = conv(x)
outs.append(feat)
poss.append(self.position_encoding(feat).to(feat.dtype))
return outs, poss
def set_imgsz(self, imgsz: list[int] = [1008, 1008]):
self.trunk.set_imgsz(imgsz) | --- +++ @@ -2,6 +2,7 @@
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
+"""Necks are the interface between a vision backbone and the rest of the detection model."""
from __future__ import annotations
@@ -12,6 +13,7 @@
class Sam3DualViTDetNeck(nn.Module):
+ """A neck that implements a simple FPN as in ViTDet, with support for dual necks (for SAM3 and SAM2)."""
def __init__(
self,
@@ -21,6 +23,17 @@ scale_factors=(4.0, 2.0, 1.0, 0.5),
add_sam2_neck: bool = False,
):
+ """
+ SimpleFPN neck a la ViTDet
+ (from detectron2, very lightly adapted).
+ It supports a "dual neck" setting: two identical necks (for SAM3 and SAM2) with separate weights.
+
+ :param trunk: the backbone
+ :param position_encoding: the positional encoding to use
+ :param d_model: the dimension of the model
+ :param scale_factors: tuple of scale factors for each FPN level
+ :param add_sam2_neck: whether to add a second neck for SAM2
+ """
super().__init__()
self.trunk = trunk
self.position_encoding = position_encoding
@@ -93,6 +106,7 @@ def forward(
self, tensor_list: list[torch.Tensor]
) -> tuple[list[torch.Tensor], list[torch.Tensor], list[torch.Tensor] | None, list[torch.Tensor] | None]:
+ """Get feature maps and positional encodings from the neck."""
xs = self.trunk(tensor_list)
x = xs[-1] # simpleFPN
sam3_out, sam3_pos = self.sam_forward_feature_levels(x, self.convs)
@@ -104,6 +118,7 @@ def sam_forward_feature_levels(
self, x: torch.Tensor, convs: nn.ModuleList
) -> tuple[list[torch.Tensor], list[torch.Tensor]]:
+ """Run neck convolutions and compute positional encodings for each feature level."""
outs, poss = [], []
for conv in convs:
feat = conv(x)
@@ -112,4 +127,5 @@ return outs, poss
def set_imgsz(self, imgsz: list[int] = [1008, 1008]):
- self.trunk.set_imgsz(imgsz)
+ """Set the image size for the trunk backbone."""
+ self.trunk.set_imgsz(imgsz)
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/sam/sam3/necks.py |
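The dual-neck forward contract documented above returns four values: SAM3 feature maps and positional encodings, plus SAM2 counterparts when add_sam2_neck=True. A minimal sketch with stand-in modules (the names `DummyTrunk` and `DummyPosEnc` are hypothetical, assuming a single-level, stride-16 trunk) illustrates the resulting shapes:

import torch
import torch.nn as nn

class DummyTrunk(nn.Module):
    """Stand-in trunk returning one stride-16 feature map."""
    channel_list = [256]

    def forward(self, x):
        return [torch.randn(x.shape[0], 256, x.shape[2] // 16, x.shape[3] // 16)]

class DummyPosEnc(nn.Module):
    """Stand-in positional encoding (zeros placeholder)."""
    def forward(self, feat):
        return torch.zeros_like(feat)

neck = Sam3DualViTDetNeck(DummyTrunk(), DummyPosEnc(), d_model=128, add_sam2_neck=True)
sam3_out, sam3_pos, sam2_out, sam2_pos = neck(torch.randn(1, 3, 1008, 1008))
print([tuple(f.shape) for f in sam3_out])
# [(1, 128, 252, 252), (1, 128, 126, 126), (1, 128, 63, 63), (1, 128, 31, 31)]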
Add docstrings including usage examples | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import math
import random
from copy import deepcopy
from typing import Any
import cv2
import numpy as np
import torch
from PIL import Image
from torch.nn import functional as F
from ultralytics.data.utils import polygons2masks, polygons2masks_overlap
from ultralytics.utils import LOGGER, IterableSimpleNamespace, colorstr
from ultralytics.utils.checks import check_version
from ultralytics.utils.instance import Instances
from ultralytics.utils.metrics import bbox_ioa
from ultralytics.utils.ops import segment2box, xywh2xyxy, xyxyxyxy2xywhr
from ultralytics.utils.torch_utils import TORCHVISION_0_10, TORCHVISION_0_11, TORCHVISION_0_13
DEFAULT_MEAN = (0.0, 0.0, 0.0)
DEFAULT_STD = (1.0, 1.0, 1.0)
class BaseTransform:
def __init__(self) -> None:
pass
def apply_image(self, labels):
pass
def apply_instances(self, labels):
pass
def apply_semantic(self, labels):
pass
def __call__(self, labels):
self.apply_image(labels)
self.apply_instances(labels)
self.apply_semantic(labels)
class Compose:
def __init__(self, transforms):
self.transforms = transforms if isinstance(transforms, list) else [transforms]
def __call__(self, data):
for t in self.transforms:
data = t(data)
return data
def append(self, transform):
self.transforms.append(transform)
def insert(self, index, transform):
self.transforms.insert(index, transform)
def __getitem__(self, index: list | int) -> Compose:
assert isinstance(index, (int, list)), f"The indices should be either list or int type but got {type(index)}"
return Compose([self.transforms[i] for i in index]) if isinstance(index, list) else self.transforms[index]
def __setitem__(self, index: list | int, value: list | int) -> None:
assert isinstance(index, (int, list)), f"The indices should be either list or int type but got {type(index)}"
if isinstance(index, list):
assert isinstance(value, list), (
f"The indices should be the same type as values, but got {type(index)} and {type(value)}"
)
if isinstance(index, int):
index, value = [index], [value]
for i, v in zip(index, value):
assert i < len(self.transforms), f"list index {i} out of range {len(self.transforms)}."
self.transforms[i] = v
def tolist(self):
return self.transforms
def __repr__(self):
return f"{self.__class__.__name__}({', '.join([f'{t}' for t in self.transforms])})"
class BaseMixTransform:
def __init__(self, dataset, pre_transform=None, p=0.0) -> None:
self.dataset = dataset
self.pre_transform = pre_transform
self.p = p
def __call__(self, labels: dict[str, Any]) -> dict[str, Any]:
if random.uniform(0, 1) > self.p:
return labels
# Get index of one or three other images
indexes = self.get_indexes()
if isinstance(indexes, int):
indexes = [indexes]
# Get image information that will be used for Mosaic, CutMix or MixUp
mix_labels = [self.dataset.get_image_and_label(i) for i in indexes]
if self.pre_transform is not None:
for i, data in enumerate(mix_labels):
mix_labels[i] = self.pre_transform(data)
labels["mix_labels"] = mix_labels
# Update cls and texts
labels = self._update_label_text(labels)
# Mosaic, CutMix or MixUp
labels = self._mix_transform(labels)
labels.pop("mix_labels", None)
return labels
def _mix_transform(self, labels: dict[str, Any]):
raise NotImplementedError
def get_indexes(self):
return random.randint(0, len(self.dataset) - 1)
@staticmethod
def _update_label_text(labels: dict[str, Any]) -> dict[str, Any]:
if "texts" not in labels:
return labels
mix_texts = [*labels["texts"], *(item for x in labels["mix_labels"] for item in x["texts"])]
mix_texts = list({tuple(x) for x in mix_texts})
text2id = {text: i for i, text in enumerate(mix_texts)}
for label in [labels] + labels["mix_labels"]:
for i, cls in enumerate(label["cls"].squeeze(-1).tolist()):
text = label["texts"][int(cls)]
label["cls"][i] = text2id[tuple(text)]
label["texts"] = mix_texts
return labels
class Mosaic(BaseMixTransform):
def __init__(self, dataset, imgsz: int = 640, p: float = 1.0, n: int = 4):
assert 0 <= p <= 1.0, f"The probability should be in range [0, 1], but got {p}."
assert n in {4, 9}, "grid must be equal to 4 or 9."
super().__init__(dataset=dataset, p=p)
self.imgsz = imgsz
self.border = (-imgsz // 2, -imgsz // 2) # width, height
self.n = n
self.buffer_enabled = self.dataset.cache != "ram"
def get_indexes(self):
if self.buffer_enabled: # select images from buffer
return random.choices(list(self.dataset.buffer), k=self.n - 1)
else: # select any images
return [random.randint(0, len(self.dataset) - 1) for _ in range(self.n - 1)]
def _mix_transform(self, labels: dict[str, Any]) -> dict[str, Any]:
assert labels.get("rect_shape") is None, "rect and mosaic are mutually exclusive."
assert len(labels.get("mix_labels", [])), "There are no other images for mosaic augment."
return (
self._mosaic3(labels) if self.n == 3 else self._mosaic4(labels) if self.n == 4 else self._mosaic9(labels)
) # n is restricted to 4 or 9 in __init__; the mosaic3 branch is kept for reference
def _mosaic3(self, labels: dict[str, Any]) -> dict[str, Any]:
mosaic_labels = []
s = self.imgsz
for i in range(3):
labels_patch = labels if i == 0 else labels["mix_labels"][i - 1]
# Load image
img = labels_patch["img"]
h, w = labels_patch.pop("resized_shape")
# Place img in img3
if i == 0: # center
img3 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 3 tiles
h0, w0 = h, w
c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates
elif i == 1: # right
c = s + w0, s, s + w0 + w, s + h
elif i == 2: # left
c = s - w, s + h0 - h, s, s + h0
padw, padh = c[:2]
x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coordinates
img3[y1:y2, x1:x2] = img[y1 - padh :, x1 - padw :] # img3[ymin:ymax, xmin:xmax]
# hp, wp = h, w # height, width previous for next iteration
# Labels assuming imgsz*2 mosaic size
labels_patch = self._update_labels(labels_patch, padw + self.border[0], padh + self.border[1])
mosaic_labels.append(labels_patch)
final_labels = self._cat_labels(mosaic_labels)
final_labels["img"] = img3[-self.border[0] : self.border[0], -self.border[1] : self.border[1]]
return final_labels
def _mosaic4(self, labels: dict[str, Any]) -> dict[str, Any]:
mosaic_labels = []
s = self.imgsz
yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.border) # mosaic center x, y
for i in range(4):
labels_patch = labels if i == 0 else labels["mix_labels"][i - 1]
# Load image
img = labels_patch["img"]
h, w = labels_patch.pop("resized_shape")
# Place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
labels_patch = self._update_labels(labels_patch, padw, padh)
mosaic_labels.append(labels_patch)
final_labels = self._cat_labels(mosaic_labels)
final_labels["img"] = img4
return final_labels
def _mosaic9(self, labels: dict[str, Any]) -> dict[str, Any]:
mosaic_labels = []
s = self.imgsz
hp, wp = -1, -1 # height, width previous
for i in range(9):
labels_patch = labels if i == 0 else labels["mix_labels"][i - 1]
# Load image
img = labels_patch["img"]
h, w = labels_patch.pop("resized_shape")
# Place img in img9
if i == 0: # center
img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 9 tiles
h0, w0 = h, w
c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates
elif i == 1: # top
c = s, s - h, s + w, s
elif i == 2: # top right
c = s + wp, s - h, s + wp + w, s
elif i == 3: # right
c = s + w0, s, s + w0 + w, s + h
elif i == 4: # bottom right
c = s + w0, s + hp, s + w0 + w, s + hp + h
elif i == 5: # bottom
c = s + w0 - w, s + h0, s + w0, s + h0 + h
elif i == 6: # bottom left
c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
elif i == 7: # left
c = s - w, s + h0 - h, s, s + h0
elif i == 8: # top left
c = s - w, s + h0 - hp - h, s, s + h0 - hp
padw, padh = c[:2]
x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coordinates
# Image
img9[y1:y2, x1:x2] = img[y1 - padh :, x1 - padw :] # img9[ymin:ymax, xmin:xmax]
hp, wp = h, w # height, width previous for next iteration
# Labels assuming imgsz*2 mosaic size
labels_patch = self._update_labels(labels_patch, padw + self.border[0], padh + self.border[1])
mosaic_labels.append(labels_patch)
final_labels = self._cat_labels(mosaic_labels)
final_labels["img"] = img9[-self.border[0] : self.border[0], -self.border[1] : self.border[1]]
return final_labels
@staticmethod
def _update_labels(labels, padw: int, padh: int) -> dict[str, Any]:
nh, nw = labels["img"].shape[:2]
labels["instances"].convert_bbox(format="xyxy")
labels["instances"].denormalize(nw, nh)
labels["instances"].add_padding(padw, padh)
return labels
def _cat_labels(self, mosaic_labels: list[dict[str, Any]]) -> dict[str, Any]:
if not mosaic_labels:
return {}
cls = []
instances = []
imgsz = self.imgsz * 2 # mosaic imgsz
for labels in mosaic_labels:
cls.append(labels["cls"])
instances.append(labels["instances"])
# Final labels
final_labels = {
"im_file": mosaic_labels[0]["im_file"],
"ori_shape": mosaic_labels[0]["ori_shape"],
"resized_shape": (imgsz, imgsz),
"cls": np.concatenate(cls, 0),
"instances": Instances.concatenate(instances, axis=0),
"mosaic_border": self.border,
}
final_labels["instances"].clip(imgsz, imgsz)
good = final_labels["instances"].remove_zero_area_boxes()
final_labels["cls"] = final_labels["cls"][good]
if "texts" in mosaic_labels[0]:
final_labels["texts"] = mosaic_labels[0]["texts"]
return final_labels
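# A brief worked sketch (illustrative values only, not part of the class) of the
# _mosaic4 placement arithmetic above for the top-left tile: given mosaic size s, a
# random center (xc, yc), and a patch of shape (h, w), the overlapping canvas/patch
# regions and the label offsets (padw, padh) follow directly.
_s, (_h, _w), (_xc, _yc) = 640, (480, 640), (700, 500)
_x1a, _y1a, _x2a, _y2a = max(_xc - _w, 0), max(_yc - _h, 0), _xc, _yc  # canvas region
_x1b, _y1b, _x2b, _y2b = _w - (_x2a - _x1a), _h - (_y2a - _y1a), _w, _h  # patch region
print((_x1a - _x1b, _y1a - _y1b))  # label offsets (padw, padh) -> (60, 20)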
class MixUp(BaseMixTransform):
def __init__(self, dataset, pre_transform=None, p: float = 0.0) -> None:
super().__init__(dataset=dataset, pre_transform=pre_transform, p=p)
def _mix_transform(self, labels: dict[str, Any]) -> dict[str, Any]:
r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0
labels2 = labels["mix_labels"][0]
labels["img"] = (labels["img"] * r + labels2["img"] * (1 - r)).astype(np.uint8)
labels["instances"] = Instances.concatenate([labels["instances"], labels2["instances"]], axis=0)
labels["cls"] = np.concatenate([labels["cls"], labels2["cls"]], 0)
return labels
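# A tiny sketch (illustrative only, not part of the class) of the MixUp blend above:
# with r ~ Beta(32, 32) concentrated near 0.5, two images are combined pixel-wise as
# r * im1 + (1 - r) * im2 before casting back to uint8.
_r = np.random.beta(32.0, 32.0)
_im1 = np.full((2, 2, 3), 200, dtype=np.uint8)
_im2 = np.full((2, 2, 3), 100, dtype=np.uint8)
_mixed = (_im1 * _r + _im2 * (1 - _r)).astype(np.uint8)
print(f"r={_r:.3f}, blended pixel={_mixed[0, 0, 0]}")  # roughly midway between 100 and 200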
class CutMix(BaseMixTransform):
def __init__(self, dataset, pre_transform=None, p: float = 0.0, beta: float = 1.0, num_areas: int = 3) -> None:
super().__init__(dataset=dataset, pre_transform=pre_transform, p=p)
self.beta = beta
self.num_areas = num_areas
def _rand_bbox(self, width: int, height: int) -> tuple[int, int, int, int]:
# Sample mixing ratio from Beta distribution
lam = np.random.beta(self.beta, self.beta)
cut_ratio = np.sqrt(1.0 - lam)
cut_w = int(width * cut_ratio)
cut_h = int(height * cut_ratio)
# Random center
cx = np.random.randint(width)
cy = np.random.randint(height)
# Bounding box coordinates
x1 = np.clip(cx - cut_w // 2, 0, width)
y1 = np.clip(cy - cut_h // 2, 0, height)
x2 = np.clip(cx + cut_w // 2, 0, width)
y2 = np.clip(cy + cut_h // 2, 0, height)
return x1, y1, x2, y2
def _mix_transform(self, labels: dict[str, Any]) -> dict[str, Any]:
# Get a random second image
h, w = labels["img"].shape[:2]
cut_areas = np.asarray([self._rand_bbox(w, h) for _ in range(self.num_areas)], dtype=np.float32)
ioa1 = bbox_ioa(cut_areas, labels["instances"].bboxes) # (self.num_areas, num_boxes)
idx = np.nonzero(ioa1.sum(axis=1) <= 0)[0]
if len(idx) == 0:
return labels
labels2 = labels.pop("mix_labels")[0]
area = cut_areas[np.random.choice(idx)] # randomly select one
ioa2 = bbox_ioa(area[None], labels2["instances"].bboxes).squeeze(0)
indexes2 = np.nonzero(ioa2 >= (0.01 if len(labels["instances"].segments) else 0.1))[0]
if len(indexes2) == 0:
return labels
instances2 = labels2["instances"][indexes2]
instances2.convert_bbox("xyxy")
instances2.denormalize(w, h)
# Apply CutMix
x1, y1, x2, y2 = area.astype(np.int32)
labels["img"][y1:y2, x1:x2] = labels2["img"][y1:y2, x1:x2]
# Clip instances2 to the sampled cutout region
instances2.add_padding(-x1, -y1)
instances2.clip(x2 - x1, y2 - y1)
instances2.add_padding(x1, y1)
labels["cls"] = np.concatenate([labels["cls"], labels2["cls"][indexes2]], axis=0)
labels["instances"] = Instances.concatenate([labels["instances"], instances2], axis=0)
return labels
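# A short sketch (illustrative only) of the _rand_bbox sampling above: the Beta-sampled
# mixing ratio lam sets the cut area to a (1 - lam) fraction of the image, and the box
# is clipped to the image bounds around a random center.
_lam = np.random.beta(1.0, 1.0)
_cut = np.sqrt(1.0 - _lam)
_w, _h = 640, 480
_cw, _ch = int(_w * _cut), int(_h * _cut)
_cx, _cy = np.random.randint(_w), np.random.randint(_h)
_box = (
    np.clip(_cx - _cw // 2, 0, _w),
    np.clip(_cy - _ch // 2, 0, _h),
    np.clip(_cx + _cw // 2, 0, _w),
    np.clip(_cy + _ch // 2, 0, _h),
)
print(f"lam={_lam:.3f}, cut box={_box}")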
class RandomPerspective:
def __init__(
self,
degrees: float = 0.0,
translate: float = 0.1,
scale: float = 0.5,
shear: float = 0.0,
perspective: float = 0.0,
border: tuple[int, int] = (0, 0),
pre_transform=None,
):
self.degrees = degrees
self.translate = translate
self.scale = scale
self.shear = shear
self.perspective = perspective
self.border = border # mosaic border
self.pre_transform = pre_transform
def affine_transform(self, img: np.ndarray, border: tuple[int, int]) -> tuple[np.ndarray, np.ndarray, float]:
# Center
C = np.eye(3, dtype=np.float32)
C[0, 2] = -img.shape[1] / 2 # x translation (pixels)
C[1, 2] = -img.shape[0] / 2 # y translation (pixels)
# Perspective
P = np.eye(3, dtype=np.float32)
P[2, 0] = random.uniform(-self.perspective, self.perspective) # x perspective (about y)
P[2, 1] = random.uniform(-self.perspective, self.perspective) # y perspective (about x)
# Rotation and Scale
R = np.eye(3, dtype=np.float32)
a = random.uniform(-self.degrees, self.degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - self.scale, 1 + self.scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
# Shear
S = np.eye(3, dtype=np.float32)
S[0, 1] = math.tan(random.uniform(-self.shear, self.shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-self.shear, self.shear) * math.pi / 180) # y shear (deg)
# Translation
T = np.eye(3, dtype=np.float32)
T[0, 2] = random.uniform(0.5 - self.translate, 0.5 + self.translate) * self.size[0] # x translation (pixels)
T[1, 2] = random.uniform(0.5 - self.translate, 0.5 + self.translate) * self.size[1] # y translation (pixels)
# Combined rotation matrix
M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
# Affine image
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
if self.perspective:
img = cv2.warpPerspective(img, M, dsize=self.size, borderValue=(114, 114, 114))
else: # affine
img = cv2.warpAffine(img, M[:2], dsize=self.size, borderValue=(114, 114, 114))
if img.ndim == 2:
img = img[..., None]
return img, M, s
def apply_bboxes(self, bboxes: np.ndarray, M: np.ndarray) -> np.ndarray:
n = len(bboxes)
if n == 0:
return bboxes
xy = np.ones((n * 4, 3), dtype=bboxes.dtype)
xy[:, :2] = bboxes[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = xy @ M.T # transform
xy = (xy[:, :2] / xy[:, 2:3] if self.perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine
# Create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
return np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1)), dtype=bboxes.dtype).reshape(4, n).T
def apply_segments(self, segments: np.ndarray, M: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
n, num = segments.shape[:2]
if n == 0:
return [], segments
xy = np.ones((n * num, 3), dtype=segments.dtype)
segments = segments.reshape(-1, 2)
xy[:, :2] = segments
xy = xy @ M.T # transform
xy = xy[:, :2] / xy[:, 2:3]
segments = xy.reshape(n, -1, 2)
bboxes = np.stack([segment2box(xy, self.size[0], self.size[1]) for xy in segments], 0)
segments[..., 0] = segments[..., 0].clip(bboxes[:, 0:1], bboxes[:, 2:3])
segments[..., 1] = segments[..., 1].clip(bboxes[:, 1:2], bboxes[:, 3:4])
return bboxes, segments
def apply_keypoints(self, keypoints: np.ndarray, M: np.ndarray) -> np.ndarray:
n, nkpt = keypoints.shape[:2]
if n == 0:
return keypoints
xy = np.ones((n * nkpt, 3), dtype=keypoints.dtype)
visible = keypoints[..., 2].reshape(n * nkpt, 1)
xy[:, :2] = keypoints[..., :2].reshape(n * nkpt, 2)
xy = xy @ M.T # transform
xy = xy[:, :2] / xy[:, 2:3] # perspective rescale or affine
out_mask = (xy[:, 0] < 0) | (xy[:, 1] < 0) | (xy[:, 0] > self.size[0]) | (xy[:, 1] > self.size[1])
visible[out_mask] = 0
return np.concatenate([xy, visible], axis=-1).reshape(n, nkpt, 3)
def __call__(self, labels: dict[str, Any]) -> dict[str, Any]:
if self.pre_transform and "mosaic_border" not in labels:
labels = self.pre_transform(labels)
labels.pop("ratio_pad", None) # do not need ratio pad
img = labels["img"]
cls = labels["cls"]
instances = labels.pop("instances")
# Make sure the coord formats are right
instances.convert_bbox(format="xyxy")
instances.denormalize(*img.shape[:2][::-1])
border = labels.pop("mosaic_border", self.border)
self.size = img.shape[1] + border[1] * 2, img.shape[0] + border[0] * 2 # w, h
# M is affine matrix
# Scale for func:`box_candidates`
img, M, scale = self.affine_transform(img, border)
bboxes = self.apply_bboxes(instances.bboxes, M)
segments = instances.segments
keypoints = instances.keypoints
# Update bboxes if there are segments.
if len(segments):
bboxes, segments = self.apply_segments(segments, M)
if keypoints is not None:
keypoints = self.apply_keypoints(keypoints, M)
new_instances = Instances(bboxes, segments, keypoints, bbox_format="xyxy", normalized=False)
# Clip
new_instances.clip(*self.size)
# Filter instances
instances.scale(scale_w=scale, scale_h=scale, bbox_only=True)
# Make the bboxes have the same scale with new_bboxes
i = self.box_candidates(
box1=instances.bboxes.T, box2=new_instances.bboxes.T, area_thr=0.01 if len(segments) else 0.10
)
labels["instances"] = new_instances[i]
labels["cls"] = cls[i]
labels["img"] = img
labels["resized_shape"] = img.shape[:2]
return labels
@staticmethod
def box_candidates(
box1: np.ndarray,
box2: np.ndarray,
wh_thr: int = 2,
ar_thr: int = 100,
area_thr: float = 0.1,
eps: float = 1e-16,
) -> np.ndarray:
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates
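# A small sketch (assumed values, illustrative only) of the box_candidates filter above:
# a box survives the affine transform only if both new sides exceed wh_thr pixels, the
# new/old area ratio exceeds area_thr, and the aspect ratio stays below ar_thr.
_box1 = np.array([[0, 0, 100, 50]], dtype=np.float32).T  # before transform, shape (4, n)
_box2 = np.array([[0, 0, 40, 20]], dtype=np.float32).T   # after transform, shape (4, n)
print(RandomPerspective.box_candidates(_box1, _box2))  # [ True]: area ratio 800/5000 = 0.16 > 0.1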
class RandomHSV:
def __init__(self, hgain: float = 0.5, sgain: float = 0.5, vgain: float = 0.5) -> None:
self.hgain = hgain
self.sgain = sgain
self.vgain = vgain
def __call__(self, labels: dict[str, Any]) -> dict[str, Any]:
img = labels["img"]
if img.shape[-1] != 3: # only apply to RGB images
return labels
if self.hgain or self.sgain or self.vgain:
dtype = img.dtype # uint8
r = np.random.uniform(-1, 1, 3) * [self.hgain, self.sgain, self.vgain] # random gains
x = np.arange(0, 256, dtype=r.dtype)
# lut_hue = ((x * (r[0] + 1)) % 180).astype(dtype) # original hue implementation from ultralytics<=8.3.78
lut_hue = ((x + r[0] * 180) % 180).astype(dtype)
lut_sat = np.clip(x * (r[1] + 1), 0, 255).astype(dtype)
lut_val = np.clip(x * (r[2] + 1), 0, 255).astype(dtype)
lut_sat[0] = 0 # prevent pure white changing color, introduced in 8.3.79
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))
cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
return labels
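# A minimal sketch (illustrative only) of the LUT-based hue shift used above: OpenCV
# hue lives in [0, 180), so a sampled gain r maps each hue value x to (x + r * 180) % 180,
# precomputed once as a 256-entry table and applied with cv2.LUT.
_r = 0.1  # example sampled hue gain
_x = np.arange(0, 256, dtype=np.float64)
_lut_hue = ((_x + _r * 180) % 180).astype(np.uint8)
print(_lut_hue[0], _lut_hue[90], _lut_hue[179])  # 18 108 17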
class RandomFlip:
def __init__(self, p: float = 0.5, direction: str = "horizontal", flip_idx: list[int] | None = None) -> None:
assert direction in {"horizontal", "vertical"}, f"Supported directions are `horizontal` or `vertical`, got {direction}"
assert 0 <= p <= 1.0, f"The probability should be in range [0, 1], but got {p}."
self.p = p
self.direction = direction
self.flip_idx = flip_idx
def __call__(self, labels: dict[str, Any]) -> dict[str, Any]:
img = labels["img"]
instances = labels.pop("instances")
instances.convert_bbox(format="xywh")
h, w = img.shape[:2]
h = 1 if instances.normalized else h
w = 1 if instances.normalized else w
# WARNING: the two separate `if` statements and random.random() calls are intentional for reproducibility with older versions
if self.direction == "vertical" and random.random() < self.p:
img = np.flipud(img)
instances.flipud(h)
if self.flip_idx is not None and instances.keypoints is not None:
instances.keypoints = np.ascontiguousarray(instances.keypoints[:, self.flip_idx, :])
if self.direction == "horizontal" and random.random() < self.p:
img = np.fliplr(img)
instances.fliplr(w)
if self.flip_idx is not None and instances.keypoints is not None:
instances.keypoints = np.ascontiguousarray(instances.keypoints[:, self.flip_idx, :])
labels["img"] = np.ascontiguousarray(img)
labels["instances"] = instances
return labels
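# A compact sketch (illustrative only) of the flip_idx remapping above: after mirroring
# keypoint x-coordinates, left/right keypoints must be swapped, which fancy indexing
# with flip_idx does in one step.
_kpts = np.array([[[10.0, 5.0, 1.0], [30.0, 5.0, 1.0]]])  # (1 instance, 2 keypoints, xyv)
_flip_idx = [1, 0]  # keypoint 0 <-> keypoint 1, e.g. left/right eye
_w = 40
_kpts[..., 0] = _w - _kpts[..., 0]  # mirror x, as Instances.fliplr does
_kpts = np.ascontiguousarray(_kpts[:, _flip_idx, :])
print(_kpts[0])  # [[10. 5. 1.], [30. 5. 1.]] -- slot 0 again holds the keypoint at x=10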
class LetterBox:
def __init__(
self,
new_shape: tuple[int, int] = (640, 640),
auto: bool = False,
scale_fill: bool = False,
scaleup: bool = True,
center: bool = True,
stride: int = 32,
padding_value: int = 114,
interpolation: int = cv2.INTER_LINEAR,
):
self.new_shape = new_shape
self.auto = auto
self.scale_fill = scale_fill
self.scaleup = scaleup
self.stride = stride
self.center = center # Put the image in the middle or top-left
self.padding_value = padding_value
self.interpolation = interpolation
def __call__(self, labels: dict[str, Any] | None = None, image: np.ndarray = None) -> dict[str, Any] | np.ndarray:
if labels is None:
labels = {}
img = labels.get("img") if image is None else image
shape = img.shape[:2] # current shape [height, width]
new_shape = labels.pop("rect_shape", self.new_shape)
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not self.scaleup: # only scale down, do not scale up (for better val mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = round(shape[1] * r), round(shape[0] * r)
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if self.auto: # minimum rectangle
dw, dh = np.mod(dw, self.stride), np.mod(dh, self.stride) # wh padding
elif self.scale_fill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
if self.center:
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=self.interpolation)
if img.ndim == 2:
img = img[..., None]
top, bottom = round(dh - 0.1) if self.center else 0, round(dh + 0.1)
left, right = round(dw - 0.1) if self.center else 0, round(dw + 0.1)
h, w, c = img.shape
if c == 3:
img = cv2.copyMakeBorder(
img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=(self.padding_value,) * 3
)
else: # multispectral
pad_img = np.full((h + top + bottom, w + left + right, c), fill_value=self.padding_value, dtype=img.dtype)
pad_img[top : top + h, left : left + w] = img
img = pad_img
if labels.get("ratio_pad"):
labels["ratio_pad"] = (labels["ratio_pad"], (left, top)) # for evaluation
if len(labels):
labels = self._update_labels(labels, ratio, left, top)
labels["img"] = img
labels["resized_shape"] = new_shape
return labels
else:
return img
@staticmethod
def _update_labels(labels: dict[str, Any], ratio: tuple[float, float], padw: float, padh: float) -> dict[str, Any]:
labels["instances"].convert_bbox(format="xyxy")
labels["instances"].denormalize(*labels["img"].shape[:2][::-1])
labels["instances"].scale(*ratio)
labels["instances"].add_padding(padw, padh)
return labels
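# A worked sketch (illustrative only) of the LetterBox arithmetic above for a 720x1280
# image into a 640x640 canvas with centering: scale by the limiting ratio, then split
# the leftover space into symmetric padding.
_shape, _new_shape = (720, 1280), (640, 640)  # (h, w)
_r = min(_new_shape[0] / _shape[0], _new_shape[1] / _shape[1])
_new_unpad = round(_shape[1] * _r), round(_shape[0] * _r)
_dw, _dh = (_new_shape[1] - _new_unpad[0]) / 2, (_new_shape[0] - _new_unpad[1]) / 2
print(_r, _new_unpad, (_dw, _dh))  # 0.5 (640, 360) (0.0, 140.0)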
class CopyPaste(BaseMixTransform):
def __init__(self, dataset=None, pre_transform=None, p: float = 0.5, mode: str = "flip") -> None:
super().__init__(dataset=dataset, pre_transform=pre_transform, p=p)
assert mode in {"flip", "mixup"}, f"Expected `mode` to be `flip` or `mixup`, but got {mode}."
self.mode = mode
def _mix_transform(self, labels: dict[str, Any]) -> dict[str, Any]:
labels2 = labels["mix_labels"][0]
return self._transform(labels, labels2)
def __call__(self, labels: dict[str, Any]) -> dict[str, Any]:
if len(labels["instances"].segments) == 0 or self.p == 0:
return labels
if self.mode == "flip":
return self._transform(labels)
# Get index of one or three other images
indexes = self.get_indexes()
if isinstance(indexes, int):
indexes = [indexes]
# Get image information that will be used for Mosaic or MixUp
mix_labels = [self.dataset.get_image_and_label(i) for i in indexes]
if self.pre_transform is not None:
for i, data in enumerate(mix_labels):
mix_labels[i] = self.pre_transform(data)
labels["mix_labels"] = mix_labels
# Update cls and texts
labels = self._update_label_text(labels)
# Mosaic or MixUp
labels = self._mix_transform(labels)
labels.pop("mix_labels", None)
return labels
def _transform(self, labels1: dict[str, Any], labels2: dict[str, Any] = {}) -> dict[str, Any]:
im = labels1["img"]
if "mosaic_border" not in labels1:
im = im.copy() # avoid modifying original non-mosaic image
cls = labels1["cls"]
h, w = im.shape[:2]
instances = labels1.pop("instances")
instances.convert_bbox(format="xyxy")
instances.denormalize(w, h)
im_new = np.zeros(im.shape[:2], np.uint8)
instances2 = labels2.pop("instances", None)
if instances2 is None:
instances2 = deepcopy(instances)
instances2.fliplr(w)
ioa = bbox_ioa(instances2.bboxes, instances.bboxes) # intersection over area, (N, M)
indexes = np.nonzero((ioa < 0.30).all(1))[0] # (N, )
n = len(indexes)
sorted_idx = np.argsort(ioa.max(1)[indexes])
indexes = indexes[sorted_idx]
for j in indexes[: round(self.p * n)]:
cls = np.concatenate((cls, labels2.get("cls", cls)[[j]]), axis=0)
instances = Instances.concatenate((instances, instances2[[j]]), axis=0)
cv2.drawContours(im_new, instances2.segments[[j]].astype(np.int32), -1, 1, cv2.FILLED)
result = labels2.get("img", cv2.flip(im, 1)) # augment segments
if result.ndim == 2: # cv2.flip would eliminate the last dimension for grayscale images
result = result[..., None]
i = im_new.astype(bool)
im[i] = result[i]
labels1["img"] = im
labels1["cls"] = cls
labels1["instances"] = instances
return labels1
class Albumentations:
def __init__(self, p: float = 1.0, transforms: list | None = None) -> None:
self.p = p
self.transform = None
prefix = colorstr("albumentations: ")
try:
import os
os.environ["NO_ALBUMENTATIONS_UPDATE"] = "1" # suppress Albumentations upgrade message
import albumentations as A
check_version(A.__version__, "1.0.3", hard=True) # version requirement
# List of possible spatial transforms
spatial_transforms = {
"Affine",
"BBoxSafeRandomCrop",
"CenterCrop",
"CoarseDropout",
"Crop",
"CropAndPad",
"CropNonEmptyMaskIfExists",
"D4",
"ElasticTransform",
"Flip",
"GridDistortion",
"GridDropout",
"HorizontalFlip",
"Lambda",
"LongestMaxSize",
"MaskDropout",
"MixUp",
"Morphological",
"NoOp",
"OpticalDistortion",
"PadIfNeeded",
"Perspective",
"PiecewiseAffine",
"PixelDropout",
"RandomCrop",
"RandomCropFromBorders",
"RandomGridShuffle",
"RandomResizedCrop",
"RandomRotate90",
"RandomScale",
"RandomSizedBBoxSafeCrop",
"RandomSizedCrop",
"Resize",
"Rotate",
"SafeRotate",
"ShiftScaleRotate",
"SmallestMaxSize",
"Transpose",
"VerticalFlip",
"XYMasking",
} # from https://albumentations.ai/docs/getting_started/transforms_and_targets/#spatial-level-transforms
# Transforms, use custom transforms if provided, otherwise use defaults
T = (
[
A.Blur(p=0.01),
A.MedianBlur(p=0.01),
A.ToGray(p=0.01),
A.CLAHE(p=0.01),
A.RandomBrightnessContrast(p=0.0),
A.RandomGamma(p=0.0),
A.ImageCompression(quality_range=(75, 100), p=0.0),
]
if transforms is None
else transforms
)
# Compose transforms
self.contains_spatial = any(transform.__class__.__name__ in spatial_transforms for transform in T)
self.transform = (
A.Compose(T, bbox_params=A.BboxParams(format="yolo", label_fields=["class_labels"]))
if self.contains_spatial
else A.Compose(T)
)
if hasattr(self.transform, "set_random_seed"):
# Required for deterministic transforms in albumentations>=1.4.21
self.transform.set_random_seed(torch.initial_seed())
LOGGER.info(prefix + ", ".join(f"{x}".replace("always_apply=False, ", "") for x in T if x.p))
except ImportError: # package not installed, skip
pass
except Exception as e:
LOGGER.info(f"{prefix}{e}")
def __call__(self, labels: dict[str, Any]) -> dict[str, Any]:
if self.transform is None or random.random() > self.p:
return labels
im = labels["img"]
if im.shape[2] != 3: # Only apply Albumentation on 3-channel images
return labels
if self.contains_spatial:
cls = labels["cls"]
if len(cls):
labels["instances"].convert_bbox("xywh")
labels["instances"].normalize(*im.shape[:2][::-1])
bboxes = labels["instances"].bboxes
# TODO: add support for segments and keypoints
new = self.transform(image=im, bboxes=bboxes, class_labels=cls) # transformed
if len(new["class_labels"]) > 0: # skip update if no bbox in new im
labels["img"] = new["image"]
labels["cls"] = np.array(new["class_labels"]).reshape(-1, 1)
bboxes = np.array(new["bboxes"], dtype=np.float32)
labels["instances"].update(bboxes=bboxes)
else:
labels["img"] = self.transform(image=labels["img"])["image"] # transformed
return labels
class Format:
def __init__(
self,
bbox_format: str = "xywh",
normalize: bool = True,
return_mask: bool = False,
return_keypoint: bool = False,
return_obb: bool = False,
mask_ratio: int = 4,
mask_overlap: bool = True,
batch_idx: bool = True,
bgr: float = 0.0,
):
self.bbox_format = bbox_format
self.normalize = normalize
self.return_mask = return_mask # set False when training detection only
self.return_keypoint = return_keypoint
self.return_obb = return_obb
self.mask_ratio = mask_ratio
self.mask_overlap = mask_overlap
self.batch_idx = batch_idx # keep the batch indexes
self.bgr = bgr
def __call__(self, labels: dict[str, Any]) -> dict[str, Any]:
img = labels.pop("img")
h, w = img.shape[:2]
cls = labels.pop("cls")
instances = labels.pop("instances")
instances.convert_bbox(format=self.bbox_format)
instances.denormalize(w, h)
nl = len(instances)
if self.return_mask:
if nl:
masks, instances, cls = self._format_segments(instances, cls, w, h)
masks = torch.from_numpy(masks)
cls_tensor = torch.from_numpy(cls.squeeze(1))
if self.mask_overlap:
sem_masks = cls_tensor[masks[0].long() - 1] # (H, W) from (1, H, W) instance indices
else:
# Create sem_masks consistent with mask_overlap=True
sem_masks = (masks * cls_tensor[:, None, None]).max(0).values # (H, W) from (N, H, W) binary
overlap = masks.sum(dim=0) > 1 # (H, W)
if overlap.any():
weights = masks.sum(axis=(1, 2))
weighted_masks = masks * weights[:, None, None] # (N, H, W)
weighted_masks[masks == 0] = weights.max() + 1 # handle background
smallest_idx = weighted_masks.argmin(dim=0) # (H, W)
sem_masks[overlap] = cls_tensor[smallest_idx[overlap]]
else:
masks = torch.zeros(
1 if self.mask_overlap else nl, img.shape[0] // self.mask_ratio, img.shape[1] // self.mask_ratio
)
sem_masks = torch.zeros(img.shape[0] // self.mask_ratio, img.shape[1] // self.mask_ratio)
labels["masks"] = masks
labels["sem_masks"] = sem_masks.float()
labels["img"] = self._format_img(img)
labels["cls"] = torch.from_numpy(cls) if nl else torch.zeros(nl, 1)
labels["bboxes"] = torch.from_numpy(instances.bboxes) if nl else torch.zeros((nl, 4))
if self.return_keypoint:
labels["keypoints"] = (
torch.empty(0, 3) if instances.keypoints is None else torch.from_numpy(instances.keypoints)
)
if self.normalize:
labels["keypoints"][..., 0] /= w
labels["keypoints"][..., 1] /= h
if self.return_obb:
labels["bboxes"] = (
xyxyxyxy2xywhr(torch.from_numpy(instances.segments)) if len(instances.segments) else torch.zeros((0, 5))
)
# NOTE: need to normalize obb in xywhr format for width-height consistency
if self.normalize:
labels["bboxes"][:, [0, 2]] /= w
labels["bboxes"][:, [1, 3]] /= h
# Then we can use collate_fn
if self.batch_idx:
labels["batch_idx"] = torch.zeros(nl)
return labels
def _format_img(self, img: np.ndarray) -> torch.Tensor:
if len(img.shape) < 3:
img = img[..., None]
img = img.transpose(2, 0, 1)
img = np.ascontiguousarray(img[::-1] if random.uniform(0, 1) > self.bgr and img.shape[0] == 3 else img)
img = torch.from_numpy(img)
return img
def _format_segments(
self, instances: Instances, cls: np.ndarray, w: int, h: int
) -> tuple[np.ndarray, Instances, np.ndarray]:
segments = instances.segments
if self.mask_overlap:
masks, sorted_idx = polygons2masks_overlap((h, w), segments, downsample_ratio=self.mask_ratio)
masks = masks[None] # (640, 640) -> (1, 640, 640)
instances = instances[sorted_idx]
cls = cls[sorted_idx]
else:
masks = polygons2masks((h, w), segments, color=1, downsample_ratio=self.mask_ratio)
return masks, instances, cls
class LoadVisualPrompt:
def __init__(self, scale_factor: float = 1 / 8) -> None:
self.scale_factor = scale_factor
@staticmethod
def make_mask(boxes: torch.Tensor, h: int, w: int) -> torch.Tensor:
x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1) # x1 shape(n,1,1)
r = torch.arange(w)[None, None, :] # rows shape(1,1,w)
c = torch.arange(h)[None, :, None] # cols shape(1,h,1)
return (r >= x1) * (r < x2) * (c >= y1) * (c < y2)
def __call__(self, labels: dict[str, Any]) -> dict[str, Any]:
imgsz = labels["img"].shape[1:]
bboxes, masks = None, None
if "bboxes" in labels:
bboxes = labels["bboxes"]
bboxes = xywh2xyxy(bboxes) * torch.tensor(imgsz)[[1, 0, 1, 0]] # denormalize boxes
cls = labels["cls"].squeeze(-1).to(torch.int)
visuals = self.get_visuals(cls, imgsz, bboxes=bboxes, masks=masks)
labels["visuals"] = visuals
return labels
def get_visuals(
self,
category: int | np.ndarray | torch.Tensor,
shape: tuple[int, int],
bboxes: np.ndarray | torch.Tensor = None,
masks: np.ndarray | torch.Tensor = None,
) -> torch.Tensor:
masksz = (int(shape[0] * self.scale_factor), int(shape[1] * self.scale_factor))
if bboxes is not None:
if isinstance(bboxes, np.ndarray):
bboxes = torch.from_numpy(bboxes)
bboxes *= self.scale_factor
masks = self.make_mask(bboxes, *masksz).float()
elif masks is not None:
if isinstance(masks, np.ndarray):
masks = torch.from_numpy(masks) # (N, H, W)
masks = F.interpolate(masks.unsqueeze(1), masksz, mode="nearest").squeeze(1).float()
else:
raise ValueError("LoadVisualPrompt must have bboxes or masks in the label")
if not isinstance(category, torch.Tensor):
category = torch.tensor(category, dtype=torch.int)
cls_unique, inverse_indices = torch.unique(category, sorted=True, return_inverse=True)
# NOTE: `cls` indices from RandomLoadText should be continuous.
# if len(cls_unique):
# assert len(cls_unique) == cls_unique[-1] + 1, (
# f"Expected a continuous range of class indices, but got {cls_unique}"
# )
visuals = torch.zeros(cls_unique.shape[0], *masksz)
for idx, mask in zip(inverse_indices, masks):
visuals[idx] = torch.logical_or(visuals[idx], mask)
return visuals
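# A small sketch (illustrative only) of the broadcasting trick in make_mask above:
# comparing row/column index grids against box corners yields one binary mask per box
# without explicit loops.
_boxes = torch.tensor([[1.0, 0.0, 3.0, 2.0]])  # one xyxy box on a 3x4 grid
_mask = LoadVisualPrompt.make_mask(_boxes, 3, 4)
print(_mask.int())  # 1s in columns 1-2 of rows 0-1, zeros elsewhere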
class RandomLoadText:
def __init__(
self,
prompt_format: str = "{}",
neg_samples: tuple[int, int] = (80, 80),
max_samples: int = 80,
padding: bool = False,
padding_value: list[str] = [""],
) -> None:
self.prompt_format = prompt_format
self.neg_samples = neg_samples
self.max_samples = max_samples
self.padding = padding
self.padding_value = padding_value
def __call__(self, labels: dict[str, Any]) -> dict[str, Any]:
assert "texts" in labels, "No texts found in labels."
class_texts = labels["texts"]
num_classes = len(class_texts)
cls = np.asarray(labels.pop("cls"), dtype=int)
pos_labels = np.unique(cls).tolist()
if len(pos_labels) > self.max_samples:
pos_labels = random.sample(pos_labels, k=self.max_samples)
neg_samples = min(min(num_classes, self.max_samples) - len(pos_labels), random.randint(*self.neg_samples))
neg_labels = [i for i in range(num_classes) if i not in pos_labels]
neg_labels = random.sample(neg_labels, k=neg_samples)
sampled_labels = pos_labels + neg_labels
# Randomness
# random.shuffle(sampled_labels)
label2ids = {label: i for i, label in enumerate(sampled_labels)}
valid_idx = np.zeros(len(labels["instances"]), dtype=bool)
new_cls = []
for i, label in enumerate(cls.squeeze(-1).tolist()):
if label not in label2ids:
continue
valid_idx[i] = True
new_cls.append([label2ids[label]])
labels["instances"] = labels["instances"][valid_idx]
labels["cls"] = np.array(new_cls)
# Randomly select one prompt when there is more than one prompt
texts = []
for label in sampled_labels:
prompts = class_texts[label]
assert len(prompts) > 0
prompt = self.prompt_format.format(prompts[random.randrange(len(prompts))])
texts.append(prompt)
if self.padding:
valid_labels = len(pos_labels) + len(neg_labels)
num_padding = self.max_samples - valid_labels
if num_padding > 0:
texts += random.choices(self.padding_value, k=num_padding)
assert len(texts) == self.max_samples
labels["texts"] = texts
return labels
def v8_transforms(dataset, imgsz: int, hyp: IterableSimpleNamespace, stretch: bool = False):
mosaic = Mosaic(dataset, imgsz=imgsz, p=hyp.mosaic)
affine = RandomPerspective(
degrees=hyp.degrees,
translate=hyp.translate,
scale=hyp.scale,
shear=hyp.shear,
perspective=hyp.perspective,
pre_transform=None if stretch else LetterBox(new_shape=(imgsz, imgsz)),
)
pre_transform = Compose([mosaic, affine])
if hyp.copy_paste_mode == "flip":
pre_transform.insert(1, CopyPaste(p=hyp.copy_paste, mode=hyp.copy_paste_mode))
else:
pre_transform.append(
CopyPaste(
dataset,
pre_transform=Compose([Mosaic(dataset, imgsz=imgsz, p=hyp.mosaic), affine]),
p=hyp.copy_paste,
mode=hyp.copy_paste_mode,
)
)
flip_idx = dataset.data.get("flip_idx", []) # for keypoints augmentation
if dataset.use_keypoints:
kpt_shape = dataset.data.get("kpt_shape", None)
if len(flip_idx) == 0 and (hyp.fliplr > 0.0 or hyp.flipud > 0.0):
hyp.fliplr = hyp.flipud = 0.0 # both fliplr and flipud require flip_idx
LOGGER.warning("No 'flip_idx' array defined in data.yaml, disabling 'fliplr' and 'flipud' augmentations.")
elif flip_idx and (len(flip_idx) != kpt_shape[0]):
raise ValueError(f"data.yaml flip_idx={flip_idx} length must be equal to kpt_shape[0]={kpt_shape[0]}")
return Compose(
[
pre_transform,
MixUp(dataset, pre_transform=pre_transform, p=hyp.mixup),
CutMix(dataset, pre_transform=pre_transform, p=hyp.cutmix),
Albumentations(p=1.0, transforms=getattr(hyp, "augmentations", None)),
RandomHSV(hgain=hyp.hsv_h, sgain=hyp.hsv_s, vgain=hyp.hsv_v),
RandomFlip(direction="vertical", p=hyp.flipud, flip_idx=flip_idx),
RandomFlip(direction="horizontal", p=hyp.fliplr, flip_idx=flip_idx),
]
) # transforms
# Classification augmentations -----------------------------------------------------------------------------------------
def classify_transforms(
size: tuple[int, int] | int = 224,
mean: tuple[float, float, float] = DEFAULT_MEAN,
std: tuple[float, float, float] = DEFAULT_STD,
interpolation: str = "BILINEAR",
crop_fraction: float | None = None,
):
import torchvision.transforms as T # scope for faster 'import ultralytics'
scale_size = size if isinstance(size, (tuple, list)) and len(size) == 2 else (size, size)
if crop_fraction:
raise DeprecationWarning(
"'crop_fraction' arg of classify_transforms is deprecated, will be removed in a future version."
)
# Aspect ratio is preserved, the center is cropped within the image, no borders are added, and content outside the crop is lost
if scale_size[0] == scale_size[1]:
# Simple case, use torchvision built-in Resize with the shortest edge mode (scalar size arg)
tfl = [T.Resize(scale_size[0], interpolation=getattr(T.InterpolationMode, interpolation))]
else:
# Resize the shortest edge to matching target dim for non-square target
tfl = [T.Resize(scale_size)]
tfl += [T.CenterCrop(size), T.ToTensor(), T.Normalize(mean=torch.tensor(mean), std=torch.tensor(std))]
return T.Compose(tfl)
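# A usage sketch (assuming torchvision and Pillow are installed, illustrative only) of
# classify_transforms above: build the eval pipeline and run it on a dummy PIL image.
_tf = classify_transforms(size=224)
_im = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
print(_tf(_im).shape)  # torch.Size([3, 224, 224])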
# Classification training augmentations --------------------------------------------------------------------------------
def classify_augmentations(
size: int = 224,
mean: tuple[float, float, float] = DEFAULT_MEAN,
std: tuple[float, float, float] = DEFAULT_STD,
scale: tuple[float, float] | None = None,
ratio: tuple[float, float] | None = None,
hflip: float = 0.5,
vflip: float = 0.0,
auto_augment: str | None = None,
hsv_h: float = 0.015, # image HSV-Hue augmentation (fraction)
hsv_s: float = 0.4, # image HSV-Saturation augmentation (fraction)
hsv_v: float = 0.4, # image HSV-Value augmentation (fraction)
force_color_jitter: bool = False,
erasing: float = 0.0,
interpolation: str = "BILINEAR",
):
# Torchvision-based training transforms (no Albumentations dependency required)
import torchvision.transforms as T # scope for faster 'import ultralytics'
if not isinstance(size, int):
raise TypeError(f"classify_augmentations() size {size} must be integer, not (list, tuple)")
scale = tuple(scale or (0.08, 1.0)) # default imagenet scale range
ratio = tuple(ratio or (3.0 / 4.0, 4.0 / 3.0)) # default imagenet ratio range
interpolation = getattr(T.InterpolationMode, interpolation)
primary_tfl = [T.RandomResizedCrop(size, scale=scale, ratio=ratio, interpolation=interpolation)]
if hflip > 0.0:
primary_tfl.append(T.RandomHorizontalFlip(p=hflip))
if vflip > 0.0:
primary_tfl.append(T.RandomVerticalFlip(p=vflip))
secondary_tfl = []
disable_color_jitter = False
if auto_augment:
assert isinstance(auto_augment, str), f"Provided argument should be string, but got type {type(auto_augment)}"
# Color jitter is typically disabled when AutoAugment/RandAugment is on;
# this allows an override without breaking old hyperparameter configs
disable_color_jitter = not force_color_jitter
if auto_augment == "randaugment":
if TORCHVISION_0_11:
secondary_tfl.append(T.RandAugment(interpolation=interpolation))
else:
LOGGER.warning('"auto_augment=randaugment" requires torchvision >= 0.11.0. Disabling it.')
elif auto_augment == "augmix":
if TORCHVISION_0_13:
secondary_tfl.append(T.AugMix(interpolation=interpolation))
else:
LOGGER.warning('"auto_augment=augmix" requires torchvision >= 0.13.0. Disabling it.')
elif auto_augment == "autoaugment":
if TORCHVISION_0_10:
secondary_tfl.append(T.AutoAugment(interpolation=interpolation))
else:
LOGGER.warning('"auto_augment=autoaugment" requires torchvision >= 0.10.0. Disabling it.')
else:
raise ValueError(
f'Invalid auto_augment policy: {auto_augment}. Should be one of "randaugment", '
f'"augmix", "autoaugment" or None'
)
if not disable_color_jitter:
secondary_tfl.append(T.ColorJitter(brightness=hsv_v, contrast=hsv_v, saturation=hsv_s, hue=hsv_h))
final_tfl = [
T.ToTensor(),
T.Normalize(mean=torch.tensor(mean), std=torch.tensor(std)),
T.RandomErasing(p=erasing, inplace=True),
]
return T.Compose(primary_tfl + secondary_tfl + final_tfl)
# NOTE: keep this class for backward compatibility
class ClassifyLetterBox:
def __init__(self, size: int | tuple[int, int] = (640, 640), auto: bool = False, stride: int = 32):
super().__init__()
self.h, self.w = (size, size) if isinstance(size, int) else size
self.auto = auto # pass max size integer, automatically solve for short side using stride
self.stride = stride # used with auto
def __call__(self, im: np.ndarray) -> np.ndarray:
imh, imw = im.shape[:2]
r = min(self.h / imh, self.w / imw) # ratio of new/old dimensions
h, w = round(imh * r), round(imw * r) # resized image dimensions
# Calculate padding dimensions
hs, ws = (math.ceil(x / self.stride) * self.stride for x in (h, w)) if self.auto else (self.h, self.w)
top, left = round((hs - h) / 2 - 0.1), round((ws - w) / 2 - 0.1)
# Create padded image
im_out = np.full((hs, ws, 3), 114, dtype=im.dtype)
im_out[top : top + h, left : left + w] = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR)
return im_out
# NOTE: keep this class for backward compatibility
class CenterCrop:
def __init__(self, size: int | tuple[int, int] = (640, 640)):
super().__init__()
self.h, self.w = (size, size) if isinstance(size, int) else size
def __call__(self, im: Image.Image | np.ndarray) -> np.ndarray:
if isinstance(im, Image.Image): # convert from PIL to numpy array if required
im = np.asarray(im)
imh, imw = im.shape[:2]
m = min(imh, imw) # min dimension
top, left = (imh - m) // 2, (imw - m) // 2
return cv2.resize(im[top : top + m, left : left + m], (self.w, self.h), interpolation=cv2.INTER_LINEAR)
# NOTE: keep this class for backward compatibility
class ToTensor:
def __init__(self, half: bool = False):
super().__init__()
self.half = half
def __call__(self, im: np.ndarray) -> torch.Tensor:
im = np.ascontiguousarray(im.transpose((2, 0, 1))) # HWC to CHW -> contiguous
im = torch.from_numpy(im) # to torch
im = im.half() if self.half else im.float() # uint8 to fp16/32
im /= 255.0 # 0-255 to 0.0-1.0
return im | --- +++ @@ -26,46 +26,230 @@
class BaseTransform:
+ """Base class for image transformations in the Ultralytics library.
+
+ This class serves as a foundation for implementing various image processing operations, designed to be compatible
+ with both classification and semantic segmentation tasks.
+
+ Methods:
+ apply_image: Apply image transformations to labels.
+ apply_instances: Apply transformations to object instances in labels.
+ apply_semantic: Apply semantic segmentation to an image.
+ __call__: Apply all label transformations to an image, instances, and semantic masks.
+
+ Examples:
+ >>> transform = BaseTransform()
+ >>> labels = {"image": np.array(...), "instances": [...], "semantic": np.array(...)}
+ >>> transform(labels)
+ """
def __init__(self) -> None:
+ """Initialize the BaseTransform object.
+
+ This constructor sets up the base transformation object, which can be extended for specific image processing
+ tasks. It is designed to be compatible with both classification and semantic segmentation.
+ """
pass
def apply_image(self, labels):
+ """Apply image transformations to labels.
+
+ This method is intended to be overridden by subclasses to implement specific image transformation
+ logic. In its base form, it is a no-op and returns None.
+
+ Args:
+ labels (Any): The input labels to be transformed. The exact type and structure of labels may vary depending
+ on the specific implementation.
+
+ Returns:
+ (None): The base implementation performs no transformation and returns None.
+
+ Examples:
+ >>> transform = BaseTransform()
+ >>> original_labels = [1, 2, 3]
+ >>> transform.apply_image(original_labels)  # no-op in the base class
+ >>> print(original_labels)
+ [1, 2, 3]
+ """
pass
def apply_instances(self, labels):
+ """Apply transformations to object instances in labels.
+
+ This method is responsible for applying various transformations to object instances within the given
+ labels. It is designed to be overridden by subclasses to implement specific instance transformation
+ logic.
+
+ Args:
+ labels (dict): A dictionary containing label information, including object instances.
+
+ Returns:
+ (None): The base implementation performs no transformation and returns None.
+
+ Examples:
+ >>> transform = BaseTransform()
+ >>> labels = {"instances": Instances(xyxy=torch.rand(5, 4), cls=torch.randint(0, 80, (5,)))}
+ >>> transform.apply_instances(labels)  # no-op in the base class
+ """
pass
def apply_semantic(self, labels):
+ """Apply semantic segmentation transformations to an image.
+
+ This method is intended to be overridden by subclasses to implement specific semantic segmentation
+ transformations. In its base form, it does not perform any operations.
+
+ Args:
+ labels (Any): The input labels or semantic segmentation mask to be transformed.
+
+ Returns:
+            (Any): The transformed semantic segmentation mask or labels when overridden; the base implementation is a
+                no-op and returns None.
+
+ Examples:
+ >>> transform = BaseTransform()
+ >>> semantic_mask = np.zeros((100, 100), dtype=np.uint8)
+ >>> transformed_mask = transform.apply_semantic(semantic_mask)
+ """
pass
def __call__(self, labels):
+ """Apply all label transformations to an image, instances, and semantic masks.
+
+ This method orchestrates the application of various transformations defined in the BaseTransform class to the
+ input labels. It sequentially calls the apply_image and apply_instances methods to process the image and object
+ instances, respectively.
+
+ Args:
+ labels (dict): A dictionary containing image data and annotations. Expected keys include 'img' for the image
+ data, and 'instances' for object instances.
+
+        Returns:
+            (None): Nothing is returned; the labels dictionary is modified in place by the apply_* methods.
+
+        Examples:
+            >>> transform = BaseTransform()
+            >>> labels = {"img": np.random.rand(640, 640, 3), "instances": []}
+            >>> transform(labels)  # labels is updated in place
+ """
self.apply_image(labels)
self.apply_instances(labels)
self.apply_semantic(labels)
class Compose:
+ """A class for composing multiple image transformations.
+
+ Attributes:
+ transforms (list[Callable]): A list of transformation functions to be applied sequentially.
+
+ Methods:
+ __call__: Apply a series of transformations to input data.
+ append: Append a new transform to the existing list of transforms.
+ insert: Insert a new transform at a specified index in the list of transforms.
+ __getitem__: Retrieve a specific transform or a set of transforms using indexing.
+ __setitem__: Set a specific transform or a set of transforms using indexing.
+ tolist: Convert the list of transforms to a standard Python list.
+
+ Examples:
+ >>> transforms = [RandomFlip(), RandomPerspective(30)]
+ >>> compose = Compose(transforms)
+ >>> transformed_data = compose(data)
+ >>> compose.append(CenterCrop((224, 224)))
+ >>> compose.insert(0, RandomFlip())
+ """
def __init__(self, transforms):
+ """Initialize the Compose object with a list of transforms.
+
+ Args:
+ transforms (list[Callable]): A list of callable transform objects to be applied sequentially.
+ """
self.transforms = transforms if isinstance(transforms, list) else [transforms]
def __call__(self, data):
+ """Apply a series of transformations to input data.
+
+ This method sequentially applies each transformation in the Compose object's transforms to the input data.
+
+ Args:
+ data (Any): The input data to be transformed. This can be of any type, depending on the transformations in
+ the list.
+
+ Returns:
+ (Any): The transformed data after applying all transformations in sequence.
+
+ Examples:
+ >>> transforms = [Transform1(), Transform2(), Transform3()]
+ >>> compose = Compose(transforms)
+ >>> transformed_data = compose(input_data)
+ """
for t in self.transforms:
data = t(data)
return data
def append(self, transform):
+ """Append a new transform to the existing list of transforms.
+
+ Args:
+ transform (BaseTransform): The transformation to be added to the composition.
+
+ Examples:
+ >>> compose = Compose([RandomFlip(), RandomPerspective()])
+ >>> compose.append(RandomHSV())
+ """
self.transforms.append(transform)
def insert(self, index, transform):
+ """Insert a new transform at a specified index in the existing list of transforms.
+
+ Args:
+ index (int): The index at which to insert the new transform.
+ transform (BaseTransform): The transform object to be inserted.
+
+ Examples:
+ >>> compose = Compose([Transform1(), Transform2()])
+ >>> compose.insert(1, Transform3())
+ >>> len(compose.transforms)
+ 3
+ """
self.transforms.insert(index, transform)
def __getitem__(self, index: list | int) -> Compose:
+ """Retrieve a specific transform or a set of transforms using indexing.
+
+ Args:
+ index (int | list[int]): Index or list of indices of the transforms to retrieve.
+
+ Returns:
+ (Compose | Any): A new Compose object if index is a list, or a single transform if index is an int.
+
+ Raises:
+ AssertionError: If the index is not of type int or list.
+
+ Examples:
+ >>> transforms = [RandomFlip(), RandomPerspective(10), RandomHSV(0.5, 0.5, 0.5)]
+ >>> compose = Compose(transforms)
+ >>> single_transform = compose[1] # Returns the RandomPerspective transform directly
+ >>> multiple_transforms = compose[[0, 1]] # Returns a Compose object with RandomFlip and RandomPerspective
+ """
assert isinstance(index, (int, list)), f"The indices should be either list or int type but got {type(index)}"
return Compose([self.transforms[i] for i in index]) if isinstance(index, list) else self.transforms[index]
def __setitem__(self, index: list | int, value: list | int) -> None:
+ """Set one or more transforms in the composition using indexing.
+
+ Args:
+ index (int | list[int]): Index or list of indices to set transforms at.
+ value (Any | list[Any]): Transform or list of transforms to set at the specified index(es).
+
+ Raises:
+ AssertionError: If index type is invalid, value type doesn't match index type, or index is out of range.
+
+ Examples:
+ >>> compose = Compose([Transform1(), Transform2(), Transform3()])
+ >>> compose[1] = NewTransform() # Replace second transform
+ >>> compose[[0, 1]] = [NewTransform1(), NewTransform2()] # Replace first two transforms
+ """
assert isinstance(index, (int, list)), f"The indices should be either list or int type but got {type(index)}"
if isinstance(index, list):
assert isinstance(value, list), (
@@ -78,20 +262,98 @@ self.transforms[i] = v
def tolist(self):
+ """Convert the list of transforms to a standard Python list.
+
+ Returns:
+ (list): A list containing all the transform objects in the Compose instance.
+
+ Examples:
+ >>> transforms = [RandomFlip(), RandomPerspective(10), CenterCrop()]
+ >>> compose = Compose(transforms)
+ >>> transform_list = compose.tolist()
+ >>> print(len(transform_list))
+ 3
+ """
return self.transforms
def __repr__(self):
+ """Return a string representation of the Compose object.
+
+ Returns:
+ (str): A string representation of the Compose object, including the list of transforms.
+
+ Examples:
+ >>> transforms = [RandomFlip(), RandomPerspective(degrees=10, translate=0.1, scale=0.1)]
+ >>> compose = Compose(transforms)
+ >>> print(compose)
+ Compose([
+ RandomFlip(),
+ RandomPerspective(degrees=10, translate=0.1, scale=0.1)
+ ])
+ """
return f"{self.__class__.__name__}({', '.join([f'{t}' for t in self.transforms])})"
class BaseMixTransform:
+ """Base class for mix transformations like Cutmix, MixUp and Mosaic.
+
+ This class provides a foundation for implementing mix transformations on datasets. It handles the probability-based
+ application of transforms and manages the mixing of multiple images and labels.
+
+ Attributes:
+ dataset (Any): The dataset object containing images and labels.
+ pre_transform (Callable | None): Optional transform to apply before mixing.
+ p (float): Probability of applying the mix transformation.
+
+ Methods:
+ __call__: Apply the mix transformation to the input labels.
+ _mix_transform: Abstract method to be implemented by subclasses for specific mix operations.
+ get_indexes: Abstract method to get indexes of images to be mixed.
+ _update_label_text: Update label text for mixed images.
+
+ Examples:
+ >>> class CustomMixTransform(BaseMixTransform):
+ ... def _mix_transform(self, labels):
+ ... # Implement custom mix logic here
+ ... return labels
+ ...
+ ... def get_indexes(self):
+ ... return [random.randint(0, len(self.dataset) - 1) for _ in range(3)]
+ >>> dataset = YourDataset()
+ >>> transform = CustomMixTransform(dataset, p=0.5)
+ >>> mixed_labels = transform(original_labels)
+ """
def __init__(self, dataset, pre_transform=None, p=0.0) -> None:
+ """Initialize the BaseMixTransform object for mix transformations like CutMix, MixUp and Mosaic.
+
+ This class serves as a base for implementing mix transformations in image processing pipelines.
+
+ Args:
+ dataset (Any): The dataset object containing images and labels for mixing.
+ pre_transform (Callable | None): Optional transform to apply before mixing.
+ p (float): Probability of applying the mix transformation. Should be in the range [0.0, 1.0].
+ """
self.dataset = dataset
self.pre_transform = pre_transform
self.p = p
def __call__(self, labels: dict[str, Any]) -> dict[str, Any]:
+ """Apply pre-processing transforms and cutmix/mixup/mosaic transforms to labels data.
+
+ This method determines whether to apply the mix transform based on a probability factor. If applied, it selects
+ additional images, applies pre-transforms if specified, and then performs the mix transform.
+
+ Args:
+ labels (dict[str, Any]): A dictionary containing label data for an image.
+
+ Returns:
+ (dict[str, Any]): The transformed labels dictionary, which may include mixed data from other images.
+
+ Examples:
+ >>> transform = BaseMixTransform(dataset, pre_transform=None, p=0.5)
+ >>> result = transform({"image": img, "bboxes": boxes, "cls": classes})
+ """
if random.uniform(0, 1) > self.p:
return labels
@@ -116,13 +378,68 @@ return labels
def _mix_transform(self, labels: dict[str, Any]):
+ """Apply CutMix, MixUp or Mosaic augmentation to the label dictionary.
+
+ This method should be implemented by subclasses to perform specific mix transformations like CutMix, MixUp or
+ Mosaic. It modifies the input label dictionary in-place with the augmented data.
+
+ Args:
+ labels (dict[str, Any]): A dictionary containing image and label data. Expected to have a 'mix_labels' key
+ with a list of additional image and label data for mixing.
+
+ Returns:
+ (dict[str, Any]): The modified labels dictionary with augmented data after applying the mix transform.
+
+ Examples:
+ >>> transform = BaseMixTransform(dataset)
+ >>> labels = {"image": img, "bboxes": boxes, "mix_labels": [{"image": img2, "bboxes": boxes2}]}
+ >>> augmented_labels = transform._mix_transform(labels)
+ """
raise NotImplementedError
def get_indexes(self):
+ """Get a random index for mosaic augmentation.
+
+ Returns:
+ (int): A random index from the dataset.
+
+ Examples:
+ >>> transform = BaseMixTransform(dataset)
+ >>> index = transform.get_indexes()
+ >>> print(index) # 7
+ """
return random.randint(0, len(self.dataset) - 1)
@staticmethod
def _update_label_text(labels: dict[str, Any]) -> dict[str, Any]:
+ """Update label text and class IDs for mixed labels in image augmentation.
+
+ This method processes the 'texts' and 'cls' fields of the input labels dictionary and any mixed labels, creating
+ a unified set of text labels and updating class IDs accordingly.
+
+ Args:
+ labels (dict[str, Any]): A dictionary containing label information, including 'texts' and 'cls' fields, and
+ optionally a 'mix_labels' field with additional label dictionaries.
+
+ Returns:
+ (dict[str, Any]): The updated labels dictionary with unified text labels and updated class IDs.
+
+ Examples:
+ >>> labels = {
+ ... "texts": [["cat"], ["dog"]],
+ ... "cls": torch.tensor([[0], [1]]),
+ ... "mix_labels": [{"texts": [["bird"], ["fish"]], "cls": torch.tensor([[0], [1]])}],
+ ... }
+ >>> updated_labels = BaseMixTransform._update_label_text(labels)
+ >>> print(updated_labels["texts"])
+ [['cat'], ['dog'], ['bird'], ['fish']]
+ >>> print(updated_labels["cls"])
+ tensor([[0],
+ [1]])
+ >>> print(updated_labels["mix_labels"][0]["cls"])
+ tensor([[2],
+ [3]])
+ """
if "texts" not in labels:
return labels
@@ -139,8 +456,46 @@
class Mosaic(BaseMixTransform):
+ """Mosaic augmentation for image datasets.
+
+ This class performs mosaic augmentation by combining multiple (4 or 9) images into a single mosaic image. The
+ augmentation is applied to a dataset with a given probability.
+
+ Attributes:
+ dataset: The dataset on which the mosaic augmentation is applied.
+ imgsz (int): Image size (height and width) after mosaic pipeline of a single image.
+ p (float): Probability of applying the mosaic augmentation. Must be in the range 0-1.
+ n (int): The grid size, either 4 (for 2x2) or 9 (for 3x3).
+ border (tuple[int, int]): Border size for height and width.
+
+ Methods:
+ get_indexes: Return a list of random indexes from the dataset.
+ _mix_transform: Apply mosaic transformation to the input image and labels.
+ _mosaic3: Create a 1x3 image mosaic.
+ _mosaic4: Create a 2x2 image mosaic.
+ _mosaic9: Create a 3x3 image mosaic.
+ _update_labels: Update labels with padding.
+        _cat_labels: Concatenate labels and clip mosaic border instances.
+
+ Examples:
+ >>> from ultralytics.data.augment import Mosaic
+ >>> dataset = YourDataset(...) # Your image dataset
+ >>> mosaic_aug = Mosaic(dataset, imgsz=640, p=0.5, n=4)
+ >>> augmented_labels = mosaic_aug(original_labels)
+ """
def __init__(self, dataset, imgsz: int = 640, p: float = 1.0, n: int = 4):
+ """Initialize the Mosaic augmentation object.
+
+ This class performs mosaic augmentation by combining multiple (4 or 9) images into a single mosaic image. The
+ augmentation is applied to a dataset with a given probability.
+
+ Args:
+ dataset (Any): The dataset on which the mosaic augmentation is applied.
+ imgsz (int): Image size (height and width) after mosaic pipeline of a single image.
+ p (float): Probability of applying the mosaic augmentation. Must be in the range 0-1.
+ n (int): The grid size, either 4 (for 2x2) or 9 (for 3x3).
+ """
assert 0 <= p <= 1.0, f"The probability should be in range [0, 1], but got {p}."
assert n in {4, 9}, "grid must be equal to 4 or 9."
super().__init__(dataset=dataset, p=p)
@@ -150,12 +505,47 @@ self.buffer_enabled = self.dataset.cache != "ram"
def get_indexes(self):
+ """Return a list of random indexes from the dataset for mosaic augmentation.
+
+ This method selects random image indexes either from a buffer or from the entire dataset, depending on the
+ 'buffer_enabled' attribute. It is used to choose images for creating mosaic augmentations.
+
+ Returns:
+            (list[int]): A list of random image indexes. The list contains n - 1 indexes (3 or 8), where n is the
+                number of images used in the mosaic (4 or 9).
+
+ Examples:
+ >>> mosaic = Mosaic(dataset, imgsz=640, p=1.0, n=4)
+ >>> indexes = mosaic.get_indexes()
+ >>> print(len(indexes)) # Output: 3
+ """
if self.buffer_enabled: # select images from buffer
return random.choices(list(self.dataset.buffer), k=self.n - 1)
else: # select any images
return [random.randint(0, len(self.dataset) - 1) for _ in range(self.n - 1)]
def _mix_transform(self, labels: dict[str, Any]) -> dict[str, Any]:
+ """Apply mosaic augmentation to the input image and labels.
+
+ This method combines multiple images (3, 4, or 9) into a single mosaic image based on the 'n' attribute. It
+ ensures that rectangular annotations are not present and that there are other images available for mosaic
+ augmentation.
+
+ Args:
+ labels (dict[str, Any]): A dictionary containing image data and annotations. Expected keys include:
+ - 'rect_shape': Should be None as rect and mosaic are mutually exclusive.
+ - 'mix_labels': A list of dictionaries containing data for other images to be used in the mosaic.
+
+ Returns:
+ (dict[str, Any]): A dictionary containing the mosaic-augmented image and updated annotations.
+
+ Raises:
+ AssertionError: If 'rect_shape' is not None or if 'mix_labels' is empty.
+
+ Examples:
+ >>> mosaic = Mosaic(dataset, imgsz=640, p=1.0, n=4)
+ >>> augmented_data = mosaic._mix_transform(labels)
+ """
assert labels.get("rect_shape") is None, "rect and mosaic are mutually exclusive."
assert len(labels.get("mix_labels", [])), "There are no other images for mosaic augment."
return (
@@ -163,6 +553,31 @@ ) # This code is modified for mosaic3 method.
def _mosaic3(self, labels: dict[str, Any]) -> dict[str, Any]:
+ """Create a 1x3 image mosaic by combining three images.
+
+ This method arranges three images in a horizontal layout, with the main image in the center and two additional
+ images on either side. It's part of the Mosaic augmentation technique used in object detection.
+
+ Args:
+ labels (dict[str, Any]): A dictionary containing image and label information for the main (center) image.
+ Must include 'img' key with the image array, and 'mix_labels' key with a list of two dictionaries
+ containing information for the side images.
+
+ Returns:
+ (dict[str, Any]): A dictionary with the mosaic image and updated labels. Keys include:
+ - 'img' (np.ndarray): The mosaic image array with shape (H, W, C).
+ - Other keys from the input labels, updated to reflect the new image dimensions.
+
+ Examples:
+            >>> mosaic = Mosaic(dataset, imgsz=640, p=1.0, n=4)  # __init__ only accepts n=4 or n=9
+            >>> labels = {
+            ...     "img": np.random.rand(480, 640, 3),
+            ...     "mix_labels": [{"img": np.random.rand(480, 640, 3)} for _ in range(2)],
+            ... }
+            >>> result = mosaic._mosaic3(labels)  # called directly here to build the 1x3 mosaic
+            >>> mosaic_img = result["img"]
+ """
mosaic_labels = []
s = self.imgsz
for i in range(3):
@@ -196,6 +611,29 @@ return final_labels
def _mosaic4(self, labels: dict[str, Any]) -> dict[str, Any]:
+ """Create a 2x2 image mosaic from four input images.
+
+ This method combines four images into a single mosaic image by placing them in a 2x2 grid. It also updates the
+ corresponding labels for each image in the mosaic.
+
+ Args:
+ labels (dict[str, Any]): A dictionary containing image data and labels for the base image (index 0) and
+ three additional images (indices 1-3) in the 'mix_labels' key.
+
+ Returns:
+ (dict[str, Any]): A dictionary containing the mosaic image and updated labels. The 'img' key contains the
+ mosaic image as a numpy array, and other keys contain the combined and adjusted labels for all
+ four images.
+
+ Examples:
+ >>> mosaic = Mosaic(dataset, imgsz=640, p=1.0, n=4)
+ >>> labels = {
+ ... "img": np.random.rand(480, 640, 3),
+ ... "mix_labels": [{"img": np.random.rand(480, 640, 3)} for _ in range(3)],
+ ... }
+ >>> result = mosaic._mosaic4(labels)
+ >>> assert result["img"].shape == (1280, 1280, 3)
+ """
mosaic_labels = []
s = self.imgsz
yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.border) # mosaic center x, y
@@ -231,6 +669,29 @@ return final_labels
def _mosaic9(self, labels: dict[str, Any]) -> dict[str, Any]:
+ """Create a 3x3 image mosaic from the input image and eight additional images.
+
+ This method combines nine images into a single mosaic image. The input image is placed at the center, and eight
+ additional images from the dataset are placed around it in a 3x3 grid pattern.
+
+ Args:
+ labels (dict[str, Any]): A dictionary containing the input image and its associated labels. It should have
+ the following keys: 'img' (np.ndarray) the input image, 'resized_shape' (tuple[int, int]) the shape
+ of the resized image (height, width), and 'mix_labels' (list[dict]) a list of dictionaries containing
+ information for the additional eight images, each with the same structure as the input labels.
+
+ Returns:
+ (dict[str, Any]): A dictionary containing the mosaic image and updated labels. It includes the following
+ keys:
+ - 'img' (np.ndarray): The final mosaic image.
+ - Other keys from the input labels, updated to reflect the new mosaic arrangement.
+
+ Examples:
+ >>> mosaic = Mosaic(dataset, imgsz=640, p=1.0, n=9)
+ >>> input_labels = dataset[0]
+ >>> mosaic_result = mosaic._mosaic9(input_labels)
+ >>> mosaic_image = mosaic_result["img"]
+ """
mosaic_labels = []
s = self.imgsz
hp, wp = -1, -1 # height, width previous
@@ -279,6 +740,24 @@
@staticmethod
def _update_labels(labels, padw: int, padh: int) -> dict[str, Any]:
+ """Update label coordinates with padding values.
+
+ This method adjusts the bounding box coordinates of object instances in the labels by adding padding
+ values. It also denormalizes the coordinates if they were previously normalized.
+
+ Args:
+ labels (dict[str, Any]): A dictionary containing image and instance information.
+ padw (int): Padding width to be added to the x-coordinates.
+ padh (int): Padding height to be added to the y-coordinates.
+
+ Returns:
+ (dict): Updated labels dictionary with adjusted instance coordinates.
+
+ Examples:
+ >>> labels = {"img": np.zeros((100, 100, 3)), "instances": Instances(...)}
+ >>> padw, padh = 50, 50
+ >>> updated_labels = Mosaic._update_labels(labels, padw, padh)
+ """
nh, nw = labels["img"].shape[:2]
labels["instances"].convert_bbox(format="xyxy")
labels["instances"].denormalize(nw, nh)
@@ -286,6 +765,31 @@ return labels
def _cat_labels(self, mosaic_labels: list[dict[str, Any]]) -> dict[str, Any]:
+ """Concatenate and process labels for mosaic augmentation.
+
+ This method combines labels from multiple images used in mosaic augmentation, clips instances to the mosaic
+ border, and removes zero-area boxes.
+
+ Args:
+ mosaic_labels (list[dict[str, Any]]): A list of label dictionaries for each image in the mosaic.
+
+ Returns:
+ (dict[str, Any]): A dictionary containing concatenated and processed labels for the mosaic image, including:
+ - im_file (str): File path of the first image in the mosaic.
+ - ori_shape (tuple[int, int]): Original shape of the first image.
+ - resized_shape (tuple[int, int]): Shape of the mosaic image (imgsz * 2, imgsz * 2).
+ - cls (np.ndarray): Concatenated class labels.
+ - instances (Instances): Concatenated instance annotations.
+ - mosaic_border (tuple[int, int]): Mosaic border size.
+ - texts (list[str], optional): Text labels if present in the original labels.
+
+ Examples:
+ >>> mosaic = Mosaic(dataset, imgsz=640)
+ >>> mosaic_labels = [{"cls": np.array([0, 1]), "instances": Instances(...)} for _ in range(4)]
+ >>> result = mosaic._cat_labels(mosaic_labels)
+ >>> print(result.keys())
+ dict_keys(['im_file', 'ori_shape', 'resized_shape', 'cls', 'instances', 'mosaic_border'])
+ """
if not mosaic_labels:
return {}
cls = []
@@ -312,11 +816,55 @@
class MixUp(BaseMixTransform):
+ """Apply MixUp augmentation to image datasets.
+
+ This class implements the MixUp augmentation technique as described in the paper [mixup: Beyond Empirical Risk
+ Minimization](https://arxiv.org/abs/1710.09412). MixUp combines two images and their labels using a random weight.
+
+ Attributes:
+ dataset (Any): The dataset to which MixUp augmentation will be applied.
+ pre_transform (Callable | None): Optional transform to apply before MixUp.
+ p (float): Probability of applying MixUp augmentation.
+
+ Methods:
+ _mix_transform: Apply MixUp augmentation to the input labels.
+
+ Examples:
+ >>> from ultralytics.data.augment import MixUp
+ >>> dataset = YourDataset(...) # Your image dataset
+ >>> mixup = MixUp(dataset, p=0.5)
+ >>> augmented_labels = mixup(original_labels)
+ """
def __init__(self, dataset, pre_transform=None, p: float = 0.0) -> None:
+ """Initialize the MixUp augmentation object.
+
+        MixUp is an image augmentation technique that combines two images by taking a weighted sum of their pixel
+        values, while their labels are concatenated. This implementation is designed for use with the Ultralytics YOLO
+        framework.
+
+ Args:
+ dataset (Any): The dataset to which MixUp augmentation will be applied.
+ pre_transform (Callable | None): Optional transform to apply to images before MixUp.
+ p (float): Probability of applying MixUp augmentation to an image. Must be in the range [0, 1].
+ """
super().__init__(dataset=dataset, pre_transform=pre_transform, p=p)
def _mix_transform(self, labels: dict[str, Any]) -> dict[str, Any]:
+ """Apply MixUp augmentation to the input labels.
+
+ This method implements the MixUp augmentation technique as described in the paper "mixup: Beyond Empirical Risk
+ Minimization" (https://arxiv.org/abs/1710.09412).
+
+ Args:
+ labels (dict[str, Any]): A dictionary containing the original image and label information.
+
+ Returns:
+ (dict[str, Any]): A dictionary containing the mixed-up image and combined label information.
+
+ Examples:
+ >>> mixer = MixUp(dataset)
+ >>> mixed_labels = mixer._mix_transform(labels)
+ """
r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0
labels2 = labels["mix_labels"][0]
labels["img"] = (labels["img"] * r + labels2["img"] * (1 - r)).astype(np.uint8)
@@ -326,13 +874,53 @@
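The pixel blend above is a single convex combination with a Beta(32, 32) ratio; a small self-contained sketch of that step, independent of the dataset plumbing:
import numpy as np

rng = np.random.default_rng(0)
img1 = rng.integers(0, 256, (4, 4, 3), dtype=np.uint8)
img2 = rng.integers(0, 256, (4, 4, 3), dtype=np.uint8)
r = rng.beta(32.0, 32.0)  # Beta(32, 32) concentrates near 0.5, so both images contribute roughly equally
mixed = (img1 * r + img2 * (1 - r)).astype(np.uint8)
print(round(float(r), 3), mixed.shape)  # e.g. ~0.5 (4, 4, 3)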
class CutMix(BaseMixTransform):
+ """Apply CutMix augmentation to image datasets as described in the paper https://arxiv.org/abs/1905.04899.
+
+ CutMix combines two images by replacing a random rectangular region of one image with the corresponding region from
+ another image, and adjusts the labels proportionally to the area of the mixed region.
+
+ Attributes:
+ dataset (Any): The dataset to which CutMix augmentation will be applied.
+ pre_transform (Callable | None): Optional transform to apply before CutMix.
+ p (float): Probability of applying CutMix augmentation.
+ beta (float): Beta distribution parameter for sampling the mixing ratio.
+ num_areas (int): Number of areas to try to cut and mix.
+
+ Methods:
+ _mix_transform: Apply CutMix augmentation to the input labels.
+ _rand_bbox: Generate random bounding box coordinates for the cut region.
+
+ Examples:
+ >>> from ultralytics.data.augment import CutMix
+ >>> dataset = YourDataset(...) # Your image dataset
+ >>> cutmix = CutMix(dataset, p=0.5)
+ >>> augmented_labels = cutmix(original_labels)
+ """
def __init__(self, dataset, pre_transform=None, p: float = 0.0, beta: float = 1.0, num_areas: int = 3) -> None:
+ """Initialize the CutMix augmentation object.
+
+ Args:
+ dataset (Any): The dataset to which CutMix augmentation will be applied.
+ pre_transform (Callable | None): Optional transform to apply before CutMix.
+ p (float): Probability of applying CutMix augmentation.
+ beta (float): Beta distribution parameter for sampling the mixing ratio.
+ num_areas (int): Number of areas to try to cut and mix.
+ """
super().__init__(dataset=dataset, pre_transform=pre_transform, p=p)
self.beta = beta
self.num_areas = num_areas
def _rand_bbox(self, width: int, height: int) -> tuple[int, int, int, int]:
+ """Generate random bounding box coordinates for the cut region.
+
+ Args:
+ width (int): Width of the image.
+ height (int): Height of the image.
+
+ Returns:
+            (tuple[int, int, int, int]): (x1, y1, x2, y2) coordinates of the bounding box.
+ """
# Sample mixing ratio from Beta distribution
lam = np.random.beta(self.beta, self.beta)
@@ -353,6 +941,18 @@ return x1, y1, x2, y2
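The elided body of `_rand_bbox` is assumed to follow the standard CutMix recipe: the cut area scales with 1 - lam, the box is centered at a random point, and the corners are clipped to the image. A hedged standalone sketch (not the exact library code):
import numpy as np

def rand_cut_box(width: int, height: int, beta: float = 1.0) -> tuple[int, int, int, int]:
    """Sample a CutMix rectangle; a sketch of the standard recipe under assumed defaults."""
    lam = np.random.beta(beta, beta)  # mixing ratio
    cut_ratio = np.sqrt(1.0 - lam)  # cut region covers roughly (1 - lam) of the image area
    cut_w, cut_h = int(width * cut_ratio), int(height * cut_ratio)
    cx, cy = np.random.randint(width), np.random.randint(height)  # random box center
    x1, y1 = np.clip(cx - cut_w // 2, 0, width), np.clip(cy - cut_h // 2, 0, height)
    x2, y2 = np.clip(cx + cut_w // 2, 0, width), np.clip(cy + cut_h // 2, 0, height)
    return int(x1), int(y1), int(x2), int(y2)

print(rand_cut_box(640, 480))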
def _mix_transform(self, labels: dict[str, Any]) -> dict[str, Any]:
+ """Apply CutMix augmentation to the input labels.
+
+ Args:
+ labels (dict[str, Any]): A dictionary containing the original image and label information.
+
+ Returns:
+ (dict[str, Any]): A dictionary containing the mixed image and adjusted labels.
+
+ Examples:
+ >>> cutter = CutMix(dataset)
+ >>> mixed_labels = cutter._mix_transform(labels)
+ """
# Get a random second image
h, w = labels["img"].shape[:2]
@@ -388,6 +988,37 @@
class RandomPerspective:
+ """Implement random perspective and affine transformations on images and corresponding annotations.
+
+ This class applies random rotations, translations, scaling, shearing, and perspective transformations to images and
+ their associated bounding boxes, segments, and keypoints. It can be used as part of an augmentation pipeline for
+ object detection and instance segmentation tasks.
+
+ Attributes:
+ degrees (float): Maximum absolute degree range for random rotations.
+ translate (float): Maximum translation as a fraction of the image size.
+ scale (float): Scaling factor range, e.g., scale=0.1 means 0.9-1.1.
+ shear (float): Maximum shear angle in degrees.
+ perspective (float): Perspective distortion factor.
+ border (tuple[int, int]): Mosaic border size as (y, x).
+ pre_transform (Callable | None): Optional transform to apply before the random perspective.
+
+ Methods:
+ affine_transform: Apply affine transformations to the input image.
+ apply_bboxes: Transform bounding boxes using the affine matrix.
+ apply_segments: Transform segments and generate new bounding boxes.
+ apply_keypoints: Transform keypoints using the affine matrix.
+ __call__: Apply the random perspective transformation to images and annotations.
+ box_candidates: Filter transformed bounding boxes based on size and aspect ratio.
+
+ Examples:
+ >>> transform = RandomPerspective(degrees=10, translate=0.1, scale=0.1, shear=10)
+ >>> image = np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8)
+ >>> labels = {"img": image, "cls": np.array([0, 1]), "instances": Instances(...)}
+ >>> result = transform(labels)
+ >>> transformed_image = result["img"]
+ >>> transformed_instances = result["instances"]
+ """
def __init__(
self,
@@ -399,6 +1030,21 @@ border: tuple[int, int] = (0, 0),
pre_transform=None,
):
+ """Initialize RandomPerspective object with transformation parameters.
+
+ This class implements random perspective and affine transformations on images and corresponding bounding boxes,
+ segments, and keypoints. Transformations include rotation, translation, scaling, and shearing.
+
+ Args:
+ degrees (float): Degree range for random rotations.
+ translate (float): Fraction of total width and height for random translation.
+ scale (float): Scaling factor interval, e.g., a scale factor of 0.5 allows a resize between 50%-150%.
+ shear (float): Shear intensity (angle in degrees).
+ perspective (float): Perspective distortion factor.
+ border (tuple[int, int]): Tuple specifying mosaic border (y, x).
+ pre_transform (Callable | None): Function/transform to apply to the image before starting the random
+ transformation.
+ """
self.degrees = degrees
self.translate = translate
self.scale = scale
@@ -408,6 +1054,28 @@ self.pre_transform = pre_transform
def affine_transform(self, img: np.ndarray, border: tuple[int, int]) -> tuple[np.ndarray, np.ndarray, float]:
+ """Apply a sequence of affine transformations centered around the image center.
+
+ This function performs a series of geometric transformations on the input image, including translation,
+ perspective change, rotation, scaling, and shearing. The transformations are applied in a specific order to
+ maintain consistency.
+
+ Args:
+ img (np.ndarray): Input image to be transformed.
+ border (tuple[int, int]): Border dimensions for the transformed image.
+
+ Returns:
+ img (np.ndarray): Transformed image.
+ M (np.ndarray): 3x3 transformation matrix.
+ s (float): Scale factor applied during the transformation.
+
+ Examples:
+ >>> import numpy as np
+ >>> img = np.random.rand(100, 100, 3)
+ >>> border = (10, 10)
+ >>> rp = RandomPerspective()
+ >>> transformed_img, matrix, scale = rp.affine_transform(img, border)
+ """
# Center
C = np.eye(3, dtype=np.float32)
@@ -450,6 +1118,25 @@ return img, M, s
def apply_bboxes(self, bboxes: np.ndarray, M: np.ndarray) -> np.ndarray:
+ """Apply affine transformation to bounding boxes.
+
+ This function applies an affine transformation to a set of bounding boxes using the provided transformation
+ matrix.
+
+ Args:
+ bboxes (np.ndarray): Bounding boxes in xyxy format with shape (N, 4), where N is the number of bounding
+ boxes.
+ M (np.ndarray): Affine transformation matrix with shape (3, 3).
+
+ Returns:
+ (np.ndarray): Transformed bounding boxes in xyxy format with shape (N, 4).
+
+ Examples:
+ >>> rp = RandomPerspective()
+ >>> bboxes = np.array([[10, 10, 20, 20], [30, 30, 40, 40]], dtype=np.float32)
+ >>> M = np.eye(3, dtype=np.float32)
+ >>> transformed_bboxes = rp.apply_bboxes(bboxes, M)
+ """
n = len(bboxes)
if n == 0:
return bboxes
@@ -465,6 +1152,26 @@ return np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1)), dtype=bboxes.dtype).reshape(4, n).T
def apply_segments(self, segments: np.ndarray, M: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
+ """Apply affine transformations to segments and generate new bounding boxes.
+
+ This function applies affine transformations to input segments and generates new bounding boxes based on the
+ transformed segments. It clips the transformed segments to fit within the new bounding boxes.
+
+ Args:
+ segments (np.ndarray): Input segments with shape (N, M, 2), where N is the number of segments and M is the
+ number of points in each segment.
+ M (np.ndarray): Affine transformation matrix with shape (3, 3).
+
+ Returns:
+ bboxes (np.ndarray): New bounding boxes with shape (N, 4) in xyxy format.
+ segments (np.ndarray): Transformed and clipped segments with shape (N, M, 2).
+
+ Examples:
+ >>> rp = RandomPerspective()
+ >>> segments = np.random.rand(10, 500, 2) # 10 segments with 500 points each
+ >>> M = np.eye(3) # Identity transformation matrix
+ >>> new_bboxes, new_segments = rp.apply_segments(segments, M)
+ """
n, num = segments.shape[:2]
if n == 0:
return [], segments
@@ -481,6 +1188,26 @@ return bboxes, segments
def apply_keypoints(self, keypoints: np.ndarray, M: np.ndarray) -> np.ndarray:
+ """Apply affine transformation to keypoints.
+
+ This method transforms the input keypoints using the provided affine transformation matrix. It handles
+ perspective rescaling if necessary and updates the visibility of keypoints that fall outside the image
+ boundaries after transformation.
+
+ Args:
+ keypoints (np.ndarray): Array of keypoints with shape (N, K, 3), where N is the number of instances, K is
+ the number of keypoints per instance, and 3 represents (x, y, visibility).
+ M (np.ndarray): 3x3 affine transformation matrix.
+
+ Returns:
+ (np.ndarray): Transformed keypoints array with the same shape as input (N, K, 3).
+
+ Examples:
+ >>> random_perspective = RandomPerspective()
+ >>> keypoints = np.random.rand(5, 17, 3) # 5 instances, 17 keypoints each
+ >>> M = np.eye(3) # Identity transformation
+ >>> transformed_keypoints = random_perspective.apply_keypoints(keypoints, M)
+ """
n, nkpt = keypoints.shape[:2]
if n == 0:
return keypoints
@@ -494,6 +1221,41 @@ return np.concatenate([xy, visible], axis=-1).reshape(n, nkpt, 3)
def __call__(self, labels: dict[str, Any]) -> dict[str, Any]:
+ """Apply random perspective and affine transformations to an image and its associated labels.
+
+ This method performs a series of transformations including rotation, translation, scaling, shearing, and
+ perspective distortion on the input image and adjusts the corresponding bounding boxes, segments, and keypoints
+ accordingly.
+
+ Args:
+ labels (dict[str, Any]): A dictionary containing image data and annotations.
+
+ Returns:
+ (dict[str, Any]): Transformed labels dictionary containing:
+ - 'img' (np.ndarray): The transformed image.
+ - 'cls' (np.ndarray): Updated class labels.
+ - 'instances' (Instances): Updated object instances.
+ - 'resized_shape' (tuple[int, int]): New image shape after transformation.
+
+ Examples:
+ >>> transform = RandomPerspective()
+ >>> image = np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8)
+ >>> labels = {
+ ... "img": image,
+ ... "cls": np.array([0, 1, 2]),
+ ... "instances": Instances(bboxes=np.array([[10, 10, 50, 50], [100, 100, 150, 150]])),
+ ... }
+ >>> result = transform(labels)
+ >>> assert result["img"].shape[:2] == result["resized_shape"]
+
+ Notes:
+ 'labels' arg must include:
+ - 'img' (np.ndarray): The input image.
+ - 'cls' (np.ndarray): Class labels.
+ - 'instances' (Instances): Object instances with bounding boxes, segments, and keypoints.
+ May include:
+ - 'mosaic_border' (tuple[int, int]): Border size for mosaic augmentation.
+ """
if self.pre_transform and "mosaic_border" not in labels:
labels = self.pre_transform(labels)
labels.pop("ratio_pad", None) # do not need ratio pad
@@ -546,6 +1308,36 @@ area_thr: float = 0.1,
eps: float = 1e-16,
) -> np.ndarray:
+ """Compute candidate boxes for further processing based on size and aspect ratio criteria.
+
+ This method compares boxes before and after augmentation to determine if they meet specified thresholds for
+ width, height, aspect ratio, and area. It's used to filter out boxes that have been overly distorted or reduced
+ by the augmentation process.
+
+ Args:
+ box1 (np.ndarray): Original boxes before augmentation, shape (4, N) where N is the number of boxes. Format
+ is [x1, y1, x2, y2] in absolute coordinates.
+ box2 (np.ndarray): Augmented boxes after transformation, shape (4, N). Format is [x1, y1, x2, y2] in
+ absolute coordinates.
+ wh_thr (int): Width and height threshold in pixels. Boxes smaller than this in either dimension are
+ rejected.
+ ar_thr (int): Aspect ratio threshold. Boxes with an aspect ratio greater than this value are rejected.
+ area_thr (float): Area ratio threshold. Boxes with an area ratio (new/old) less than this value are
+ rejected.
+ eps (float): Small epsilon value to prevent division by zero.
+
+ Returns:
+ (np.ndarray): Boolean array of shape (N,) indicating which boxes are candidates. True values correspond to
+ boxes that meet all criteria.
+
+ Examples:
+ >>> random_perspective = RandomPerspective()
+ >>> box1 = np.array([[0, 0, 100, 100], [0, 0, 50, 50]]).T
+ >>> box2 = np.array([[10, 10, 90, 90], [5, 5, 45, 45]]).T
+ >>> candidates = random_perspective.box_candidates(box1, box2)
+ >>> print(candidates)
+ [True True]
+ """
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio
@@ -553,13 +1345,61 @@
class RandomHSV:
+ """Randomly adjust the Hue, Saturation, and Value (HSV) channels of an image.
+
+ This class applies random HSV augmentation to images within predefined limits set by hgain, sgain, and vgain.
+
+ Attributes:
+ hgain (float): Maximum variation for hue. Range is typically [0, 1].
+ sgain (float): Maximum variation for saturation. Range is typically [0, 1].
+ vgain (float): Maximum variation for value. Range is typically [0, 1].
+
+ Methods:
+ __call__: Apply random HSV augmentation to an image.
+
+ Examples:
+ >>> import numpy as np
+ >>> from ultralytics.data.augment import RandomHSV
+ >>> augmenter = RandomHSV(hgain=0.5, sgain=0.5, vgain=0.5)
+ >>> image = np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8)
+ >>> labels = {"img": image}
+ >>> labels = augmenter(labels)
+ >>> augmented_image = labels["img"]
+ """
def __init__(self, hgain: float = 0.5, sgain: float = 0.5, vgain: float = 0.5) -> None:
+ """Initialize the RandomHSV object for random HSV (Hue, Saturation, Value) augmentation.
+
+ This class applies random adjustments to the HSV channels of an image within specified limits.
+
+ Args:
+ hgain (float): Maximum variation for hue. Should be in the range [0, 1].
+ sgain (float): Maximum variation for saturation. Should be in the range [0, 1].
+ vgain (float): Maximum variation for value. Should be in the range [0, 1].
+ """
self.hgain = hgain
self.sgain = sgain
self.vgain = vgain
def __call__(self, labels: dict[str, Any]) -> dict[str, Any]:
+ """Apply random HSV augmentation to an image within predefined limits.
+
+ This method modifies the input image by randomly adjusting its Hue, Saturation, and Value (HSV) channels. The
+ adjustments are made within the limits set by hgain, sgain, and vgain during initialization.
+
+ Args:
+ labels (dict[str, Any]): A dictionary containing image data and metadata. Must include an 'img' key with the
+ image as a numpy array.
+
+ Returns:
+ (dict[str, Any]): The labels dictionary with the HSV-augmented image.
+
+ Examples:
+ >>> hsv_augmenter = RandomHSV(hgain=0.5, sgain=0.5, vgain=0.5)
+ >>> labels = {"img": np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8)}
+ >>> labels = hsv_augmenter(labels)
+ >>> augmented_img = labels["img"]
+ """
img = labels["img"]
if img.shape[-1] != 3: # only apply to RGB images
return labels
@@ -581,8 +1421,40 @@
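The elided HSV jitter is assumed to follow the usual OpenCV lookup-table approach: sample three random gains, build a per-channel LUT, and convert back to the original color space. A hedged sketch, not the exact library implementation:
import cv2
import numpy as np

def random_hsv(img: np.ndarray, hgain=0.5, sgain=0.5, vgain=0.5) -> np.ndarray:
    """Jitter the H, S, V channels of a BGR uint8 image with random gains; a simplified sketch."""
    r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gains around 1.0
    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    x = np.arange(0, 256, dtype=r.dtype)
    lut_hue = ((x * r[0]) % 180).astype(img.dtype)  # OpenCV hue range for uint8 is [0, 180)
    lut_sat = np.clip(x * r[1], 0, 255).astype(img.dtype)
    lut_val = np.clip(x * r[2], 0, 255).astype(img.dtype)
    hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

out = random_hsv(np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8))
print(out.shape)  # (64, 64, 3)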
class RandomFlip:
+ """Apply a random horizontal or vertical flip to an image with a given probability.
+
+ This class performs random image flipping and updates corresponding instance annotations such as bounding boxes and
+ keypoints.
+
+ Attributes:
+ p (float): Probability of applying the flip. Must be between 0 and 1.
+ direction (str): Direction of flip, either 'horizontal' or 'vertical'.
+ flip_idx (array-like): Index mapping for flipping keypoints, if applicable.
+
+ Methods:
+ __call__: Apply the random flip transformation to an image and its annotations.
+
+ Examples:
+ >>> transform = RandomFlip(p=0.5, direction="horizontal")
+ >>> result = transform({"img": image, "instances": instances})
+ >>> flipped_image = result["img"]
+ >>> flipped_instances = result["instances"]
+ """
def __init__(self, p: float = 0.5, direction: str = "horizontal", flip_idx: list[int] | None = None) -> None:
+ """Initialize the RandomFlip class with probability and direction.
+
+ This class applies a random horizontal or vertical flip to an image with a given probability. It also updates
+ any instances (bounding boxes, keypoints, etc.) accordingly.
+
+ Args:
+ p (float): The probability of applying the flip. Must be between 0 and 1.
+ direction (str): The direction to apply the flip. Must be 'horizontal' or 'vertical'.
+ flip_idx (list[int] | None): Index mapping for flipping keypoints, if any.
+
+ Raises:
+ AssertionError: If direction is not 'horizontal' or 'vertical', or if p is not between 0 and 1.
+ """
assert direction in {"horizontal", "vertical"}, f"Support direction `horizontal` or `vertical`, got {direction}"
assert 0 <= p <= 1.0, f"The probability should be in range [0, 1], but got {p}."
@@ -591,6 +1463,27 @@ self.flip_idx = flip_idx
def __call__(self, labels: dict[str, Any]) -> dict[str, Any]:
+ """Apply random flip to an image and update any instances like bounding boxes or keypoints accordingly.
+
+ This method randomly flips the input image either horizontally or vertically based on the initialized
+ probability and direction. It also updates the corresponding instances (bounding boxes, keypoints) to match the
+ flipped image.
+
+ Args:
+ labels (dict[str, Any]): A dictionary containing the following keys:
+ - 'img' (np.ndarray): The image to be flipped.
+ - 'instances' (ultralytics.utils.instance.Instances): Object containing boxes and optionally keypoints.
+
+ Returns:
+ (dict[str, Any]): The same dictionary with the flipped image and updated instances:
+ - 'img' (np.ndarray): The flipped image.
+ - 'instances' (ultralytics.utils.instance.Instances): Updated instances matching the flipped image.
+
+ Examples:
+ >>> labels = {"img": np.random.rand(640, 640, 3), "instances": Instances(...)}
+ >>> random_flip = RandomFlip(p=0.5, direction="horizontal")
+ >>> flipped_labels = random_flip(labels)
+ """
img = labels["img"]
instances = labels.pop("instances")
instances.convert_bbox(format="xywh")
@@ -615,6 +1508,28 @@
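A minimal sketch of the flip bookkeeping for normalized xywh boxes (an assumed layout; the library's Instances object handles this internally via fliplr/flipud):
import numpy as np

def hflip(img: np.ndarray, boxes_xywhn: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Horizontally flip an image and mirror normalized xywh box centers; a simplified sketch."""
    img = np.fliplr(img)
    boxes = boxes_xywhn.copy()
    boxes[:, 0] = 1.0 - boxes[:, 0]  # only the x center changes; width and height are unaffected
    return img, boxes

img = np.zeros((100, 200, 3), dtype=np.uint8)
boxes = np.array([[0.25, 0.5, 0.2, 0.4]])
_, flipped = hflip(img, boxes)
print(flipped)  # [[0.75 0.5  0.2  0.4 ]]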
class LetterBox:
+ """Resize image and padding for detection, instance segmentation, pose.
+
+ This class resizes and pads images to a specified shape while preserving aspect ratio. It also updates corresponding
+ labels and bounding boxes.
+
+ Attributes:
+ new_shape (tuple): Target shape (height, width) for resizing.
+ auto (bool): Whether to use minimum rectangle.
+ scale_fill (bool): Whether to stretch the image to new_shape.
+ scaleup (bool): Whether to allow scaling up. If False, only scale down.
+ stride (int): Stride for rounding padding.
+ center (bool): Whether to center the image or align to top-left.
+
+ Methods:
+ __call__: Resize and pad image, update labels and bounding boxes.
+
+ Examples:
+ >>> transform = LetterBox(new_shape=(640, 640))
+ >>> result = transform(labels)
+ >>> resized_img = result["img"]
+ >>> updated_instances = result["instances"]
+ """
def __init__(
self,
@@ -627,6 +1542,21 @@ padding_value: int = 114,
interpolation: int = cv2.INTER_LINEAR,
):
+ """Initialize LetterBox object for resizing and padding images.
+
+ This class is designed to resize and pad images for object detection, instance segmentation, and pose estimation
+ tasks. It supports various resizing modes including auto-sizing, scale-fill, and letterboxing.
+
+ Args:
+ new_shape (tuple[int, int]): Target size (height, width) for the resized image.
+ auto (bool): If True, use minimum rectangle to resize. If False, use new_shape directly.
+ scale_fill (bool): If True, stretch the image to new_shape without padding.
+ scaleup (bool): If True, allow scaling up. If False, only scale down.
+ center (bool): If True, center the placed image. If False, place image in top-left corner.
+ stride (int): Stride of the model (e.g., 32 for YOLOv5).
+ padding_value (int): Value for padding the image. Default is 114.
+ interpolation (int): Interpolation method for resizing. Default is cv2.INTER_LINEAR.
+ """
self.new_shape = new_shape
self.auto = auto
self.scale_fill = scale_fill
@@ -637,6 +1567,27 @@ self.interpolation = interpolation
def __call__(self, labels: dict[str, Any] | None = None, image: np.ndarray = None) -> dict[str, Any] | np.ndarray:
+ """Resize and pad an image for object detection, instance segmentation, or pose estimation tasks.
+
+ This method applies letterboxing to the input image, which involves resizing the image while maintaining its
+ aspect ratio and adding padding to fit the new shape. It also updates any associated labels accordingly.
+
+ Args:
+ labels (dict[str, Any] | None): A dictionary containing image data and associated labels, or empty dict if
+ None.
+ image (np.ndarray | None): The input image as a numpy array. If None, the image is taken from 'labels'.
+
+ Returns:
+ (dict[str, Any] | np.ndarray): If 'labels' is provided, returns an updated dictionary with the resized and
+ padded image, updated labels, and additional metadata. If 'labels' is empty, returns the resized and
+ padded image only.
+
+ Examples:
+ >>> letterbox = LetterBox(new_shape=(640, 640))
+ >>> result = letterbox(labels={"img": np.zeros((480, 640, 3)), "instances": Instances(...)})
+ >>> resized_img = result["img"]
+ >>> updated_instances = result["instances"]
+ """
if labels is None:
labels = {}
img = labels.get("img") if image is None else image
@@ -695,6 +1646,27 @@
@staticmethod
def _update_labels(labels: dict[str, Any], ratio: tuple[float, float], padw: float, padh: float) -> dict[str, Any]:
+ """Update labels after applying letterboxing to an image.
+
+ This method modifies the bounding box coordinates of instances in the labels to account for resizing and padding
+ applied during letterboxing.
+
+ Args:
+ labels (dict[str, Any]): A dictionary containing image labels and instances.
+ ratio (tuple[float, float]): Scaling ratios (width, height) applied to the image.
+ padw (float): Padding width added to the image.
+ padh (float): Padding height added to the image.
+
+ Returns:
+ (dict[str, Any]): Updated labels dictionary with modified instance coordinates.
+
+ Examples:
+ >>> letterbox = LetterBox(new_shape=(640, 640))
+ >>> labels = {"instances": Instances(...)}
+ >>> ratio = (0.5, 0.5)
+ >>> padw, padh = 10, 20
+ >>> updated_labels = letterbox._update_labels(labels, ratio, padw, padh)
+ """
labels["instances"].convert_bbox(format="xyxy")
labels["instances"].denormalize(*labels["img"].shape[:2][::-1])
labels["instances"].scale(*ratio)
@@ -703,17 +1675,41 @@
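The letterboxing arithmetic itself (scale ratio, new unpadded size, and centered padding split) can be illustrated with a small standalone sketch, assuming the default centered behaviour described above and omitting the auto/scale_fill branches:
import cv2
import numpy as np

def letterbox(img: np.ndarray, new_shape=(640, 640), color=(114, 114, 114)) -> np.ndarray:
    """Resize with preserved aspect ratio and pad to new_shape; a simplified sketch of the idea."""
    h, w = img.shape[:2]
    r = min(new_shape[0] / h, new_shape[1] / w)  # scale so the image fits inside new_shape
    new_unpad = (int(round(w * r)), int(round(h * r)))  # (width, height) after resizing
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]
    top, bottom = dh // 2, dh - dh // 2  # split padding to center the image
    left, right = dw // 2, dw - dw // 2
    img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    return cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)

print(letterbox(np.zeros((480, 640, 3), dtype=np.uint8)).shape)  # (640, 640, 3)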
class CopyPaste(BaseMixTransform):
+ """CopyPaste class for applying Copy-Paste augmentation to image datasets.
+
+ This class implements the Copy-Paste augmentation technique as described in the paper "Simple Copy-Paste is a Strong
+ Data Augmentation Method for Instance Segmentation" (https://arxiv.org/abs/2012.07177). It combines objects from
+ different images to create new training samples.
+
+ Attributes:
+ dataset (Any): The dataset to which Copy-Paste augmentation will be applied.
+ pre_transform (Callable | None): Optional transform to apply before Copy-Paste.
+ p (float): Probability of applying Copy-Paste augmentation.
+
+ Methods:
+ _mix_transform: Apply Copy-Paste augmentation to the input labels.
+ __call__: Apply the Copy-Paste transformation to images and annotations.
+
+ Examples:
+ >>> from ultralytics.data.augment import CopyPaste
+ >>> dataset = YourDataset(...) # Your image dataset
+ >>> copypaste = CopyPaste(dataset, p=0.5)
+ >>> augmented_labels = copypaste(original_labels)
+ """
def __init__(self, dataset=None, pre_transform=None, p: float = 0.5, mode: str = "flip") -> None:
+ """Initialize CopyPaste object with dataset, pre_transform, and probability of applying CopyPaste."""
super().__init__(dataset=dataset, pre_transform=pre_transform, p=p)
assert mode in {"flip", "mixup"}, f"Expected `mode` to be `flip` or `mixup`, but got {mode}."
self.mode = mode
def _mix_transform(self, labels: dict[str, Any]) -> dict[str, Any]:
+ """Apply Copy-Paste augmentation to combine objects from another image into the current image."""
labels2 = labels["mix_labels"][0]
return self._transform(labels, labels2)
def __call__(self, labels: dict[str, Any]) -> dict[str, Any]:
+ """Apply Copy-Paste augmentation to an image and its labels."""
if len(labels["instances"].segments) == 0 or self.p == 0:
return labels
if self.mode == "flip":
@@ -740,6 +1736,7 @@ return labels
def _transform(self, labels1: dict[str, Any], labels2: dict[str, Any] = {}) -> dict[str, Any]:
+ """Apply Copy-Paste augmentation to combine objects from another image into the current image."""
im = labels1["img"]
if "mosaic_border" not in labels1:
im = im.copy() # avoid modifying original non-mosaic image
@@ -777,8 +1774,41 @@
class Albumentations:
+ """Albumentations transformations for image augmentation.
+
+ This class applies various image transformations using the Albumentations library. It includes operations such as
+ Blur, Median Blur, conversion to grayscale, Contrast Limited Adaptive Histogram Equalization (CLAHE), random changes
+ in brightness and contrast, RandomGamma, and image quality reduction through compression.
+
+ Attributes:
+ p (float): Probability of applying the transformations.
+ transform (albumentations.Compose): Composed Albumentations transforms.
+ contains_spatial (bool): Indicates if the transforms include spatial operations.
+
+ Methods:
+ __call__: Apply the Albumentations transformations to the input labels.
+
+ Examples:
+ >>> transform = Albumentations(p=0.5)
+ >>> augmented_labels = transform(labels)
+
+ Notes:
+ - Requires Albumentations version 1.0.3 or higher.
+ - Spatial transforms are handled differently to ensure bbox compatibility.
+ - Some transforms are applied with very low probability (0.01) by default.
+ """
def __init__(self, p: float = 1.0, transforms: list | None = None) -> None:
+ """Initialize the Albumentations transform object for YOLO bbox formatted parameters.
+
+ This class applies various image augmentations using the Albumentations library, including Blur, Median Blur,
+ conversion to grayscale, Contrast Limited Adaptive Histogram Equalization, random changes of brightness and
+ contrast, RandomGamma, and image quality reduction through compression.
+
+ Args:
+ p (float): Probability of applying the augmentations. Must be between 0 and 1.
+ transforms (list | None): List of custom Albumentations transforms. If None, uses default transforms.
+ """
self.p = p
self.transform = None
prefix = colorstr("albumentations: ")
@@ -867,6 +1897,35 @@ LOGGER.info(f"{prefix}{e}")
def __call__(self, labels: dict[str, Any]) -> dict[str, Any]:
+ """Apply Albumentations transformations to input labels.
+
+ This method applies a series of image augmentations using the Albumentations library. It can perform both
+ spatial and non-spatial transformations on the input image and its corresponding labels.
+
+ Args:
+ labels (dict[str, Any]): A dictionary containing image data and annotations. Expected keys are:
+ - 'img': np.ndarray representing the image
+ - 'cls': np.ndarray of class labels
+ - 'instances': object containing bounding boxes and other instance information
+
+ Returns:
+ (dict[str, Any]): The input dictionary with augmented image and updated annotations.
+
+ Examples:
+ >>> transform = Albumentations(p=0.5)
+ >>> labels = {
+ ... "img": np.random.rand(640, 640, 3),
+ ... "cls": np.array([0, 1]),
+ ... "instances": Instances(bboxes=np.array([[0, 0, 1, 1], [0.5, 0.5, 0.8, 0.8]])),
+ ... }
+ >>> augmented = transform(labels)
+ >>> assert augmented["img"].shape == (640, 640, 3)
+
+ Notes:
+ - The method applies transformations with probability self.p.
+ - Spatial transforms update bounding boxes, while non-spatial transforms only modify the image.
+ - Requires the Albumentations library to be installed.
+ """
if self.transform is None or random.random() > self.p:
return labels
@@ -894,6 +1953,33 @@
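For reference, a hedged sketch of what such a pixel-level Albumentations pipeline could look like when wired up for YOLO-format boxes; the transforms and probabilities shown are illustrative assumptions, not the library's exact defaults:
import albumentations as A  # assumes albumentations >= 1.0.3 is installed
import numpy as np

# Low-probability pixel-level transforms composed with YOLO-format bbox bookkeeping.
transform = A.Compose(
    [A.Blur(p=0.01), A.MedianBlur(p=0.01), A.ToGray(p=0.01), A.CLAHE(p=0.01)],
    bbox_params=A.BboxParams(format="yolo", label_fields=["class_labels"]),
)
img = np.random.randint(0, 255, (320, 320, 3), dtype=np.uint8)
out = transform(image=img, bboxes=[(0.5, 0.5, 0.2, 0.2)], class_labels=[0])
print(out["image"].shape, out["bboxes"])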
class Format:
+ """A class for formatting image annotations for object detection, instance segmentation, and pose estimation tasks.
+
+ This class standardizes image and instance annotations to be used by the `collate_fn` in PyTorch DataLoader.
+
+ Attributes:
+ bbox_format (str): Format for bounding boxes. Options are 'xywh' or 'xyxy'.
+ normalize (bool): Whether to normalize bounding boxes.
+ return_mask (bool): Whether to return instance masks for segmentation.
+ return_keypoint (bool): Whether to return keypoints for pose estimation.
+ return_obb (bool): Whether to return oriented bounding boxes.
+ mask_ratio (int): Downsample ratio for masks.
+ mask_overlap (bool): Whether to overlap masks.
+ batch_idx (bool): Whether to keep batch indexes.
+        bgr (float): Probability of returning BGR images instead of RGB.
+
+ Methods:
+ __call__: Format labels dictionary with image, classes, bounding boxes, and optionally masks and keypoints.
+ _format_img: Convert image from Numpy array to PyTorch tensor.
+ _format_segments: Convert polygon points to bitmap masks.
+
+ Examples:
+ >>> formatter = Format(bbox_format="xywh", normalize=True, return_mask=True)
+ >>> formatted_labels = formatter(labels)
+ >>> img = formatted_labels["img"]
+ >>> bboxes = formatted_labels["bboxes"]
+ >>> masks = formatted_labels["masks"]
+ """
def __init__(
self,
@@ -907,6 +1993,22 @@ batch_idx: bool = True,
bgr: float = 0.0,
):
+ """Initialize the Format class with given parameters for image and instance annotation formatting.
+
+ This class standardizes image and instance annotations for object detection, instance segmentation, and pose
+ estimation tasks, preparing them for use in PyTorch DataLoader's `collate_fn`.
+
+ Args:
+ bbox_format (str): Format for bounding boxes. Options are 'xywh', 'xyxy', etc.
+ normalize (bool): Whether to normalize bounding boxes to [0,1].
+ return_mask (bool): If True, returns instance masks for segmentation tasks.
+ return_keypoint (bool): If True, returns keypoints for pose estimation tasks.
+ return_obb (bool): If True, returns oriented bounding boxes.
+ mask_ratio (int): Downsample ratio for masks.
+ mask_overlap (bool): If True, allows mask overlap.
+ batch_idx (bool): If True, keeps batch indexes.
+ bgr (float): Probability of returning BGR images instead of RGB.
+ """
self.bbox_format = bbox_format
self.normalize = normalize
self.return_mask = return_mask # set False when training detection only
@@ -918,6 +2020,33 @@ self.bgr = bgr
def __call__(self, labels: dict[str, Any]) -> dict[str, Any]:
+ """Format image annotations for object detection, instance segmentation, and pose estimation tasks.
+
+ This method standardizes the image and instance annotations to be used by the `collate_fn` in PyTorch
+ DataLoader. It processes the input labels dictionary, converting annotations to the specified format and
+ applying normalization if required.
+
+ Args:
+ labels (dict[str, Any]): A dictionary containing image and annotation data with the following keys:
+ - 'img': The input image as a numpy array.
+ - 'cls': Class labels for instances.
+ - 'instances': An Instances object containing bounding boxes, segments, and keypoints.
+
+ Returns:
+ (dict[str, Any]): A dictionary with formatted data, including:
+ - 'img': Formatted image tensor.
+ - 'cls': Class labels tensor.
+ - 'bboxes': Bounding boxes tensor in the specified format.
+ - 'masks': Instance masks tensor (if return_mask is True).
+ - 'keypoints': Keypoints tensor (if return_keypoint is True).
+ - 'batch_idx': Batch index tensor (if batch_idx is True).
+
+ Examples:
+ >>> formatter = Format(bbox_format="xywh", normalize=True, return_mask=True)
+ >>> labels = {"img": np.random.rand(640, 640, 3), "cls": np.array([0, 1]), "instances": Instances(...)}
+ >>> formatted_labels = formatter(labels)
+ >>> print(formatted_labels.keys())
+ """
img = labels.pop("img")
h, w = img.shape[:2]
cls = labels.pop("cls")
@@ -974,6 +2103,28 @@ return labels
def _format_img(self, img: np.ndarray) -> torch.Tensor:
+ """Format an image for YOLO from a Numpy array to a PyTorch tensor.
+
+ This function performs the following operations:
+ 1. Ensures the image has 3 dimensions (adds a channel dimension if needed).
+ 2. Transposes the image from HWC to CHW format.
+ 3. Optionally reverses the color channels (e.g., BGR to RGB) based on the bgr probability.
+ 4. Converts the image to a contiguous array.
+ 5. Converts the Numpy array to a PyTorch tensor.
+
+ Args:
+ img (np.ndarray): Input image as a Numpy array with shape (H, W, C) or (H, W).
+
+ Returns:
+ (torch.Tensor): Formatted image as a PyTorch tensor with shape (C, H, W).
+
+ Examples:
+ >>> import numpy as np
+ >>> img = np.random.rand(100, 100, 3)
+ >>> formatted_img = self._format_img(img)
+ >>> print(formatted_img.shape)
+ torch.Size([3, 100, 100])
+ """
if len(img.shape) < 3:
img = img[..., None]
img = img.transpose(2, 0, 1)
@@ -984,6 +2135,24 @@ def _format_segments(
self, instances: Instances, cls: np.ndarray, w: int, h: int
) -> tuple[np.ndarray, Instances, np.ndarray]:
+ """Convert polygon segments to bitmap masks.
+
+ Args:
+ instances (Instances): Object containing segment information.
+ cls (np.ndarray): Class labels for each instance.
+ w (int): Width of the image.
+ h (int): Height of the image.
+
+ Returns:
+ masks (np.ndarray): Bitmap masks with shape (N, H, W) or (1, H, W) if mask_overlap is True.
+ instances (Instances): Updated instances object with sorted segments if mask_overlap is True.
+ cls (np.ndarray): Updated class labels, sorted if mask_overlap is True.
+
+ Notes:
+ - If self.mask_overlap is True, masks are overlapped and sorted by area.
+ - If self.mask_overlap is False, each mask is represented separately.
+ - Masks are downsampled according to self.mask_ratio.
+ """
segments = instances.segments
if self.mask_overlap:
masks, sorted_idx = polygons2masks_overlap((h, w), segments, downsample_ratio=self.mask_ratio)
@@ -997,12 +2166,28 @@
class LoadVisualPrompt:
+ """Create visual prompts from bounding boxes or masks for model input."""
def __init__(self, scale_factor: float = 1 / 8) -> None:
+ """Initialize the LoadVisualPrompt with a scale factor.
+
+ Args:
+ scale_factor (float): Factor to scale the input image dimensions.
+ """
self.scale_factor = scale_factor
@staticmethod
def make_mask(boxes: torch.Tensor, h: int, w: int) -> torch.Tensor:
+ """Create binary masks from bounding boxes.
+
+ Args:
+ boxes (torch.Tensor): Bounding boxes in xyxy format, shape: (N, 4).
+ h (int): Height of the mask.
+ w (int): Width of the mask.
+
+ Returns:
+ (torch.Tensor): Binary masks with shape (N, h, w).
+ """
x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1) # x1 shape(n,1,1)
r = torch.arange(w)[None, None, :] # rows shape(1,1,w)
c = torch.arange(h)[None, :, None] # cols shape(1,h,1)
@@ -1010,6 +2195,14 @@ return (r >= x1) * (r < x2) * (c >= y1) * (c < y2)
def __call__(self, labels: dict[str, Any]) -> dict[str, Any]:
+ """Process labels to create visual prompts.
+
+ Args:
+ labels (dict[str, Any]): Dictionary containing image data and annotations.
+
+ Returns:
+ (dict[str, Any]): Updated labels with visual prompts added.
+ """
imgsz = labels["img"].shape[1:]
bboxes, masks = None, None
if "bboxes" in labels:
@@ -1028,6 +2221,20 @@ bboxes: np.ndarray | torch.Tensor = None,
masks: np.ndarray | torch.Tensor = None,
) -> torch.Tensor:
+ """Generate visual masks based on bounding boxes or masks.
+
+ Args:
+ category (int | np.ndarray | torch.Tensor): The category labels for the objects.
+ shape (tuple[int, int]): The shape of the image (height, width).
+ bboxes (np.ndarray | torch.Tensor, optional): Bounding boxes for the objects, xyxy format.
+ masks (np.ndarray | torch.Tensor, optional): Masks for the objects.
+
+ Returns:
+ (torch.Tensor): A tensor containing the visual masks for each category.
+
+ Raises:
+ ValueError: If neither bboxes nor masks are provided.
+ """
masksz = (int(shape[0] * self.scale_factor), int(shape[1] * self.scale_factor))
if bboxes is not None:
if isinstance(bboxes, np.ndarray):
@@ -1055,6 +2262,29 @@
class RandomLoadText:
+ """Randomly sample positive and negative texts and update class indices accordingly.
+
+ This class is responsible for sampling texts from a given set of class texts, including both positive (present in
+ the image) and negative (not present in the image) samples. It updates the class indices to reflect the sampled
+ texts and can optionally pad the text list to a fixed length.
+
+ Attributes:
+ prompt_format (str): Format string for text prompts.
+ neg_samples (tuple[int, int]): Range for randomly sampling negative texts.
+ max_samples (int): Maximum number of different text samples in one image.
+ padding (bool): Whether to pad texts to max_samples.
+ padding_value (list[str]): The text used for padding when padding is True.
+
+ Methods:
+ __call__: Process the input labels and return updated classes and texts.
+
+ Examples:
+ >>> loader = RandomLoadText(prompt_format="Object: {}", neg_samples=(5, 10), max_samples=20)
+ >>> labels = {"cls": [0, 1, 2], "texts": [["cat"], ["dog"], ["bird"]], "instances": [...]}
+ >>> updated_labels = loader(labels)
+ >>> print(updated_labels["texts"])
+ ['Object: cat', 'Object: dog', 'Object: bird', 'Object: elephant', 'Object: car']
+ """
def __init__(
self,
@@ -1064,6 +2294,21 @@ padding: bool = False,
padding_value: list[str] = [""],
) -> None:
+ """Initialize the RandomLoadText class for randomly sampling positive and negative texts.
+
+ This class is designed to randomly sample positive texts and negative texts, and update the class indices
+ accordingly to the number of samples. It can be used for text-based object detection tasks.
+
+ Args:
+ prompt_format (str): Format string for the prompt. The format string should contain a single pair of curly
+ braces {} where the text will be inserted.
+ neg_samples (tuple[int, int]): A range to randomly sample negative texts. The first integer specifies the
+ minimum number of negative samples, and the second integer specifies the maximum.
+ max_samples (int): The maximum number of different text samples in one image.
+ padding (bool): Whether to pad texts to max_samples. If True, the number of texts will always be equal to
+ max_samples.
+ padding_value (list[str]): The padding text to use when padding is True.
+ """
self.prompt_format = prompt_format
self.neg_samples = neg_samples
self.max_samples = max_samples
@@ -1071,6 +2316,24 @@ self.padding_value = padding_value
def __call__(self, labels: dict[str, Any]) -> dict[str, Any]:
+ """Randomly sample positive and negative texts and update class indices accordingly.
+
+ This method samples positive texts based on the existing class labels in the image, and randomly selects
+ negative texts from the remaining classes. It then updates the class indices to match the new sampled text
+ order.
+
+ Args:
+ labels (dict[str, Any]): A dictionary containing image labels and metadata. Must include 'texts' and 'cls'
+ keys.
+
+ Returns:
+ (dict[str, Any]): Updated labels dictionary with new 'cls' and 'texts' entries.
+
+ Examples:
+ >>> loader = RandomLoadText(prompt_format="A photo of {}", neg_samples=(5, 10), max_samples=20)
+ >>> labels = {"cls": np.array([[0], [1], [2]]), "texts": [["dog"], ["cat"], ["bird"]]}
+ >>> updated_labels = loader(labels)
+ """
assert "texts" in labels, "No texts found in labels."
class_texts = labels["texts"]
num_classes = len(class_texts)
@@ -1119,6 +2382,35 @@
def v8_transforms(dataset, imgsz: int, hyp: IterableSimpleNamespace, stretch: bool = False):
+ """Apply a series of image transformations for training.
+
+ This function creates a composition of image augmentation techniques to prepare images for YOLO training. It
+ includes operations such as mosaic, copy-paste, random perspective, mixup, and various color adjustments.
+
+ Args:
+ dataset (Dataset): The dataset object containing image data and annotations.
+ imgsz (int): The target image size for resizing.
+ hyp (IterableSimpleNamespace): A namespace of hyperparameters controlling various aspects of the
+ transformations.
+ stretch (bool): If True, applies stretching to the image. If False, uses LetterBox resizing.
+
+ Returns:
+ (Compose): A composition of image transformations to be applied to the dataset.
+
+ Examples:
+ >>> from ultralytics.data.dataset import YOLODataset
+ >>> from ultralytics.utils import IterableSimpleNamespace
+ >>> dataset = YOLODataset(img_path="path/to/images", imgsz=640)
+ >>> hyp = IterableSimpleNamespace(mosaic=1.0, copy_paste=0.5, degrees=10.0, translate=0.2, scale=0.9)
+ >>> transforms = v8_transforms(dataset, imgsz=640, hyp=hyp)
+ >>> augmented_data = transforms(dataset[0])
+
+ >>> # With custom albumentations
+ >>> import albumentations as A
+ >>> augmentations = [A.Blur(p=0.01), A.CLAHE(p=0.01)]
+ >>> hyp.augmentations = augmentations
+ >>> transforms = v8_transforms(dataset, imgsz=640, hyp=hyp)
+ """
mosaic = Mosaic(dataset, imgsz=imgsz, p=hyp.mosaic)
affine = RandomPerspective(
degrees=hyp.degrees,
@@ -1171,6 +2463,28 @@ interpolation: str = "BILINEAR",
crop_fraction: float | None = None,
):
+ """Create a composition of image transforms for classification tasks.
+
+ This function generates a sequence of torchvision transforms suitable for preprocessing images for classification
+ models during evaluation or inference. The transforms include resizing, center cropping, conversion to tensor, and
+ normalization.
+
+ Args:
+ size (tuple[int, int] | int): The target size for the transformed image. If an int, it defines the shortest
+ edge. If a tuple, it defines (height, width).
+ mean (tuple[float, float, float]): Mean values for each RGB channel used in normalization.
+ std (tuple[float, float, float]): Standard deviation values for each RGB channel used in normalization.
+ interpolation (str): Interpolation method of either 'NEAREST', 'BILINEAR' or 'BICUBIC'.
+ crop_fraction (float | None): Deprecated, will be removed in a future version.
+
+ Returns:
+ (torchvision.transforms.Compose): A composition of torchvision transforms.
+
+ Examples:
+ >>> transforms = classify_transforms(size=224)
+ >>> img = Image.open("path/to/image.jpg")
+ >>> transformed_img = transforms(img)
+ """
import torchvision.transforms as T # scope for faster 'import ultralytics'
scale_size = size if isinstance(size, (tuple, list)) and len(size) == 2 else (size, size)
@@ -1208,6 +2522,34 @@ erasing: float = 0.0,
interpolation: str = "BILINEAR",
):
+ """Create a composition of image augmentation transforms for classification tasks.
+
+ This function generates a set of image transformations suitable for training classification models. It includes
+ options for resizing, flipping, color jittering, auto augmentation, and random erasing.
+
+ Args:
+ size (int): Target size for the image after transformations.
+ mean (tuple[float, float, float]): Mean values for each RGB channel used in normalization.
+ std (tuple[float, float, float]): Standard deviation values for each RGB channel used in normalization.
+ scale (tuple[float, float] | None): Range of the proportion of the original image area to crop.
+ ratio (tuple[float, float] | None): Range of aspect ratio for the cropped area.
+ hflip (float): Probability of horizontal flip.
+ vflip (float): Probability of vertical flip.
+ auto_augment (str | None): Auto augmentation policy. Can be 'randaugment', 'augmix', 'autoaugment' or None.
+ hsv_h (float): Image HSV-Hue augmentation factor.
+ hsv_s (float): Image HSV-Saturation augmentation factor.
+ hsv_v (float): Image HSV-Value augmentation factor.
+ force_color_jitter (bool): Whether to apply color jitter even if auto augment is enabled.
+ erasing (float): Probability of random erasing.
+ interpolation (str): Interpolation method of either 'NEAREST', 'BILINEAR' or 'BICUBIC'.
+
+ Returns:
+ (torchvision.transforms.Compose): A composition of image augmentation transforms.
+
+ Examples:
+ >>> transforms = classify_augmentations(size=224, auto_augment="randaugment")
+ >>> augmented_image = transforms(original_image)
+ """
# Transforms to apply if Albumentations not installed
import torchvision.transforms as T # scope for faster 'import ultralytics'
@@ -1268,14 +2610,65 @@
# NOTE: keep this class for backward compatibility
class ClassifyLetterBox:
+ """A class for resizing and padding images for classification tasks.
+
+ This class is designed to be part of a transformation pipeline, e.g., T.Compose([LetterBox(size), ToTensor()]). It
+ resizes and pads images to a specified size while maintaining the original aspect ratio.
+
+ Attributes:
+ h (int): Target height of the image.
+ w (int): Target width of the image.
+ auto (bool): If True, automatically calculates the short side using stride.
+ stride (int): The stride value, used when 'auto' is True.
+
+ Methods:
+ __call__: Apply the letterbox transformation to an input image.
+
+ Examples:
+ >>> transform = ClassifyLetterBox(size=(640, 640), auto=False, stride=32)
+ >>> img = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
+ >>> result = transform(img)
+ >>> print(result.shape)
+ (640, 640, 3)
+ """
def __init__(self, size: int | tuple[int, int] = (640, 640), auto: bool = False, stride: int = 32):
+ """Initialize the ClassifyLetterBox object for image preprocessing.
+
+ This class is designed to be part of a transformation pipeline for image classification tasks. It resizes and
+ pads images to a specified size while maintaining the original aspect ratio.
+
+ Args:
+ size (int | tuple[int, int]): Target size for the letterboxed image. If an int, a square image of (size,
+ size) is created. If a tuple, it should be (height, width).
+ auto (bool): If True, automatically calculates the short side based on stride.
+ stride (int): The stride value, used when 'auto' is True.
+ """
super().__init__()
self.h, self.w = (size, size) if isinstance(size, int) else size
self.auto = auto # pass max size integer, automatically solve for short side using stride
self.stride = stride # used with auto
def __call__(self, im: np.ndarray) -> np.ndarray:
+ """Resize and pad an image using the letterbox method.
+
+ This method resizes the input image to fit within the specified dimensions while maintaining its aspect ratio,
+ then pads the resized image to match the target size.
+
+ Args:
+ im (np.ndarray): Input image as a numpy array with shape (H, W, C).
+
+ Returns:
+ (np.ndarray): Resized and padded image as a numpy array with shape (hs, ws, 3), where hs and ws are the
+ target height and width respectively.
+
+ Examples:
+ >>> letterbox = ClassifyLetterBox(size=(640, 640))
+ >>> image = np.random.randint(0, 255, (720, 1280, 3), dtype=np.uint8)
+ >>> resized_image = letterbox(image)
+ >>> print(resized_image.shape)
+ (640, 640, 3)
+ """
imh, imw = im.shape[:2]
r = min(self.h / imh, self.w / imw) # ratio of new/old dimensions
h, w = round(imh * r), round(imw * r) # resized image dimensions
@@ -1292,12 +2685,57 @@
# NOTE: keep this class for backward compatibility
class CenterCrop:
+ """Apply center cropping to images for classification tasks.
+
+ This class performs center cropping on input images, resizing them to a specified size while maintaining the aspect
+ ratio. It is designed to be part of a transformation pipeline, e.g., T.Compose([CenterCrop(size), ToTensor()]).
+
+ Attributes:
+ h (int): Target height of the cropped image.
+ w (int): Target width of the cropped image.
+
+ Methods:
+ __call__: Apply the center crop transformation to an input image.
+
+ Examples:
+ >>> transform = CenterCrop(640)
+ >>> image = np.random.randint(0, 255, (1080, 1920, 3), dtype=np.uint8)
+ >>> cropped_image = transform(image)
+ >>> print(cropped_image.shape)
+ (640, 640, 3)
+ """
def __init__(self, size: int | tuple[int, int] = (640, 640)):
+ """Initialize the CenterCrop object for image preprocessing.
+
+ This class is designed to be part of a transformation pipeline, e.g., T.Compose([CenterCrop(size), ToTensor()]).
+ It performs a center crop on input images to a specified size.
+
+ Args:
+ size (int | tuple[int, int]): The desired output size of the crop. If size is an int, a square crop (size,
+ size) is made. If size is a sequence like (h, w), it is used as the output size.
+ """
super().__init__()
self.h, self.w = (size, size) if isinstance(size, int) else size
def __call__(self, im: Image.Image | np.ndarray) -> np.ndarray:
+ """Apply center cropping to an input image.
+
+ This method crops the largest centered square from the image and resizes it to the specified dimensions.
+
+ Args:
+ im (np.ndarray | PIL.Image.Image): The input image as a numpy array of shape (H, W, C) or a PIL Image
+ object.
+
+ Returns:
+ (np.ndarray): The center-cropped and resized image as a numpy array of shape (self.h, self.w, C).
+
+ Examples:
+ >>> transform = CenterCrop(size=224)
+ >>> image = np.random.randint(0, 255, (640, 480, 3), dtype=np.uint8)
+ >>> cropped_image = transform(image)
+ >>> assert cropped_image.shape == (224, 224, 3)
+ """
if isinstance(im, Image.Image): # convert from PIL to numpy array if required
im = np.asarray(im)
imh, imw = im.shape[:2]
@@ -1308,14 +2746,63 @@
# NOTE: keep this class for backward compatibility
class ToTensor:
+ """Convert an image from a numpy array to a PyTorch tensor.
+
+ This class is designed to be part of a transformation pipeline, e.g., T.Compose([LetterBox(size), ToTensor()]).
+
+ Attributes:
+ half (bool): If True, converts the image to half precision (float16).
+
+ Methods:
+ __call__: Apply the tensor conversion to an input image.
+
+ Examples:
+ >>> transform = ToTensor(half=True)
+ >>> img = np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8)
+ >>> tensor_img = transform(img)
+ >>> print(tensor_img.shape, tensor_img.dtype)
+ torch.Size([3, 640, 640]) torch.float16
+
+ Notes:
+ The input image is expected to be in BGR format with shape (H, W, C).
+ The output tensor will be in BGR format with shape (C, H, W), normalized to [0, 1].
+ """
def __init__(self, half: bool = False):
+ """Initialize the ToTensor object for converting images to PyTorch tensors.
+
+ This class is designed to be used as part of a transformation pipeline for image preprocessing in the
+ Ultralytics YOLO framework. It converts numpy arrays or PIL Images to PyTorch tensors, with an option for
+ half-precision (float16) conversion.
+
+ Args:
+ half (bool): If True, converts the tensor to half precision (float16).
+ """
super().__init__()
self.half = half
def __call__(self, im: np.ndarray) -> torch.Tensor:
+ """Transform an image from a numpy array to a PyTorch tensor.
+
+ This method converts the input image from a numpy array to a PyTorch tensor, applying optional half-precision
+ conversion and normalization. The image is transposed from HWC to CHW format.
+
+ Args:
+ im (np.ndarray): Input image as a numpy array with shape (H, W, C) in BGR order.
+
+ Returns:
+ (torch.Tensor): The transformed image as a PyTorch tensor in float32 or float16, normalized to [0, 1] with
+ shape (C, H, W) in BGR order.
+
+ Examples:
+ >>> transform = ToTensor(half=True)
+ >>> img = np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8)
+ >>> tensor_img = transform(img)
+ >>> print(tensor_img.shape, tensor_img.dtype)
+ torch.Size([3, 640, 640]) torch.float16
+ """
im = np.ascontiguousarray(im.transpose((2, 0, 1))) # HWC to CHW -> contiguous
im = torch.from_numpy(im) # to torch
im = im.half() if self.half else im.float() # uint8 to fp16/32
im /= 255.0 # 0-255 to 0.0-1.0
- return im
+ return im
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/data/augment.py |
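A minimal usage sketch of the classification preprocessing classes documented in the diff above (ClassifyLetterBox, CenterCrop, ToTensor). It assumes they are importable from ultralytics.data.augment, as the file path above suggests, and feeds a random uint8 image rather than a real dataset sample.

import numpy as np
from ultralytics.data.augment import CenterCrop, ClassifyLetterBox, ToTensor

img = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)  # dummy HWC image
letterboxed = ClassifyLetterBox(size=(224, 224))(img)  # resize keeping aspect ratio, then pad to 224x224
cropped = CenterCrop(224)(letterboxed)  # largest centered square, resized to 224x224
tensor = ToTensor(half=False)(cropped)  # HWC uint8 -> CHW float32 scaled to [0, 1]
print(tensor.shape, tensor.dtype)  # torch.Size([3, 224, 224]) torch.float32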
Write docstrings describing each step | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from ultralytics.hub.utils import HUB_API_ROOT, HUB_WEB_ROOT, PREFIX, request_with_credentials
from ultralytics.utils import IS_COLAB, LOGGER, SETTINGS, emojis
API_KEY_URL = f"{HUB_WEB_ROOT}/settings?tab=api+keys"
class Auth:
id_token = api_key = model_key = False
def __init__(self, api_key: str = "", verbose: bool = False):
# Split the input API key in case it contains a combined key_model and keep only the API key part
api_key = api_key.split("_", 1)[0]
# Set API key attribute as value passed or SETTINGS API key if none passed
self.api_key = api_key or SETTINGS.get("api_key", "")
# If an API key is provided
if self.api_key:
# If the provided API key matches the API key in the SETTINGS
if self.api_key == SETTINGS.get("api_key"):
# Log that the user is already logged in
if verbose:
LOGGER.info(f"{PREFIX}Authenticated ✅")
return
else:
# Attempt to authenticate with the provided API key
success = self.authenticate()
# If the API key is not provided and the environment is a Google Colab notebook
elif IS_COLAB:
# Attempt to authenticate using browser cookies
success = self.auth_with_cookies()
else:
# Request an API key
success = self.request_api_key()
# Update SETTINGS with the new API key after successful authentication
if success:
SETTINGS.update({"api_key": self.api_key})
# Log that the new login was successful
if verbose:
LOGGER.info(f"{PREFIX}New authentication successful ✅")
elif verbose:
LOGGER.info(f"{PREFIX}Get API key from {API_KEY_URL} and then run 'yolo login API_KEY'")
def request_api_key(self, max_attempts: int = 3) -> bool:
import getpass
for attempts in range(max_attempts):
LOGGER.info(f"{PREFIX}Login. Attempt {attempts + 1} of {max_attempts}")
input_key = getpass.getpass(f"Enter API key from {API_KEY_URL} ")
self.api_key = input_key.split("_", 1)[0] # remove model id if present
if self.authenticate():
return True
raise ConnectionError(emojis(f"{PREFIX}Failed to authenticate ❌"))
def authenticate(self) -> bool:
import requests # scoped as slow import
try:
if header := self.get_auth_header():
r = requests.post(f"{HUB_API_ROOT}/v1/auth", headers=header)
if not r.json().get("success", False):
raise ConnectionError("Unable to authenticate.")
return True
raise ConnectionError("User has not authenticated locally.")
except ConnectionError:
self.id_token = self.api_key = False # reset invalid
LOGGER.warning(f"{PREFIX}Invalid API key")
return False
def auth_with_cookies(self) -> bool:
if not IS_COLAB:
return False # Currently only works with Colab
try:
authn = request_with_credentials(f"{HUB_API_ROOT}/v1/auth/auto")
if authn.get("success", False):
self.id_token = authn.get("data", {}).get("idToken", None)
self.authenticate()
return True
raise ConnectionError("Unable to fetch browser authentication details.")
except ConnectionError:
self.id_token = False # reset invalid
return False
def get_auth_header(self):
if self.id_token:
return {"authorization": f"Bearer {self.id_token}"}
elif self.api_key:
return {"x-api-key": self.api_key} | --- +++ @@ -9,10 +9,44 @@
class Auth:
+ """Manages authentication processes including API key handling, cookie-based authentication, and header generation.
+
+ The class supports different methods of authentication:
+ 1. Directly using an API key.
+ 2. Authenticating using browser cookies (specifically in Google Colab).
+ 3. Prompting the user to enter an API key.
+
+ Attributes:
+ id_token (str | bool): Token used for identity verification, initialized as False.
+ api_key (str | bool): API key for authentication, initialized as False.
+ model_key (bool): Placeholder for model key, initialized as False.
+
+ Methods:
+ authenticate: Attempt to authenticate with the server using either id_token or API key.
+ auth_with_cookies: Attempt to fetch authentication via cookies and set id_token.
+ get_auth_header: Get the authentication header for making API requests.
+ request_api_key: Prompt the user to input their API key.
+
+ Examples:
+ Initialize Auth with an API key
+ >>> auth = Auth(api_key="your_api_key_here")
+
+ Initialize Auth without API key (will prompt for input)
+ >>> auth = Auth()
+ """
id_token = api_key = model_key = False
def __init__(self, api_key: str = "", verbose: bool = False):
+ """Initialize Auth class and authenticate user.
+
+ Handles API key validation, Google Colab authentication, and new key requests. Updates SETTINGS upon successful
+ authentication.
+
+ Args:
+ api_key (str): API key or combined key_id format.
+ verbose (bool): Enable verbose logging.
+ """
# Split the input API key in case it contains a combined key_model and keep only the API key part
api_key = api_key.split("_", 1)[0]
@@ -48,6 +82,14 @@ LOGGER.info(f"{PREFIX}Get API key from {API_KEY_URL} and then run 'yolo login API_KEY'")
def request_api_key(self, max_attempts: int = 3) -> bool:
+ """Prompt the user to input their API key.
+
+ Args:
+ max_attempts (int): Maximum number of authentication attempts.
+
+ Returns:
+ (bool): True if authentication is successful, False otherwise.
+ """
import getpass
for attempts in range(max_attempts):
@@ -59,6 +101,11 @@ raise ConnectionError(emojis(f"{PREFIX}Failed to authenticate ❌"))
def authenticate(self) -> bool:
+ """Attempt to authenticate with the server using either id_token or API key.
+
+ Returns:
+ (bool): True if authentication is successful, False otherwise.
+ """
import requests # scoped as slow import
try:
@@ -74,6 +121,13 @@ return False
def auth_with_cookies(self) -> bool:
+ """Attempt to fetch authentication via cookies and set id_token.
+
+ User must be logged in to HUB and running in a supported browser.
+
+ Returns:
+ (bool): True if authentication is successful, False otherwise.
+ """
if not IS_COLAB:
return False # Currently only works with Colab
try:
@@ -88,7 +142,12 @@ return False
def get_auth_header(self):
+ """Get the authentication header for making API requests.
+
+ Returns:
+ (dict | None): The authentication header if id_token or API key is set, None otherwise.
+ """
if self.id_token:
return {"authorization": f"Bearer {self.id_token}"}
elif self.api_key:
- return {"x-api-key": self.api_key}
+ return {"x-api-key": self.api_key}
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/hub/auth.py |
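The header-selection rule documented above for Auth.get_auth_header() can be checked in isolation; the helper below is a hypothetical re-implementation for illustration only, not part of ultralytics.hub.auth.

def build_auth_header(id_token=None, api_key=None):
    # Mirrors Auth.get_auth_header(): prefer a bearer id_token, fall back to an x-api-key, else None.
    if id_token:
        return {"authorization": f"Bearer {id_token}"}
    if api_key:
        return {"x-api-key": api_key}
    return None

assert build_auth_header(api_key="abc123") == {"x-api-key": "abc123"}
assert build_auth_header(id_token="tok")["authorization"] == "Bearer tok"
assert build_auth_header() is None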
Write clean docstrings for readability | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import asyncio
import hashlib
import json
import random
import shutil
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path
import cv2
import numpy as np
from PIL import Image
from ultralytics.utils import ASSETS_URL, DATASETS_DIR, LOGGER, NUM_THREADS, TQDM, YAML
from ultralytics.utils.checks import check_file
from ultralytics.utils.downloads import download, zip_directory
from ultralytics.utils.files import increment_path
def coco91_to_coco80_class() -> list[int]:
return [
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
None,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
None,
24,
25,
None,
None,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
None,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
None,
60,
None,
None,
61,
None,
62,
63,
64,
65,
66,
67,
68,
69,
70,
71,
72,
None,
73,
74,
75,
76,
77,
78,
79,
None,
]
def coco80_to_coco91_class() -> list[int]:
return [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
27,
28,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
63,
64,
65,
67,
70,
72,
73,
74,
75,
76,
77,
78,
79,
80,
81,
82,
84,
85,
86,
87,
88,
89,
90,
]
def convert_coco(
labels_dir: str = "../coco/annotations/",
save_dir: str = "coco_converted/",
use_segments: bool = False,
use_keypoints: bool = False,
cls91to80: bool = True,
lvis: bool = False,
):
# Create dataset directory
save_dir = increment_path(save_dir) # increment if save directory already exists
for p in save_dir / "labels", save_dir / "images":
p.mkdir(parents=True, exist_ok=True) # make dir
# Convert classes
coco80 = coco91_to_coco80_class()
# Import json
for json_file in sorted(Path(labels_dir).resolve().glob("*.json")):
lname = "" if lvis else json_file.stem.replace("instances_", "")
fn = Path(save_dir) / "labels" / lname # folder name
fn.mkdir(parents=True, exist_ok=True)
if lvis:
# NOTE: create folders for both train and val in advance,
# since LVIS val set contains images from COCO 2017 train in addition to the COCO 2017 val split.
(fn / "train2017").mkdir(parents=True, exist_ok=True)
(fn / "val2017").mkdir(parents=True, exist_ok=True)
with open(json_file, encoding="utf-8") as f:
data = json.load(f)
# Create image dict
images = {f"{x['id']:d}": x for x in data["images"]}
# Create image-annotations dict
annotations = defaultdict(list)
for ann in data["annotations"]:
annotations[ann["image_id"]].append(ann)
image_txt = []
# Write labels file
for img_id, anns in TQDM(annotations.items(), desc=f"Annotations {json_file}"):
img = images[f"{img_id:d}"]
h, w = img["height"], img["width"]
f = str(Path(img["coco_url"]).relative_to("http://images.cocodataset.org")) if lvis else img["file_name"]
if lvis:
image_txt.append(str(Path("./images") / f))
bboxes = []
segments = []
keypoints = []
for ann in anns:
if ann.get("iscrowd", False):
continue
# The COCO box format is [top left x, top left y, width, height]
box = np.array(ann["bbox"], dtype=np.float64)
box[:2] += box[2:] / 2 # xy top-left corner to center
box[[0, 2]] /= w # normalize x
box[[1, 3]] /= h # normalize y
if box[2] <= 0 or box[3] <= 0: # if w <= 0 or h <= 0
continue
cls = coco80[ann["category_id"] - 1] if cls91to80 else ann["category_id"] - 1 # class
box = [cls, *box.tolist()]
if box not in bboxes:
bboxes.append(box)
if use_segments and ann.get("segmentation") is not None:
if len(ann["segmentation"]) == 0:
segments.append([])
continue
elif len(ann["segmentation"]) > 1:
s = merge_multi_segment(ann["segmentation"])
s = (np.concatenate(s, axis=0) / np.array([w, h])).reshape(-1).tolist()
else:
s = [j for i in ann["segmentation"] for j in i] # all segments concatenated
s = (np.array(s).reshape(-1, 2) / np.array([w, h])).reshape(-1).tolist()
s = [cls, *s]
segments.append(s)
if use_keypoints and ann.get("keypoints") is not None:
keypoints.append(
box + (np.array(ann["keypoints"]).reshape(-1, 3) / np.array([w, h, 1])).reshape(-1).tolist()
)
# Write
with open((fn / f).with_suffix(".txt"), "a", encoding="utf-8") as file:
for i in range(len(bboxes)):
if use_keypoints:
line = (*(keypoints[i]),) # cls, box, keypoints
else:
line = (
*(segments[i] if use_segments and len(segments[i]) > 0 else bboxes[i]),
) # cls, box or segments
file.write(("%g " * len(line)).rstrip() % line + "\n")
if lvis:
filename = Path(save_dir) / json_file.name.replace("lvis_v1_", "").replace(".json", ".txt")
with open(filename, "a", encoding="utf-8") as f:
f.writelines(f"{line}\n" for line in image_txt)
LOGGER.info(f"{'LVIS' if lvis else 'COCO'} data converted successfully.\nResults saved to {save_dir.resolve()}")
def convert_segment_masks_to_yolo_seg(masks_dir: str, output_dir: str, classes: int):
pixel_to_class_mapping = {i + 1: i for i in range(classes)}
for mask_path in Path(masks_dir).iterdir():
if mask_path.suffix in {".png", ".jpg"}:
mask = cv2.imread(str(mask_path), cv2.IMREAD_GRAYSCALE) # Read the mask image in grayscale
img_height, img_width = mask.shape # Get image dimensions
LOGGER.info(f"Processing {mask_path} imgsz = {img_height} x {img_width}")
unique_values = np.unique(mask) # Get unique pixel values representing different classes
yolo_format_data = []
for value in unique_values:
if value == 0:
continue # Skip background
class_index = pixel_to_class_mapping.get(value, -1)
if class_index == -1:
LOGGER.warning(f"Unknown class for pixel value {value} in file {mask_path}, skipping.")
continue
# Create a binary mask for the current class and find contours
contours, _ = cv2.findContours(
(mask == value).astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
) # Find contours
for contour in contours:
if len(contour) >= 3: # YOLO requires at least 3 points for a valid segmentation
contour = contour.squeeze() # Remove single-dimensional entries
yolo_format = [class_index]
for point in contour:
# Normalize the coordinates
yolo_format.append(round(point[0] / img_width, 6)) # Rounding to 6 decimal places
yolo_format.append(round(point[1] / img_height, 6))
yolo_format_data.append(yolo_format)
# Save Ultralytics YOLO format data to file
output_path = Path(output_dir) / f"{mask_path.stem}.txt"
with open(output_path, "w", encoding="utf-8") as file:
for item in yolo_format_data:
line = " ".join(map(str, item))
file.write(line + "\n")
LOGGER.info(f"Processed and stored at {output_path} imgsz = {img_height} x {img_width}")
def convert_dota_to_yolo_obb(dota_root_path: str):
dota_root_path = Path(dota_root_path)
# Class names to indices mapping
class_mapping = {
"plane": 0,
"ship": 1,
"storage-tank": 2,
"baseball-diamond": 3,
"tennis-court": 4,
"basketball-court": 5,
"ground-track-field": 6,
"harbor": 7,
"bridge": 8,
"large-vehicle": 9,
"small-vehicle": 10,
"helicopter": 11,
"roundabout": 12,
"soccer-ball-field": 13,
"swimming-pool": 14,
"container-crane": 15,
"airport": 16,
"helipad": 17,
}
def convert_label(image_name: str, image_width: int, image_height: int, orig_label_dir: Path, save_dir: Path):
orig_label_path = orig_label_dir / f"{image_name}.txt"
save_path = save_dir / f"{image_name}.txt"
with orig_label_path.open("r") as f, save_path.open("w") as g:
lines = f.readlines()
for line in lines:
parts = line.strip().split()
if len(parts) < 9:
continue
class_name = parts[8]
class_idx = class_mapping[class_name]
coords = [float(p) for p in parts[:8]]
normalized_coords = [
coords[i] / image_width if i % 2 == 0 else coords[i] / image_height for i in range(8)
]
formatted_coords = [f"{coord:.6g}" for coord in normalized_coords]
g.write(f"{class_idx} {' '.join(formatted_coords)}\n")
for phase in {"train", "val"}:
image_dir = dota_root_path / "images" / phase
orig_label_dir = dota_root_path / "labels" / f"{phase}_original"
save_dir = dota_root_path / "labels" / phase
save_dir.mkdir(parents=True, exist_ok=True)
image_paths = list(image_dir.iterdir())
for image_path in TQDM(image_paths, desc=f"Processing {phase} images"):
if image_path.suffix != ".png":
continue
image_name_without_ext = image_path.stem
img = cv2.imread(str(image_path))
h, w = img.shape[:2]
convert_label(image_name_without_ext, w, h, orig_label_dir, save_dir)
def min_index(arr1: np.ndarray, arr2: np.ndarray):
dis = ((arr1[:, None, :] - arr2[None, :, :]) ** 2).sum(-1)
return np.unravel_index(np.argmin(dis, axis=None), dis.shape)
def merge_multi_segment(segments: list[list]):
s = []
segments = [np.array(i).reshape(-1, 2) for i in segments]
idx_list = [[] for _ in range(len(segments))]
# Record the indexes with min distance between each segment
for i in range(1, len(segments)):
idx1, idx2 = min_index(segments[i - 1], segments[i])
idx_list[i - 1].append(idx1)
idx_list[i].append(idx2)
# Use two round to connect all the segments
for k in range(2):
# Forward connection
if k == 0:
for i, idx in enumerate(idx_list):
# Middle segments have two indexes, reverse the index of middle segments
if len(idx) == 2 and idx[0] > idx[1]:
idx = idx[::-1]
segments[i] = segments[i][::-1, :]
segments[i] = np.roll(segments[i], -idx[0], axis=0)
segments[i] = np.concatenate([segments[i], segments[i][:1]])
# Deal with the first segment and the last one
if i in {0, len(idx_list) - 1}:
s.append(segments[i])
else:
idx = [0, idx[1] - idx[0]]
s.append(segments[i][idx[0] : idx[1] + 1])
else:
for i in range(len(idx_list) - 1, -1, -1):
if i not in {0, len(idx_list) - 1}:
idx = idx_list[i]
nidx = abs(idx[1] - idx[0])
s.append(segments[i][nidx:])
return s
def yolo_bbox2segment(im_dir: str | Path, save_dir: str | Path | None = None, sam_model: str = "sam_b.pt", device=None):
from ultralytics import SAM
from ultralytics.data import YOLODataset
from ultralytics.utils.ops import xywh2xyxy
# NOTE: add placeholder to pass class index check
dataset = YOLODataset(im_dir, data=dict(names=list(range(1000)), channels=3))
if len(dataset.labels[0]["segments"]) > 0: # if it's segment data
LOGGER.info("Segmentation labels detected, no need to generate new ones!")
return
LOGGER.info("Detection labels detected, generating segment labels by SAM model!")
sam_model = SAM(sam_model)
for label in TQDM(dataset.labels, total=len(dataset.labels), desc="Generating segment labels"):
h, w = label["shape"]
boxes = label["bboxes"]
if len(boxes) == 0: # skip empty labels
continue
boxes[:, [0, 2]] *= w
boxes[:, [1, 3]] *= h
im = cv2.imread(label["im_file"])
sam_results = sam_model(im, bboxes=xywh2xyxy(boxes), verbose=False, save=False, device=device)
label["segments"] = sam_results[0].masks.xyn
save_dir = Path(save_dir) if save_dir else Path(im_dir).parent / "labels-segment"
save_dir.mkdir(parents=True, exist_ok=True)
for label in dataset.labels:
texts = []
lb_name = Path(label["im_file"]).with_suffix(".txt").name
txt_file = save_dir / lb_name
cls = label["cls"]
for i, s in enumerate(label["segments"]):
if len(s) == 0:
continue
line = (int(cls[i]), *s.reshape(-1))
texts.append(("%g " * len(line)).rstrip() % line)
with open(txt_file, "a", encoding="utf-8") as f:
f.writelines(text + "\n" for text in texts)
LOGGER.info(f"Generated segment labels saved in {save_dir}")
def create_synthetic_coco_dataset():
def create_synthetic_image(image_file: Path):
if not image_file.exists():
size = (random.randint(480, 640), random.randint(480, 640))
Image.new(
"RGB",
size=size,
color=(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)),
).save(image_file)
# Download labels
dir = DATASETS_DIR / "coco"
download([f"{ASSETS_URL}/coco2017labels-segments.zip"], dir=dir.parent)
# Create synthetic images
shutil.rmtree(dir / "labels" / "test2017", ignore_errors=True) # Remove test2017 directory as not needed
with ThreadPoolExecutor(max_workers=NUM_THREADS) as executor:
for subset in {"train2017", "val2017"}:
subset_dir = dir / "images" / subset
subset_dir.mkdir(parents=True, exist_ok=True)
# Read image filenames from label list file
label_list_file = dir / f"{subset}.txt"
if label_list_file.exists():
with open(label_list_file, encoding="utf-8") as f:
image_files = [dir / line.strip() for line in f]
# Submit all tasks
futures = [executor.submit(create_synthetic_image, image_file) for image_file in image_files]
for _ in TQDM(as_completed(futures), total=len(futures), desc=f"Generating images for {subset}"):
pass # The actual work is done in the background
else:
LOGGER.warning(f"Labels file {label_list_file} does not exist. Skipping image creation for {subset}.")
LOGGER.info("Synthetic COCO dataset created successfully.")
def convert_to_multispectral(path: str | Path, n_channels: int = 10, replace: bool = False, zip: bool = False):
from scipy.interpolate import interp1d
from ultralytics.data.utils import IMG_FORMATS
path = Path(path)
if path.is_dir():
# Process directory
im_files = [f for ext in (IMG_FORMATS - {"tif", "tiff"}) for f in path.rglob(f"*.{ext}")]
for im_path in im_files:
try:
convert_to_multispectral(im_path, n_channels)
if replace:
im_path.unlink()
except Exception as e:
LOGGER.info(f"Error converting {im_path}: {e}")
if zip:
zip_directory(path)
else:
# Process a single image
output_path = path.with_suffix(".tiff")
img = cv2.cvtColor(cv2.imread(str(path)), cv2.COLOR_BGR2RGB)
# Interpolate all pixels at once
rgb_wavelengths = np.array([650, 510, 475]) # R, G, B wavelengths (nm)
target_wavelengths = np.linspace(450, 700, n_channels)
f = interp1d(rgb_wavelengths.T, img, kind="linear", bounds_error=False, fill_value="extrapolate")
multispectral = f(target_wavelengths)
cv2.imwritemulti(str(output_path), np.clip(multispectral, 0, 255).astype(np.uint8).transpose(2, 0, 1))
LOGGER.info(f"Converted {output_path}")
def _infer_ndjson_kpt_shape(image_records: list) -> list:
kpt_lengths = []
samples = [] # raw keypoint value slices for visibility checking
for record in image_records:
for ann in record.get("annotations", {}).get("pose", []):
kpt_len = len(ann) - 5 # subtract classId + bbox (4 values)
if kpt_len > 0:
kpt_lengths.append(kpt_len)
samples.append(ann[5:])
if len(kpt_lengths) >= 50:
break
if len(kpt_lengths) >= 50:
break
if not kpt_lengths or len(set(kpt_lengths)) != 1:
raise ValueError("Pose dataset missing required 'kpt_shape'. See https://docs.ultralytics.com/datasets/pose/")
n = kpt_lengths[0]
# Try dims=3: requires divisible by 3 and every 3rd value (visibility) in {0, 1, 2}
if n % 3 == 0 and all(v in (0, 1, 2) for s in samples for v in s[2::3]):
return [n // 3, 3]
# Try dims=2: only when NOT divisible by 3 (avoids misclassifying dims=3 data)
if n % 2 == 0 and n % 3 != 0:
return [n // 2, 2]
raise ValueError("Pose dataset missing required 'kpt_shape'. See https://docs.ultralytics.com/datasets/pose/")
async def convert_ndjson_to_yolo(ndjson_path: str | Path, output_path: str | Path | None = None) -> Path:
from ultralytics.utils.checks import check_requirements
check_requirements("aiohttp")
import aiohttp
ndjson_path = Path(check_file(ndjson_path))
output_path = Path(output_path or DATASETS_DIR)
with open(ndjson_path) as f:
lines = [json.loads(line.strip()) for line in f if line.strip()]
dataset_record, image_records = lines[0], lines[1:]
# Hash semantic content only (excludes signed URLs which change on every export)
_h = hashlib.sha256()
for r in lines:
_h.update(json.dumps({k: v for k, v in r.items() if k != "url"}, sort_keys=True).encode())
_hash = _h.hexdigest()[:16]
# Early exit if dataset content unchanged (hash stored in data.yaml)
dataset_dir = output_path / ndjson_path.stem
yaml_path = dataset_dir / "data.yaml"
if yaml_path.is_file():
try:
if YAML.load(yaml_path).get("hash") == _hash:
return yaml_path
except Exception:
pass
splits = {record["split"] for record in image_records}
# Check if this is a classification dataset
is_classification = dataset_record.get("task") == "classify"
class_names = {int(k): v for k, v in dataset_record.get("class_names", {}).items()}
len(class_names)
# Validate required fields before downloading images
task = dataset_record.get("task", "detect")
if not is_classification:
if "train" not in splits:
raise ValueError(f"Dataset missing required 'train' split. Found splits: {sorted(splits)}")
if "val" not in splits and "test" not in splits:
raise ValueError(f"Dataset missing required 'val' split. Found splits: {sorted(splits)}")
if task == "pose" and "kpt_shape" not in dataset_record:
dataset_record["kpt_shape"] = _infer_ndjson_kpt_shape(image_records)
# Check if dataset already exists (enables image reuse across split changes)
_reuse = dataset_dir.exists()
if _reuse:
yaml_path.unlink(missing_ok=True) # Invalidate hash before destructive ops (crash safety)
if not is_classification:
shutil.rmtree(dataset_dir / "labels", ignore_errors=True)
dataset_dir.mkdir(parents=True, exist_ok=True)
data_yaml = None
if not is_classification:
# Detection/segmentation/pose/obb: prepare YAML and create base structure
data_yaml = dict(dataset_record)
data_yaml["names"] = class_names
data_yaml.pop("class_names", None)
data_yaml.pop("type", None) # Remove NDJSON-specific fields
for split in sorted(splits):
(dataset_dir / "images" / split).mkdir(parents=True, exist_ok=True)
(dataset_dir / "labels" / split).mkdir(parents=True, exist_ok=True)
data_yaml[split] = f"images/{split}"
async def process_record(session, semaphore, record):
async with semaphore:
split, original_name = record["split"], record["file"]
annotations = record.get("annotations", {})
if is_classification:
# Classification: place image in {split}/{class_name}/ folder
class_ids = annotations.get("classification", [])
class_id = class_ids[0] if class_ids else 0
class_name = class_names.get(class_id, str(class_id))
image_path = dataset_dir / split / class_name / original_name
else:
# Detection: write label file and place image in images/{split}/
image_path = dataset_dir / "images" / split / original_name
label_path = dataset_dir / "labels" / split / f"{Path(original_name).stem}.txt"
lines_to_write = []
for key in annotations.keys():
lines_to_write = [" ".join(map(str, item)) for item in annotations[key]]
break
label_path.write_text("\n".join(lines_to_write) + "\n" if lines_to_write else "")
# Reuse existing image from another split dir (avoids redownload on resplit) or download
if not image_path.exists():
if _reuse:
for s in ("train", "val", "test"):
if s == split:
continue
candidate = (
(dataset_dir / s / class_name / original_name)
if is_classification
else (dataset_dir / "images" / s / original_name)
)
if candidate.exists():
image_path.parent.mkdir(parents=True, exist_ok=True)
candidate.rename(image_path)
break
if not image_path.exists() and (http_url := record.get("url")):
image_path.parent.mkdir(parents=True, exist_ok=True)
# Retry with exponential backoff (3 attempts, sleeping 1s then 2s between retries)
for attempt in range(3):
try:
async with session.get(http_url, timeout=aiohttp.ClientTimeout(total=30)) as response:
response.raise_for_status()
image_path.write_bytes(await response.read())
return True
except Exception as e:
if attempt < 2: # Don't sleep after last attempt
await asyncio.sleep(2**attempt) # 1s, 2s backoff
else:
LOGGER.warning(f"Failed to download {http_url} after 3 attempts: {e}")
return False
return True
# Process all images with async downloads (limit connections for small datasets)
semaphore = asyncio.Semaphore(min(128, len(image_records)))
async with aiohttp.ClientSession() as session:
pbar = TQDM(
total=len(image_records),
desc=f"Converting {ndjson_path.name} → {dataset_dir} ({len(image_records)} images)",
)
async def tracked_process(record):
result = await process_record(session, semaphore, record)
pbar.update(1)
return result
results = await asyncio.gather(*[tracked_process(record) for record in image_records])
pbar.close()
# Validate images were downloaded successfully
success_count = sum(1 for r in results if r)
if success_count == 0:
raise RuntimeError(f"Failed to download any images from {ndjson_path}. Check network connection and URLs.")
if success_count < len(image_records):
LOGGER.warning(f"Downloaded {success_count}/{len(image_records)} images from {ndjson_path}")
# Remove orphaned images no longer in the dataset (prevents stale background images in training)
if _reuse:
expected_paths = set()
for r in image_records:
s, name = r["split"], r["file"]
if is_classification:
ann = r.get("annotations", {})
cids = ann.get("classification", [])
cid = cids[0] if cids else 0
expected_paths.add(dataset_dir / s / class_names.get(cid, str(cid)) / name)
else:
expected_paths.add(dataset_dir / "images" / s / name)
img_root = dataset_dir if is_classification else (dataset_dir / "images")
for p in img_root.rglob("*"):
if p.is_file() and p not in expected_paths:
p.unlink()
if is_classification:
# Classification: return dataset directory (check_cls_dataset expects a directory path)
return dataset_dir
else:
# Detection: write data.yaml with hash for future change detection
data_yaml["hash"] = _hash
YAML.save(yaml_path, data_yaml)
return yaml_path | --- +++ @@ -22,6 +22,12 @@
def coco91_to_coco80_class() -> list[int]:
+ """Convert 91-index COCO class IDs to 80-index COCO class IDs.
+
+ Returns:
+ (list[int | None]): A list of 91 elements where the index represents the 91-index class ID and the value is the
+ corresponding 80-index class ID, or None if there is no mapping.
+ """
return [
0,
1,
@@ -118,6 +124,25 @@
def coco80_to_coco91_class() -> list[int]:
+ r"""Convert 80-index (val2014) to 91-index (paper).
+
+ Returns:
+ (list[int]): A list of 80 class IDs where each value is the corresponding 91-index class ID.
+
+ Examples:
+ >>> import numpy as np
+ >>> a = np.loadtxt("data/coco.names", dtype="str", delimiter="\n")
+ >>> b = np.loadtxt("data/coco_paper.names", dtype="str", delimiter="\n")
+
+ Convert the darknet to COCO format
+ >>> x1 = [list(a[i] == b).index(True) + 1 for i in range(80)]
+
+ Convert the COCO to darknet format
+ >>> x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)]
+
+ References:
+ https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
+ """
return [
1,
2,
@@ -210,6 +235,25 @@ cls91to80: bool = True,
lvis: bool = False,
):
+ """Convert COCO dataset annotations to a YOLO annotation format suitable for training YOLO models.
+
+ Args:
+ labels_dir (str, optional): Path to directory containing COCO dataset annotation files.
+ save_dir (str, optional): Path to directory to save results to.
+ use_segments (bool, optional): Whether to include segmentation masks in the output.
+ use_keypoints (bool, optional): Whether to include keypoint annotations in the output.
+ cls91to80 (bool, optional): Whether to map 91 COCO class IDs to the corresponding 80 COCO class IDs.
+ lvis (bool, optional): Whether to convert data in lvis dataset way.
+
+ Examples:
+ >>> from ultralytics.data.converter import convert_coco
+
+ Convert COCO annotations to YOLO format
+ >>> convert_coco("coco/annotations/", use_segments=True, use_keypoints=False, cls91to80=False)
+
+ Convert LVIS annotations to YOLO format
+ >>> convert_coco("lvis/annotations/", use_segments=True, use_keypoints=False, cls91to80=False, lvis=True)
+ """
# Create dataset directory
save_dir = increment_path(save_dir) # increment if save directory already exists
for p in save_dir / "labels", save_dir / "images":
@@ -302,6 +346,39 @@
def convert_segment_masks_to_yolo_seg(masks_dir: str, output_dir: str, classes: int):
+ """Convert a dataset of segmentation mask images to the YOLO segmentation format.
+
+ This function takes the directory containing the binary format mask images and converts them into YOLO segmentation
+ format. The converted masks are saved in the specified output directory.
+
+ Args:
+ masks_dir (str): The path to the directory where all mask images (png, jpg) are stored.
+ output_dir (str): The path to the directory where the converted YOLO segmentation masks will be stored.
+ classes (int): Total number of classes in the dataset, e.g., 80 for COCO.
+
+ Examples:
+ >>> from ultralytics.data.converter import convert_segment_masks_to_yolo_seg
+
+ The classes here is the total classes in the dataset, for COCO dataset we have 80 classes
+ >>> convert_segment_masks_to_yolo_seg("path/to/masks_directory", "path/to/output/directory", classes=80)
+
+ Notes:
+ The expected directory structure for the masks is:
+
+ - masks
+ ├─ mask_image_01.png or mask_image_01.jpg
+ ├─ mask_image_02.png or mask_image_02.jpg
+ ├─ mask_image_03.png or mask_image_03.jpg
+ └─ mask_image_04.png or mask_image_04.jpg
+
+ After execution, the labels will be organized in the following structure:
+
+ - output_dir
+ ├─ mask_yolo_01.txt
+ ├─ mask_yolo_02.txt
+ ├─ mask_yolo_03.txt
+ └─ mask_yolo_04.txt
+ """
pixel_to_class_mapping = {i + 1: i for i in range(classes)}
for mask_path in Path(masks_dir).iterdir():
if mask_path.suffix in {".png", ".jpg"}:
@@ -344,6 +421,36 @@
def convert_dota_to_yolo_obb(dota_root_path: str):
+ """Convert DOTA dataset annotations to YOLO OBB (Oriented Bounding Box) format.
+
+ The function processes images in the 'train' and 'val' folders of the DOTA dataset. For each image, it reads the
+ associated label from the original labels directory and writes new labels in YOLO OBB format to a new directory.
+
+ Args:
+ dota_root_path (str): The root directory path of the DOTA dataset.
+
+ Examples:
+ >>> from ultralytics.data.converter import convert_dota_to_yolo_obb
+ >>> convert_dota_to_yolo_obb("path/to/DOTA")
+
+ Notes:
+ The directory structure assumed for the DOTA dataset:
+
+ - DOTA
+ ├─ images
+ │ ├─ train
+ │ └─ val
+ └─ labels
+ ├─ train_original
+ └─ val_original
+
+ After execution, the function will organize the labels into:
+
+ - DOTA
+ └─ labels
+ ├─ train
+ └─ val
+ """
dota_root_path = Path(dota_root_path)
# Class names to indices mapping
@@ -369,6 +476,7 @@ }
def convert_label(image_name: str, image_width: int, image_height: int, orig_label_dir: Path, save_dir: Path):
+ """Convert a single image's DOTA annotation to YOLO OBB format and save it to a specified directory."""
orig_label_path = orig_label_dir / f"{image_name}.txt"
save_path = save_dir / f"{image_name}.txt"
@@ -405,11 +513,33 @@
def min_index(arr1: np.ndarray, arr2: np.ndarray):
+ """Find a pair of indexes with the shortest distance between two arrays of 2D points.
+
+ Args:
+ arr1 (np.ndarray): A NumPy array of shape (N, 2) representing N 2D points.
+ arr2 (np.ndarray): A NumPy array of shape (M, 2) representing M 2D points.
+
+ Returns:
+ (tuple[int, int]): A tuple (idx1, idx2) where idx1 is the index in arr1 and idx2 is the index in arr2 of the
+ pair with the shortest distance.
+ """
dis = ((arr1[:, None, :] - arr2[None, :, :]) ** 2).sum(-1)
return np.unravel_index(np.argmin(dis, axis=None), dis.shape)
def merge_multi_segment(segments: list[list]):
+ """Merge multiple segments into one list by connecting the coordinates with the minimum distance between each
+ segment.
+
+ This function connects these coordinates with a thin line to merge all segments into one.
+
+ Args:
+ segments (list[list]): Original segmentations in COCO's JSON file. Each element is a list of coordinates, like
+ [segmentation1, segmentation2,...].
+
+ Returns:
+ (list[np.ndarray]): A list of connected segments represented as NumPy arrays.
+ """
s = []
segments = [np.array(i).reshape(-1, 2) for i in segments]
idx_list = [[] for _ in range(len(segments))]
@@ -449,6 +579,29 @@
def yolo_bbox2segment(im_dir: str | Path, save_dir: str | Path | None = None, sam_model: str = "sam_b.pt", device=None):
+ """Convert existing object detection dataset (bounding boxes) to segmentation dataset in YOLO format.
+
+ Generates segmentation data using SAM auto-annotator as needed.
+
+ Args:
+ im_dir (str | Path): Path to image directory to convert.
+ save_dir (str | Path, optional): Path to save the generated labels, labels will be saved into `labels-segment`
+ in the same directory level of `im_dir` if save_dir is None.
+ sam_model (str): Segmentation model to use for intermediate segmentation data.
+ device (int | str, optional): The specific device to run SAM models.
+
+ Notes:
+ The input directory structure assumed for dataset:
+
+ - im_dir
+ ├─ 001.jpg
+ ├─ ...
+ └─ NNN.jpg
+ - labels
+ ├─ 001.txt
+ ├─ ...
+ └─ NNN.txt
+ """
from ultralytics import SAM
from ultralytics.data import YOLODataset
from ultralytics.utils.ops import xywh2xyxy
@@ -490,8 +643,25 @@
def create_synthetic_coco_dataset():
+ """Create a synthetic COCO dataset with random images based on filenames from label lists.
+
+ This function downloads COCO labels, reads image filenames from label list files, creates synthetic images for
+ train2017 and val2017 subsets, and organizes them in the COCO dataset structure. It uses multithreading to generate
+ images efficiently.
+
+ Examples:
+ >>> from ultralytics.data.converter import create_synthetic_coco_dataset
+ >>> create_synthetic_coco_dataset()
+
+ Notes:
+ - Requires internet connection to download label files.
+ - Generates random RGB images of varying sizes (480x480 to 640x640 pixels).
+ - Existing test2017 directory is removed as it's not needed.
+ - Reads image filenames from train2017.txt and val2017.txt files.
+ """
def create_synthetic_image(image_file: Path):
+ """Generate a synthetic image with random size and color for dataset augmentation or testing purposes."""
if not image_file.exists():
size = (random.randint(480, 640), random.randint(480, 640))
Image.new(
@@ -528,6 +698,24 @@
def convert_to_multispectral(path: str | Path, n_channels: int = 10, replace: bool = False, zip: bool = False):
+ """Convert RGB images to multispectral images by interpolating across wavelength bands.
+
+ This function takes RGB images and interpolates them to create multispectral images with a specified number of
+ channels. It can process either a single image or a directory of images.
+
+ Args:
+ path (str | Path): Path to an image file or directory containing images to convert.
+ n_channels (int): Number of spectral channels to generate in the output image.
+ replace (bool): Whether to replace the original image file with the converted one.
+ zip (bool): Whether to zip the converted images into a zip file.
+
+ Examples:
+ Convert a single image
+ >>> convert_to_multispectral("path/to/image.jpg", n_channels=10)
+
+ Convert a dataset
+ >>> convert_to_multispectral("coco8", n_channels=10)
+ """
from scipy.interpolate import interp1d
from ultralytics.data.utils import IMG_FORMATS
@@ -561,6 +749,14 @@
def _infer_ndjson_kpt_shape(image_records: list) -> list:
+ """Infer kpt_shape [num_keypoints, dims] from NDJSON pose annotations.
+
+ Scans up to 50 pose annotations across image records. Annotation format is [classId, cx, cy, w, h, kp1_x, kp1_y,
+ kp1_vis, ...] so keypoint values start at index 5.
+
+ Tries dims=3 first (x, y, visibility) with visibility validation ({0, 1, 2}), then falls back to dims=2 (x, y only)
+ when values are unambiguously not divisible by 3.
+ """
kpt_lengths = []
samples = [] # raw keypoint value slices for visibility checking
for record in image_records:
@@ -591,6 +787,38 @@
async def convert_ndjson_to_yolo(ndjson_path: str | Path, output_path: str | Path | None = None) -> Path:
+ """Convert NDJSON dataset format to Ultralytics YOLO dataset structure.
+
+ This function converts datasets stored in NDJSON (Newline Delimited JSON) format to the standard YOLO format. For
+ detection/segmentation/pose/obb tasks, it creates separate directories for images and labels. For classification
+ tasks, it creates the ImageNet-style {split}/{class_name}/ folder structure. It supports parallel processing for
+ efficient conversion of large datasets and can download images from URLs.
+
+ The NDJSON format consists of:
+ - First line: Dataset metadata with class names, task type, and configuration
+ - Subsequent lines: Individual image records with annotations and optional URLs
+
+ Args:
+ ndjson_path (str | Path): Path to the input NDJSON file containing dataset information.
+ output_path (str | Path | None, optional): Directory where the converted YOLO dataset will be saved. If None,
+ uses the DATASETS_DIR directory. Defaults to None.
+
+ Returns:
+ (Path): Path to the generated data.yaml file (detection) or dataset directory (classification).
+
+ Examples:
+ Convert a local NDJSON file:
+ >>> yaml_path = await convert_ndjson_to_yolo("dataset.ndjson")
+ >>> print(f"Dataset converted to: {yaml_path}")
+
+ Convert with custom output directory:
+ >>> yaml_path = await convert_ndjson_to_yolo("dataset.ndjson", output_path="./converted_datasets")
+
+ Use with YOLO training:
+ >>> from ultralytics import YOLO
+ >>> model = YOLO("yolo26n.pt")
+ >>> model.train(data="https://github.com/ultralytics/assets/releases/download/v0.0.0/coco8-ndjson.ndjson")
+ """
from ultralytics.utils.checks import check_requirements
check_requirements("aiohttp")
@@ -655,6 +883,7 @@ data_yaml[split] = f"images/{split}"
async def process_record(session, semaphore, record):
+ """Process single image record with async session."""
async with semaphore:
split, original_name = record["split"], record["file"]
annotations = record.get("annotations", {})
@@ -754,4 +983,4 @@ # Detection: write data.yaml with hash for future change detection
data_yaml["hash"] = _hash
YAML.save(yaml_path, data_yaml)
- return yaml_path
+ return yaml_path
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/data/converter.py |
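The convert_to_multispectral docstring in the diff above describes interpolating the three RGB channels across wavelength bands with scipy's interp1d. A minimal sketch of that idea, assuming nominal band centers of 450/550/650 nm for B/G/R (illustrative values, not constants taken from the converter):

# Minimal sketch: interpolate a BGR image to n spectral channels with scipy.
# The 450/550/650 nm band centers are illustrative assumptions, not the
# converter's actual constants.
import numpy as np
from scipy.interpolate import interp1d

def rgb_to_multispectral(img_bgr: np.ndarray, n_channels: int = 10) -> np.ndarray:
    """Interpolate an (H, W, 3) BGR image to an (H, W, n_channels) cube."""
    src_wavelengths = np.array([450.0, 550.0, 650.0])  # assumed B, G, R band centers
    dst_wavelengths = np.linspace(450.0, 650.0, n_channels)
    f = interp1d(src_wavelengths, img_bgr, axis=2, kind="linear")
    return f(dst_wavelengths).astype(img_bgr.dtype)

# Usage: expand a random 640x640 BGR image to 10 bands
cube = rgb_to_multispectral(np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8))
print(cube.shape)  # (640, 640, 10)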
Generate docstrings with parameter types | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import math
import torch
from torch import Tensor, nn
from ultralytics.nn.modules import MLPBlock
class TwoWayTransformer(nn.Module):
def __init__(
self,
depth: int,
embedding_dim: int,
num_heads: int,
mlp_dim: int,
activation: type[nn.Module] = nn.ReLU,
attention_downsample_rate: int = 2,
) -> None:
super().__init__()
self.depth = depth
self.embedding_dim = embedding_dim
self.num_heads = num_heads
self.mlp_dim = mlp_dim
self.layers = nn.ModuleList()
for i in range(depth):
self.layers.append(
TwoWayAttentionBlock(
embedding_dim=embedding_dim,
num_heads=num_heads,
mlp_dim=mlp_dim,
activation=activation,
attention_downsample_rate=attention_downsample_rate,
skip_first_layer_pe=(i == 0),
)
)
self.final_attn_token_to_image = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate)
self.norm_final_attn = nn.LayerNorm(embedding_dim)
def forward(
self,
image_embedding: torch.Tensor,
image_pe: torch.Tensor,
point_embedding: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor]:
# BxCxHxW -> BxHWxC == B x N_image_tokens x C
image_embedding = image_embedding.flatten(2).permute(0, 2, 1)
image_pe = image_pe.flatten(2).permute(0, 2, 1)
# Prepare queries
queries = point_embedding
keys = image_embedding
# Apply transformer blocks and final layernorm
for layer in self.layers:
queries, keys = layer(
queries=queries,
keys=keys,
query_pe=point_embedding,
key_pe=image_pe,
)
# Apply the final attention layer from the points to the image
q = queries + point_embedding
k = keys + image_pe
attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)
queries = queries + attn_out
queries = self.norm_final_attn(queries)
return queries, keys
class TwoWayAttentionBlock(nn.Module):
def __init__(
self,
embedding_dim: int,
num_heads: int,
mlp_dim: int = 2048,
activation: type[nn.Module] = nn.ReLU,
attention_downsample_rate: int = 2,
skip_first_layer_pe: bool = False,
) -> None:
super().__init__()
self.self_attn = Attention(embedding_dim, num_heads)
self.norm1 = nn.LayerNorm(embedding_dim)
self.cross_attn_token_to_image = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate)
self.norm2 = nn.LayerNorm(embedding_dim)
self.mlp = MLPBlock(embedding_dim, mlp_dim, activation)
self.norm3 = nn.LayerNorm(embedding_dim)
self.norm4 = nn.LayerNorm(embedding_dim)
self.cross_attn_image_to_token = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate)
self.skip_first_layer_pe = skip_first_layer_pe
def forward(
self, queries: torch.Tensor, keys: torch.Tensor, query_pe: torch.Tensor, key_pe: torch.Tensor
) -> tuple[torch.Tensor, torch.Tensor]:
# Self attention block
if self.skip_first_layer_pe:
queries = self.self_attn(q=queries, k=queries, v=queries)
else:
q = queries + query_pe
attn_out = self.self_attn(q=q, k=q, v=queries)
queries = queries + attn_out
queries = self.norm1(queries)
# Cross attention block, tokens attending to image embedding
q = queries + query_pe
k = keys + key_pe
attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys)
queries = queries + attn_out
queries = self.norm2(queries)
# MLP block
mlp_out = self.mlp(queries)
queries = queries + mlp_out
queries = self.norm3(queries)
# Cross attention block, image embedding attending to tokens
q = queries + query_pe
k = keys + key_pe
attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries)
keys = keys + attn_out
keys = self.norm4(keys)
return queries, keys
class Attention(nn.Module):
def __init__(
self,
embedding_dim: int,
num_heads: int,
downsample_rate: int = 1,
kv_in_dim: int | None = None,
) -> None:
super().__init__()
self.embedding_dim = embedding_dim
self.kv_in_dim = kv_in_dim if kv_in_dim is not None else embedding_dim
self.internal_dim = embedding_dim // downsample_rate
self.num_heads = num_heads
assert self.internal_dim % num_heads == 0, "num_heads must divide internal_dim (embedding_dim // downsample_rate)."
self.q_proj = nn.Linear(embedding_dim, self.internal_dim)
self.k_proj = nn.Linear(self.kv_in_dim, self.internal_dim)
self.v_proj = nn.Linear(self.kv_in_dim, self.internal_dim)
self.out_proj = nn.Linear(self.internal_dim, embedding_dim)
@staticmethod
def _separate_heads(x: torch.Tensor, num_heads: int) -> torch.Tensor:
b, n, c = x.shape
x = x.reshape(b, n, num_heads, c // num_heads)
return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head
@staticmethod
def _recombine_heads(x: Tensor) -> Tensor:
b, n_heads, n_tokens, c_per_head = x.shape
x = x.transpose(1, 2)
return x.reshape(b, n_tokens, n_heads * c_per_head) # B x N_tokens x C
def forward(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
# Input projections
q = self.q_proj(q)
k = self.k_proj(k)
v = self.v_proj(v)
# Separate into heads
q = self._separate_heads(q, self.num_heads)
k = self._separate_heads(k, self.num_heads)
v = self._separate_heads(v, self.num_heads)
# Attention
_, _, _, c_per_head = q.shape
attn = q @ k.permute(0, 1, 3, 2) # B x N_heads x N_tokens x N_tokens
attn = attn / math.sqrt(c_per_head)
attn = torch.softmax(attn, dim=-1)
# Get output
out = attn @ v
out = self._recombine_heads(out)
return self.out_proj(out) | --- +++ @@ -11,6 +11,31 @@
class TwoWayTransformer(nn.Module):
+ """A Two-Way Transformer module for simultaneous attention to image and query points.
+
+ This class implements a specialized transformer decoder that attends to an input image using queries with supplied
+ positional embeddings. It's useful for tasks like object detection, image segmentation, and point cloud processing.
+
+ Attributes:
+ depth (int): Number of layers in the transformer.
+ embedding_dim (int): Channel dimension for input embeddings.
+ num_heads (int): Number of heads for multihead attention.
+ mlp_dim (int): Internal channel dimension for the MLP block.
+ layers (nn.ModuleList): List of TwoWayAttentionBlock layers composing the transformer.
+ final_attn_token_to_image (Attention): Final attention layer from queries to image.
+ norm_final_attn (nn.LayerNorm): Layer normalization applied to final queries.
+
+ Methods:
+ forward: Process image and point embeddings through the transformer.
+
+ Examples:
+ >>> transformer = TwoWayTransformer(depth=6, embedding_dim=256, num_heads=8, mlp_dim=2048)
+ >>> image_embedding = torch.randn(1, 256, 32, 32)
+ >>> image_pe = torch.randn(1, 256, 32, 32)
+ >>> point_embedding = torch.randn(1, 100, 256)
+ >>> output_queries, output_image = transformer(image_embedding, image_pe, point_embedding)
+ >>> print(output_queries.shape, output_image.shape)
+ """
def __init__(
self,
@@ -21,6 +46,16 @@ activation: type[nn.Module] = nn.ReLU,
attention_downsample_rate: int = 2,
) -> None:
+ """Initialize a Two-Way Transformer for simultaneous attention to image and query points.
+
+ Args:
+ depth (int): Number of layers in the transformer.
+ embedding_dim (int): Channel dimension for input embeddings.
+ num_heads (int): Number of heads for multihead attention. Must divide embedding_dim.
+ mlp_dim (int): Internal channel dimension for the MLP block.
+ activation (type[nn.Module], optional): Activation function to use in the MLP block.
+ attention_downsample_rate (int, optional): Downsampling rate for attention mechanism.
+ """
super().__init__()
self.depth = depth
self.embedding_dim = embedding_dim
@@ -49,6 +84,17 @@ image_pe: torch.Tensor,
point_embedding: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor]:
+ """Process image and point embeddings through the Two-Way Transformer.
+
+ Args:
+ image_embedding (torch.Tensor): Image to attend to, with shape (B, embedding_dim, H, W).
+ image_pe (torch.Tensor): Positional encoding to add to the image, with same shape as image_embedding.
+ point_embedding (torch.Tensor): Embedding to add to query points, with shape (B, N_points, embedding_dim).
+
+ Returns:
+ queries (torch.Tensor): Processed point embeddings with shape (B, N_points, embedding_dim).
+ keys (torch.Tensor): Processed image embeddings with shape (B, H*W, embedding_dim).
+ """
# BxCxHxW -> BxHWxC == B x N_image_tokens x C
image_embedding = image_embedding.flatten(2).permute(0, 2, 1)
image_pe = image_pe.flatten(2).permute(0, 2, 1)
@@ -77,6 +123,35 @@
class TwoWayAttentionBlock(nn.Module):
+ """A two-way attention block for simultaneous attention to image and query points.
+
+ This class implements a specialized transformer block with four main layers: self-attention on sparse inputs,
+ cross-attention of sparse inputs to dense inputs, MLP block on sparse inputs, and cross-attention of dense inputs to
+ sparse inputs.
+
+ Attributes:
+ self_attn (Attention): Self-attention layer for queries.
+ norm1 (nn.LayerNorm): Layer normalization after self-attention.
+ cross_attn_token_to_image (Attention): Cross-attention layer from queries to keys.
+ norm2 (nn.LayerNorm): Layer normalization after token-to-image attention.
+ mlp (MLPBlock): MLP block for transforming query embeddings.
+ norm3 (nn.LayerNorm): Layer normalization after MLP block.
+ norm4 (nn.LayerNorm): Layer normalization after image-to-token attention.
+ cross_attn_image_to_token (Attention): Cross-attention layer from keys to queries.
+ skip_first_layer_pe (bool): Whether to skip positional encoding in the first layer.
+
+ Methods:
+ forward: Apply self-attention and cross-attention to queries and keys.
+
+ Examples:
+ >>> embedding_dim, num_heads = 256, 8
+ >>> block = TwoWayAttentionBlock(embedding_dim, num_heads)
+ >>> queries = torch.randn(1, 100, embedding_dim)
+ >>> keys = torch.randn(1, 1000, embedding_dim)
+ >>> query_pe = torch.randn(1, 100, embedding_dim)
+ >>> key_pe = torch.randn(1, 1000, embedding_dim)
+ >>> processed_queries, processed_keys = block(queries, keys, query_pe, key_pe)
+ """
def __init__(
self,
@@ -87,6 +162,20 @@ attention_downsample_rate: int = 2,
skip_first_layer_pe: bool = False,
) -> None:
+ """Initialize a TwoWayAttentionBlock for simultaneous attention to image and query points.
+
+ This block implements a specialized transformer layer with four main components: self-attention on sparse
+ inputs, cross-attention of sparse inputs to dense inputs, MLP block on sparse inputs, and cross-attention of
+ dense inputs to sparse inputs.
+
+ Args:
+ embedding_dim (int): Channel dimension of the embeddings.
+ num_heads (int): Number of attention heads in the attention layers.
+ mlp_dim (int, optional): Hidden dimension of the MLP block.
+ activation (type[nn.Module], optional): Activation function for the MLP block.
+ attention_downsample_rate (int, optional): Downsampling rate for the attention mechanism.
+ skip_first_layer_pe (bool, optional): Whether to skip positional encoding in the first layer.
+ """
super().__init__()
self.self_attn = Attention(embedding_dim, num_heads)
self.norm1 = nn.LayerNorm(embedding_dim)
@@ -105,6 +194,18 @@ def forward(
self, queries: torch.Tensor, keys: torch.Tensor, query_pe: torch.Tensor, key_pe: torch.Tensor
) -> tuple[torch.Tensor, torch.Tensor]:
+ """Apply two-way attention to process query and key embeddings in a transformer block.
+
+ Args:
+ queries (torch.Tensor): Query embeddings with shape (B, N_queries, embedding_dim).
+ keys (torch.Tensor): Key embeddings with shape (B, N_keys, embedding_dim).
+ query_pe (torch.Tensor): Positional encodings for queries with same shape as queries.
+ key_pe (torch.Tensor): Positional encodings for keys with same shape as keys.
+
+ Returns:
+ queries (torch.Tensor): Processed query embeddings with shape (B, N_queries, embedding_dim).
+ keys (torch.Tensor): Processed key embeddings with shape (B, N_keys, embedding_dim).
+ """
# Self attention block
if self.skip_first_layer_pe:
queries = self.self_attn(q=queries, k=queries, v=queries)
@@ -137,6 +238,34 @@
class Attention(nn.Module):
+ """An attention layer with downscaling capability for embedding size after projection.
+
+ This class implements a multi-head attention mechanism with the option to downsample the internal dimension of
+ queries, keys, and values.
+
+ Attributes:
+ embedding_dim (int): Dimensionality of input embeddings.
+ kv_in_dim (int): Dimensionality of key and value inputs.
+ internal_dim (int): Internal dimension after downsampling.
+ num_heads (int): Number of attention heads.
+ q_proj (nn.Linear): Linear projection for queries.
+ k_proj (nn.Linear): Linear projection for keys.
+ v_proj (nn.Linear): Linear projection for values.
+ out_proj (nn.Linear): Linear projection for output.
+
+ Methods:
+ _separate_heads: Separate input tensor into attention heads.
+ _recombine_heads: Recombine separated attention heads.
+ forward: Compute attention output for given query, key, and value tensors.
+
+ Examples:
+ >>> attn = Attention(embedding_dim=256, num_heads=8, downsample_rate=2)
+ >>> q = torch.randn(1, 100, 256)
+ >>> k = v = torch.randn(1, 50, 256)
+ >>> output = attn(q, k, v)
+ >>> print(output.shape)
+ torch.Size([1, 100, 256])
+ """
def __init__(
self,
@@ -145,6 +274,17 @@ downsample_rate: int = 1,
kv_in_dim: int | None = None,
) -> None:
+ """Initialize the Attention module with specified dimensions and settings.
+
+ Args:
+ embedding_dim (int): Dimensionality of input embeddings.
+ num_heads (int): Number of attention heads.
+ downsample_rate (int, optional): Factor by which internal dimensions are downsampled.
+ kv_in_dim (int | None, optional): Dimensionality of key and value inputs. If None, uses embedding_dim.
+
+ Raises:
+ AssertionError: If num_heads does not evenly divide the internal dim (embedding_dim / downsample_rate).
+ """
super().__init__()
self.embedding_dim = embedding_dim
self.kv_in_dim = kv_in_dim if kv_in_dim is not None else embedding_dim
@@ -159,17 +299,29 @@
@staticmethod
def _separate_heads(x: torch.Tensor, num_heads: int) -> torch.Tensor:
+ """Separate the input tensor into the specified number of attention heads."""
b, n, c = x.shape
x = x.reshape(b, n, num_heads, c // num_heads)
return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head
@staticmethod
def _recombine_heads(x: Tensor) -> Tensor:
+ """Recombine separated attention heads into a single tensor."""
b, n_heads, n_tokens, c_per_head = x.shape
x = x.transpose(1, 2)
return x.reshape(b, n_tokens, n_heads * c_per_head) # B x N_tokens x C
def forward(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
+ """Apply multi-head attention to query, key, and value tensors with optional downsampling.
+
+ Args:
+ q (torch.Tensor): Query tensor with shape (B, N_q, embedding_dim).
+ k (torch.Tensor): Key tensor with shape (B, N_k, kv_in_dim).
+ v (torch.Tensor): Value tensor with shape (B, N_k, kv_in_dim).
+
+ Returns:
+ (torch.Tensor): Output tensor after attention with shape (B, N_q, embedding_dim).
+ """
# Input projections
q = self.q_proj(q)
k = self.k_proj(k)
@@ -189,4 +341,4 @@ # Get output
out = attn @ v
out = self._recombine_heads(out)
- return self.out_proj(out)
+ return self.out_proj(out)
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/sam/modules/transformer.py |
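The Attention class documented above separates heads, scales scores by sqrt(c_per_head), and recombines. A standalone sketch of just that arithmetic in plain torch, with the learned q/k/v/out projections omitted and sizes borrowed from the docstring example:

# Sketch of the head-separation and scaled dot-product steps that
# Attention.forward performs, without the learned q/k/v/out projections.
import math
import torch

b, n_q, n_k, dim, heads = 1, 100, 50, 256, 8
q = torch.randn(b, n_q, dim)
k = torch.randn(b, n_k, dim)
v = torch.randn(b, n_k, dim)

def separate(x: torch.Tensor, num_heads: int) -> torch.Tensor:
    bs, n, c = x.shape
    return x.reshape(bs, n, num_heads, c // num_heads).transpose(1, 2)  # B x heads x tokens x c_per_head

qh, kh, vh = separate(q, heads), separate(k, heads), separate(v, heads)
attn = torch.softmax(qh @ kh.transpose(-2, -1) / math.sqrt(dim // heads), dim=-1)
out = (attn @ vh).transpose(1, 2).reshape(b, n_q, dim)  # recombine heads
print(out.shape)  # torch.Size([1, 100, 256])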
Auto-generate documentation strings for this file | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from copy import copy
from pathlib import Path
from ultralytics.models import yolo
from ultralytics.nn.tasks import OBBModel
from ultralytics.utils import DEFAULT_CFG, RANK
class OBBTrainer(yolo.detect.DetectionTrainer):
def __init__(self, cfg=DEFAULT_CFG, overrides: dict | None = None, _callbacks: dict | None = None):
if overrides is None:
overrides = {}
overrides["task"] = "obb"
super().__init__(cfg, overrides, _callbacks)
def get_model(
self, cfg: str | dict | None = None, weights: str | Path | None = None, verbose: bool = True
) -> OBBModel:
model = OBBModel(cfg, nc=self.data["nc"], ch=self.data["channels"], verbose=verbose and RANK == -1)
if weights:
model.load(weights)
return model
def get_validator(self):
self.loss_names = "box_loss", "cls_loss", "dfl_loss", "angle_loss"
return yolo.obb.OBBValidator(
self.test_loader, save_dir=self.save_dir, args=copy(self.args), _callbacks=self.callbacks
) | --- +++ @@ -11,8 +11,36 @@
class OBBTrainer(yolo.detect.DetectionTrainer):
+ """A class extending the DetectionTrainer class for training based on an Oriented Bounding Box (OBB) model.
+
+ This trainer specializes in training YOLO models that detect oriented bounding boxes, which are useful for detecting
+ objects at arbitrary angles rather than just axis-aligned rectangles.
+
+ Attributes:
+ loss_names (tuple): Names of the loss components used during training including box_loss, cls_loss, dfl_loss,
+ and angle_loss.
+
+ Methods:
+ get_model: Return OBBModel initialized with specified config and weights.
+ get_validator: Return an instance of OBBValidator for validation of YOLO model.
+
+ Examples:
+ >>> from ultralytics.models.yolo.obb import OBBTrainer
+ >>> args = dict(model="yolo26n-obb.pt", data="dota8.yaml", epochs=3)
+ >>> trainer = OBBTrainer(overrides=args)
+ >>> trainer.train()
+ """
def __init__(self, cfg=DEFAULT_CFG, overrides: dict | None = None, _callbacks: dict | None = None):
+ """Initialize an OBBTrainer object for training Oriented Bounding Box (OBB) models.
+
+ Args:
+ cfg (dict, optional): Configuration dictionary for the trainer. Contains training parameters and model
+ configuration.
+ overrides (dict, optional): Dictionary of parameter overrides for the configuration. Any values here will
+ take precedence over those in cfg.
+ _callbacks (dict, optional): Dictionary of callback functions to be invoked during training.
+ """
if overrides is None:
overrides = {}
overrides["task"] = "obb"
@@ -21,6 +49,21 @@ def get_model(
self, cfg: str | dict | None = None, weights: str | Path | None = None, verbose: bool = True
) -> OBBModel:
+ """Return OBBModel initialized with specified config and weights.
+
+ Args:
+ cfg (str | dict, optional): Model configuration. Can be a path to a YAML config file, a dictionary
+ containing configuration parameters, or None to use default configuration.
+ weights (str | Path, optional): Path to pretrained weights file. If None, random initialization is used.
+ verbose (bool): Whether to display model information during initialization.
+
+ Returns:
+ (OBBModel): Initialized OBBModel with the specified configuration and weights.
+
+ Examples:
+ >>> trainer = OBBTrainer()
+ >>> model = trainer.get_model(cfg="yolo26n-obb.yaml", weights="yolo26n-obb.pt")
+ """
model = OBBModel(cfg, nc=self.data["nc"], ch=self.data["channels"], verbose=verbose and RANK == -1)
if weights:
model.load(weights)
@@ -28,7 +71,8 @@ return model
def get_validator(self):
+ """Return an instance of OBBValidator for validation of YOLO model."""
self.loss_names = "box_loss", "cls_loss", "dfl_loss", "angle_loss"
return yolo.obb.OBBValidator(
self.test_loader, save_dir=self.save_dir, args=copy(self.args), _callbacks=self.callbacks
- )
+ )
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/yolo/obb/train.py |
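The OBBTrainer __init__ docstring above notes that values in overrides take precedence over cfg. A toy sketch of that merge pattern, using a stand-in config dict rather than the real ultralytics DEFAULT_CFG object:

# Toy sketch of the override-precedence rule described in the OBBTrainer
# docstring: keys in `overrides` win over the base config. The config dict
# below is a stand-in, not the real ultralytics DEFAULT_CFG.
from types import SimpleNamespace

base_cfg = {"task": "detect", "epochs": 100, "imgsz": 640}

def merge_cfg(cfg, overrides=None):
    overrides = overrides or {}
    return SimpleNamespace(**{**cfg, **overrides})  # overrides replace cfg keys

args = merge_cfg(base_cfg, {"task": "obb", "epochs": 3})
print(args.task, args.epochs, args.imgsz)  # obb 3 640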
Add docstrings for production code | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import torch
from torch import nn
from ultralytics.nn.modules import MLP, LayerNorm2d
class MaskDecoder(nn.Module):
def __init__(
self,
transformer_dim: int,
transformer: nn.Module,
num_multimask_outputs: int = 3,
activation: type[nn.Module] = nn.GELU,
iou_head_depth: int = 3,
iou_head_hidden_dim: int = 256,
) -> None:
super().__init__()
self.transformer_dim = transformer_dim
self.transformer = transformer
self.num_multimask_outputs = num_multimask_outputs
self.iou_token = nn.Embedding(1, transformer_dim)
self.num_mask_tokens = num_multimask_outputs + 1
self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)
self.output_upscaling = nn.Sequential(
nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),
LayerNorm2d(transformer_dim // 4),
activation(),
nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),
activation(),
)
self.output_hypernetworks_mlps = nn.ModuleList(
[MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3) for _ in range(self.num_mask_tokens)]
)
self.iou_prediction_head = MLP(transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth)
def forward(
self,
image_embeddings: torch.Tensor,
image_pe: torch.Tensor,
sparse_prompt_embeddings: torch.Tensor,
dense_prompt_embeddings: torch.Tensor,
multimask_output: bool,
) -> tuple[torch.Tensor, torch.Tensor]:
masks, iou_pred = self.predict_masks(
image_embeddings=image_embeddings,
image_pe=image_pe,
sparse_prompt_embeddings=sparse_prompt_embeddings,
dense_prompt_embeddings=dense_prompt_embeddings,
)
# Select the correct mask or masks for output
mask_slice = slice(1, None) if multimask_output else slice(0, 1)
masks = masks[:, mask_slice, :, :]
iou_pred = iou_pred[:, mask_slice]
return masks, iou_pred
def predict_masks(
self,
image_embeddings: torch.Tensor,
image_pe: torch.Tensor,
sparse_prompt_embeddings: torch.Tensor,
dense_prompt_embeddings: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor]:
# Concatenate output tokens
output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)
output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.shape[0], -1, -1)
tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)
# Expand per-image data in batch direction to be per-mask
src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
src = src + dense_prompt_embeddings
pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
b, c, h, w = src.shape
# Run the transformer
hs, src = self.transformer(src, pos_src, tokens)
iou_token_out = hs[:, 0, :]
mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]
# Upscale mask embeddings and predict masks using the mask tokens
src = src.transpose(1, 2).view(b, c, h, w)
upscaled_embedding = self.output_upscaling(src)
hyper_in_list: list[torch.Tensor] = [
self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]) for i in range(self.num_mask_tokens)
]
hyper_in = torch.stack(hyper_in_list, dim=1)
b, c, h, w = upscaled_embedding.shape
masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)
# Generate mask quality predictions
iou_pred = self.iou_prediction_head(iou_token_out)
return masks, iou_pred
class SAM2MaskDecoder(nn.Module):
def __init__(
self,
transformer_dim: int,
transformer: nn.Module,
num_multimask_outputs: int = 3,
activation: type[nn.Module] = nn.GELU,
iou_head_depth: int = 3,
iou_head_hidden_dim: int = 256,
use_high_res_features: bool = False,
iou_prediction_use_sigmoid=False,
dynamic_multimask_via_stability=False,
dynamic_multimask_stability_delta=0.05,
dynamic_multimask_stability_thresh=0.98,
pred_obj_scores: bool = False,
pred_obj_scores_mlp: bool = False,
use_multimask_token_for_obj_ptr: bool = False,
) -> None:
super().__init__()
self.transformer_dim = transformer_dim
self.transformer = transformer
self.num_multimask_outputs = num_multimask_outputs
self.iou_token = nn.Embedding(1, transformer_dim)
self.num_mask_tokens = num_multimask_outputs + 1
self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)
self.pred_obj_scores = pred_obj_scores
if self.pred_obj_scores:
self.obj_score_token = nn.Embedding(1, transformer_dim)
self.use_multimask_token_for_obj_ptr = use_multimask_token_for_obj_ptr
self.output_upscaling = nn.Sequential(
nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),
LayerNorm2d(transformer_dim // 4),
activation(),
nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),
activation(),
)
self.use_high_res_features = use_high_res_features
if use_high_res_features:
self.conv_s0 = nn.Conv2d(transformer_dim, transformer_dim // 8, kernel_size=1, stride=1)
self.conv_s1 = nn.Conv2d(transformer_dim, transformer_dim // 4, kernel_size=1, stride=1)
self.output_hypernetworks_mlps = nn.ModuleList(
[MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3) for _ in range(self.num_mask_tokens)]
)
self.iou_prediction_head = MLP(
transformer_dim,
iou_head_hidden_dim,
self.num_mask_tokens,
iou_head_depth,
sigmoid=iou_prediction_use_sigmoid,
)
if self.pred_obj_scores:
self.pred_obj_score_head = nn.Linear(transformer_dim, 1)
if pred_obj_scores_mlp:
self.pred_obj_score_head = MLP(transformer_dim, transformer_dim, 1, 3)
# When outputting a single mask, optionally we can dynamically fall back to the best
# multimask output token if the single mask output token gives low stability scores.
self.dynamic_multimask_via_stability = dynamic_multimask_via_stability
self.dynamic_multimask_stability_delta = dynamic_multimask_stability_delta
self.dynamic_multimask_stability_thresh = dynamic_multimask_stability_thresh
def forward(
self,
image_embeddings: torch.Tensor,
image_pe: torch.Tensor,
sparse_prompt_embeddings: torch.Tensor,
dense_prompt_embeddings: torch.Tensor,
multimask_output: bool,
repeat_image: bool,
high_res_features: list[torch.Tensor] | None = None,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
masks, iou_pred, mask_tokens_out, object_score_logits = self.predict_masks(
image_embeddings=image_embeddings,
image_pe=image_pe,
sparse_prompt_embeddings=sparse_prompt_embeddings,
dense_prompt_embeddings=dense_prompt_embeddings,
repeat_image=repeat_image,
high_res_features=high_res_features,
)
# Select the correct mask or masks for output
if multimask_output:
masks = masks[:, 1:, :, :]
iou_pred = iou_pred[:, 1:]
elif self.dynamic_multimask_via_stability and not self.training:
masks, iou_pred = self._dynamic_multimask_via_stability(masks, iou_pred)
else:
masks = masks[:, 0:1, :, :]
iou_pred = iou_pred[:, 0:1]
if multimask_output and self.use_multimask_token_for_obj_ptr:
sam_tokens_out = mask_tokens_out[:, 1:] # [b, 3, c] shape
else:
# Take the mask output token. Here we *always* use the token for single mask output.
# At test time, even if we track after 1-click (and using multimask_output=True),
# we still take the single mask token here. The rationale is that we always track
# after multiple clicks during training, so the past tokens seen during training
# are always the single mask token (and we'll let it be the object-memory token).
sam_tokens_out = mask_tokens_out[:, 0:1] # [b, 1, c] shape
return masks, iou_pred, sam_tokens_out, object_score_logits
def predict_masks(
self,
image_embeddings: torch.Tensor,
image_pe: torch.Tensor,
sparse_prompt_embeddings: torch.Tensor,
dense_prompt_embeddings: torch.Tensor,
repeat_image: bool,
high_res_features: list[torch.Tensor] | None = None,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
# Concatenate output tokens
s = 0
if self.pred_obj_scores:
output_tokens = torch.cat(
[
self.obj_score_token.weight,
self.iou_token.weight,
self.mask_tokens.weight,
],
dim=0,
)
s = 1
else:
output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)
output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.shape[0], -1, -1)
tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)
# Expand per-image data in batch direction to be per-mask
if repeat_image:
src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
else:
assert image_embeddings.shape[0] == tokens.shape[0]
src = image_embeddings
src = src + dense_prompt_embeddings
assert image_pe.shape[0] == 1, "image_pe should have size 1 in batch dim (from `get_dense_pe()`)"
pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
b, c, h, w = src.shape
# Run the transformer
hs, src = self.transformer(src, pos_src, tokens)
iou_token_out = hs[:, s, :]
mask_tokens_out = hs[:, s + 1 : (s + 1 + self.num_mask_tokens), :]
# Upscale mask embeddings and predict masks using the mask tokens
src = src.transpose(1, 2).view(b, c, h, w)
if not self.use_high_res_features or high_res_features is None:
upscaled_embedding = self.output_upscaling(src)
else:
dc1, ln1, act1, dc2, act2 = self.output_upscaling
feat_s0, feat_s1 = high_res_features
upscaled_embedding = act1(ln1(dc1(src) + feat_s1))
upscaled_embedding = act2(dc2(upscaled_embedding) + feat_s0)
hyper_in_list: list[torch.Tensor] = [
self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]) for i in range(self.num_mask_tokens)
]
hyper_in = torch.stack(hyper_in_list, dim=1)
b, c, h, w = upscaled_embedding.shape
masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)
# Generate mask quality predictions
iou_pred = self.iou_prediction_head(iou_token_out)
if self.pred_obj_scores:
assert s == 1
object_score_logits = self.pred_obj_score_head(hs[:, 0, :])
else:
# Obj scores logits - default to 10.0, i.e. assuming the object is present, sigmoid(10)=1
object_score_logits = 10.0 * iou_pred.new_ones(iou_pred.shape[0], 1)
return masks, iou_pred, mask_tokens_out, object_score_logits
def _get_stability_scores(self, mask_logits):
mask_logits = mask_logits.flatten(-2)
area_i = torch.sum(mask_logits > self.dynamic_multimask_stability_delta, dim=-1).float()
area_u = torch.sum(mask_logits > -self.dynamic_multimask_stability_delta, dim=-1).float()
return torch.where(area_u > 0, area_i / area_u, 1.0)
def _dynamic_multimask_via_stability(self, all_mask_logits, all_iou_scores):
# The best mask from multimask output tokens (1~3)
multimask_logits = all_mask_logits[:, 1:, :, :]
multimask_iou_scores = all_iou_scores[:, 1:]
best_scores_inds = torch.argmax(multimask_iou_scores, dim=-1)
batch_inds = torch.arange(multimask_iou_scores.shape[0], device=all_iou_scores.device)
best_multimask_logits = multimask_logits[batch_inds, best_scores_inds]
best_multimask_logits = best_multimask_logits.unsqueeze(1)
best_multimask_iou_scores = multimask_iou_scores[batch_inds, best_scores_inds]
best_multimask_iou_scores = best_multimask_iou_scores.unsqueeze(1)
# The mask from singlemask output token 0 and its stability score
singlemask_logits = all_mask_logits[:, 0:1, :, :]
singlemask_iou_scores = all_iou_scores[:, 0:1]
stability_scores = self._get_stability_scores(singlemask_logits)
is_stable = stability_scores >= self.dynamic_multimask_stability_thresh
# Dynamically fall back to best multimask output upon low stability scores.
mask_logits_out = torch.where(
is_stable[..., None, None].expand_as(singlemask_logits),
singlemask_logits,
best_multimask_logits,
)
iou_scores_out = torch.where(
is_stable.expand_as(singlemask_iou_scores),
singlemask_iou_scores,
best_multimask_iou_scores,
)
return mask_logits_out, iou_scores_out | --- +++ @@ -9,6 +9,33 @@
class MaskDecoder(nn.Module):
+ """Decoder module for generating masks and their associated quality scores using a transformer architecture.
+
+ This class predicts masks given image and prompt embeddings, utilizing a transformer to process the inputs and
+ generate mask predictions along with their quality scores.
+
+ Attributes:
+ transformer_dim (int): Channel dimension for the transformer module.
+ transformer (nn.Module): Transformer module used for mask prediction.
+ num_multimask_outputs (int): Number of masks to predict for disambiguating masks.
+ iou_token (nn.Embedding): Embedding for the IoU token.
+ num_mask_tokens (int): Number of mask tokens.
+ mask_tokens (nn.Embedding): Embedding for the mask tokens.
+ output_upscaling (nn.Sequential): Neural network sequence for upscaling the output.
+ output_hypernetworks_mlps (nn.ModuleList): Hypernetwork MLPs for generating masks.
+ iou_prediction_head (nn.Module): MLP for predicting mask quality.
+
+ Methods:
+ forward: Predict masks given image and prompt embeddings.
+ predict_masks: Internal method for mask prediction.
+
+ Examples:
+ >>> decoder = MaskDecoder(transformer_dim=256, transformer=transformer_module)
+ >>> masks, iou_pred = decoder(
+ ... image_embeddings, image_pe, sparse_prompt_embeddings, dense_prompt_embeddings, multimask_output=True
+ ... )
+ >>> print(f"Predicted masks shape: {masks.shape}, IoU predictions shape: {iou_pred.shape}")
+ """
def __init__(
self,
@@ -19,6 +46,16 @@ iou_head_depth: int = 3,
iou_head_hidden_dim: int = 256,
) -> None:
+ """Initialize the MaskDecoder module for generating masks and their associated quality scores.
+
+ Args:
+ transformer_dim (int): Channel dimension for the transformer module.
+ transformer (nn.Module): Transformer module used for mask prediction.
+ num_multimask_outputs (int): Number of masks to predict for disambiguating masks.
+ activation (type[nn.Module]): Type of activation to use when upscaling masks.
+ iou_head_depth (int): Depth of the MLP used to predict mask quality.
+ iou_head_hidden_dim (int): Hidden dimension of the MLP used to predict mask quality.
+ """
super().__init__()
self.transformer_dim = transformer_dim
self.transformer = transformer
@@ -50,6 +87,28 @@ dense_prompt_embeddings: torch.Tensor,
multimask_output: bool,
) -> tuple[torch.Tensor, torch.Tensor]:
+ """Predict masks given image and prompt embeddings.
+
+ Args:
+ image_embeddings (torch.Tensor): Embeddings from the image encoder.
+ image_pe (torch.Tensor): Positional encoding with the shape of image_embeddings.
+ sparse_prompt_embeddings (torch.Tensor): Embeddings of the points and boxes.
+ dense_prompt_embeddings (torch.Tensor): Embeddings of the mask inputs.
+ multimask_output (bool): Whether to return multiple masks or a single mask.
+
+ Returns:
+ masks (torch.Tensor): Batched predicted masks.
+ iou_pred (torch.Tensor): Batched predictions of mask quality.
+
+ Examples:
+ >>> decoder = MaskDecoder(transformer_dim=256, transformer=transformer_module)
+ >>> image_emb = torch.rand(1, 256, 64, 64)
+ >>> image_pe = torch.rand(1, 256, 64, 64)
+ >>> sparse_emb = torch.rand(1, 2, 256)
+ >>> dense_emb = torch.rand(1, 256, 64, 64)
+ >>> masks, iou_pred = decoder(image_emb, image_pe, sparse_emb, dense_emb, multimask_output=True)
+ >>> print(f"Masks shape: {masks.shape}, IoU predictions shape: {iou_pred.shape}")
+ """
masks, iou_pred = self.predict_masks(
image_embeddings=image_embeddings,
image_pe=image_pe,
@@ -71,6 +130,7 @@ sparse_prompt_embeddings: torch.Tensor,
dense_prompt_embeddings: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor]:
+ """Predict masks and quality scores using image and prompt embeddings via transformer architecture."""
# Concatenate output tokens
output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)
output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.shape[0], -1, -1)
@@ -104,6 +164,48 @@
class SAM2MaskDecoder(nn.Module):
+ """Transformer-based decoder for predicting instance segmentation masks from image and prompt embeddings.
+
+ This class extends the functionality of the MaskDecoder, incorporating additional features such as high-resolution
+ feature processing, dynamic multimask output, and object score prediction.
+
+ Attributes:
+ transformer_dim (int): Channel dimension of the transformer.
+ transformer (nn.Module): Transformer used to predict masks.
+ num_multimask_outputs (int): Number of masks to predict when disambiguating masks.
+ iou_token (nn.Embedding): Embedding for IOU token.
+ num_mask_tokens (int): Total number of mask tokens.
+ mask_tokens (nn.Embedding): Embedding for mask tokens.
+ pred_obj_scores (bool): Whether to predict object scores.
+ obj_score_token (nn.Embedding): Embedding for object score token.
+ use_multimask_token_for_obj_ptr (bool): Whether to use multimask token for object pointer.
+ output_upscaling (nn.Sequential): Upscaling layers for output.
+ use_high_res_features (bool): Whether to use high-resolution features.
+ conv_s0 (nn.Conv2d): Convolutional layer for high-resolution features (s0).
+ conv_s1 (nn.Conv2d): Convolutional layer for high-resolution features (s1).
+ output_hypernetworks_mlps (nn.ModuleList): List of MLPs for output hypernetworks.
+ iou_prediction_head (MLP): MLP for IOU prediction.
+ pred_obj_score_head (nn.Linear | MLP): Linear layer or MLP for object score prediction.
+ dynamic_multimask_via_stability (bool): Whether to use dynamic multimask via stability.
+ dynamic_multimask_stability_delta (float): Delta value for dynamic multimask stability.
+ dynamic_multimask_stability_thresh (float): Threshold for dynamic multimask stability.
+
+ Methods:
+ forward: Predict masks given image and prompt embeddings.
+ predict_masks: Predict instance segmentation masks from image and prompt embeddings.
+ _get_stability_scores: Compute mask stability scores based on IoU between thresholds.
+ _dynamic_multimask_via_stability: Dynamically select the most stable mask output.
+
+ Examples:
+ >>> image_embeddings = torch.rand(1, 256, 64, 64)
+ >>> image_pe = torch.rand(1, 256, 64, 64)
+ >>> sparse_prompt_embeddings = torch.rand(1, 2, 256)
+ >>> dense_prompt_embeddings = torch.rand(1, 256, 64, 64)
+ >>> decoder = SAM2MaskDecoder(256, transformer)
+ >>> masks, iou_pred, sam_tokens_out, obj_score_logits = decoder.forward(
+ ... image_embeddings, image_pe, sparse_prompt_embeddings, dense_prompt_embeddings, True, False
+ ... )
+ """
def __init__(
self,
@@ -122,6 +224,27 @@ pred_obj_scores_mlp: bool = False,
use_multimask_token_for_obj_ptr: bool = False,
) -> None:
+ """Initialize the SAM2MaskDecoder module for predicting instance segmentation masks.
+
+ This decoder extends the functionality of MaskDecoder, incorporating additional features such as high-resolution
+ feature processing, dynamic multimask output, and object score prediction.
+
+ Args:
+ transformer_dim (int): Channel dimension of the transformer.
+ transformer (nn.Module): Transformer used to predict masks.
+ num_multimask_outputs (int): Number of masks to predict when disambiguating masks.
+ activation (type[nn.Module]): Type of activation to use when upscaling masks.
+ iou_head_depth (int): Depth of the MLP used to predict mask quality.
+ iou_head_hidden_dim (int): Hidden dimension of the MLP used to predict mask quality.
+ use_high_res_features (bool): Whether to use high-resolution features.
+ iou_prediction_use_sigmoid (bool): Whether to use sigmoid for IOU prediction.
+ dynamic_multimask_via_stability (bool): Whether to use dynamic multimask via stability.
+ dynamic_multimask_stability_delta (float): Delta value for dynamic multimask stability.
+ dynamic_multimask_stability_thresh (float): Threshold for dynamic multimask stability.
+ pred_obj_scores (bool): Whether to predict object scores.
+ pred_obj_scores_mlp (bool): Whether to use MLP for object score prediction.
+ use_multimask_token_for_obj_ptr (bool): Whether to use multimask token for object pointer.
+ """
super().__init__()
self.transformer_dim = transformer_dim
self.transformer = transformer
@@ -181,6 +304,33 @@ repeat_image: bool,
high_res_features: list[torch.Tensor] | None = None,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
+ """Predict masks given image and prompt embeddings.
+
+ Args:
+ image_embeddings (torch.Tensor): Embeddings from the image encoder with shape (B, C, H, W).
+ image_pe (torch.Tensor): Positional encoding with the shape of image_embeddings (B, C, H, W).
+ sparse_prompt_embeddings (torch.Tensor): Embeddings of the points and boxes with shape (B, N, C).
+ dense_prompt_embeddings (torch.Tensor): Embeddings of the mask inputs with shape (B, C, H, W).
+ multimask_output (bool): Whether to return multiple masks or a single mask.
+ repeat_image (bool): Flag to repeat the image embeddings.
+ high_res_features (list[torch.Tensor] | None, optional): Optional high-resolution features.
+
+ Returns:
+ masks (torch.Tensor): Batched predicted masks with shape (B, N, H, W).
+ iou_pred (torch.Tensor): Batched predictions of mask quality with shape (B, N).
+ sam_tokens_out (torch.Tensor): Batched SAM token for mask output with shape (B, N, C).
+ object_score_logits (torch.Tensor): Batched object score logits with shape (B, 1).
+
+ Examples:
+ >>> image_embeddings = torch.rand(1, 256, 64, 64)
+ >>> image_pe = torch.rand(1, 256, 64, 64)
+ >>> sparse_prompt_embeddings = torch.rand(1, 2, 256)
+ >>> dense_prompt_embeddings = torch.rand(1, 256, 64, 64)
+ >>> decoder = SAM2MaskDecoder(256, transformer)
+ >>> masks, iou_pred, sam_tokens_out, obj_score_logits = decoder.forward(
+ ... image_embeddings, image_pe, sparse_prompt_embeddings, dense_prompt_embeddings, True, False
+ ... )
+ """
masks, iou_pred, mask_tokens_out, object_score_logits = self.predict_masks(
image_embeddings=image_embeddings,
image_pe=image_pe,
@@ -221,6 +371,7 @@ repeat_image: bool,
high_res_features: list[torch.Tensor] | None = None,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
+ """Predict instance segmentation masks from image and prompt embeddings using a transformer."""
# Concatenate output tokens
s = 0
if self.pred_obj_scores:
@@ -283,12 +434,37 @@ return masks, iou_pred, mask_tokens_out, object_score_logits
def _get_stability_scores(self, mask_logits):
+ """Compute mask stability scores based on IoU between upper and lower thresholds."""
mask_logits = mask_logits.flatten(-2)
area_i = torch.sum(mask_logits > self.dynamic_multimask_stability_delta, dim=-1).float()
area_u = torch.sum(mask_logits > -self.dynamic_multimask_stability_delta, dim=-1).float()
return torch.where(area_u > 0, area_i / area_u, 1.0)
def _dynamic_multimask_via_stability(self, all_mask_logits, all_iou_scores):
+ """Dynamically select the most stable mask output based on stability scores and IoU predictions.
+
+ This method is used when outputting a single mask. If the stability score from the current single-mask output
+ (based on output token 0) falls below a threshold, it instead selects from multi-mask outputs (based on output
+ tokens 1-3) the mask with the highest predicted IoU score. This ensures a valid mask for both clicking and
+ tracking scenarios.
+
+ Args:
+ all_mask_logits (torch.Tensor): Logits for all predicted masks, shape (B, N, H, W) where B is batch size, N
+ is number of masks (typically 4), and H, W are mask dimensions.
+ all_iou_scores (torch.Tensor): Predicted IoU scores for all masks, shape (B, N).
+
+ Returns:
+ mask_logits_out (torch.Tensor): Selected mask logits, shape (B, 1, H, W).
+ iou_scores_out (torch.Tensor): Selected IoU scores, shape (B, 1).
+
+ Examples:
+ >>> decoder = SAM2MaskDecoder(...)
+ >>> all_mask_logits = torch.rand(2, 4, 256, 256) # 2 images, 4 masks each
+ >>> all_iou_scores = torch.rand(2, 4)
+ >>> mask_logits, iou_scores = decoder._dynamic_multimask_via_stability(all_mask_logits, all_iou_scores)
+ >>> print(mask_logits.shape, iou_scores.shape)
+ torch.Size([2, 1, 256, 256]) torch.Size([2, 1])
+ """
# The best mask from multimask output tokens (1~3)
multimask_logits = all_mask_logits[:, 1:, :, :]
multimask_iou_scores = all_iou_scores[:, 1:]
@@ -316,4 +492,4 @@ singlemask_iou_scores,
best_multimask_iou_scores,
)
- return mask_logits_out, iou_scores_out
+ return mask_logits_out, iou_scores_out
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/sam/modules/decoders.py |
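The _dynamic_multimask_via_stability docstring above leans on the score computed in _get_stability_scores: the IoU between the mask thresholded at +delta and at -delta. A self-contained sketch of that ratio on random logits, reusing the decoder's default delta of 0.05:

# Sketch of the stability score: IoU between the mask thresholded at
# +delta and at -delta, matching _get_stability_scores above.
import torch

def stability_scores(mask_logits: torch.Tensor, delta: float = 0.05) -> torch.Tensor:
    flat = mask_logits.flatten(-2)                 # flatten H, W into one dim
    area_i = (flat > delta).sum(dim=-1).float()    # intersection: high-threshold area
    area_u = (flat > -delta).sum(dim=-1).float()   # union: low-threshold area
    return torch.where(area_u > 0, area_i / area_u, torch.ones_like(area_i))

scores = stability_scores(torch.randn(2, 4, 256, 256))  # 2 images, 4 masks each
print(scores.shape)  # torch.Size([2, 4])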
Write docstrings for algorithm functions | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
import os
import threading
import time
from typing import Any
from ultralytics.utils import (
IS_COLAB,
LOGGER,
TQDM,
TryExcept,
colorstr,
)
HUB_API_ROOT = os.environ.get("ULTRALYTICS_HUB_API", "https://api.ultralytics.com")
HUB_WEB_ROOT = os.environ.get("ULTRALYTICS_HUB_WEB", "https://hub.ultralytics.com")
PREFIX = colorstr("Ultralytics HUB: ")
HELP_MSG = "If this issue persists please visit https://github.com/ultralytics/hub/issues for assistance."
def request_with_credentials(url: str) -> Any:
if not IS_COLAB:
raise OSError("request_with_credentials() must run in a Colab environment")
from google.colab import output
from IPython import display
display.display(
display.Javascript(
f"""
window._hub_tmp = new Promise((resolve, reject) => {{
const timeout = setTimeout(() => reject("Failed authenticating existing browser session"), 5000)
fetch("{url}", {{
method: 'POST',
credentials: 'include'
}})
.then((response) => resolve(response.json()))
.then((json) => {{
clearTimeout(timeout);
}}).catch((err) => {{
clearTimeout(timeout);
reject(err);
}});
}});
"""
)
)
return output.eval_js("_hub_tmp")
def requests_with_progress(method: str, url: str, **kwargs):
import requests # scoped as slow import
progress = kwargs.pop("progress", False)
if not progress:
return requests.request(method, url, **kwargs)
response = requests.request(method, url, stream=True, **kwargs)
total = int(response.headers.get("content-length", 0) if isinstance(progress, bool) else progress) # total size
try:
pbar = TQDM(total=total, unit="B", unit_scale=True, unit_divisor=1024)
for data in response.iter_content(chunk_size=1024):
pbar.update(len(data))
pbar.close()
except requests.exceptions.ChunkedEncodingError: # avoid 'Connection broken: IncompleteRead' warnings
response.close()
return response
def smart_request(
method: str,
url: str,
retry: int = 3,
timeout: int = 30,
thread: bool = True,
code: int = -1,
verbose: bool = True,
progress: bool = False,
**kwargs,
):
retry_codes = (408, 500) # retry only these codes
@TryExcept(verbose=verbose)
def func(func_method, func_url, **func_kwargs):
r = None # response
t0 = time.time() # initial time for timer
for i in range(retry + 1):
if (time.time() - t0) > timeout:
break
r = requests_with_progress(func_method, func_url, **func_kwargs) # i.e. get(url, data, json, files)
if r.status_code < 300: # return codes in the 2xx range are generally considered "good" or "successful"
break
try:
m = r.json().get("message", "No JSON message.")
except AttributeError:
m = "Unable to read JSON."
if i == 0:
if r.status_code in retry_codes:
m += f" Retrying {retry}x for {timeout}s." if retry else ""
elif r.status_code == 429: # rate limit
h = r.headers # response headers
m = (
f"Rate limit reached ({h['X-RateLimit-Remaining']}/{h['X-RateLimit-Limit']}). "
f"Please retry after {h['Retry-After']}s."
)
if verbose:
LOGGER.warning(f"{PREFIX}{m} {HELP_MSG} ({r.status_code} #{code})")
if r.status_code not in retry_codes:
return r
time.sleep(2**i) # exponential backoff
return r
args = method, url
kwargs["progress"] = progress
if thread:
threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True).start()
else:
return func(*args, **kwargs) | --- +++ @@ -21,6 +21,17 @@
def request_with_credentials(url: str) -> Any:
+ """Make an AJAX request with cookies attached in a Google Colab environment.
+
+ Args:
+ url (str): The URL to make the request to.
+
+ Returns:
+ (Any): The response data from the AJAX request.
+
+ Raises:
+ OSError: If the function is not run in a Google Colab environment.
+ """
if not IS_COLAB:
raise OSError("request_with_credentials() must run in a Colab environment")
from google.colab import output
@@ -50,6 +61,21 @@
def requests_with_progress(method: str, url: str, **kwargs):
+ """Make an HTTP request using the specified method and URL, with an optional progress bar.
+
+ Args:
+ method (str): The HTTP method to use (e.g. 'GET', 'POST').
+ url (str): The URL to send the request to.
+ **kwargs (Any): Additional keyword arguments to pass to the underlying `requests.request` function.
+
+ Returns:
+ (requests.Response): The response object from the HTTP request.
+
+ Notes:
+ - If 'progress' is set to True, the progress bar will display the download progress for responses with a known
+ content length.
+ - If 'progress' is a number then progress bar will display assuming content length = progress.
+ """
import requests # scoped as slow import
progress = kwargs.pop("progress", False)
@@ -78,10 +104,28 @@ progress: bool = False,
**kwargs,
):
+ """Make an HTTP request using the 'requests' library, with exponential backoff retries up to a specified timeout.
+
+ Args:
+ method (str): The HTTP method to use for the request. Choices are 'post' and 'get'.
+ url (str): The URL to make the request to.
+ retry (int, optional): Number of retries to attempt before giving up.
+ timeout (int, optional): Timeout in seconds after which the function will give up retrying.
+ thread (bool, optional): Whether to execute the request in a separate daemon thread.
+ code (int, optional): An identifier for the request, used for logging purposes.
+ verbose (bool, optional): A flag to determine whether to print out to console or not.
+ progress (bool, optional): Whether to show a progress bar during the request.
+ **kwargs (Any): Keyword arguments to be passed to the requests function specified in method.
+
+ Returns:
+ (requests.Response | None): The HTTP response object. If the request is executed in a separate thread, returns
+ None.
+ """
retry_codes = (408, 500) # retry only these codes
@TryExcept(verbose=verbose)
def func(func_method, func_url, **func_kwargs):
+ """Make HTTP requests with retries and timeouts, with optional progress tracking."""
r = None # response
t0 = time.time() # initial time for timer
for i in range(retry + 1):
@@ -115,4 +159,4 @@ if thread:
threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True).start()
else:
- return func(*args, **kwargs)
+ return func(*args, **kwargs)
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/hub/utils.py |
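smart_request's inner loop above combines a wall-clock timeout, a retry-code whitelist, and exponential backoff via time.sleep(2**i). A stripped-down sketch of just that control flow, with a random stub standing in for the real HTTP call:

# Sketch of the retry/backoff skeleton used by smart_request: give up after
# `timeout` seconds, sleep 2**i between attempts, and stop retrying on
# status codes outside `retry_codes`. fake_request is a stand-in stub.
import random
import time

def fake_request() -> int:
    return random.choice([200, 500, 500])  # stub: sometimes fails

def with_backoff(retry: int = 3, timeout: int = 30, retry_codes=(408, 500)) -> int:
    t0 = time.time()
    status = -1
    for i in range(retry + 1):
        if time.time() - t0 > timeout:
            break
        status = fake_request()
        if status < 300 or status not in retry_codes:
            break  # success, or a code not worth retrying
        time.sleep(2**i)  # exponential backoff: 1s, 2s, 4s, ...
    return status

print(with_backoff())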
Add docstrings for better understanding | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from pathlib import Path
from typing import Any
import numpy as np
import torch
from ultralytics.models.yolo.detect import DetectionValidator
from ultralytics.utils import ops
from ultralytics.utils.metrics import OKS_SIGMA, PoseMetrics, kpt_iou
class PoseValidator(DetectionValidator):
def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks: dict | None = None) -> None:
super().__init__(dataloader, save_dir, args, _callbacks)
self.sigma = None
self.kpt_shape = None
self.args.task = "pose"
self.metrics = PoseMetrics()
def preprocess(self, batch: dict[str, Any]) -> dict[str, Any]:
batch = super().preprocess(batch)
batch["keypoints"] = batch["keypoints"].float()
return batch
def get_desc(self) -> str:
return ("%22s" + "%11s" * 10) % (
"Class",
"Images",
"Instances",
"Box(P",
"R",
"mAP50",
"mAP50-95)",
"Pose(P",
"R",
"mAP50",
"mAP50-95)",
)
def init_metrics(self, model: torch.nn.Module) -> None:
super().init_metrics(model)
self.kpt_shape = self.data["kpt_shape"]
is_pose = self.kpt_shape == [17, 3]
nkpt = self.kpt_shape[0]
self.sigma = OKS_SIGMA if is_pose else np.ones(nkpt) / nkpt
def postprocess(self, preds: torch.Tensor) -> list[dict[str, torch.Tensor]]:
preds = super().postprocess(preds)
for pred in preds:
pred["keypoints"] = pred.pop("extra").view(-1, *self.kpt_shape) # remove extra if exists
return preds
def _prepare_batch(self, si: int, batch: dict[str, Any]) -> dict[str, Any]:
pbatch = super()._prepare_batch(si, batch)
kpts = batch["keypoints"][batch["batch_idx"] == si]
h, w = pbatch["imgsz"]
kpts = kpts.clone()
kpts[..., 0] *= w
kpts[..., 1] *= h
pbatch["keypoints"] = kpts
return pbatch
def _process_batch(self, preds: dict[str, torch.Tensor], batch: dict[str, Any]) -> dict[str, np.ndarray]:
tp = super()._process_batch(preds, batch)
gt_cls = batch["cls"]
if gt_cls.shape[0] == 0 or preds["cls"].shape[0] == 0:
tp_p = np.zeros((preds["cls"].shape[0], self.niou), dtype=bool)
else:
# `0.53` is from https://github.com/jin-s13/xtcocoapi/blob/master/xtcocotools/cocoeval.py#L384
area = ops.xyxy2xywh(batch["bboxes"])[:, 2:].prod(1) * 0.53
iou = kpt_iou(batch["keypoints"], preds["keypoints"], sigma=self.sigma, area=area)
tp_p = self.match_predictions(preds["cls"], gt_cls, iou).cpu().numpy()
tp.update({"tp_p": tp_p}) # update tp with kpts IoU
return tp
def save_one_txt(self, predn: dict[str, torch.Tensor], save_conf: bool, shape: tuple[int, int], file: Path) -> None:
from ultralytics.engine.results import Results
Results(
np.zeros((shape[0], shape[1]), dtype=np.uint8),
path=None,
names=self.names,
boxes=torch.cat([predn["bboxes"], predn["conf"].unsqueeze(-1), predn["cls"].unsqueeze(-1)], dim=1),
keypoints=predn["keypoints"],
).save_txt(file, save_conf=save_conf)
def pred_to_json(self, predn: dict[str, torch.Tensor], pbatch: dict[str, Any]) -> None:
super().pred_to_json(predn, pbatch)
kpts = predn["kpts"]
for i, k in enumerate(kpts.flatten(1, 2).tolist()):
self.jdict[-len(kpts) + i]["keypoints"] = k # keypoints
def scale_preds(self, predn: dict[str, torch.Tensor], pbatch: dict[str, Any]) -> dict[str, torch.Tensor]:
return {
**super().scale_preds(predn, pbatch),
"kpts": ops.scale_coords(
pbatch["imgsz"],
predn["keypoints"].clone(),
pbatch["ori_shape"],
ratio_pad=pbatch["ratio_pad"],
),
}
def eval_json(self, stats: dict[str, Any]) -> dict[str, Any]:
anno_json = self.data["path"] / "annotations/person_keypoints_val2017.json" # annotations
pred_json = self.save_dir / "predictions.json" # predictions
return super().coco_evaluate(stats, pred_json, anno_json, ["bbox", "keypoints"], suffix=["Box", "Pose"]) | --- +++ @@ -14,8 +14,56 @@
class PoseValidator(DetectionValidator):
+ """A class extending the DetectionValidator class for validation based on a pose model.
+
+ This validator is specifically designed for pose estimation tasks, handling keypoints and implementing specialized
+ metrics for pose evaluation.
+
+ Attributes:
+ sigma (np.ndarray): Sigma values for OKS calculation, either OKS_SIGMA or ones divided by number of keypoints.
+ kpt_shape (list[int]): Shape of the keypoints, typically [17, 3] for COCO format.
+ args (dict): Arguments for the validator including task set to "pose".
+ metrics (PoseMetrics): Metrics object for pose evaluation.
+
+ Methods:
+ preprocess: Preprocess batch by converting keypoints data to float and moving it to the device.
+ get_desc: Return description of evaluation metrics in string format.
+ init_metrics: Initialize pose estimation metrics for YOLO model.
+ _prepare_batch: Prepare a batch for processing by converting keypoints to float and scaling to original
+ dimensions.
+ _prepare_pred: Prepare and scale keypoints in predictions for pose processing.
+ _process_batch: Return correct prediction matrix by computing Intersection over Union (IoU) between detections
+ and ground truth.
+ plot_val_samples: Plot and save validation set samples with ground truth bounding boxes and keypoints.
+ plot_predictions: Plot and save model predictions with bounding boxes and keypoints.
+ save_one_txt: Save YOLO pose detections to a text file in normalized coordinates.
+ pred_to_json: Convert YOLO predictions to COCO JSON format.
+ eval_json: Evaluate object detection model using COCO JSON format.
+
+ Examples:
+ >>> from ultralytics.models.yolo.pose import PoseValidator
+ >>> args = dict(model="yolo26n-pose.pt", data="coco8-pose.yaml")
+ >>> validator = PoseValidator(args=args)
+ >>> validator()
+
+ Notes:
+ This class extends DetectionValidator with pose-specific functionality. It initializes with sigma values
+ for OKS calculation and sets up PoseMetrics for evaluation. A warning is displayed when using Apple MPS
+ due to a known bug with pose models.
+ """
def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks: dict | None = None) -> None:
+ """Initialize a PoseValidator object for pose estimation validation.
+
+ This validator is specifically designed for pose estimation tasks, handling keypoints and implementing
+ specialized metrics for pose evaluation.
+
+ Args:
+ dataloader (torch.utils.data.DataLoader, optional): DataLoader to be used for validation.
+ save_dir (Path | str, optional): Directory to save results.
+ args (dict, optional): Arguments for the validator including task set to "pose".
+ _callbacks (dict, optional): Dictionary of callback functions to be executed during validation.
+ """
super().__init__(dataloader, save_dir, args, _callbacks)
self.sigma = None
self.kpt_shape = None
@@ -23,11 +71,13 @@
self.metrics = PoseMetrics()
def preprocess(self, batch: dict[str, Any]) -> dict[str, Any]:
+ """Preprocess batch by converting keypoints data to float and moving it to the device."""
batch = super().preprocess(batch)
batch["keypoints"] = batch["keypoints"].float()
return batch
def get_desc(self) -> str:
+ """Return description of evaluation metrics in string format."""
return ("%22s" + "%11s" * 10) % (
"Class",
"Images",
@@ -43,6 +93,11 @@
)
def init_metrics(self, model: torch.nn.Module) -> None:
+ """Initialize evaluation metrics for YOLO pose validation.
+
+ Args:
+ model (torch.nn.Module): Model to validate.
+ """
super().init_metrics(model)
self.kpt_shape = self.data["kpt_shape"]
is_pose = self.kpt_shape == [17, 3]
@@ -50,12 +105,47 @@
self.sigma = OKS_SIGMA if is_pose else np.ones(nkpt) / nkpt
def postprocess(self, preds: torch.Tensor) -> list[dict[str, torch.Tensor]]:
+ """Postprocess YOLO predictions to extract and reshape keypoints for pose estimation.
+
+ This method extends the parent class postprocessing by extracting keypoints from the 'extra' field of
+ predictions and reshaping them according to the keypoint shape configuration. The keypoints are reshaped from a
+ flattened format to the proper dimensional structure (typically [N, 17, 3] for COCO pose format).
+
+ Args:
+ preds (torch.Tensor): Raw prediction tensor from the YOLO pose model containing bounding boxes, confidence
+ scores, class predictions, and keypoint data.
+
+ Returns:
+ (list[dict[str, torch.Tensor]]): List of processed prediction dictionaries, each containing:
+ - 'bboxes': Bounding box coordinates
+ - 'conf': Confidence scores
+ - 'cls': Class predictions
+ - 'keypoints': Reshaped keypoint coordinates with shape (-1, *self.kpt_shape)
+
+ Notes:
+ If no keypoints are present in a prediction (empty keypoints), that prediction is skipped and continues
+ to the next one. The keypoints are extracted from the 'extra' field which contains additional
+ task-specific data beyond basic detection.
+ """
preds = super().postprocess(preds)
for pred in preds:
pred["keypoints"] = pred.pop("extra").view(-1, *self.kpt_shape) # remove extra if exists
return preds
def _prepare_batch(self, si: int, batch: dict[str, Any]) -> dict[str, Any]:
+ """Prepare a batch for processing by converting keypoints to float and scaling to original dimensions.
+
+ Args:
+ si (int): Sample index within the batch.
+ batch (dict[str, Any]): Dictionary containing batch data with keys like 'keypoints', 'batch_idx', etc.
+
+ Returns:
+ (dict[str, Any]): Prepared batch with keypoints scaled to original image dimensions.
+
+ Notes:
+ This method extends the parent class's _prepare_batch method by adding keypoint processing.
+ Keypoints are scaled from normalized coordinates to original image dimensions.
+ """
pbatch = super()._prepare_batch(si, batch)
kpts = batch["keypoints"][batch["batch_idx"] == si]
h, w = pbatch["imgsz"]
@@ -66,6 +156,23 @@
return pbatch
def _process_batch(self, preds: dict[str, torch.Tensor], batch: dict[str, Any]) -> dict[str, np.ndarray]:
+ """Return correct prediction matrix by computing Intersection over Union (IoU) between detections and ground
+ truth.
+
+ Args:
+ preds (dict[str, torch.Tensor]): Dictionary containing prediction data with keys 'cls' for class predictions
+ and 'keypoints' for keypoint predictions.
+ batch (dict[str, Any]): Dictionary containing ground truth data with keys 'cls' for class labels, 'bboxes'
+ for bounding boxes, and 'keypoints' for keypoint annotations.
+
+ Returns:
+ (dict[str, np.ndarray]): Dictionary containing the correct prediction matrix including 'tp_p' for pose true
+ positives across 10 IoU levels.
+
+ Notes:
+ `0.53` scale factor used in area computation is referenced from
+ https://github.com/jin-s13/xtcocoapi/blob/master/xtcocotools/cocoeval.py#L384.
+ """
tp = super()._process_batch(preds, batch)
gt_cls = batch["cls"]
if gt_cls.shape[0] == 0 or preds["cls"].shape[0] == 0:
@@ -79,6 +186,18 @@
return tp
def save_one_txt(self, predn: dict[str, torch.Tensor], save_conf: bool, shape: tuple[int, int], file: Path) -> None:
+ """Save YOLO pose detections to a text file in normalized coordinates.
+
+ Args:
+ predn (dict[str, torch.Tensor]): Prediction dict with keys 'bboxes', 'conf', 'cls', and 'keypoints'.
+ save_conf (bool): Whether to save confidence scores.
+ shape (tuple[int, int]): Shape of the original image (height, width).
+ file (Path): Output file path to save detections.
+
+ Notes:
+ The output format is: class_id x_center y_center width height confidence keypoints where keypoints are
+ normalized (x, y, visibility) values for each point.
+ """
from ultralytics.engine.results import Results
Results(
@@ -90,12 +209,28 @@
).save_txt(file, save_conf=save_conf)
def pred_to_json(self, predn: dict[str, torch.Tensor], pbatch: dict[str, Any]) -> None:
+ """Convert YOLO predictions to COCO JSON format.
+
+ This method takes prediction tensors and batch data, converts the bounding boxes from YOLO format to COCO
+ format, and appends the results with keypoints to the internal JSON dictionary (self.jdict).
+
+ Args:
+ predn (dict[str, torch.Tensor]): Prediction dictionary containing 'bboxes', 'conf', 'cls', and 'kpts'
+ tensors.
+ pbatch (dict[str, Any]): Batch dictionary containing 'imgsz', 'ori_shape', 'ratio_pad', and 'im_file'.
+
+ Notes:
+ The method extracts the image ID from the filename stem (either as an integer if numeric, or as a string),
+ converts bounding boxes from xyxy to xywh format, and adjusts coordinates from center to top-left corner
+ before saving to the JSON dictionary.
+ """
super().pred_to_json(predn, pbatch)
kpts = predn["kpts"]
for i, k in enumerate(kpts.flatten(1, 2).tolist()):
self.jdict[-len(kpts) + i]["keypoints"] = k # keypoints
def scale_preds(self, predn: dict[str, torch.Tensor], pbatch: dict[str, Any]) -> dict[str, torch.Tensor]:
+ """Scales predictions to the original image size."""
return {
**super().scale_preds(predn, pbatch),
"kpts": ops.scale_coords(
@@ -107,6 +242,7 @@
}
def eval_json(self, stats: dict[str, Any]) -> dict[str, Any]:
+ """Evaluate object detection model using COCO JSON format."""
anno_json = self.data["path"] / "annotations/person_keypoints_val2017.json" # annotations
pred_json = self.save_dir / "predictions.json" # predictions
- return super().coco_evaluate(stats, pred_json, anno_json, ["bbox", "keypoints"], suffix=["Box", "Pose"])
+ return super().coco_evaluate(stats, pred_json, anno_json, ["bbox", "keypoints"], suffix=["Box", "Pose"])
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/yolo/pose/val.py |
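The `_process_batch` Notes above cite a 0.53 area scale factor used in OKS-based keypoint matching. As a rough illustration only (not the Ultralytics `kpt_iou` implementation; the helper name, shapes, and the way the area term is folded in are assumptions), a single prediction/ground-truth OKS can be sketched as:

```python
import numpy as np

def oks_single(pred_kpts: np.ndarray, gt_kpts: np.ndarray, box_area: float, sigmas: np.ndarray) -> float:
    """Illustrative OKS for one (K, 3) prediction against one (K, 3) ground truth (hypothetical helper)."""
    eps = 1e-7
    d2 = (pred_kpts[:, 0] - gt_kpts[:, 0]) ** 2 + (pred_kpts[:, 1] - gt_kpts[:, 1]) ** 2  # squared distances
    vis = gt_kpts[:, 2] > 0  # score only visible ground-truth keypoints
    # 0.53 scales the box area, mirroring the xtcocotools reference cited in the docstring above
    e = d2 / ((2 * sigmas) ** 2 * (box_area * 0.53 + eps) * 2)
    return float((np.exp(-e) * vis).sum() / (vis.sum() + eps))

sigmas = np.ones(17) / 17  # uniform fallback, as used when kpt_shape is not [17, 3]
print(oks_single(np.random.rand(17, 3), np.random.rand(17, 3), box_area=100.0, sigmas=sigmas))
```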
Write documentation strings for class attributes | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from pathlib import Path
from typing import Any
from ultralytics.engine.model import Model
from .predict import FastSAMPredictor
from .val import FastSAMValidator
class FastSAM(Model):
def __init__(self, model: str | Path = "FastSAM-x.pt"):
if str(model) == "FastSAM.pt":
model = "FastSAM-x.pt"
assert Path(model).suffix not in {".yaml", ".yml"}, "FastSAM only supports pre-trained weights."
super().__init__(model=model, task="segment")
def predict(
self,
source,
stream: bool = False,
bboxes: list | None = None,
points: list | None = None,
labels: list | None = None,
texts: list | None = None,
**kwargs: Any,
):
prompts = dict(bboxes=bboxes, points=points, labels=labels, texts=texts)
return super().predict(source, stream, prompts=prompts, **kwargs)
@property
def task_map(self) -> dict[str, dict[str, Any]]:
return {"segment": {"predictor": FastSAMPredictor, "validator": FastSAMValidator}} | --- +++ @@ -12,8 +12,31 @@
class FastSAM(Model):
+ """FastSAM model interface for Segment Anything tasks.
+
+ This class extends the base Model class to provide specific functionality for the FastSAM (Fast Segment Anything
+ Model) implementation, allowing for efficient and accurate image segmentation with optional prompting support.
+
+ Attributes:
+ model (str): Path to the pre-trained FastSAM model file.
+ task (str): The task type, set to "segment" for FastSAM models.
+
+ Methods:
+ predict: Perform segmentation prediction on image or video source with optional prompts.
+ task_map: Returns mapping of segment task to predictor and validator classes.
+
+ Examples:
+ Initialize FastSAM model and run prediction
+ >>> from ultralytics import FastSAM
+ >>> model = FastSAM("FastSAM-x.pt")
+ >>> results = model.predict("ultralytics/assets/bus.jpg")
+
+ Run prediction with bounding box prompts
+ >>> results = model.predict("image.jpg", bboxes=[[100, 100, 200, 200]])
+ """
def __init__(self, model: str | Path = "FastSAM-x.pt"):
+ """Initialize the FastSAM model with the specified pre-trained weights."""
if str(model) == "FastSAM.pt":
model = "FastSAM-x.pt"
assert Path(model).suffix not in {".yaml", ".yml"}, "FastSAM only supports pre-trained weights."
@@ -29,9 +52,28 @@
texts: list | None = None,
**kwargs: Any,
):
+ """Perform segmentation prediction on image or video source.
+
+ Supports prompted segmentation with bounding boxes, points, labels, and texts. The method packages these prompts
+ and passes them to the parent class predict method for processing.
+
+ Args:
+ source (str | PIL.Image | np.ndarray): Input source for prediction, can be a file path, URL, PIL image, or
+ numpy array.
+ stream (bool): Whether to enable real-time streaming mode for video inputs.
+ bboxes (list, optional): Bounding box coordinates for prompted segmentation in format [[x1, y1, x2, y2]].
+ points (list, optional): Point coordinates for prompted segmentation in format [[x, y]].
+ labels (list, optional): Class labels for prompted segmentation.
+ texts (list, optional): Text prompts for segmentation guidance.
+ **kwargs (Any): Additional keyword arguments passed to the predictor.
+
+ Returns:
+ (list): List of Results objects containing the prediction results.
+ """
prompts = dict(bboxes=bboxes, points=points, labels=labels, texts=texts)
return super().predict(source, stream, prompts=prompts, **kwargs)
@property
def task_map(self) -> dict[str, dict[str, Any]]:
- return {"segment": {"predictor": FastSAMPredictor, "validator": FastSAMValidator}}+ """Returns a dictionary mapping segment task to corresponding predictor and validator classes."""
+ return {"segment": {"predictor": FastSAMPredictor, "validator": FastSAMValidator}}
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/fastsam/model.py |
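The predict docstring above lists the prompt formats FastSAM accepts (bboxes, points, labels, texts). A brief usage sketch under those stated formats; the image path and coordinates are placeholders, and using label value 1 for a foreground point is an assumption borrowed from SAM-style prompting:

```python
from ultralytics import FastSAM

model = FastSAM("FastSAM-x.pt")  # pretrained weights only; YAML configs are rejected by __init__
box_results = model.predict("bus.jpg", bboxes=[[100, 100, 400, 500]])  # [[x1, y1, x2, y2]] box prompt
point_results = model.predict("bus.jpg", points=[[250, 300]], labels=[1])  # [[x, y]] point prompt
```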
Add docstrings explaining edge cases | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
import torch
from ultralytics.models.yolo.detect import DetectionValidator
from ultralytics.utils import ops
__all__ = ["NASValidator"]
class NASValidator(DetectionValidator):
def postprocess(self, preds_in):
boxes = ops.xyxy2xywh(preds_in[0][0]) # Convert bounding box format from xyxy to xywh
preds = torch.cat((boxes, preds_in[0][1]), -1).permute(0, 2, 1) # Concatenate boxes with scores and permute
return super().postprocess(preds) | --- +++ @@ -9,8 +9,30 @@
class NASValidator(DetectionValidator):
+ """Ultralytics YOLO NAS Validator for object detection.
+
+ Extends DetectionValidator from the Ultralytics models package and is designed to post-process the raw predictions
+ generated by YOLO NAS models. It performs non-maximum suppression to remove overlapping and low-confidence boxes,
+ ultimately producing the final detections.
+
+ Attributes:
+ args (Namespace): Namespace containing various configurations for post-processing, such as confidence and IoU
+ thresholds.
+ lb (torch.Tensor): Optional tensor for multilabel NMS.
+
+ Examples:
+ >>> from ultralytics import NAS
+ >>> model = NAS("yolo_nas_s")
+ >>> validator = model.validator
+ >>> # Assumes that raw_preds are available
+ >>> final_preds = validator.postprocess(raw_preds)
+
+ Notes:
+ This class is generally not instantiated directly but is used internally within the NAS class.
+ """
def postprocess(self, preds_in):
+ """Apply Non-maximum suppression to prediction outputs."""
boxes = ops.xyxy2xywh(preds_in[0][0]) # Convert bounding box format from xyxy to xywh
preds = torch.cat((boxes, preds_in[0][1]), -1).permute(0, 2, 1) # Concatenate boxes with scores and permute
- return super().postprocess(preds)
+ return super().postprocess(preds)
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/nas/val.py |
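The NASValidator postprocess above converts raw boxes from xyxy to xywh and concatenates them with class scores before NMS. A minimal stand-in for that coordinate conversion (a simplified sketch, not `ops.xyxy2xywh` itself):

```python
import torch

def xyxy_to_xywh(boxes: torch.Tensor) -> torch.Tensor:
    """Convert (N, 4) corner boxes (x1, y1, x2, y2) to center format (cx, cy, w, h)."""
    out = boxes.clone()
    out[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2  # center x
    out[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2  # center y
    out[:, 2] = boxes[:, 2] - boxes[:, 0]  # width
    out[:, 3] = boxes[:, 3] - boxes[:, 1]  # height
    return out

print(xyxy_to_xywh(torch.tensor([[10.0, 20.0, 50.0, 80.0]])))  # tensor([[30., 50., 40., 60.]])
```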
Add docstrings that explain inputs and outputs | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
import torch
from ultralytics.models.yolo.detect.predict import DetectionPredictor
from ultralytics.utils import ops
class NASPredictor(DetectionPredictor):
def postprocess(self, preds_in, img, orig_imgs):
boxes = ops.xyxy2xywh(preds_in[0][0]) # Convert bounding boxes from xyxy to xywh format
preds = torch.cat((boxes, preds_in[0][1]), -1).permute(0, 2, 1) # Concatenate boxes with class scores
return super().postprocess(preds, img, orig_imgs) | --- +++ @@ -7,8 +7,50 @@
class NASPredictor(DetectionPredictor):
+ """Ultralytics YOLO NAS Predictor for object detection.
+
+ This class extends the DetectionPredictor from Ultralytics engine and is responsible for post-processing the raw
+ predictions generated by the YOLO NAS models. It applies operations like non-maximum suppression and scaling the
+ bounding boxes to fit the original image dimensions.
+
+ Attributes:
+ args (Namespace): Namespace containing various configurations for post-processing including confidence
+ threshold, IoU threshold, agnostic NMS flag, maximum detections, and class filtering options.
+ model (torch.nn.Module): The YOLO NAS model used for inference.
+ batch (list): Batch of inputs for processing.
+
+ Examples:
+ >>> from ultralytics import NAS
+ >>> model = NAS("yolo_nas_s")
+ >>> predictor = model.predictor
+
+ Assume that raw_preds, img, orig_imgs are available
+ >>> results = predictor.postprocess(raw_preds, img, orig_imgs)
+
+ Notes:
+ Typically, this class is not instantiated directly. It is used internally within the NAS class.
+ """
def postprocess(self, preds_in, img, orig_imgs):
+ """Postprocess NAS model predictions to generate final detection results.
+
+ This method takes raw predictions from a YOLO NAS model, converts bounding box formats, and applies
+ post-processing operations to generate the final detection results compatible with Ultralytics result
+ visualization and analysis tools.
+
+ Args:
+ preds_in (list): Raw predictions from the NAS model, typically containing bounding boxes and class scores.
+ img (torch.Tensor): Input image tensor that was fed to the model, with shape (B, C, H, W).
+ orig_imgs (list | torch.Tensor | np.ndarray): Original images before preprocessing, used for scaling
+ coordinates back to original dimensions.
+
+ Returns:
+ (list): List of Results objects containing the processed predictions for each image in the batch.
+
+ Examples:
+ >>> predictor = NAS("yolo_nas_s").predictor
+ >>> results = predictor.postprocess(raw_preds, img, orig_imgs)
+ """
boxes = ops.xyxy2xywh(preds_in[0][0]) # Convert bounding boxes from xyxy to xywh format
preds = torch.cat((boxes, preds_in[0][1]), -1).permute(0, 2, 1) # Concatenate boxes with class scores
- return super().postprocess(preds, img, orig_imgs)
+ return super().postprocess(preds, img, orig_imgs)
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/nas/predict.py |
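Both NAS rows concatenate boxes with class scores and permute the result so the standard detection post-processing receives the channels-first layout it expects. A tiny shape-only sketch of that step (tensor sizes are invented for illustration):

```python
import torch

boxes = torch.rand(1, 8400, 4)  # (batch, anchors, box coords)
scores = torch.rand(1, 8400, 80)  # (batch, anchors, classes)
preds = torch.cat((boxes, scores), -1).permute(0, 2, 1)
print(preds.shape)  # torch.Size([1, 84, 8400])
```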
Include argument descriptions in docstrings | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import json
import os
import re
import shutil
import subprocess
import time
from copy import deepcopy
from datetime import datetime
from pathlib import Path
import numpy as np
import torch
from ultralytics import __version__
from ultralytics.cfg import TASK2DATA, get_cfg
from ultralytics.data import build_dataloader
from ultralytics.data.dataset import YOLODataset
from ultralytics.data.utils import check_cls_dataset, check_det_dataset
from ultralytics.nn.autobackend import check_class_names, default_class_names
from ultralytics.nn.modules import C2f, Classify, Detect, RTDETRDecoder
from ultralytics.nn.tasks import ClassificationModel, DetectionModel, SegmentationModel, WorldModel
from ultralytics.utils import (
ARM64,
DEFAULT_CFG,
IS_COLAB,
IS_DEBIAN_BOOKWORM,
IS_DEBIAN_TRIXIE,
IS_JETSON,
IS_RASPBERRYPI,
IS_UBUNTU,
LINUX,
LOGGER,
MACOS,
MACOS_VERSION,
RKNN_CHIPS,
SETTINGS,
TORCH_VERSION,
WINDOWS,
YAML,
callbacks,
colorstr,
get_default_args,
is_dgx,
is_jetson,
)
from ultralytics.utils.checks import (
IS_PYTHON_3_10,
IS_PYTHON_MINIMUM_3_9,
check_apt_requirements,
check_executorch_requirements,
check_imgsz,
check_requirements,
check_tensorrt,
check_version,
is_intel,
is_sudo_available,
)
from ultralytics.utils.export import (
keras2pb,
onnx2engine,
onnx2saved_model,
pb2tfjs,
tflite2edgetpu,
torch2executorch,
torch2imx,
torch2onnx,
)
from ultralytics.utils.files import file_size
from ultralytics.utils.metrics import batch_probiou
from ultralytics.utils.nms import TorchNMS
from ultralytics.utils.ops import Profile
from ultralytics.utils.patches import arange_patch
from ultralytics.utils.torch_utils import (
TORCH_1_10,
TORCH_1_11,
TORCH_1_13,
TORCH_2_1,
TORCH_2_3,
TORCH_2_4,
TORCH_2_9,
select_device,
)
def export_formats():
x = [
["PyTorch", "-", ".pt", True, True, []],
["TorchScript", "torchscript", ".torchscript", True, True, ["batch", "optimize", "half", "nms", "dynamic"]],
["ONNX", "onnx", ".onnx", True, True, ["batch", "dynamic", "half", "opset", "simplify", "nms"]],
[
"OpenVINO",
"openvino",
"_openvino_model",
True,
False,
["batch", "dynamic", "half", "int8", "nms", "fraction"],
],
[
"TensorRT",
"engine",
".engine",
False,
True,
["batch", "dynamic", "half", "int8", "simplify", "nms", "fraction"],
],
["CoreML", "coreml", ".mlpackage", True, False, ["batch", "dynamic", "half", "int8", "nms"]],
["TensorFlow SavedModel", "saved_model", "_saved_model", True, True, ["batch", "int8", "keras", "nms"]],
["TensorFlow GraphDef", "pb", ".pb", True, True, ["batch"]],
["TensorFlow Lite", "tflite", ".tflite", True, False, ["batch", "half", "int8", "nms", "fraction"]],
["TensorFlow Edge TPU", "edgetpu", "_edgetpu.tflite", True, False, []],
["TensorFlow.js", "tfjs", "_web_model", True, False, ["batch", "half", "int8", "nms"]],
["PaddlePaddle", "paddle", "_paddle_model", True, True, ["batch"]],
["MNN", "mnn", ".mnn", True, True, ["batch", "half", "int8"]],
["NCNN", "ncnn", "_ncnn_model", True, True, ["batch", "half"]],
["IMX", "imx", "_imx_model", True, True, ["int8", "fraction", "nms"]],
["RKNN", "rknn", "_rknn_model", False, False, ["batch", "name"]],
["ExecuTorch", "executorch", "_executorch_model", True, False, ["batch"]],
["Axelera", "axelera", "_axelera_model", False, False, ["batch", "int8", "fraction"]],
]
return dict(zip(["Format", "Argument", "Suffix", "CPU", "GPU", "Arguments"], zip(*x)))
def best_onnx_opset(onnx, cuda=False) -> int:
if TORCH_2_4: # _constants.ONNX_MAX_OPSET first defined in torch 1.13
opset = torch.onnx.utils._constants.ONNX_MAX_OPSET - 1 # use second-latest version for safety
if TORCH_2_9:
opset = min(opset, 20) # legacy TorchScript exporter caps at opset 20 in torch 2.9+
if cuda:
opset -= 2 # fix CUDA ONNXRuntime NMS squeeze op errors
else:
version = ".".join(TORCH_VERSION.split(".")[:2])
opset = {
"1.8": 12,
"1.9": 12,
"1.10": 13,
"1.11": 14,
"1.12": 15,
"1.13": 17,
"2.0": 17, # reduced from 18 to fix ONNX errors
"2.1": 17, # reduced from 19
"2.2": 17, # reduced from 19
"2.3": 17, # reduced from 19
"2.4": 20,
"2.5": 20,
"2.6": 20,
"2.7": 20,
"2.8": 23,
}.get(version, 12)
return min(opset, onnx.defs.onnx_opset_version())
def validate_args(format, passed_args, valid_args):
export_args = ["half", "int8", "dynamic", "keras", "nms", "batch", "fraction"]
assert valid_args is not None, f"ERROR ❌️ valid arguments for '{format}' not listed."
custom = {"batch": 1, "data": None, "device": None} # exporter defaults
default_args = get_cfg(DEFAULT_CFG, custom)
for arg in export_args:
not_default = getattr(passed_args, arg, None) != getattr(default_args, arg, None)
if not_default:
assert arg in valid_args, f"ERROR ❌️ argument '{arg}' is not supported for format='{format}'"
def try_export(inner_func):
inner_args = get_default_args(inner_func)
def outer_func(*args, **kwargs):
prefix = inner_args["prefix"]
dt = 0.0
try:
with Profile() as dt:
f = inner_func(*args, **kwargs) # exported file/dir or tuple of (file/dir, *)
path = f if isinstance(f, (str, Path)) else f[0]
mb = file_size(path)
assert mb > 0.0, "0.0 MB output model size"
LOGGER.info(f"{prefix} export success ✅ {dt.t:.1f}s, saved as '{path}' ({mb:.1f} MB)")
return f
except Exception as e:
LOGGER.error(f"{prefix} export failure {dt.t:.1f}s: {e}")
raise e
return outer_func
class Exporter:
def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks: dict | None = None):
self.args = get_cfg(cfg, overrides)
self.callbacks = _callbacks or callbacks.get_default_callbacks()
callbacks.add_integration_callbacks(self)
def __call__(self, model=None) -> str:
t = time.time()
fmt = self.args.format.lower() # to lowercase
if fmt in {"tensorrt", "trt"}: # 'engine' aliases
fmt = "engine"
if fmt in {"mlmodel", "mlpackage", "mlprogram", "apple", "ios", "coreml"}: # 'coreml' aliases
fmt = "coreml"
fmts_dict = export_formats()
fmts = tuple(fmts_dict["Argument"][1:]) # available export formats
if fmt not in fmts:
import difflib
# Get the closest match if format is invalid
matches = difflib.get_close_matches(fmt, fmts, n=1, cutoff=0.6) # 60% similarity required to match
if not matches:
msg = "Model is already in PyTorch format." if fmt == "pt" else f"Invalid export format='{fmt}'."
raise ValueError(f"{msg} Valid formats are {fmts}")
LOGGER.warning(f"Invalid export format='{fmt}', updating to format='{matches[0]}'")
fmt = matches[0]
flags = [x == fmt for x in fmts]
if sum(flags) != 1:
raise ValueError(f"Invalid export format='{fmt}'. Valid formats are {fmts}")
(
jit,
onnx,
xml,
engine,
coreml,
saved_model,
pb,
tflite,
edgetpu,
tfjs,
paddle,
mnn,
ncnn,
imx,
rknn,
executorch,
axelera,
) = flags # export booleans
is_tf_format = any((saved_model, pb, tflite, edgetpu, tfjs))
# Device
dla = None
if engine and self.args.device is None:
LOGGER.warning("TensorRT requires GPU export, automatically assigning device=0")
self.args.device = "0"
if engine and "dla" in str(self.args.device): # convert int/list to str first
device_str = str(self.args.device)
dla = device_str.rsplit(":", 1)[-1]
self.args.device = "0" # update device to "0"
assert dla in {"0", "1"}, f"Expected device 'dla:0' or 'dla:1', but got {device_str}."
if imx and self.args.device is None and torch.cuda.is_available():
LOGGER.warning("Exporting on CPU while CUDA is available, setting device=0 for faster export on GPU.")
self.args.device = "0" # update device to "0"
self.device = select_device("cpu" if self.args.device is None else self.args.device)
# Argument compatibility checks
fmt_keys = fmts_dict["Arguments"][flags.index(True) + 1]
validate_args(fmt, self.args, fmt_keys)
if axelera:
if not IS_PYTHON_3_10:
raise SystemError("Axelera export only supported on Python 3.10.")
if not self.args.int8:
LOGGER.warning("Setting int8=True for Axelera mixed-precision export.")
self.args.int8 = True
if model.task not in {"detect"}:
raise ValueError("Axelera export only supported for detection models.")
if not self.args.data:
self.args.data = "coco128.yaml" # Axelera default to coco128.yaml
if imx:
if not self.args.int8:
LOGGER.warning("IMX export requires int8=True, setting int8=True.")
self.args.int8 = True
if not self.args.nms and model.task in {"detect", "pose", "segment"}:
LOGGER.warning("IMX export requires nms=True, setting nms=True.")
self.args.nms = True
if model.task not in {"detect", "pose", "classify", "segment"}:
raise ValueError(
"IMX export only supported for detection, pose estimation, classification, and segmentation models."
)
if not hasattr(model, "names"):
model.names = default_class_names()
model.names = check_class_names(model.names)
if hasattr(model, "end2end"):
if self.args.end2end is not None:
model.end2end = self.args.end2end
if rknn or ncnn or executorch or paddle or imx or edgetpu:
# Disable end2end branch for certain export formats as they do not support topk
model.end2end = False
LOGGER.warning(f"{fmt.upper()} export does not support end2end models, disabling end2end branch.")
if engine and self.args.int8:
# TensorRT<=10.3.0 with int8 has known end2end build issues
# https://github.com/ultralytics/ultralytics/issues/23841
try:
import tensorrt as trt
if check_version(trt.__version__, "<=10.3.0", hard=True):
model.end2end = False
LOGGER.warning(
"TensorRT<=10.3.0 with int8 has known end2end build issues, disabling end2end branch."
)
except ImportError:
pass
if self.args.half and self.args.int8:
LOGGER.warning("half=True and int8=True are mutually exclusive, setting half=False.")
self.args.half = False
if self.args.half and jit and self.device.type == "cpu":
LOGGER.warning(
"half=True only compatible with GPU export for TorchScript, i.e. use device=0, setting half=False."
)
self.args.half = False
self.imgsz = check_imgsz(self.args.imgsz, stride=model.stride, min_dim=2) # check image size
if self.args.optimize:
assert not ncnn, "optimize=True not compatible with format='ncnn', i.e. use optimize=False"
assert self.device.type == "cpu", "optimize=True not compatible with cuda devices, i.e. use device='cpu'"
if rknn:
if not self.args.name:
LOGGER.warning(
"Rockchip RKNN export requires a missing 'name' arg for processor type. "
"Using default name='rk3588'."
)
self.args.name = "rk3588"
self.args.name = self.args.name.lower()
assert self.args.name in RKNN_CHIPS, (
f"Invalid processor name '{self.args.name}' for Rockchip RKNN export. Valid names are {RKNN_CHIPS}."
)
if self.args.nms:
assert not isinstance(model, ClassificationModel), "'nms=True' is not valid for classification models."
assert not tflite or not ARM64 or not LINUX, "TFLite export with NMS unsupported on ARM64 Linux"
assert not is_tf_format or TORCH_1_13, "TensorFlow exports with NMS require torch>=1.13"
assert not onnx or TORCH_1_13, "ONNX export with NMS requires torch>=1.13"
if getattr(model, "end2end", False) or isinstance(model.model[-1], RTDETRDecoder):
LOGGER.warning("'nms=True' is not available for end2end models. Forcing 'nms=False'.")
self.args.nms = False
self.args.conf = self.args.conf or 0.25 # set conf default value for nms export
if (engine or coreml or self.args.nms) and self.args.dynamic and self.args.batch == 1:
LOGGER.warning(
f"'dynamic=True' model with '{'nms=True' if self.args.nms else f'format={self.args.format}'}' requires max batch size, i.e. 'batch=16'"
)
if edgetpu:
if not LINUX or ARM64:
raise SystemError(
"Edge TPU export only supported on non-aarch64 Linux. See https://coral.ai/docs/edgetpu/compiler"
)
elif self.args.batch != 1: # see github.com/ultralytics/ultralytics/pull/13420
LOGGER.warning("Edge TPU export requires batch size 1, setting batch=1.")
self.args.batch = 1
if isinstance(model, WorldModel):
LOGGER.warning(
"YOLOWorld (original version) export is not supported to any format. "
"YOLOWorldv2 models (i.e. 'yolov8s-worldv2.pt') only support export to "
"(torchscript, onnx, openvino, engine, coreml) formats. "
"See https://docs.ultralytics.com/models/yolo-world for details."
)
model.clip_model = None # openvino int8 export error: https://github.com/ultralytics/ultralytics/pull/18445
if self.args.int8 and not self.args.data:
self.args.data = DEFAULT_CFG.data or TASK2DATA[getattr(model, "task", "detect")] # assign default data
LOGGER.warning(
f"INT8 export requires a missing 'data' arg for calibration. Using default 'data={self.args.data}'."
)
if tfjs and (ARM64 and LINUX):
raise SystemError("TF.js exports are not currently supported on ARM64 Linux")
# Recommend OpenVINO if export and Intel CPU
if SETTINGS.get("openvino_msg"):
if is_intel():
LOGGER.info(
"💡 ProTip: Export to OpenVINO format for best performance on Intel hardware."
" Learn more at https://docs.ultralytics.com/integrations/openvino/"
)
SETTINGS["openvino_msg"] = False
# Input
im = torch.zeros(self.args.batch, model.yaml.get("channels", 3), *self.imgsz).to(self.device)
file = Path(
getattr(model, "pt_path", None) or getattr(model, "yaml_file", None) or model.yaml.get("yaml_file", "")
)
if file.suffix in {".yaml", ".yml"}:
file = Path(file.name)
# Update model
model = deepcopy(model).to(self.device)
for p in model.parameters():
p.requires_grad = False
model.eval()
model.float()
model = model.fuse()
if imx:
from ultralytics.utils.export.imx import FXModel
model = FXModel(model, self.imgsz)
if tflite or edgetpu:
from ultralytics.utils.export.tensorflow import tf_wrapper
model = tf_wrapper(model)
if executorch:
from ultralytics.utils.export.executorch import executorch_wrapper
model = executorch_wrapper(model)
for m in model.modules():
if isinstance(m, Classify):
m.export = True
if isinstance(m, (Detect, RTDETRDecoder)): # includes all Detect subclasses like Segment, Pose, OBB
m.dynamic = self.args.dynamic
m.export = True
m.format = self.args.format
# Clamp max_det to anchor count for small image sizes (required for TensorRT compatibility)
anchors = sum(int(self.imgsz[0] / s) * int(self.imgsz[1] / s) for s in model.stride.tolist())
m.max_det = min(self.args.max_det, anchors)
m.agnostic_nms = self.args.agnostic_nms
m.xyxy = self.args.nms and not coreml
m.shape = None # reset cached shape for new export input size
if hasattr(model, "pe") and hasattr(m, "fuse"): # for YOLOE models
m.fuse(model.pe.to(self.device))
elif isinstance(m, C2f) and not is_tf_format:
# EdgeTPU does not support FlexSplitV while split provides cleaner ONNX graph
m.forward = m.forward_split
y = None
for _ in range(2): # dry runs
y = NMSModel(model, self.args)(im) if self.args.nms and not coreml and not imx else model(im)
if self.args.half and (onnx or jit) and self.device.type != "cpu":
im, model = im.half(), model.half() # to FP16
# Assign
self.im = im
self.model = model
self.file = file
self.output_shape = (
tuple(y.shape)
if isinstance(y, torch.Tensor)
else tuple(tuple(x.shape if isinstance(x, torch.Tensor) else []) for x in y)
)
self.pretty_name = Path(self.model.yaml.get("yaml_file", self.file)).stem.replace("yolo", "YOLO")
data = model.args["data"] if hasattr(model, "args") and isinstance(model.args, dict) else ""
description = f"Ultralytics {self.pretty_name} model {f'trained on {data}' if data else ''}"
self.metadata = {
"description": description,
"author": "Ultralytics",
"date": datetime.now().isoformat(),
"version": __version__,
"license": "AGPL-3.0 License (https://ultralytics.com/license)",
"docs": "https://docs.ultralytics.com",
"stride": int(max(model.stride)),
"task": model.task,
"batch": self.args.batch,
"imgsz": self.imgsz,
"names": model.names,
"args": {k: v for k, v in self.args if k in fmt_keys},
"channels": model.yaml.get("channels", 3),
"end2end": getattr(model, "end2end", False),
} # model metadata
if dla is not None:
self.metadata["dla"] = dla # make sure `AutoBackend` uses correct dla device if it has one
if model.task == "pose":
self.metadata["kpt_shape"] = model.model[-1].kpt_shape
if hasattr(model, "kpt_names"):
self.metadata["kpt_names"] = model.kpt_names
LOGGER.info(
f"\n{colorstr('PyTorch:')} starting from '{file}' with input shape {tuple(im.shape)} BCHW and "
f"output shape(s) {self.output_shape} ({file_size(file):.1f} MB)"
)
self.run_callbacks("on_export_start")
# Exports
f = [""] * len(fmts) # exported filenames
if jit: # TorchScript
f[0] = self.export_torchscript()
if engine: # TensorRT required before ONNX
f[1] = self.export_engine(dla=dla)
if onnx: # ONNX
f[2] = self.export_onnx()
if xml: # OpenVINO
f[3] = self.export_openvino()
if coreml: # CoreML
f[4] = self.export_coreml()
if is_tf_format: # TensorFlow formats
self.args.int8 |= edgetpu
f[5], keras_model = self.export_saved_model()
if pb or tfjs: # pb prerequisite to tfjs
f[6] = self.export_pb(keras_model=keras_model)
if tflite:
f[7] = self.export_tflite()
if edgetpu:
f[8] = self.export_edgetpu(tflite_model=Path(f[5]) / f"{self.file.stem}_full_integer_quant.tflite")
if tfjs:
f[9] = self.export_tfjs()
if paddle: # PaddlePaddle
f[10] = self.export_paddle()
if mnn: # MNN
f[11] = self.export_mnn()
if ncnn: # NCNN
f[12] = self.export_ncnn()
if imx:
f[13] = self.export_imx()
if rknn:
f[14] = self.export_rknn()
if executorch:
f[15] = self.export_executorch()
if axelera:
f[16] = self.export_axelera()
# Finish
f = [str(x) for x in f if x] # filter out '' and None
if any(f):
f = str(Path(f[-1]))
square = self.imgsz[0] == self.imgsz[1]
s = (
""
if square
else f"WARNING ⚠️ non-PyTorch val requires square images, 'imgsz={self.imgsz}' will not "
f"work. Use export 'imgsz={max(self.imgsz)}' if val is required."
)
imgsz = self.imgsz[0] if square else str(self.imgsz)[1:-1].replace(" ", "")
q = "int8" if self.args.int8 else "half" if self.args.half else "" # quantization
LOGGER.info(
f"\nExport complete ({time.time() - t:.1f}s)"
f"\nResults saved to {colorstr('bold', file.parent.resolve())}"
f"\nPredict: yolo predict task={model.task} model={f} imgsz={imgsz} {q}"
f"\nValidate: yolo val task={model.task} model={f} imgsz={imgsz} data={data} {q} {s}"
f"\nVisualize: https://netron.app"
)
self.run_callbacks("on_export_end")
return f # path to final export artifact
def get_int8_calibration_dataloader(self, prefix=""):
LOGGER.info(f"{prefix} collecting INT8 calibration images from 'data={self.args.data}'")
data = (check_cls_dataset if self.model.task == "classify" else check_det_dataset)(self.args.data)
dataset = YOLODataset(
data[self.args.split or "val"],
data=data,
fraction=self.args.fraction,
task=self.model.task,
imgsz=self.imgsz[0],
augment=False,
batch_size=self.args.batch,
)
n = len(dataset)
if n < 1:
raise ValueError(f"The calibration dataset must have at least 1 image, but found {n} images.")
batch = min(self.args.batch, n)
if n < self.args.batch:
LOGGER.warning(
f"{prefix} calibration dataset has only {n} images, reducing calibration batch size to {batch}."
)
if self.args.format == "axelera" and n < 100:
LOGGER.warning(f"{prefix} >100 images required for Axelera calibration, found {n} images.")
elif self.args.format != "axelera" and n < 300:
LOGGER.warning(f"{prefix} >300 images recommended for INT8 calibration, found {n} images.")
return build_dataloader(dataset, batch=batch, workers=0, drop_last=True) # required for batch loading
@try_export
def export_torchscript(self, prefix=colorstr("TorchScript:")):
LOGGER.info(f"\n{prefix} starting export with torch {TORCH_VERSION}...")
f = self.file.with_suffix(".torchscript")
ts = torch.jit.trace(NMSModel(self.model, self.args) if self.args.nms else self.model, self.im, strict=False)
extra_files = {"config.txt": json.dumps(self.metadata)} # torch._C.ExtraFilesMap()
if self.args.optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html
LOGGER.info(f"{prefix} optimizing for mobile...")
from torch.utils.mobile_optimizer import optimize_for_mobile
optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files)
else:
ts.save(str(f), _extra_files=extra_files)
return f
@try_export
def export_onnx(self, prefix=colorstr("ONNX:")):
requirements = ["onnx>=1.12.0,<2.0.0"]
if self.args.simplify:
requirements += ["onnxslim>=0.1.71", "onnxruntime" + ("-gpu" if torch.cuda.is_available() else "")]
check_requirements(requirements)
import onnx
opset = self.args.opset or best_onnx_opset(onnx, cuda="cuda" in self.device.type)
LOGGER.info(f"\n{prefix} starting export with onnx {onnx.__version__} opset {opset}...")
if self.args.nms:
assert TORCH_1_13, f"'nms=True' ONNX export requires torch>=1.13 (found torch=={TORCH_VERSION})"
f = str(self.file.with_suffix(".onnx"))
output_names = ["output0", "output1"] if self.model.task == "segment" else ["output0"]
dynamic = self.args.dynamic
if dynamic:
dynamic = {"images": {0: "batch", 2: "height", 3: "width"}} # shape(1,3,640,640)
if isinstance(self.model, SegmentationModel):
dynamic["output0"] = {0: "batch", 2: "anchors"} # shape(1, 116, 8400)
dynamic["output1"] = {0: "batch", 2: "mask_height", 3: "mask_width"} # shape(1,32,160,160)
elif isinstance(self.model, DetectionModel):
dynamic["output0"] = {0: "batch", 2: "anchors"} # shape(1, 84, 8400)
if self.args.nms: # only batch size is dynamic with NMS
dynamic["output0"].pop(2)
if self.args.nms and self.model.task == "obb":
self.args.opset = opset # for NMSModel
self.args.simplify = True # fix OBB runtime error related to topk
with arange_patch(self.args):
torch2onnx(
NMSModel(self.model, self.args) if self.args.nms else self.model,
self.im,
f,
opset=opset,
input_names=["images"],
output_names=output_names,
dynamic=dynamic or None,
)
# Checks
model_onnx = onnx.load(f) # load onnx model
# Simplify
if self.args.simplify:
try:
import onnxslim
LOGGER.info(f"{prefix} slimming with onnxslim {onnxslim.__version__}...")
model_onnx = onnxslim.slim(model_onnx)
except Exception as e:
LOGGER.warning(f"{prefix} simplifier failure: {e}")
# Metadata
for k, v in self.metadata.items():
meta = model_onnx.metadata_props.add()
meta.key, meta.value = k, str(v)
# IR version
if getattr(model_onnx, "ir_version", 0) > 10:
LOGGER.info(f"{prefix} limiting IR version {model_onnx.ir_version} to 10 for ONNXRuntime compatibility...")
model_onnx.ir_version = 10
# FP16 conversion for CPU export (GPU exports are already FP16 from model.half() during tracing)
if self.args.half and self.args.format == "onnx" and self.device.type == "cpu":
try:
from onnxruntime.transformers import float16
LOGGER.info(f"{prefix} converting to FP16...")
model_onnx = float16.convert_float_to_float16(model_onnx, keep_io_types=True)
except Exception as e:
LOGGER.warning(f"{prefix} FP16 conversion failure: {e}")
onnx.save(model_onnx, f)
return f
@try_export
def export_openvino(self, prefix=colorstr("OpenVINO:")):
# OpenVINO <= 2025.1.0 error on macOS 15.4+: https://github.com/openvinotoolkit/openvino/issues/30023"
check_requirements("openvino>=2025.2.0" if MACOS and MACOS_VERSION >= "15.4" else "openvino>=2024.0.0")
import openvino as ov
LOGGER.info(f"\n{prefix} starting export with openvino {ov.__version__}...")
assert TORCH_2_1, f"OpenVINO export requires torch>=2.1 but torch=={TORCH_VERSION} is installed"
ov_model = ov.convert_model(
NMSModel(self.model, self.args) if self.args.nms else self.model,
input=None if self.args.dynamic else [self.im.shape],
example_input=self.im,
)
def serialize(ov_model, file):
ov_model.set_rt_info("YOLO", ["model_info", "model_type"])
ov_model.set_rt_info(True, ["model_info", "reverse_input_channels"])
ov_model.set_rt_info(114, ["model_info", "pad_value"])
ov_model.set_rt_info([255.0], ["model_info", "scale_values"])
ov_model.set_rt_info(self.args.iou, ["model_info", "iou_threshold"])
ov_model.set_rt_info([v.replace(" ", "_") for v in self.model.names.values()], ["model_info", "labels"])
if self.model.task != "classify":
ov_model.set_rt_info("fit_to_window_letterbox", ["model_info", "resize_type"])
ov.save_model(ov_model, file, compress_to_fp16=self.args.half)
YAML.save(Path(file).parent / "metadata.yaml", self.metadata) # add metadata.yaml
if self.args.int8:
fq = str(self.file).replace(self.file.suffix, f"_int8_openvino_model{os.sep}")
fq_ov = str(Path(fq) / self.file.with_suffix(".xml").name)
# INT8 requires nncf, nncf requires packaging>=23.2 https://github.com/openvinotoolkit/nncf/issues/3463
check_requirements("packaging>=23.2") # must be installed first to build nncf wheel
check_requirements("nncf>=2.14.0,<3.0.0" if not TORCH_2_3 else "nncf>=2.14.0")
import nncf
# Generate calibration data for integer quantization
ignored_scope = None
if isinstance(self.model.model[-1], Detect):
# Includes all Detect subclasses like Segment, Pose, OBB, WorldDetect, YOLOEDetect
head_module_name = ".".join(list(self.model.named_modules())[-1][0].split(".")[:2])
ignored_scope = nncf.IgnoredScope( # ignore operations
patterns=[
f".*{head_module_name}/.*/Add",
f".*{head_module_name}/.*/Sub*",
f".*{head_module_name}/.*/Mul*",
f".*{head_module_name}/.*/Div*",
],
types=["Sigmoid"],
)
quantized_ov_model = nncf.quantize(
model=ov_model,
calibration_dataset=nncf.Dataset(self.get_int8_calibration_dataloader(prefix), self._transform_fn),
preset=nncf.QuantizationPreset.MIXED,
ignored_scope=ignored_scope,
)
serialize(quantized_ov_model, fq_ov)
return fq
f = str(self.file).replace(self.file.suffix, f"_openvino_model{os.sep}")
f_ov = str(Path(f) / self.file.with_suffix(".xml").name)
serialize(ov_model, f_ov)
return f
@try_export
def export_paddle(self, prefix=colorstr("PaddlePaddle:")):
assert not IS_JETSON, "Jetson Paddle exports not supported yet"
check_requirements(
(
"paddlepaddle-gpu>=3.0.0,!=3.3.0" # exclude 3.3.0 https://github.com/PaddlePaddle/Paddle/issues/77340
if torch.cuda.is_available()
else "paddlepaddle==3.0.0" # pin 3.0.0 for ARM64
if ARM64
else "paddlepaddle>=3.0.0,!=3.3.0", # exclude 3.3.0 https://github.com/PaddlePaddle/Paddle/issues/77340
"x2paddle",
)
)
import x2paddle
from x2paddle.convert import pytorch2paddle
LOGGER.info(f"\n{prefix} starting export with X2Paddle {x2paddle.__version__}...")
f = str(self.file).replace(self.file.suffix, f"_paddle_model{os.sep}")
pytorch2paddle(module=self.model, save_dir=f, jit_type="trace", input_examples=[self.im]) # export
YAML.save(Path(f) / "metadata.yaml", self.metadata) # add metadata.yaml
return f
@try_export
def export_mnn(self, prefix=colorstr("MNN:")):
assert TORCH_1_10, "MNN export requires torch>=1.10.0 to avoid segmentation faults"
f_onnx = self.export_onnx() # get onnx model first
check_requirements("MNN>=2.9.6")
import MNN
from MNN.tools import mnnconvert
# Setup and checks
LOGGER.info(f"\n{prefix} starting export with MNN {MNN.version()}...")
assert Path(f_onnx).exists(), f"failed to export ONNX file: {f_onnx}"
f = str(self.file.with_suffix(".mnn")) # MNN model file
args = ["", "-f", "ONNX", "--modelFile", f_onnx, "--MNNModel", f, "--bizCode", json.dumps(self.metadata)]
if self.args.int8:
args.extend(("--weightQuantBits", "8"))
if self.args.half:
args.append("--fp16")
mnnconvert.convert(args)
# remove scratch file for model convert optimize
convert_scratch = Path(self.file.parent / ".__convert_external_data.bin")
if convert_scratch.exists():
convert_scratch.unlink()
return f
@try_export
def export_ncnn(self, prefix=colorstr("NCNN:")):
check_requirements("ncnn", cmds="--no-deps") # no deps to avoid installing opencv-python
check_requirements("pnnx")
import ncnn
import pnnx
LOGGER.info(f"\n{prefix} starting export with NCNN {ncnn.__version__} and PNNX {pnnx.__version__}...")
f = Path(str(self.file).replace(self.file.suffix, f"_ncnn_model{os.sep}"))
ncnn_args = dict(
ncnnparam=(f / "model.ncnn.param").as_posix(),
ncnnbin=(f / "model.ncnn.bin").as_posix(),
ncnnpy=(f / "model_ncnn.py").as_posix(),
)
pnnx_args = dict(
ptpath=(f / "model.pt").as_posix(),
pnnxparam=(f / "model.pnnx.param").as_posix(),
pnnxbin=(f / "model.pnnx.bin").as_posix(),
pnnxpy=(f / "model_pnnx.py").as_posix(),
pnnxonnx=(f / "model.pnnx.onnx").as_posix(),
)
f.mkdir(exist_ok=True) # make ncnn_model directory
pnnx.export(self.model, inputs=self.im, **ncnn_args, **pnnx_args, fp16=self.args.half, device=self.device.type)
for f_debug in ("debug.bin", "debug.param", "debug2.bin", "debug2.param", *pnnx_args.values()):
Path(f_debug).unlink(missing_ok=True)
YAML.save(f / "metadata.yaml", self.metadata) # add metadata.yaml
return str(f)
@try_export
def export_coreml(self, prefix=colorstr("CoreML:")):
mlmodel = self.args.format.lower() == "mlmodel" # legacy *.mlmodel export format requested
check_requirements(
["coremltools>=9.0", "numpy>=1.14.5,<=2.3.5"]
) # latest numpy 2.4.0rc1 breaks coremltools exports
import coremltools as ct
LOGGER.info(f"\n{prefix} starting export with coremltools {ct.__version__}...")
assert not WINDOWS, "CoreML export is not supported on Windows, please run on macOS or Linux."
assert TORCH_1_11, "CoreML export requires torch>=1.11"
if self.args.batch > 1:
assert self.args.dynamic, (
"batch sizes > 1 are not supported without 'dynamic=True' for CoreML export. Please retry at 'dynamic=True'."
)
if self.args.dynamic:
assert not self.args.nms, (
"'nms=True' cannot be used together with 'dynamic=True' for CoreML export. Please disable one of them."
)
assert self.model.task != "classify", "'dynamic=True' is not supported for CoreML classification models."
f = self.file.with_suffix(".mlmodel" if mlmodel else ".mlpackage")
if f.is_dir():
shutil.rmtree(f)
classifier_config = None
if self.model.task == "classify":
classifier_config = ct.ClassifierConfig(list(self.model.names.values()))
model = self.model
elif self.model.task == "detect":
model = IOSDetectModel(self.model, self.im, mlprogram=not mlmodel) if self.args.nms else self.model
else:
if self.args.nms:
LOGGER.warning(f"{prefix} 'nms=True' is only available for Detect models like 'yolo26n.pt'.")
# TODO CoreML Segment and Pose model pipelining
model = self.model
ts = torch.jit.trace(model.eval(), self.im, strict=False) # TorchScript model
if self.args.dynamic:
input_shape = ct.Shape(
shape=(
ct.RangeDim(lower_bound=1, upper_bound=self.args.batch, default=1),
self.im.shape[1],
ct.RangeDim(lower_bound=32, upper_bound=self.imgsz[0] * 2, default=self.imgsz[0]),
ct.RangeDim(lower_bound=32, upper_bound=self.imgsz[1] * 2, default=self.imgsz[1]),
)
)
inputs = [ct.TensorType("image", shape=input_shape)]
else:
inputs = [ct.ImageType("image", shape=self.im.shape, scale=1 / 255, bias=[0.0, 0.0, 0.0])]
# Based on apple's documentation it is better to leave out the minimum_deployment target and let that get set
# Internally based on the model conversion and output type.
# Setting minimum_deployment_target >= iOS16 will require setting compute_precision=ct.precision.FLOAT32.
# iOS16 adds in better support for FP16, but none of the CoreML NMS specifications handle FP16 as input.
ct_model = ct.convert(
ts,
inputs=inputs,
classifier_config=classifier_config,
convert_to="neuralnetwork" if mlmodel else "mlprogram",
)
bits, mode = (8, "kmeans") if self.args.int8 else (16, "linear") if self.args.half else (32, None)
if bits < 32:
if "kmeans" in mode:
check_requirements("scikit-learn") # scikit-learn package required for k-means quantization
if mlmodel:
ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode)
elif bits == 8: # mlprogram already quantized to FP16
import coremltools.optimize.coreml as cto
op_config = cto.OpPalettizerConfig(mode="kmeans", nbits=bits, weight_threshold=512)
config = cto.OptimizationConfig(global_config=op_config)
ct_model = cto.palettize_weights(ct_model, config=config)
if self.args.nms and self.model.task == "detect":
ct_model = self._pipeline_coreml(ct_model, weights_dir=None if mlmodel else ct_model.weights_dir)
m = self.metadata # metadata dict
ct_model.short_description = m.pop("description")
ct_model.author = m.pop("author")
ct_model.license = m.pop("license")
ct_model.version = m.pop("version")
ct_model.user_defined_metadata.update({k: str(v) for k, v in m.items()})
if self.model.task == "classify":
ct_model.user_defined_metadata.update({"com.apple.coreml.model.preview.type": "imageClassifier"})
try:
ct_model.save(str(f)) # save *.mlpackage
except Exception as e:
LOGGER.warning(
f"{prefix} CoreML export to *.mlpackage failed ({e}), reverting to *.mlmodel export. "
f"Known coremltools Python 3.11 and Windows bugs https://github.com/apple/coremltools/issues/1928."
)
f = f.with_suffix(".mlmodel")
ct_model.save(str(f))
return f
@try_export
def export_engine(self, dla=None, prefix=colorstr("TensorRT:")):
assert self.im.device.type != "cpu", "export running on CPU but must be on GPU, i.e. use 'device=0'"
f_onnx = self.export_onnx() # run before TRT import https://github.com/ultralytics/ultralytics/issues/7016
# Force re-install TensorRT on CUDA 13 ARM devices to 10.15.x versions for RT-DETR exports
# https://github.com/ultralytics/ultralytics/issues/22873
if is_jetson(jetpack=7) or is_dgx():
check_tensorrt("10.15")
try:
import tensorrt as trt
except ImportError:
check_tensorrt()
import tensorrt as trt
check_version(trt.__version__, ">=7.0.0", hard=True)
check_version(trt.__version__, "!=10.1.0", msg="https://github.com/ultralytics/ultralytics/pull/14239")
# Setup and checks
LOGGER.info(f"\n{prefix} starting export with TensorRT {trt.__version__}...")
assert Path(f_onnx).exists(), f"failed to export ONNX file: {f_onnx}"
f = self.file.with_suffix(".engine") # TensorRT engine file
onnx2engine(
f_onnx,
f,
self.args.workspace,
self.args.half,
self.args.int8,
self.args.dynamic,
self.im.shape,
dla=dla,
dataset=self.get_int8_calibration_dataloader(prefix) if self.args.int8 else None,
metadata=self.metadata,
verbose=self.args.verbose,
prefix=prefix,
)
return f
@try_export
def export_saved_model(self, prefix=colorstr("TensorFlow SavedModel:")):
cuda = torch.cuda.is_available()
try:
import tensorflow as tf
except ImportError:
check_requirements("tensorflow>=2.0.0,<=2.19.0")
import tensorflow as tf
check_requirements(
(
"tf_keras<=2.19.0", # required by 'onnx2tf' package
"sng4onnx>=1.0.1", # required by 'onnx2tf' package
"onnx_graphsurgeon>=0.3.26", # required by 'onnx2tf' package
"ai-edge-litert>=1.2.0" + (",<1.4.0" if MACOS else ""), # required by 'onnx2tf' package
"onnx>=1.12.0,<2.0.0",
"onnx2tf>=1.26.3,<1.29.0", # pin to avoid h5py build issues on aarch64
"onnxslim>=0.1.71",
"onnxruntime-gpu" if cuda else "onnxruntime",
"protobuf>=5",
),
cmds="--extra-index-url https://pypi.ngc.nvidia.com", # onnx_graphsurgeon only on NVIDIA
)
LOGGER.info(f"\n{prefix} starting export with tensorflow {tf.__version__}...")
check_version(
tf.__version__,
">=2.0.0",
name="tensorflow",
verbose=True,
msg="https://github.com/ultralytics/ultralytics/issues/5161",
)
f = Path(str(self.file).replace(self.file.suffix, "_saved_model"))
if f.is_dir():
shutil.rmtree(f) # delete output folder
# Export to TF
images = None
if self.args.int8 and self.args.data:
images = [batch["img"] for batch in self.get_int8_calibration_dataloader(prefix)]
images = (
torch.nn.functional.interpolate(torch.cat(images, 0).float(), size=self.imgsz)
.permute(0, 2, 3, 1)
.numpy()
.astype(np.float32)
)
# Export to ONNX
if isinstance(self.model.model[-1], RTDETRDecoder):
self.args.opset = self.args.opset or 19
assert 16 <= self.args.opset <= 19, "RTDETR export requires opset>=16;<=19"
self.args.simplify = True
f_onnx = self.export_onnx() # ensure ONNX is available
keras_model = onnx2saved_model(
f_onnx,
f,
int8=self.args.int8,
images=images,
disable_group_convolution=self.args.format in {"tfjs", "edgetpu"},
prefix=prefix,
)
YAML.save(f / "metadata.yaml", self.metadata) # add metadata.yaml
# Add TFLite metadata
for file in f.rglob("*.tflite"):
file.unlink() if "quant_with_int16_act.tflite" in str(file) else self._add_tflite_metadata(file)
return str(f), keras_model # or keras_model = tf.saved_model.load(f, tags=None, options=None)
@try_export
def export_pb(self, keras_model, prefix=colorstr("TensorFlow GraphDef:")):
f = self.file.with_suffix(".pb")
keras2pb(keras_model, f, prefix)
return f
@try_export
def export_tflite(self, prefix=colorstr("TensorFlow Lite:")):
# BUG https://github.com/ultralytics/ultralytics/issues/13436
import tensorflow as tf
LOGGER.info(f"\n{prefix} starting export with tensorflow {tf.__version__}...")
saved_model = Path(str(self.file).replace(self.file.suffix, "_saved_model"))
if self.args.int8:
f = saved_model / f"{self.file.stem}_int8.tflite" # fp32 in/out
elif self.args.half:
f = saved_model / f"{self.file.stem}_float16.tflite" # fp32 in/out
else:
f = saved_model / f"{self.file.stem}_float32.tflite"
return str(f)
@try_export
def export_axelera(self, prefix=colorstr("Axelera:")):
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
try:
from axelera import compiler
except ImportError:
check_apt_requirements(
["libllvm14", "libgirepository1.0-dev", "pkg-config", "libcairo2-dev", "build-essential", "cmake"]
)
check_requirements(
"axelera-voyager-sdk==1.5.2",
cmds="--extra-index-url https://software.axelera.ai/artifactory/axelera-runtime-pypi "
"--extra-index-url https://software.axelera.ai/artifactory/axelera-dev-pypi",
)
from axelera import compiler
from axelera.compiler import CompilerConfig
self.args.opset = 17 # hardcode opset for Axelera
onnx_path = self.export_onnx()
model_name = Path(onnx_path).stem
export_path = Path(f"{model_name}_axelera_model")
export_path.mkdir(exist_ok=True)
if "C2PSA" in self.model.__str__(): # YOLO11
config = CompilerConfig(
quantization_scheme="per_tensor_min_max",
ignore_weight_buffers=False,
resources_used=0.25,
aipu_cores_used=1,
multicore_mode="batch",
output_axm_format=True,
model_name=model_name,
)
else: # YOLOv8
config = CompilerConfig(
tiling_depth=6,
split_buffer_promotion=True,
resources_used=0.25,
aipu_cores_used=1,
multicore_mode="batch",
output_axm_format=True,
model_name=model_name,
)
qmodel = compiler.quantize(
model=onnx_path,
calibration_dataset=self.get_int8_calibration_dataloader(prefix),
config=config,
transform_fn=self._transform_fn,
)
compiler.compile(model=qmodel, config=config, output_dir=export_path)
axm_name = f"{model_name}.axm"
axm_src = Path(axm_name)
axm_dst = export_path / axm_name
if axm_src.exists():
axm_src.replace(axm_dst)
YAML.save(export_path / "metadata.yaml", self.metadata)
return export_path
@try_export
def export_executorch(self, prefix=colorstr("ExecuTorch:")):
assert TORCH_2_9, f"ExecuTorch requires torch>=2.9.0 but torch=={TORCH_VERSION} is installed"
check_executorch_requirements()
return torch2executorch(self.model, self.file, self.im, metadata=self.metadata, prefix=prefix)
@try_export
def export_edgetpu(self, tflite_model="", prefix=colorstr("Edge TPU:")):
cmd = "edgetpu_compiler --version"
help_url = "https://coral.ai/docs/edgetpu/compiler/"
assert LINUX, f"export only supported on Linux. See {help_url}"
if subprocess.run(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, shell=True).returncode != 0:
LOGGER.info(f"\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}")
sudo = "sudo " if is_sudo_available() else ""
for c in (
f"{sudo}mkdir -p /etc/apt/keyrings",
f"curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | {sudo}gpg --dearmor -o /etc/apt/keyrings/google.gpg",
f'echo "deb [signed-by=/etc/apt/keyrings/google.gpg] https://packages.cloud.google.com/apt coral-edgetpu-stable main" | {sudo}tee /etc/apt/sources.list.d/coral-edgetpu.list',
):
subprocess.run(c, shell=True, check=True)
check_apt_requirements(["edgetpu-compiler"])
ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().rsplit(maxsplit=1)[-1]
LOGGER.info(f"\n{prefix} starting export with Edge TPU compiler {ver}...")
tflite2edgetpu(tflite_file=tflite_model, output_dir=tflite_model.parent, prefix=prefix)
f = str(tflite_model).replace(".tflite", "_edgetpu.tflite") # Edge TPU model
self._add_tflite_metadata(f)
return f
@try_export
def export_tfjs(self, prefix=colorstr("TensorFlow.js:")):
check_requirements("tensorflowjs")
f = str(self.file).replace(self.file.suffix, "_web_model") # js dir
f_pb = str(self.file.with_suffix(".pb")) # *.pb path
pb2tfjs(pb_file=f_pb, output_dir=f, half=self.args.half, int8=self.args.int8, prefix=prefix)
# Add metadata
YAML.save(Path(f) / "metadata.yaml", self.metadata) # add metadata.yaml
return f
@try_export
def export_rknn(self, prefix=colorstr("RKNN:")):
LOGGER.info(f"\n{prefix} starting export with rknn-toolkit2...")
check_requirements("rknn-toolkit2")
check_requirements("onnx<1.19.0") # fix AttributeError: module 'onnx' has no attribute 'mapping'
if IS_COLAB:
# Prevent 'exit' from closing the notebook https://github.com/airockchip/rknn-toolkit2/issues/259
import builtins
builtins.exit = lambda: None
from rknn.api import RKNN
self.args.opset = min(self.args.opset or 19, 19) # rknn-toolkit expects opset<=19
f = self.export_onnx()
export_path = Path(f"{Path(f).stem}_rknn_model")
export_path.mkdir(exist_ok=True)
rknn = RKNN(verbose=False)
rknn.config(mean_values=[[0, 0, 0]], std_values=[[255, 255, 255]], target_platform=self.args.name)
rknn.load_onnx(model=f)
rknn.build(do_quantization=False) # TODO: Add quantization support
rknn.export_rknn(str(export_path / f"{Path(f).stem}-{self.args.name}.rknn"))
YAML.save(export_path / "metadata.yaml", self.metadata)
return export_path
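# Usage sketch (illustrative): `name` selects the Rockchip target platform passed to
# rknn.config() above, e.g.:
#   from ultralytics import YOLO
#   YOLO("yolo26n.pt").export(format="rknn", name="rk3588")  # -> yolo26n_rknn_model/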
@try_export
def export_imx(self, prefix=colorstr("IMX:")):
assert LINUX, (
"Export only supported on Linux."
"See https://developer.aitrios.sony-semicon.com/en/docs/raspberry-pi-ai-camera/imx500-converter?version=3.17.3&progLang="
)
assert IS_PYTHON_MINIMUM_3_9, "IMX export is only supported on Python 3.9 or above."
if getattr(self.model, "end2end", False):
raise ValueError("IMX export is not supported for end2end models.")
check_requirements(
(
"model-compression-toolkit>=2.4.1",
"edge-mdt-cl<1.1.0",
"edge-mdt-tpc>=1.2.0",
"pydantic<=2.11.7",
)
)
check_requirements("imx500-converter[pt]>=3.17.3")
# Install Java>=17
try:
java_output = subprocess.run(["java", "--version"], check=True, capture_output=True).stdout.decode()
version_match = re.search(r"(?:openjdk|java) (\d+)", java_output)
java_version = int(version_match.group(1)) if version_match else 0
assert java_version >= 17, "Java version too old"
except (FileNotFoundError, subprocess.CalledProcessError, AssertionError):
if IS_UBUNTU or IS_DEBIAN_TRIXIE:
LOGGER.info(f"\n{prefix} installing Java 21 for Ubuntu...")
check_apt_requirements(["openjdk-21-jre"])
elif IS_RASPBERRYPI or IS_DEBIAN_BOOKWORM:
LOGGER.info(f"\n{prefix} installing Java 17 for Raspberry Pi or Debian ...")
check_apt_requirements(["openjdk-17-jre"])
return torch2imx(
self.model,
self.file,
self.args.conf,
self.args.iou,
self.args.max_det,
metadata=self.metadata,
dataset=self.get_int8_calibration_dataloader(prefix),
prefix=prefix,
)
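# Usage sketch (illustrative): IMX export quantizes against a calibration dataloader, so
# a dataset is required; Linux and Python>=3.9 only, as asserted above.
#   from ultralytics import YOLO
#   YOLO("yolo26n.pt").export(format="imx", data="coco8.yaml")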
def _add_tflite_metadata(self, file):
import zipfile
with zipfile.ZipFile(file, "a", zipfile.ZIP_DEFLATED) as zf:
zf.writestr("metadata.json", json.dumps(self.metadata, indent=2))
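# The metadata lands in the .tflite file as an appended zip entry, so it can be read back
# with the standard library alone (sketch, assuming a local yolo26n.tflite):
#   import json, zipfile
#   with zipfile.ZipFile("yolo26n.tflite") as zf:
#       meta = json.loads(zf.read("metadata.json"))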
def _pipeline_coreml(self, model, weights_dir=None, prefix=colorstr("CoreML Pipeline:")):
import coremltools as ct
LOGGER.info(f"{prefix} starting pipeline with coremltools {ct.__version__}...")
# Output shapes
spec = model.get_spec()
outs = list(iter(spec.description.output))
if self.args.format == "mlmodel": # mlmodel doesn't infer shapes automatically
outs[0].type.multiArrayType.shape[:] = self.output_shape[2], self.output_shape[1] - 4
outs[1].type.multiArrayType.shape[:] = self.output_shape[2], 4
# Checks
names = self.metadata["names"]
nx, ny = spec.description.input[0].type.imageType.width, spec.description.input[0].type.imageType.height
nc = outs[0].type.multiArrayType.shape[-1]
if len(names) != nc: # Hack fix for MLProgram NMS bug https://github.com/ultralytics/ultralytics/issues/22309
names = {**names, **{i: str(i) for i in range(len(names), nc)}}
# Model from spec
model = ct.models.MLModel(spec, weights_dir=weights_dir)
# Create NMS protobuf
nms_spec = ct.proto.Model_pb2.Model()
nms_spec.specificationVersion = spec.specificationVersion
for i in range(len(outs)):
decoder_output = model._spec.description.output[i].SerializeToString()
nms_spec.description.input.add()
nms_spec.description.input[i].ParseFromString(decoder_output)
nms_spec.description.output.add()
nms_spec.description.output[i].ParseFromString(decoder_output)
output_names = ["confidence", "coordinates"]
for i, name in enumerate(output_names):
nms_spec.description.output[i].name = name
for i, out in enumerate(outs):
ma_type = nms_spec.description.output[i].type.multiArrayType
ma_type.shapeRange.sizeRanges.add()
ma_type.shapeRange.sizeRanges[0].lowerBound = 0
ma_type.shapeRange.sizeRanges[0].upperBound = -1
ma_type.shapeRange.sizeRanges.add()
ma_type.shapeRange.sizeRanges[1].lowerBound = out.type.multiArrayType.shape[-1]
ma_type.shapeRange.sizeRanges[1].upperBound = out.type.multiArrayType.shape[-1]
del ma_type.shape[:]
nms = nms_spec.nonMaximumSuppression
nms.confidenceInputFeatureName = outs[0].name # 1x507x80
nms.coordinatesInputFeatureName = outs[1].name # 1x507x4
nms.confidenceOutputFeatureName = output_names[0]
nms.coordinatesOutputFeatureName = output_names[1]
nms.iouThresholdInputFeatureName = "iouThreshold"
nms.confidenceThresholdInputFeatureName = "confidenceThreshold"
nms.iouThreshold = self.args.iou
nms.confidenceThreshold = self.args.conf
nms.pickTop.perClass = not self.args.agnostic_nms
nms.stringClassLabels.vector.extend(names.values())
nms_model = ct.models.MLModel(nms_spec)
# Pipeline models together
pipeline = ct.models.pipeline.Pipeline(
input_features=[
("image", ct.models.datatypes.Array(3, ny, nx)),
("iouThreshold", ct.models.datatypes.Double()),
("confidenceThreshold", ct.models.datatypes.Double()),
],
output_features=output_names,
)
pipeline.add_model(model)
pipeline.add_model(nms_model)
# Correct datatypes
pipeline.spec.description.input[0].ParseFromString(model._spec.description.input[0].SerializeToString())
pipeline.spec.description.output[0].ParseFromString(nms_model._spec.description.output[0].SerializeToString())
pipeline.spec.description.output[1].ParseFromString(nms_model._spec.description.output[1].SerializeToString())
# Update metadata
pipeline.spec.specificationVersion = spec.specificationVersion
pipeline.spec.description.metadata.userDefined.update(
{"IoU threshold": str(nms.iouThreshold), "Confidence threshold": str(nms.confidenceThreshold)}
)
# Save the model
model = ct.models.MLModel(pipeline.spec, weights_dir=weights_dir)
model.input_description["image"] = "Input image"
model.input_description["iouThreshold"] = f"(optional) IoU threshold override (default: {nms.iouThreshold})"
model.input_description["confidenceThreshold"] = (
f"(optional) Confidence threshold override (default: {nms.confidenceThreshold})"
)
model.output_description["confidence"] = 'Boxes × Class confidence (see user-defined metadata "classes")'
model.output_description["coordinates"] = "Boxes × [x, y, width, height] (relative to image size)"
LOGGER.info(f"{prefix} pipeline success")
return model
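# Resulting pipeline contract (from the spec built above): inputs are "image" plus
# optional "iouThreshold"/"confidenceThreshold" Doubles; outputs are "confidence"
# (boxes x classes) and "coordinates" (boxes x [x, y, width, height], normalized).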
@staticmethod
def _transform_fn(data_item) -> np.ndarray:
data_item: torch.Tensor = data_item["img"] if isinstance(data_item, dict) else data_item
assert data_item.dtype == torch.uint8, "Input image must be uint8 for the quantization preprocessing"
im = data_item.numpy().astype(np.float32) / 255.0 # uint8 to fp16/32 and 0 - 255 to 0.0 - 1.0
return im[None] if im.ndim == 3 else im
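# Dtype/shape contract of the transform, shown on a dummy batch (sketch):
#   import torch
#   chw = torch.zeros(3, 640, 640, dtype=torch.uint8)
#   Exporter._transform_fn(chw).shape                 # (1, 3, 640, 640), float32 in [0, 1]
#   Exporter._transform_fn({"img": chw[None]}).shape  # dict inputs pass through "img"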
def add_callback(self, event: str, callback):
self.callbacks[event].append(callback)
def run_callbacks(self, event: str):
for callback in self.callbacks.get(event, []):
callback(self)
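# Callback sketch: any callable taking the Exporter instance is accepted, e.g.
#   def on_export_end(exporter):
#       print("exported", exporter.file)
#   exporter.add_callback("on_export_end", on_export_end)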
class IOSDetectModel(torch.nn.Module):
def __init__(self, model, im, mlprogram=True):
super().__init__()
_, _, h, w = im.shape # batch, channel, height, width
self.model = model
self.nc = len(model.names) # number of classes
self.mlprogram = mlprogram
if w == h:
self.normalize = 1.0 / w # scalar
else:
self.normalize = torch.tensor(
[1.0 / w, 1.0 / h, 1.0 / w, 1.0 / h], # broadcast (slower, smaller)
device=next(model.parameters()).device,
)
def forward(self, x):
xywh, cls = self.model(x)[0].transpose(0, 1).split((4, self.nc), 1)
if self.mlprogram and self.nc % 80 != 0: # NMS bug https://github.com/ultralytics/ultralytics/issues/22309
pad_length = int(((self.nc + 79) // 80) * 80) - self.nc # pad class length to multiple of 80
cls = torch.nn.functional.pad(cls, (0, pad_length, 0, 0), "constant", 0)
return cls, xywh * self.normalize
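# Padding arithmetic sketch: with nc=3, pad_length = ceil(3 / 80) * 80 - 3 = 77, so
# `cls` widens from (N, 3) to (N, 80) to sidestep the MLProgram NMS class-count bug.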
class NMSModel(torch.nn.Module):
def __init__(self, model, args):
super().__init__()
self.model = model
self.args = args
self.obb = model.task == "obb"
self.is_tf = self.args.format in frozenset({"saved_model", "tflite", "tfjs"})
def forward(self, x):
from functools import partial
from torchvision.ops import nms
preds = self.model(x)
pred = preds[0] if isinstance(preds, tuple) else preds
kwargs = dict(device=pred.device, dtype=pred.dtype)
bs = pred.shape[0]
pred = pred.transpose(-1, -2) # shape(1,84,6300) to shape(1,6300,84)
extra_shape = pred.shape[-1] - (4 + len(self.model.names)) # extras from Segment, OBB, Pose
if self.args.dynamic and self.args.batch > 1: # batch size must always be the same due to loop unrolling
pad = torch.zeros(torch.max(torch.tensor(self.args.batch - bs), torch.tensor(0)), *pred.shape[1:], **kwargs)
pred = torch.cat((pred, pad))
boxes, scores, extras = pred.split([4, len(self.model.names), extra_shape], dim=2)
scores, classes = scores.max(dim=-1)
self.args.max_det = min(pred.shape[1], self.args.max_det) # in case num_anchors < max_det
# (N, max_det, 4 coords + 1 class score + 1 class label + extra_shape).
out = torch.zeros(pred.shape[0], self.args.max_det, boxes.shape[-1] + 2 + extra_shape, **kwargs)
for i in range(bs):
box, cls, score, extra = boxes[i], classes[i], scores[i], extras[i]
mask = score > self.args.conf
if self.is_tf or (self.args.format == "onnx" and self.obb):
# TFLite GatherND error if mask is empty
score *= mask
# Use an explicit top-k length (capped at `self.args.max_det * 5`) to avoid reshape errors
mask = score.topk(min(self.args.max_det * 5, score.shape[0])).indices
box, score, cls, extra = box[mask], score[mask], cls[mask], extra[mask]
nmsbox = box.clone()
# `8` is the smallest multiplier found experimentally to yield correct NMS results for OBB
multiplier = 8 if self.obb else 1 / max(len(self.model.names), 1)
# Normalize boxes for NMS, since large class-offset values cause issues with int8 quantization
if self.args.format == "tflite": # TFLite is already normalized
nmsbox *= multiplier
else:
nmsbox = multiplier * (nmsbox / torch.tensor(x.shape[2:], **kwargs).max())
if not self.args.agnostic_nms: # class-wise NMS
end = 2 if self.obb else 4
# fully explicit expansion otherwise reshape error
cls_offset = cls.view(cls.shape[0], 1).expand(cls.shape[0], end)
offbox = nmsbox[:, :end] + cls_offset * multiplier
nmsbox = torch.cat((offbox, nmsbox[:, end:]), dim=-1)
nms_fn = (
partial(
TorchNMS.fast_nms,
use_triu=not (
self.is_tf
or (self.args.opset or 14) < 14
or (self.args.format == "openvino" and self.args.int8) # OpenVINO int8 error with triu
),
iou_func=batch_probiou,
exit_early=False,
)
if self.obb
else nms
)
keep = nms_fn(
torch.cat([nmsbox, extra], dim=-1) if self.obb else nmsbox,
score,
self.args.iou,
)[: self.args.max_det]
dets = torch.cat(
[box[keep], score[keep].view(-1, 1), cls[keep].view(-1, 1).to(out.dtype), extra[keep]], dim=-1
)
# Zero-pad to max_det size to avoid reshape error
pad = (0, 0, 0, self.args.max_det - dets.shape[0])
out[i] = torch.nn.functional.pad(dets, pad)
return (out[:bs], preds[1]) if self.model.task == "segment" else out[:bs] | --- +++ @@ -1,4 +1,64 @@ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""
+Export a YOLO PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit.
+
+Format | `format=argument` | Model
+--- | --- | ---
+PyTorch | - | yolo26n.pt
+TorchScript | `torchscript` | yolo26n.torchscript
+ONNX | `onnx` | yolo26n.onnx
+OpenVINO | `openvino` | yolo26n_openvino_model/
+TensorRT | `engine` | yolo26n.engine
+CoreML | `coreml` | yolo26n.mlpackage
+TensorFlow SavedModel | `saved_model` | yolo26n_saved_model/
+TensorFlow GraphDef | `pb` | yolo26n.pb
+TensorFlow Lite | `tflite` | yolo26n.tflite
+TensorFlow Edge TPU | `edgetpu` | yolo26n_edgetpu.tflite
+TensorFlow.js | `tfjs` | yolo26n_web_model/
+PaddlePaddle | `paddle` | yolo26n_paddle_model/
+MNN | `mnn` | yolo26n.mnn
+NCNN | `ncnn` | yolo26n_ncnn_model/
+IMX | `imx` | yolo26n_imx_model/
+RKNN | `rknn` | yolo26n_rknn_model/
+ExecuTorch | `executorch` | yolo26n_executorch_model/
+Axelera | `axelera` | yolo26n_axelera_model/
+
+Requirements:
+ $ pip install "ultralytics[export]"
+
+Python:
+ from ultralytics import YOLO
+ model = YOLO('yolo26n.pt')
+ results = model.export(format='onnx')
+
+CLI:
+ $ yolo mode=export model=yolo26n.pt format=onnx
+
+Inference:
+ $ yolo predict model=yolo26n.pt # PyTorch
+ yolo26n.torchscript # TorchScript
+ yolo26n.onnx # ONNX Runtime or OpenCV DNN with dnn=True
+ yolo26n_openvino_model # OpenVINO
+ yolo26n.engine # TensorRT
+ yolo26n.mlpackage # CoreML (macOS-only)
+ yolo26n_saved_model # TensorFlow SavedModel
+ yolo26n.pb # TensorFlow GraphDef
+ yolo26n.tflite # TensorFlow Lite
+ yolo26n_edgetpu.tflite # TensorFlow Edge TPU
+ yolo26n_paddle_model # PaddlePaddle
+ yolo26n.mnn # MNN
+ yolo26n_ncnn_model # NCNN
+ yolo26n_imx_model # IMX
+ yolo26n_rknn_model # RKNN
+ yolo26n_executorch_model # ExecuTorch
+ yolo26n_axelera_model # Axelera
+
+TensorFlow.js:
+ $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example
+ $ npm install
+ $ ln -s ../../yolo26n_web_model public/yolo26n_web_model
+ $ npm start
+"""
from __future__ import annotations
@@ -87,6 +147,7 @@
def export_formats():
+ """Return a dictionary of Ultralytics YOLO export formats."""
x = [
["PyTorch", "-", ".pt", True, True, []],
["TorchScript", "torchscript", ".torchscript", True, True, ["batch", "optimize", "half", "nms", "dynamic"]],
@@ -125,6 +186,7 @@
def best_onnx_opset(onnx, cuda=False) -> int:
+ """Return max ONNX opset for this torch version with ONNX fallback."""
if TORCH_2_4: # _constants.ONNX_MAX_OPSET first defined in torch 1.13
opset = torch.onnx.utils._constants.ONNX_MAX_OPSET - 1 # use second-latest version for safety
if TORCH_2_9:
@@ -154,6 +216,16 @@
def validate_args(format, passed_args, valid_args):
+ """Validate arguments based on the export format.
+
+ Args:
+ format (str): The export format.
+ passed_args (SimpleNamespace): The arguments used during export.
+ valid_args (list): List of valid arguments for the format.
+
+ Raises:
+ AssertionError: If an unsupported argument is used, or if the format lacks supported argument listings.
+ """
export_args = ["half", "int8", "dynamic", "keras", "nms", "batch", "fraction"]
assert valid_args is not None, f"ERROR ❌️ valid arguments for '{format}' not listed."
@@ -166,9 +238,11 @@
def try_export(inner_func):
+ """YOLO export decorator, i.e. @try_export."""
inner_args = get_default_args(inner_func)
def outer_func(*args, **kwargs):
+ """Export a model."""
prefix = inner_args["prefix"]
dt = 0.0
try:
@@ -187,13 +261,75 @@
class Exporter:
+ """A class for exporting YOLO models to various formats.
+
+ This class provides functionality to export YOLO models to different formats including ONNX, TensorRT, CoreML,
+ TensorFlow, and others. It handles format validation, device selection, model preparation, and the actual export
+ process for each supported format.
+
+ Attributes:
+ args (SimpleNamespace): Configuration arguments for the exporter.
+ callbacks (dict): Dictionary of callback functions for different export events.
+ im (torch.Tensor): Input tensor for model inference during export.
+ model (torch.nn.Module): The YOLO model to be exported.
+ file (Path): Path to the model file being exported.
+ output_shape (tuple): Shape of the model output tensor(s).
+ pretty_name (str): Formatted model name for display purposes.
+ metadata (dict): Model metadata including description, author, version, etc.
+ device (torch.device): Device on which the model is loaded.
+ imgsz (list): Input image size for the model.
+
+ Methods:
+ __call__: Main export method that handles the export process.
+ get_int8_calibration_dataloader: Build dataloader for INT8 calibration.
+ export_torchscript: Export model to TorchScript format.
+ export_onnx: Export model to ONNX format.
+ export_openvino: Export model to OpenVINO format.
+ export_paddle: Export model to PaddlePaddle format.
+ export_mnn: Export model to MNN format.
+ export_ncnn: Export model to NCNN format.
+ export_coreml: Export model to CoreML format.
+ export_engine: Export model to TensorRT format.
+ export_saved_model: Export model to TensorFlow SavedModel format.
+ export_pb: Export model to TensorFlow GraphDef format.
+ export_tflite: Export model to TensorFlow Lite format.
+ export_edgetpu: Export model to Edge TPU format.
+ export_tfjs: Export model to TensorFlow.js format.
+ export_rknn: Export model to RKNN format.
+ export_imx: Export model to IMX format.
+ export_executorch: Export model to ExecuTorch format.
+ export_axelera: Export model to Axelera format.
+
+ Examples:
+ Export a YOLO26 model to ONNX format
+ >>> from ultralytics.engine.exporter import Exporter
+ >>> exporter = Exporter()
+ >>> exporter(model="yolo26n.pt") # exports to yolo26n.onnx
+
+ Export with specific arguments
+ >>> args = {"format": "onnx", "dynamic": True, "half": True}
+ >>> exporter = Exporter(overrides=args)
+ >>> exporter(model="yolo26n.pt")
+ """
def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks: dict | None = None):
+ """Initialize the Exporter class.
+
+ Args:
+ cfg (str | Path | dict | SimpleNamespace, optional): Configuration file path or configuration object.
+ overrides (dict, optional): Configuration overrides.
+ _callbacks (dict, optional): Dictionary of callback functions.
+ """
self.args = get_cfg(cfg, overrides)
self.callbacks = _callbacks or callbacks.get_default_callbacks()
callbacks.add_integration_callbacks(self)
def __call__(self, model=None) -> str:
+ """Export a model and return the final exported path as a string.
+
+ Returns:
+ (str): Path to the exported file or directory (the last export artifact).
+ """
t = time.time()
fmt = self.args.format.lower() # to lowercase
if fmt in {"tensorrt", "trt"}: # 'engine' aliases
@@ -523,6 +659,7 @@ return f # path to final export artifact
def get_int8_calibration_dataloader(self, prefix=""):
+ """Build and return a dataloader for calibration of INT8 models."""
LOGGER.info(f"{prefix} collecting INT8 calibration images from 'data={self.args.data}'")
data = (check_cls_dataset if self.model.task == "classify" else check_det_dataset)(self.args.data)
dataset = YOLODataset(
@@ -550,6 +687,7 @@
@try_export
def export_torchscript(self, prefix=colorstr("TorchScript:")):
+ """Export YOLO model to TorchScript format."""
LOGGER.info(f"\n{prefix} starting export with torch {TORCH_VERSION}...")
f = self.file.with_suffix(".torchscript")
@@ -566,6 +704,7 @@
@try_export
def export_onnx(self, prefix=colorstr("ONNX:")):
+ """Export YOLO model to ONNX format."""
requirements = ["onnx>=1.12.0,<2.0.0"]
if self.args.simplify:
requirements += ["onnxslim>=0.1.71", "onnxruntime" + ("-gpu" if torch.cuda.is_available() else "")]
@@ -643,6 +782,7 @@
@try_export
def export_openvino(self, prefix=colorstr("OpenVINO:")):
+ """Export YOLO model to OpenVINO format."""
# OpenVINO <= 2025.1.0 error on macOS 15.4+: https://github.com/openvinotoolkit/openvino/issues/30023
check_requirements("openvino>=2025.2.0" if MACOS and MACOS_VERSION >= "15.4" else "openvino>=2024.0.0")
import openvino as ov
@@ -656,6 +796,7 @@ )
def serialize(ov_model, file):
+ """Set RT info, serialize, and save metadata YAML."""
ov_model.set_rt_info("YOLO", ["model_info", "model_type"])
ov_model.set_rt_info(True, ["model_info", "reverse_input_channels"])
ov_model.set_rt_info(114, ["model_info", "pad_value"])
@@ -708,6 +849,7 @@
@try_export
def export_paddle(self, prefix=colorstr("PaddlePaddle:")):
+ """Export YOLO model to PaddlePaddle format."""
assert not IS_JETSON, "Jetson Paddle exports not supported yet"
check_requirements(
(
@@ -731,6 +873,7 @@
@try_export
def export_mnn(self, prefix=colorstr("MNN:")):
+ """Export YOLO model to MNN format using MNN https://github.com/alibaba/MNN."""
assert TORCH_1_10, "MNN export requires torch>=1.10.0 to avoid segmentation faults"
f_onnx = self.export_onnx() # get onnx model first
@@ -756,6 +899,7 @@
@try_export
def export_ncnn(self, prefix=colorstr("NCNN:")):
+ """Export YOLO model to NCNN format using PNNX https://github.com/pnnx/pnnx."""
check_requirements("ncnn", cmds="--no-deps") # no deps to avoid installing opencv-python
check_requirements("pnnx")
import ncnn
@@ -789,6 +933,7 @@
@try_export
def export_coreml(self, prefix=colorstr("CoreML:")):
+ """Export YOLO model to CoreML format."""
mlmodel = self.args.format.lower() == "mlmodel" # legacy *.mlmodel export format requested
check_requirements(
["coremltools>=9.0", "numpy>=1.14.5,<=2.3.5"]
@@ -884,6 +1029,7 @@
@try_export
def export_engine(self, dla=None, prefix=colorstr("TensorRT:")):
+ """Export YOLO model to TensorRT format https://developer.nvidia.com/tensorrt."""
assert self.im.device.type != "cpu", "export running on CPU but must be on GPU, i.e. use 'device=0'"
f_onnx = self.export_onnx() # run before TRT import https://github.com/ultralytics/ultralytics/issues/7016
@@ -923,6 +1069,7 @@
@try_export
def export_saved_model(self, prefix=colorstr("TensorFlow SavedModel:")):
+ """Export YOLO model to TensorFlow SavedModel format."""
cuda = torch.cuda.is_available()
try:
import tensorflow as tf
@@ -990,12 +1137,14 @@
@try_export
def export_pb(self, keras_model, prefix=colorstr("TensorFlow GraphDef:")):
+ """Export YOLO model to TensorFlow GraphDef *.pb format https://github.com/leimao/Frozen-Graph-TensorFlow."""
f = self.file.with_suffix(".pb")
keras2pb(keras_model, f, prefix)
return f
@try_export
def export_tflite(self, prefix=colorstr("TensorFlow Lite:")):
+ """Export YOLO model to TensorFlow Lite format."""
# BUG https://github.com/ultralytics/ultralytics/issues/13436
import tensorflow as tf
@@ -1011,6 +1160,7 @@
@try_export
def export_axelera(self, prefix=colorstr("Axelera:")):
+ """Export YOLO model to Axelera format."""
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
try:
from axelera import compiler
@@ -1077,12 +1227,14 @@
@try_export
def export_executorch(self, prefix=colorstr("ExecuTorch:")):
+ """Export YOLO model to ExecuTorch *.pte format."""
assert TORCH_2_9, f"ExecuTorch requires torch>=2.9.0 but torch=={TORCH_VERSION} is installed"
check_executorch_requirements()
return torch2executorch(self.model, self.file, self.im, metadata=self.metadata, prefix=prefix)
@try_export
def export_edgetpu(self, tflite_model="", prefix=colorstr("Edge TPU:")):
+ """Export YOLO model to Edge TPU format https://coral.ai/docs/edgetpu/models-intro/."""
cmd = "edgetpu_compiler --version"
help_url = "https://coral.ai/docs/edgetpu/compiler/"
assert LINUX, f"export only supported on Linux. See {help_url}"
@@ -1106,6 +1258,7 @@
@try_export
def export_tfjs(self, prefix=colorstr("TensorFlow.js:")):
+ """Export YOLO model to TensorFlow.js format."""
check_requirements("tensorflowjs")
f = str(self.file).replace(self.file.suffix, "_web_model") # js dir
@@ -1117,6 +1270,7 @@
@try_export
def export_rknn(self, prefix=colorstr("RKNN:")):
+ """Export YOLO model to RKNN format."""
LOGGER.info(f"\n{prefix} starting export with rknn-toolkit2...")
check_requirements("rknn-toolkit2")
@@ -1144,6 +1298,7 @@
@try_export
def export_imx(self, prefix=colorstr("IMX:")):
+ """Export YOLO model to IMX format."""
assert LINUX, (
"Export only supported on Linux."
"See https://developer.aitrios.sony-semicon.com/en/docs/raspberry-pi-ai-camera/imx500-converter?version=3.17.3&progLang="
@@ -1189,12 +1344,14 @@ )
def _add_tflite_metadata(self, file):
+ """Add metadata to *.tflite models per https://ai.google.dev/edge/litert/models/metadata."""
import zipfile
with zipfile.ZipFile(file, "a", zipfile.ZIP_DEFLATED) as zf:
zf.writestr("metadata.json", json.dumps(self.metadata, indent=2))
def _pipeline_coreml(self, model, weights_dir=None, prefix=colorstr("CoreML Pipeline:")):
+ """Create CoreML pipeline with NMS for YOLO detection models."""
import coremltools as ct
LOGGER.info(f"{prefix} starting pipeline with coremltools {ct.__version__}...")
@@ -1290,22 +1447,33 @@
@staticmethod
def _transform_fn(data_item) -> np.ndarray:
+ """The transformation function for Axelera/OpenVINO quantization preprocessing."""
data_item: torch.Tensor = data_item["img"] if isinstance(data_item, dict) else data_item
assert data_item.dtype == torch.uint8, "Input image must be uint8 for the quantization preprocessing"
im = data_item.numpy().astype(np.float32) / 255.0 # uint8 to fp16/32 and 0 - 255 to 0.0 - 1.0
return im[None] if im.ndim == 3 else im
def add_callback(self, event: str, callback):
+ """Append the given callback to the specified event."""
self.callbacks[event].append(callback)
def run_callbacks(self, event: str):
+ """Execute all callbacks for a given event."""
for callback in self.callbacks.get(event, []):
callback(self)
class IOSDetectModel(torch.nn.Module):
+ """Wrap an Ultralytics YOLO model for Apple iOS CoreML export."""
def __init__(self, model, im, mlprogram=True):
+ """Initialize the IOSDetectModel class with a YOLO model and example image.
+
+ Args:
+ model (torch.nn.Module): The YOLO model to wrap.
+ im (torch.Tensor): Example input tensor with shape (B, C, H, W).
+ mlprogram (bool): Whether exporting to MLProgram format.
+ """
super().__init__()
_, _, h, w = im.shape # batch, channel, height, width
self.model = model
@@ -1320,6 +1488,7 @@ )
def forward(self, x):
+ """Normalize predictions of object detection model with input size-dependent factors."""
xywh, cls = self.model(x)[0].transpose(0, 1).split((4, self.nc), 1)
if self.mlprogram and self.nc % 80 != 0: # NMS bug https://github.com/ultralytics/ultralytics/issues/22309
pad_length = int(((self.nc + 79) // 80) * 80) - self.nc # pad class length to multiple of 80
@@ -1329,8 +1498,15 @@
class NMSModel(torch.nn.Module):
+ """Model wrapper with embedded NMS for Detect, Segment, Pose and OBB."""
def __init__(self, model, args):
+ """Initialize the NMSModel.
+
+ Args:
+ model (torch.nn.Module): The model to wrap with NMS postprocessing.
+ args (SimpleNamespace): The export arguments.
+ """
super().__init__()
self.model = model
self.args = args
@@ -1338,6 +1514,15 @@ self.is_tf = self.args.format in frozenset({"saved_model", "tflite", "tfjs"})
def forward(self, x):
+ """Perform inference with NMS post-processing. Supports Detect, Segment, OBB and Pose.
+
+ Args:
+ x (torch.Tensor): The preprocessed tensor with shape (B, C, H, W).
+
+ Returns:
+ (torch.Tensor | tuple): Tensor of shape (B, max_det, 4 + 2 + extra_shape) where B is the batch size, or a
+ tuple of (detections, proto) for segmentation models.
+ """
from functools import partial
from torchvision.ops import nms
@@ -1404,4 +1589,4 @@ # Zero-pad to max_det size to avoid reshape error
pad = (0, 0, 0, self.args.max_det - dets.shape[0])
out[i] = torch.nn.functional.pad(dets, pad)
- return (out[:bs], preds[1]) if self.model.task == "segment" else out[:bs]
+ return (out[:bs], preds[1]) if self.model.task == "segment" else out[:bs]
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/engine/exporter.py |
Generate consistent documentation across files | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import inspect
from pathlib import Path
from typing import Any
import numpy as np
import torch
from PIL import Image
from ultralytics.cfg import TASK2DATA, get_cfg, get_save_dir
from ultralytics.engine.results import Results
from ultralytics.nn.tasks import guess_model_task, load_checkpoint, yaml_model_load
from ultralytics.utils import (
ARGV,
ASSETS,
DEFAULT_CFG_DICT,
LOGGER,
RANK,
SETTINGS,
YAML,
callbacks,
checks,
)
class Model(torch.nn.Module):
def __init__(
self,
model: str | Path | Model = "yolo26n.pt",
task: str | None = None,
verbose: bool = False,
) -> None:
if isinstance(model, Model):
self.__dict__ = model.__dict__ # accepts an already initialized Model
return
super().__init__()
self.callbacks = callbacks.get_default_callbacks()
self.predictor = None # reuse predictor
self.model = None # model object
self.trainer = None # trainer object
self.ckpt = {} # if loaded from *.pt
self.cfg = None # if loaded from *.yaml
self.ckpt_path = None
self.overrides = {} # overrides for trainer object
self.metrics = None # validation/training metrics
self.session = None # HUB session
self.task = task # task type
self.model_name = None # model name
model = str(model).strip()
# Check if Ultralytics HUB model from https://hub.ultralytics.com
if self.is_hub_model(model):
from ultralytics.hub import HUBTrainingSession
# Fetch model from HUB
checks.check_requirements("hub-sdk>=0.0.12")
session = HUBTrainingSession.create_session(model)
model = session.model_file
if session.train_args: # training sent from HUB
self.session = session
# Check if Triton Server model
elif self.is_triton_model(model):
self.model_name = self.model = model
self.overrides["task"] = task or "detect" # set `task=detect` if not explicitly set
return
# Load or create new YOLO model
__import__("os").environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8" # to avoid deterministic warnings
if str(model).endswith((".yaml", ".yml")):
self._new(model, task=task, verbose=verbose)
else:
self._load(model, task=task)
# Delete super().training for accessing self.model.training
del self.training
def __call__(
self,
source: str | Path | int | Image.Image | list | tuple | np.ndarray | torch.Tensor = None,
stream: bool = False,
**kwargs: Any,
) -> list:
return self.predict(source, stream, **kwargs)
@staticmethod
def is_triton_model(model: str) -> bool:
from urllib.parse import urlsplit
url = urlsplit(model)
return url.netloc and url.path and url.scheme in {"http", "grpc"}
@staticmethod
def is_hub_model(model: str) -> bool:
from ultralytics.hub import HUB_WEB_ROOT
return model.startswith(f"{HUB_WEB_ROOT}/models/")
def _new(self, cfg: str, task=None, model=None, verbose=False) -> None:
cfg_dict = yaml_model_load(cfg)
self.cfg = cfg
self.task = task or guess_model_task(cfg_dict)
self.model = (model or self._smart_load("model"))(cfg_dict, verbose=verbose and RANK == -1) # build model
self.overrides["model"] = self.cfg
self.overrides["task"] = self.task
# Below added to allow export from YAMLs
self.model.args = {**DEFAULT_CFG_DICT, **self.overrides} # combine default and model args (prefer model args)
self.model.task = self.task
self.model_name = cfg
def _load(self, weights: str, task=None) -> None:
if weights.lower().startswith(("https://", "http://", "rtsp://", "rtmp://", "tcp://", "ul://")):
weights = checks.check_file(weights, download_dir=SETTINGS["weights_dir"]) # download and return local file
weights = checks.check_model_file_from_stem(weights) # add suffix, i.e. yolo26 -> yolo26n.pt
if str(weights).rpartition(".")[-1] == "pt":
self.model, self.ckpt = load_checkpoint(weights)
self.task = self.model.task
self.overrides = self.model.args = self._reset_ckpt_args(self.model.args)
self.ckpt_path = self.model.pt_path
else:
weights = checks.check_file(weights) # runs in all cases, not redundant with above call
self.model, self.ckpt = weights, None
self.task = task or guess_model_task(weights)
self.ckpt_path = weights
self.overrides["model"] = weights
self.overrides["task"] = self.task
self.model_name = weights
def _check_is_pytorch_model(self) -> None:
pt_str = isinstance(self.model, (str, Path)) and str(self.model).rpartition(".")[-1] == "pt"
pt_module = isinstance(self.model, torch.nn.Module)
if not (pt_module or pt_str):
raise TypeError(
f"model='{self.model}' should be a *.pt PyTorch model to run this method, but is a different format. "
f"PyTorch models can train, val, predict and export, i.e. 'model.train(data=...)', but exported "
f"formats like ONNX, TensorRT etc. only support 'predict' and 'val' modes, "
f"i.e. 'yolo predict model=yolo26n.onnx'.\nTo run CUDA or MPS inference please pass the device "
f"argument directly in your inference command, i.e. 'model.predict(source=..., device=0)'"
)
def reset_weights(self) -> Model:
self._check_is_pytorch_model()
for m in self.model.modules():
if hasattr(m, "reset_parameters"):
m.reset_parameters()
for p in self.model.parameters():
p.requires_grad = True
return self
def load(self, weights: str | Path = "yolo26n.pt") -> Model:
self._check_is_pytorch_model()
if isinstance(weights, (str, Path)):
self.overrides["pretrained"] = weights # remember the weights for DDP training
weights, self.ckpt = load_checkpoint(weights)
self.model.load(weights)
return self
def save(self, filename: str | Path = "saved_model.pt") -> None:
self._check_is_pytorch_model()
from copy import deepcopy
from datetime import datetime
from ultralytics import __version__
updates = {
"model": deepcopy(self.model).half() if isinstance(self.model, torch.nn.Module) else self.model,
"date": datetime.now().isoformat(),
"version": __version__,
"license": "AGPL-3.0 License (https://ultralytics.com/license)",
"docs": "https://docs.ultralytics.com",
}
torch.save({**self.ckpt, **updates}, filename)
def info(self, detailed: bool = False, verbose: bool = True, imgsz: int | list[int, int] = 640):
self._check_is_pytorch_model()
return self.model.info(detailed=detailed, verbose=verbose, imgsz=imgsz)
def fuse(self) -> None:
self._check_is_pytorch_model()
self.model.fuse()
def embed(
self,
source: str | Path | int | list | tuple | np.ndarray | torch.Tensor = None,
stream: bool = False,
**kwargs: Any,
) -> list:
if not kwargs.get("embed"):
kwargs["embed"] = [len(self.model.model) - 2] # embed second-to-last layer if no indices passed
return self.predict(source, stream, **kwargs)
def predict(
self,
source: str | Path | int | Image.Image | list | tuple | np.ndarray | torch.Tensor = None,
stream: bool = False,
predictor=None,
**kwargs: Any,
) -> list[Results]:
if source is None:
source = "https://ultralytics.com/images/boats.jpg" if self.task == "obb" else ASSETS
LOGGER.warning(f"'source' is missing. Using 'source={source}'.")
is_cli = (ARGV[0].endswith("yolo") or ARGV[0].endswith("ultralytics")) and any(
x in ARGV for x in ("predict", "track", "mode=predict", "mode=track")
)
custom = {"conf": 0.25, "batch": 1, "save": is_cli, "mode": "predict", "rect": True} # method defaults
args = {**self.overrides, **custom, **kwargs} # highest priority args on the right
prompts = args.pop("prompts", None) # for SAM-type models
if not self.predictor or self.predictor.args.device != args.get("device", self.predictor.args.device):
self.predictor = (predictor or self._smart_load("predictor"))(overrides=args, _callbacks=self.callbacks)
self.predictor.setup_model(model=self.model, verbose=is_cli)
else: # only update args if predictor is already setup
self.predictor.args = get_cfg(self.predictor.args, args)
if "project" in args or "name" in args:
self.predictor.save_dir = get_save_dir(self.predictor.args)
if prompts and hasattr(self.predictor, "set_prompts"): # for SAM-type models
self.predictor.set_prompts(prompts)
return self.predictor.predict_cli(source=source) if is_cli else self.predictor(source=source, stream=stream)
def track(
self,
source: str | Path | int | list | tuple | np.ndarray | torch.Tensor = None,
stream: bool = False,
persist: bool = False,
**kwargs: Any,
) -> list[Results]:
if not hasattr(self.predictor, "trackers"):
from ultralytics.trackers import register_tracker
register_tracker(self, persist)
kwargs["conf"] = kwargs.get("conf") or 0.1 # ByteTrack-based method needs low confidence predictions as input
kwargs["batch"] = kwargs.get("batch") or 1 # batch-size 1 for tracking in videos
kwargs["mode"] = "track"
return self.predict(source=source, stream=stream, **kwargs)
def val(
self,
validator=None,
**kwargs: Any,
):
custom = {"rect": True} # method defaults
args = {**self.overrides, **custom, **kwargs, "mode": "val"} # highest priority args on the right
validator = (validator or self._smart_load("validator"))(args=args, _callbacks=self.callbacks)
validator(model=self.model)
self.metrics = validator.metrics
return validator.metrics
def benchmark(self, data=None, format="", verbose=False, **kwargs: Any):
self._check_is_pytorch_model()
from ultralytics.utils.benchmarks import benchmark
from .exporter import export_formats
custom = {"verbose": False} # method defaults
args = {**DEFAULT_CFG_DICT, **self.model.args, **custom, **kwargs, "mode": "benchmark"}
fmts = export_formats()
export_args = set(dict(zip(fmts["Argument"], fmts["Arguments"])).get(format, [])) - {"batch"}
export_kwargs = {k: v for k, v in args.items() if k in export_args}
return benchmark(
model=self,
data=data, # if no 'data' argument passed set data=None for default datasets
imgsz=args["imgsz"],
device=args["device"],
verbose=verbose,
format=format,
**export_kwargs,
)
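# Usage sketch (illustrative): benchmark one export format end to end; format-specific
# export args such as `half` are filtered from kwargs and forwarded automatically.
#   model = YOLO("yolo26n.pt")
#   model.benchmark(data="coco8.yaml", format="onnx", half=True)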
def export(
self,
**kwargs: Any,
) -> str:
self._check_is_pytorch_model()
from .exporter import Exporter
custom = {
"imgsz": self.model.args["imgsz"],
"batch": 1,
"data": None,
"device": None, # reset to avoid multi-GPU errors
"verbose": False,
} # method defaults
args = {**self.overrides, **custom, **kwargs, "mode": "export"} # highest priority args on the right
return Exporter(overrides=args, _callbacks=self.callbacks)(model=self.model)
def train(
self,
trainer=None,
**kwargs: Any,
):
self._check_is_pytorch_model()
if hasattr(self.session, "model") and self.session.model.id: # Ultralytics HUB session with loaded model
if any(kwargs):
LOGGER.warning("using HUB training arguments, ignoring local training arguments.")
kwargs = self.session.train_args # overwrite kwargs
checks.check_pip_update_available()
if isinstance(kwargs.get("pretrained", None), (str, Path)):
self.load(kwargs["pretrained"]) # load pretrained weights if provided
overrides = YAML.load(checks.check_yaml(kwargs["cfg"])) if kwargs.get("cfg") else self.overrides
custom = {
# NOTE: handle the case when 'cfg' includes 'data'.
"data": overrides.get("data") or DEFAULT_CFG_DICT["data"] or TASK2DATA[self.task],
"model": self.overrides["model"],
"task": self.task,
} # method defaults
args = {**overrides, **custom, **kwargs, "mode": "train", "session": self.session} # prioritizes rightmost args
if args.get("resume"):
args["resume"] = self.ckpt_path
self.trainer = (trainer or self._smart_load("trainer"))(overrides=args, _callbacks=self.callbacks)
if not args.get("resume"): # manually set model only if not resuming
self.trainer.model = self.trainer.get_model(weights=self.model if self.ckpt else None, cfg=self.model.yaml)
self.model = self.trainer.model
self.trainer.train()
# Update model and cfg after training
if RANK in {-1, 0}:
ckpt = self.trainer.best if self.trainer.best.exists() else self.trainer.last
self.model, self.ckpt = load_checkpoint(ckpt)
self.overrides = self._reset_ckpt_args(self.model.args)
self.metrics = getattr(self.trainer.validator, "metrics", None) # TODO: no metrics returned by DDP
return self.metrics
def tune(
self,
use_ray=False,
iterations=10,
*args: Any,
**kwargs: Any,
):
self._check_is_pytorch_model()
if use_ray:
from ultralytics.utils.tuner import run_ray_tune
return run_ray_tune(self, max_samples=iterations, *args, **kwargs)
else:
from .tuner import Tuner
custom = {} # method defaults
args = {**self.overrides, **custom, **kwargs, "mode": "train"} # highest priority args on the right
return Tuner(args=args, _callbacks=self.callbacks)(iterations=iterations)
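# Usage sketch (illustrative): the built-in Tuner mutates training args over N iterations;
# pass use_ray=True to delegate to Ray Tune instead.
#   model = YOLO("yolo26n.pt")
#   model.tune(iterations=5, epochs=10, plots=False)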
def _apply(self, fn) -> Model:
self._check_is_pytorch_model()
self = super()._apply(fn)
self.predictor = None # reset predictor as device may have changed
self.overrides["device"] = self.device # was str(self.device) i.e. device(type='cuda', index=0) -> 'cuda:0'
return self
@property
def names(self) -> dict[int, str]:
from ultralytics.nn.autobackend import check_class_names
if hasattr(self.model, "names"):
return check_class_names(self.model.names)
if not self.predictor: # export formats will not have predictor defined until predict() is called
predictor = self._smart_load("predictor")(overrides=self.overrides, _callbacks=self.callbacks)
predictor.setup_model(model=self.model, verbose=False) # do not mess with self.predictor.model args
return predictor.model.names
return self.predictor.model.names
@property
def device(self) -> torch.device:
return next(self.model.parameters()).device if isinstance(self.model, torch.nn.Module) else None
@property
def transforms(self):
return self.model.transforms if hasattr(self.model, "transforms") else None
def add_callback(self, event: str, func) -> None:
self.callbacks[event].append(func)
def clear_callback(self, event: str) -> None:
self.callbacks[event] = []
def reset_callbacks(self) -> None:
for event in callbacks.default_callbacks.keys():
self.callbacks[event] = [callbacks.default_callbacks[event][0]]
@staticmethod
def _reset_ckpt_args(args: dict[str, Any]) -> dict[str, Any]:
include = {"imgsz", "data", "task", "single_cls"} # only remember these arguments when loading a PyTorch model
return {k: v for k, v in args.items() if k in include}
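# Behavior sketch: only the allowlisted keys survive checkpoint loading, e.g.
#   Model._reset_ckpt_args({"imgsz": 640, "lr0": 0.01, "task": "detect"})
#   # -> {"imgsz": 640, "task": "detect"}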
# def __getattr__(self, attr):
# """Raises error if object has no requested attribute."""
# name = self.__class__.__name__
# raise AttributeError(f"'{name}' object has no attribute '{attr}'. See valid attributes below.\n{self.__doc__}")
def _smart_load(self, key: str):
try:
return self.task_map[self.task][key]
except Exception as e:
name = self.__class__.__name__
mode = inspect.stack()[1][3] # get the function name.
raise NotImplementedError(f"'{name}' model does not support '{mode}' mode for '{self.task}' task.") from e
@property
def task_map(self) -> dict:
raise NotImplementedError("Please provide task map for your model!")
def eval(self):
self.model.eval()
return self
def __getattr__(self, name):
return self._modules["model"] if name == "model" else getattr(self.model, name) | --- +++ @@ -27,6 +27,56 @@
class Model(torch.nn.Module):
+ """A base class for implementing YOLO models, unifying APIs across different model types.
+
+ This class provides a common interface for various operations related to YOLO models, such as training, validation,
+ prediction, exporting, and benchmarking. It handles different types of models, including those loaded from local
+ files, Ultralytics HUB, or Triton Server.
+
+ Attributes:
+ callbacks (dict): A dictionary of callback functions for various events during model operations.
+ predictor (BasePredictor): The predictor object used for making predictions.
+ model (torch.nn.Module): The underlying PyTorch model.
+ trainer (BaseTrainer): The trainer object used for training the model.
+ ckpt (dict): The checkpoint data if the model is loaded from a *.pt file.
+ cfg (str): The configuration of the model if loaded from a *.yaml file.
+ ckpt_path (str): The path to the checkpoint file.
+ overrides (dict): A dictionary of overrides for model configuration.
+ metrics (ultralytics.utils.metrics.DetMetrics): The latest training/validation metrics.
+ session (HUBTrainingSession): The Ultralytics HUB session, if applicable.
+ task (str): The type of task the model is intended for.
+ model_name (str): The name of the model.
+
+ Methods:
+ __call__: Alias for the predict method, enabling the model instance to be callable.
+ _new: Initialize a new model based on a configuration file.
+ _load: Load a model from a checkpoint file.
+ _check_is_pytorch_model: Ensure that the model is a PyTorch model.
+ reset_weights: Reset the model's weights to their initial state.
+ load: Load model weights from a specified file.
+ save: Save the current state of the model to a file.
+ info: Log or return information about the model.
+ fuse: Fuse Conv2d and BatchNorm2d layers for optimized inference.
+ predict: Perform predictions on given image sources.
+ track: Perform object tracking.
+ val: Validate the model on a dataset.
+ benchmark: Benchmark the model on various export formats.
+ export: Export the model to different formats.
+ train: Train the model on a dataset.
+ tune: Perform hyperparameter tuning.
+ _apply: Apply a function to the model's tensors.
+ add_callback: Add a callback function for an event.
+ clear_callback: Clear all callbacks for an event.
+ reset_callbacks: Reset all callbacks to their default functions.
+
+ Examples:
+ >>> from ultralytics import YOLO
+ >>> model = YOLO("yolo26n.pt")
+ >>> results = model.predict("image.jpg")
+ >>> model.train(data="coco8.yaml", epochs=3)
+ >>> metrics = model.val()
+ >>> model.export(format="onnx")
+ """
def __init__(
self,
@@ -34,6 +84,23 @@ task: str | None = None,
verbose: bool = False,
) -> None:
+ """Initialize a new instance of the YOLO model class.
+
+ This constructor sets up the model based on the provided model path or name. It handles various types of model
+ sources, including local files, Ultralytics HUB models, and Triton Server models. The method initializes several
+ important attributes of the model and prepares it for operations like training, prediction, or export.
+
+ Args:
+ model (str | Path | Model): Path or name of the model to load or create. Can be a local file path, a model
+ name from Ultralytics HUB, a Triton Server model, or an already initialized Model instance.
+ task (str, optional): The specific task for the model. If None, it will be inferred from the config.
+ verbose (bool): If True, enables verbose output during the model's initialization and subsequent operations.
+
+ Raises:
+ FileNotFoundError: If the specified model file does not exist or is inaccessible.
+ ValueError: If the model file or configuration is invalid or unsupported.
+ ImportError: If required dependencies for specific model types (like HUB SDK) are not installed.
+ """
if isinstance(model, Model):
self.__dict__ = model.__dict__ # accepts an already initialized Model
return
@@ -85,10 +152,49 @@ stream: bool = False,
**kwargs: Any,
) -> list:
+ """Alias for the predict method, enabling the model instance to be callable for predictions.
+
+ This method simplifies the process of making predictions by allowing the model instance to be called directly
+ with the required arguments.
+
+ Args:
+ source (str | Path | int | PIL.Image | np.ndarray | torch.Tensor | list | tuple): The source of the image(s)
+ to make predictions on. Can be a file path, URL, PIL image, numpy array, PyTorch tensor, or a list/tuple
+ of these.
+ stream (bool): If True, treat the input source as a continuous stream for predictions.
+ **kwargs (Any): Additional keyword arguments to configure the prediction process.
+
+ Returns:
+ (list[ultralytics.engine.results.Results]): A list of prediction results, each encapsulated in a Results
+ object.
+
+ Examples:
+ >>> model = YOLO("yolo26n.pt")
+ >>> results = model("https://ultralytics.com/images/bus.jpg")
+ >>> for r in results:
+ ... print(f"Detected {len(r)} objects in image")
+ """
return self.predict(source, stream, **kwargs)
@staticmethod
def is_triton_model(model: str) -> bool:
+ """Check if the given model string is a Triton Server URL.
+
+ This static method determines whether the provided model string represents a valid Triton Server URL by parsing
+ its components using urllib.parse.urlsplit().
+
+ Args:
+ model (str): The model string to be checked.
+
+ Returns:
+ (bool): True if the model string is a valid Triton Server URL, False otherwise.
+
+ Examples:
+ >>> Model.is_triton_model("http://localhost:8000/v2/models/yolo26n")
+ True
+ >>> Model.is_triton_model("yolo26n.pt")
+ False
+ """
from urllib.parse import urlsplit
url = urlsplit(model)
@@ -96,11 +202,48 @@
@staticmethod
def is_hub_model(model: str) -> bool:
+ """Check if the provided model is an Ultralytics HUB model.
+
+ This static method determines whether the given model string represents a valid Ultralytics HUB model
+ identifier.
+
+ Args:
+ model (str): The model string to check.
+
+ Returns:
+ (bool): True if the model is a valid Ultralytics HUB model, False otherwise.
+
+ Examples:
+ >>> Model.is_hub_model("https://hub.ultralytics.com/models/MODEL")
+ True
+ >>> Model.is_hub_model("yolo26n.pt")
+ False
+ """
from ultralytics.hub import HUB_WEB_ROOT
return model.startswith(f"{HUB_WEB_ROOT}/models/")
def _new(self, cfg: str, task=None, model=None, verbose=False) -> None:
+ """Initialize a new model and infer the task type from model definitions.
+
+ Creates a new model instance based on the provided configuration file. Loads the model configuration, infers the
+ task type if not specified, and initializes the model using the appropriate class from the task map.
+
+ Args:
+ cfg (str): Path to the model configuration file in YAML format.
+ task (str, optional): The specific task for the model. If None, it will be inferred from the config.
+ model (type[torch.nn.Module], optional): A custom model class. If provided, it will be used instead of the
+ default model class from the task map.
+ verbose (bool): If True, displays model information during loading.
+
+ Raises:
+ ValueError: If the configuration file is invalid or the task cannot be inferred.
+ ImportError: If the required dependencies for the specified task are not installed.
+
+ Examples:
+ >>> model = Model()
+ >>> model._new("yolo26n.yaml", task="detect", verbose=True)
+ """
cfg_dict = yaml_model_load(cfg)
self.cfg = cfg
self.task = task or guess_model_task(cfg_dict)
@@ -114,6 +257,24 @@ self.model_name = cfg
def _load(self, weights: str, task=None) -> None:
+ """Load a model from a checkpoint file or initialize it from a weights file.
+
+ This method handles loading models from either .pt checkpoint files or other weight file formats. It sets up the
+ model, task, and related attributes based on the loaded weights.
+
+ Args:
+ weights (str): Path to the model weights file to be loaded.
+ task (str, optional): The task associated with the model. If None, it will be inferred from the model.
+
+ Raises:
+ FileNotFoundError: If the specified weights file does not exist or is inaccessible.
+ ValueError: If the weights file format is unsupported or invalid.
+
+ Examples:
+ >>> model = Model()
+ >>> model._load("yolo26n.pt")
+ >>> model._load("path/to/weights.pth", task="detect")
+ """
if weights.lower().startswith(("https://", "http://", "rtsp://", "rtmp://", "tcp://", "ul://")):
weights = checks.check_file(weights, download_dir=SETTINGS["weights_dir"]) # download and return local file
weights = checks.check_model_file_from_stem(weights) # add suffix, i.e. yolo26 -> yolo26n.pt
@@ -133,6 +294,21 @@ self.model_name = weights
def _check_is_pytorch_model(self) -> None:
+ """Check if the model is a PyTorch model and raise TypeError if it's not.
+
+ This method verifies that the model is either a PyTorch module or a .pt file. It's used to ensure that certain
+ operations that require a PyTorch model are only performed on compatible model types.
+
+ Raises:
+ TypeError: If the model is not a PyTorch module or a .pt file. The error message provides detailed
+ information about supported model formats and operations.
+
+ Examples:
+ >>> model = Model("yolo26n.pt")
+ >>> model._check_is_pytorch_model() # No error raised
+ >>> model = Model("yolo26n.onnx")
+ >>> model._check_is_pytorch_model() # Raises TypeError
+ """
pt_str = isinstance(self.model, (str, Path)) and str(self.model).rpartition(".")[-1] == "pt"
pt_module = isinstance(self.model, torch.nn.Module)
if not (pt_module or pt_str):
@@ -145,6 +321,22 @@ )
def reset_weights(self) -> Model:
+ """Reset the model's weights to their initial state.
+
+ This method iterates through all modules in the model and resets their parameters if they have a
+ 'reset_parameters' method. It also ensures that all parameters have 'requires_grad' set to True, enabling them
+ to be updated during training.
+
+ Returns:
+ (Model): The instance of the class with reset weights.
+
+ Raises:
+ TypeError: If the model is not a PyTorch model.
+
+ Examples:
+ >>> model = Model("yolo26n.pt")
+ >>> model.reset_weights()
+ """
self._check_is_pytorch_model()
for m in self.model.modules():
if hasattr(m, "reset_parameters"):
@@ -154,6 +346,25 @@ return self
def load(self, weights: str | Path = "yolo26n.pt") -> Model:
+ """Load parameters from the specified weights file into the model.
+
+ This method supports loading weights from a file or directly from a weights object. It matches parameters by
+ name and shape and transfers them to the model.
+
+ Args:
+ weights (str | Path): Path to the weights file or a weights object.
+
+ Returns:
+ (Model): The instance of the class with loaded weights.
+
+ Raises:
+ TypeError: If the model is not a PyTorch model.
+
+ Examples:
+ >>> model = Model()
+ >>> model.load("yolo26n.pt")
+ >>> model.load(Path("path/to/weights.pt"))
+ """
self._check_is_pytorch_model()
if isinstance(weights, (str, Path)):
self.overrides["pretrained"] = weights # remember the weights for DDP training
@@ -162,6 +373,21 @@ return self
def save(self, filename: str | Path = "saved_model.pt") -> None:
+ """Save the current model state to a file.
+
+ This method exports the model's checkpoint (ckpt) to the specified filename. It includes metadata such as the
+ date, Ultralytics version, license information, and a link to the documentation.
+
+ Args:
+ filename (str | Path): The name of the file to save the model to.
+
+ Raises:
+ TypeError: If the model is not a PyTorch model.
+
+ Examples:
+ >>> model = Model("yolo26n.pt")
+ >>> model.save("my_model.pt")
+ """
self._check_is_pytorch_model()
from copy import deepcopy
from datetime import datetime
@@ -178,10 +404,44 @@ torch.save({**self.ckpt, **updates}, filename)
def info(self, detailed: bool = False, verbose: bool = True, imgsz: int | list[int, int] = 640):
+ """Display model information.
+
+ This method provides an overview or detailed information about the model, depending on the arguments
+ passed. It can control the verbosity of the output.
+
+ Args:
+ detailed (bool): If True, shows detailed information about the model layers and parameters.
+ verbose (bool): If True, prints the information and returns model summary. If False, returns None.
+ imgsz (int | list[int, int]): Input image size used for FLOPs calculation.
+
+ Returns:
+ (tuple): A tuple containing the number of layers (int), number of parameters (int), number of gradients
+ (int), and GFLOPs (float). Returns None if verbose is False.
+
+ Examples:
+ >>> model = Model("yolo26n.pt")
+ >>> model.info() # Prints model summary and returns tuple
+ >>> model.info(detailed=True) # Prints detailed info and returns tuple
+ """
self._check_is_pytorch_model()
return self.model.info(detailed=detailed, verbose=verbose, imgsz=imgsz)
def fuse(self) -> None:
+ """Fuse Conv2d and BatchNorm2d layers in the model for optimized inference.
+
+ This method iterates through the model's modules and fuses consecutive Conv2d and BatchNorm2d layers into a
+ single layer. This fusion can significantly improve inference speed by reducing the number of operations and
+ memory accesses required during forward passes.
+
+ The fusion process typically involves folding the BatchNorm2d parameters (mean, variance, weight, and
+ bias) into the preceding Conv2d layer's weights and biases. This results in a single Conv2d layer that
+ performs both convolution and normalization in one step.
+
+ Examples:
+ >>> model = Model("yolo26n.pt")
+ >>> model.fuse()
+ >>> # Model is now fused and ready for optimized inference
+ """
self._check_is_pytorch_model()
self.model.fuse()
@@ -191,6 +451,26 @@ stream: bool = False,
**kwargs: Any,
) -> list:
+ """Generate image embeddings based on the provided source.
+
+ This method is a wrapper around the 'predict()' method, focusing on generating embeddings from an image
+ source. It allows customization of the embedding process through various keyword arguments.
+
+ Args:
+ source (str | Path | int | list | tuple | np.ndarray | torch.Tensor): The source of the image for generating
+ embeddings. Can be a file path, URL, numpy array, etc.
+ stream (bool): If True, predictions are streamed.
+ **kwargs (Any): Additional keyword arguments for configuring the embedding process.
+
+ Returns:
+ (list[torch.Tensor]): A list containing the image embeddings.
+
+ Examples:
+ >>> model = YOLO("yolo26n.pt")
+ >>> image = "https://ultralytics.com/images/bus.jpg"
+ >>> embeddings = model.embed(image)
+ >>> print(embeddings[0].shape)
+ """
if not kwargs.get("embed"):
kwargs["embed"] = [len(self.model.model) - 2] # embed second-to-last layer if no indices passed
return self.predict(source, stream, **kwargs)
@@ -202,6 +482,36 @@ predictor=None,
**kwargs: Any,
) -> list[Results]:
+ """Perform predictions on the given image source using the YOLO model.
+
+ This method facilitates the prediction process, allowing various configurations through keyword arguments. It
+ supports predictions with custom predictors or the default predictor method. The method handles different types
+ of image sources and can operate in a streaming mode.
+
+ Args:
+ source (str | Path | int | PIL.Image | np.ndarray | torch.Tensor | list | tuple): The source of the image(s)
+ to make predictions on. Accepts various types including file paths, URLs, PIL images, numpy arrays, and
+ torch tensors.
+ stream (bool): If True, treats the input source as a continuous stream for predictions.
+ predictor (BasePredictor, optional): An instance of a custom predictor class for making predictions. If
+ None, the method uses a default predictor.
+ **kwargs (Any): Additional keyword arguments for configuring the prediction process.
+
+ Returns:
+ (list[ultralytics.engine.results.Results]): A list of prediction results, each encapsulated in a Results
+ object.
+
+ Examples:
+ >>> model = YOLO("yolo26n.pt")
+ >>> results = model.predict(source="path/to/image.jpg", conf=0.25)
+ >>> for r in results:
+ ... print(r.boxes.data) # print detection bounding boxes
+
+ Notes:
+ - If 'source' is not provided, it defaults to the ASSETS constant with a warning.
+ - The method sets up a new predictor if not already present and updates its arguments with each call.
+ - For SAM-type models, 'prompts' can be passed as a keyword argument.
+ """
if source is None:
source = "https://ultralytics.com/images/boats.jpg" if self.task == "obb" else ASSETS
LOGGER.warning(f"'source' is missing. Using 'source={source}'.")
@@ -232,6 +542,33 @@ persist: bool = False,
**kwargs: Any,
) -> list[Results]:
+ """Conduct object tracking on the specified input source using the registered trackers.
+
+ This method performs object tracking using the model's predictors and optionally registered trackers. It handles
+ various input sources such as file paths or video streams, and supports customization through keyword arguments.
+ The method registers trackers if not already present and can persist them between calls.
+
+ Args:
+ source (str | Path | int | list | tuple | np.ndarray | torch.Tensor, optional): Input source for object
+ tracking. Can be a file path, URL, or video stream.
+ stream (bool): If True, treats the input source as a continuous video stream.
+ persist (bool): If True, persists trackers between different calls to this method.
+ **kwargs (Any): Additional keyword arguments for configuring the tracking process.
+
+ Returns:
+ (list[ultralytics.engine.results.Results]): A list of tracking results, each a Results object.
+
+ Examples:
+ >>> model = YOLO("yolo26n.pt")
+ >>> results = model.track(source="path/to/video.mp4", show=True)
+ >>> for r in results:
+ ... print(r.boxes.id) # print tracking IDs
+
+ Notes:
+ - This method sets a default confidence threshold of 0.1 for ByteTrack-based tracking.
+ - The tracking mode is explicitly set in the keyword arguments.
+ - Batch size is set to 1 for tracking in videos.
+ """
if not hasattr(self.predictor, "trackers"):
from ultralytics.trackers import register_tracker
@@ -246,6 +583,30 @@ validator=None,
**kwargs: Any,
):
+ """Validate the model using a specified dataset and validation configuration.
+
+ This method facilitates the model validation process, allowing for customization through various settings. It
+ supports validation with a custom validator or the default validation approach. The method combines default
+ configurations, method-specific defaults, and user-provided arguments to configure the validation process.
+
+ Args:
+ validator (ultralytics.engine.validator.BaseValidator, optional): An instance of a custom validator class
+ for validating the model.
+ **kwargs (Any): Arbitrary keyword arguments for customizing the validation process.
+
+ Returns:
+ (ultralytics.utils.metrics.DetMetrics): Validation metrics obtained from the validation process. The
+ specific metrics type depends on the task (e.g., DetMetrics, SegmentMetrics,
+ PoseMetrics, ClassifyMetrics).
+
+ Raises:
+ TypeError: If the model is not a PyTorch model.
+
+ Examples:
+ >>> model = YOLO("yolo26n.pt")
+ >>> results = model.val(data="coco8.yaml", imgsz=640)
+ >>> print(results.box.map) # Print mAP50-95
+ """
custom = {"rect": True} # method defaults
args = {**self.overrides, **custom, **kwargs, "mode": "val"} # highest priority args on the right
@@ -255,6 +616,35 @@ return validator.metrics
def benchmark(self, data=None, format="", verbose=False, **kwargs: Any):
+ """Benchmark the model across various export formats to evaluate performance.
+
+ This method assesses the model's performance in different export formats, such as ONNX, TorchScript, etc. It
+ uses the 'benchmark' function from the ultralytics.utils.benchmarks module. The benchmarking is configured using
+ a combination of default configuration values, model-specific arguments, method-specific defaults, and any
+ additional user-provided keyword arguments.
+
+ Args:
+ data (str | None): Path to the dataset for benchmarking. If None, uses default dataset for the task.
+ format (str): Export format name for specific benchmarking.
+ verbose (bool): Whether to print detailed benchmark information.
+ **kwargs (Any): Arbitrary keyword arguments to customize the benchmarking process. Common options include:
+ - imgsz (int | list[int]): Image size for benchmarking.
+ - half (bool): Whether to use half-precision (FP16) mode.
+ - int8 (bool): Whether to use int8 precision mode.
+ - device (str): Device to run the benchmark on (e.g., 'cpu', 'cuda').
+
+ Returns:
+ (polars.DataFrame): A Polars DataFrame with benchmark results for each format, including file size, metric,
+ and inference time.
+
+ Raises:
+ TypeError: If the model is not a PyTorch model.
+
+ Examples:
+ >>> model = YOLO("yolo26n.pt")
+ >>> results = model.benchmark(data="coco8.yaml", imgsz=640, half=True)
+ >>> print(results)
+ """
self._check_is_pytorch_model()
from ultralytics.utils.benchmarks import benchmark
@@ -279,6 +669,35 @@ self,
**kwargs: Any,
) -> str:
+ """Export the model to a different format suitable for deployment.
+
+ This method facilitates the export of the model to various formats (e.g., ONNX, TorchScript) for deployment
+ purposes. It uses the 'Exporter' class for the export process, combining model-specific overrides, method
+ defaults, and any additional arguments provided.
+
+ Args:
+ **kwargs (Any): Arbitrary keyword arguments for export configuration. Common options include:
+ - format (str): Export format (e.g., 'onnx', 'engine', 'coreml').
+ - half (bool): Export model in half-precision.
+ - int8 (bool): Export model in int8 precision.
+ - device (str): Device to run the export on.
+ - workspace (int): Maximum memory workspace size for TensorRT engines.
+ - nms (bool): Add Non-Maximum Suppression (NMS) module to model.
+ - simplify (bool): Simplify ONNX model.
+
+ Returns:
+ (str): The path to the exported model file.
+
+ Raises:
+ TypeError: If the model is not a PyTorch model.
+ ValueError: If an unsupported export format is specified.
+ RuntimeError: If the export process fails due to errors.
+
+ Examples:
+ >>> model = YOLO("yolo26n.pt")
+ >>> model.export(format="onnx", dynamic=True, simplify=True)
+ 'path/to/exported/model.onnx'
+ """
self._check_is_pytorch_model()
from .exporter import Exporter
@@ -297,6 +716,38 @@ trainer=None,
**kwargs: Any,
):
+ """Train the model using the specified dataset and training configuration.
+
+ This method facilitates model training with a range of customizable settings. It supports training with a custom
+ trainer or the default training approach. The method handles scenarios such as resuming training from a
+ checkpoint, integrating with Ultralytics HUB, and updating model and configuration after training.
+
+ When using Ultralytics HUB, if the session has a loaded model, the method prioritizes HUB training arguments and
+ warns if local arguments are provided. It checks for pip updates and combines default configurations,
+ method-specific defaults, and user-provided arguments to configure the training process.
+
+ Args:
+ trainer (BaseTrainer, optional): Custom trainer instance for model training. If None, uses default.
+ **kwargs (Any): Arbitrary keyword arguments for training configuration. Common options include:
+ - data (str): Path to dataset configuration file.
+ - epochs (int): Number of training epochs.
+ - batch (int): Batch size for training.
+ - imgsz (int): Input image size.
+ - device (str): Device to run training on (e.g., 'cuda', 'cpu').
+ - workers (int): Number of worker threads for data loading.
+ - optimizer (str): Optimizer to use for training.
+ - lr0 (float): Initial learning rate.
+ - patience (int): Epochs to wait for no observable improvement for early stopping of training.
+ - augmentations (list[Callable]): List of augmentation functions to apply during training.
+
+ Returns:
+ (ultralytics.utils.metrics.DetMetrics | None): Training metrics if available and training is successful;
+ otherwise, None. The specific metrics type depends on the task.
+
+ Examples:
+ >>> model = YOLO("yolo26n.pt")
+ >>> results = model.train(data="coco8.yaml", epochs=3)
+ """
self._check_is_pytorch_model()
if hasattr(self.session, "model") and self.session.model.id: # Ultralytics HUB session with loaded model
if any(kwargs):
@@ -339,6 +790,35 @@ *args: Any,
**kwargs: Any,
):
+ """Conduct hyperparameter tuning for the model, with an option to use Ray Tune.
+
+ This method supports two modes of hyperparameter tuning: using Ray Tune or a custom tuning method. When Ray Tune
+ is enabled, it leverages the 'run_ray_tune' function from the ultralytics.utils.tuner module. Otherwise, it uses
+ the internal 'Tuner' class for tuning. The method combines default, overridden, and custom arguments to
+ configure the tuning process.
+
+ Args:
+ use_ray (bool): Whether to use Ray Tune for hyperparameter tuning. If False, uses internal tuning method.
+ iterations (int): Number of tuning iterations to perform.
+ *args (Any): Additional positional arguments to pass to the tuner.
+ **kwargs (Any): Additional keyword arguments for tuning configuration. These are combined with model
+ overrides and defaults to configure the tuning process.
+
+ Returns:
+ (ray.tune.ResultGrid | None): When use_ray=True, returns a ResultGrid with hyperparameter search results.
+ When use_ray=False, returns None and saves best hyperparameters to YAML.
+
+ Raises:
+ TypeError: If the model is not a PyTorch model.
+
+ Examples:
+ >>> model = YOLO("yolo26n.pt")
+ >>> results = model.tune(data="coco8.yaml", iterations=5)
+ >>> print(results)
+
+ # Use Ray Tune for more advanced hyperparameter search
+ >>> results = model.tune(use_ray=True, iterations=20, data="coco8.yaml")
+ """
self._check_is_pytorch_model()
if use_ray:
from ultralytics.utils.tuner import run_ray_tune
@@ -352,6 +832,26 @@ return Tuner(args=args, _callbacks=self.callbacks)(iterations=iterations)
def _apply(self, fn) -> Model:
+ """Apply a function to model parameters, buffers, and tensors.
+
+ This method extends the functionality of the parent class's _apply method by additionally resetting the
+ predictor and updating the device in the model's overrides. It's typically used for operations like moving the
+ model to a different device or changing its precision.
+
+ Args:
+ fn (Callable): A function to be applied to the model's tensors. This is typically a method like to(), cpu(),
+ cuda(), half(), or float().
+
+ Returns:
+ (Model): The model instance with the function applied and updated attributes.
+
+ Raises:
+ TypeError: If the model is not a PyTorch model.
+
+ Examples:
+ >>> model = Model("yolo26n.pt")
+ >>> model = model._apply(lambda t: t.cuda()) # Move model to GPU
+ """
self._check_is_pytorch_model()
self = super()._apply(fn)
self.predictor = None # reset predictor as device may have changed
@@ -360,6 +860,24 @@
@property
def names(self) -> dict[int, str]:
+ """Retrieve the class names associated with the loaded model.
+
+ This property returns the class names if they are defined in the model. It checks the class names for validity
+ using the 'check_class_names' function from the ultralytics.nn.autobackend module. If the predictor is not
+ initialized, it sets it up before retrieving the names.
+
+ Returns:
+ (dict[int, str]): A dictionary of class names associated with the model, where keys are class indices and
+ values are the corresponding class names.
+
+ Raises:
+ AttributeError: If the model or predictor does not have a 'names' attribute.
+
+ Examples:
+ >>> model = YOLO("yolo26n.pt")
+ >>> print(model.names)
+ {0: 'person', 1: 'bicycle', 2: 'car', ...}
+ """
from ultralytics.nn.autobackend import check_class_names
if hasattr(self.model, "names"):
@@ -372,24 +890,137 @@
@property
def device(self) -> torch.device:
+ """Get the device on which the model's parameters are allocated.
+
+ This property determines the device (CPU or GPU) where the model's parameters are currently stored. It is
+ applicable only to models that are instances of torch.nn.Module.
+
+ Returns:
+ (torch.device | None): The device (CPU/GPU) of the model, or None if the model is not a torch.nn.Module
+ instance.
+
+ Examples:
+ >>> model = YOLO("yolo26n.pt")
+ >>> print(model.device)
+ device(type='cuda', index=0) # if CUDA is available
+ >>> model = model.to("cpu")
+ >>> print(model.device)
+ device(type='cpu')
+ """
return next(self.model.parameters()).device if isinstance(self.model, torch.nn.Module) else None
@property
def transforms(self):
+ """Retrieve the transformations applied to the input data of the loaded model.
+
+ This property returns the transformations if they are defined in the model. The transforms typically include
+ preprocessing steps like resizing, normalization, and data augmentation that are applied to input data before it
+ is fed into the model.
+
+ Returns:
+ (object | None): The transform object of the model if available, otherwise None.
+
+ Examples:
+ >>> model = YOLO("yolo26n.pt")
+ >>> transforms = model.transforms
+ >>> if transforms:
+ ... print(f"Model transforms: {transforms}")
+ ... else:
+ ... print("No transforms defined for this model.")
+ """
return self.model.transforms if hasattr(self.model, "transforms") else None
def add_callback(self, event: str, func) -> None:
+ """Add a callback function for a specified event.
+
+ This method allows registering custom callback functions that are triggered on specific events during model
+ operations such as training or inference. Callbacks provide a way to extend and customize the behavior of the
+ model at various stages of its lifecycle.
+
+ Args:
+ event (str): The name of the event to attach the callback to. Must be a valid event name recognized by the
+ Ultralytics framework.
+ func (Callable): The callback function to be registered. This function will be called when the specified
+ event occurs.
+
+ Examples:
+ >>> def on_train_start(trainer):
+ ... print("Training is starting!")
+ >>> model = YOLO("yolo26n.pt")
+ >>> model.add_callback("on_train_start", on_train_start)
+ >>> model.train(data="coco8.yaml", epochs=1)
+ """
self.callbacks[event].append(func)
def clear_callback(self, event: str) -> None:
+ """Clear all callback functions registered for a specified event.
+
+ This method removes all custom and default callback functions associated with the given event. It resets the
+ callback list for the specified event to an empty list, effectively removing all registered callbacks for that
+ event.
+
+ Args:
+ event (str): The name of the event for which to clear the callbacks. This should be a valid event name
+ recognized by the Ultralytics callback system.
+
+ Examples:
+ >>> model = YOLO("yolo26n.pt")
+ >>> model.add_callback("on_train_start", lambda: print("Training started"))
+ >>> model.clear_callback("on_train_start")
+ >>> # All callbacks for 'on_train_start' are now removed
+
+ Notes:
+ - This method affects both custom callbacks added by the user and default callbacks
+ provided by the Ultralytics framework.
+ - After calling this method, no callbacks will be executed for the specified event
+ until new ones are added.
+ - Use with caution as it removes all callbacks, including essential ones that might
+ be required for proper functioning of certain operations.
+ """
self.callbacks[event] = []
def reset_callbacks(self) -> None:
+ """Reset all callbacks to their default functions.
+
+ This method reinstates the default callback functions for all events, removing any custom callbacks that were
+ previously added. It iterates through all default callback events and replaces the current callbacks with the
+ default ones.
+
+ The default callbacks are defined in the 'callbacks.default_callbacks' dictionary, which contains predefined
+ functions for various events in the model's lifecycle, such as on_train_start, on_epoch_end, etc.
+
+ This method is useful when you want to revert to the original set of callbacks after making custom
+ modifications, ensuring consistent behavior across different runs or experiments.
+
+ Examples:
+ >>> model = YOLO("yolo26n.pt")
+ >>> model.add_callback("on_train_start", custom_function)
+ >>> model.reset_callbacks()
+ # All callbacks are now reset to their default functions
+ """
for event in callbacks.default_callbacks.keys():
self.callbacks[event] = [callbacks.default_callbacks[event][0]]
@staticmethod
def _reset_ckpt_args(args: dict[str, Any]) -> dict[str, Any]:
+ """Reset specific arguments when loading a PyTorch model checkpoint.
+
+ This method filters the input arguments dictionary to retain only a specific set of keys that are considered
+ important for model loading. It's used to ensure that only relevant arguments are preserved when loading a model
+ from a checkpoint, discarding any unnecessary or potentially conflicting settings.
+
+ Args:
+ args (dict[str, Any]): A dictionary containing various model arguments and settings.
+
+ Returns:
+ (dict[str, Any]): A new dictionary containing only the specified include keys from the input arguments.
+
+ Examples:
+ >>> original_args = {"imgsz": 640, "data": "coco.yaml", "task": "detect", "batch": 16, "epochs": 100}
+ >>> reset_args = Model._reset_ckpt_args(original_args)
+ >>> print(reset_args)
+ {'imgsz': 640, 'data': 'coco.yaml', 'task': 'detect'}
+ """
include = {"imgsz", "data", "task", "single_cls"} # only remember these arguments when loading a PyTorch model
return {k: v for k, v in args.items() if k in include}
@@ -399,6 +1030,26 @@ # raise AttributeError(f"'{name}' object has no attribute '{attr}'. See valid attributes below.\n{self.__doc__}")
def _smart_load(self, key: str):
+ """Intelligently load the appropriate module based on the model task.
+
+ This method dynamically selects and returns the correct module (model, trainer, validator, or predictor) based
+ on the current task of the model and the provided key. It uses the task_map dictionary to determine the
+ appropriate module to load for the specific task.
+
+ Args:
+ key (str): The type of module to load. Must be one of 'model', 'trainer', 'validator', or 'predictor'.
+
+ Returns:
+ (object): The loaded module class corresponding to the specified key and current task.
+
+ Raises:
+ NotImplementedError: If the specified key is not supported for the current task.
+
+ Examples:
+ >>> model = Model(task="detect")
+ >>> predictor_class = model._smart_load("predictor")
+ >>> trainer_class = model._smart_load("trainer")
+ """
try:
return self.task_map[self.task][key]
except Exception as e:
@@ -408,11 +1059,66 @@
@property
def task_map(self) -> dict:
+ """Provide a mapping from model tasks to corresponding classes for different modes.
+
+ This property method returns a dictionary that maps each supported task (e.g., detect, segment, classify) to a
+ nested dictionary. The nested dictionary contains mappings for different operational modes (model, trainer,
+ validator, predictor) to their respective class implementations.
+
+ The mapping allows for dynamic loading of appropriate classes based on the model's task and the desired
+ operational mode. This facilitates a flexible and extensible architecture for handling various tasks and modes
+ within the Ultralytics framework.
+
+ Returns:
+ (dict[str, dict[str, Any]]): A dictionary mapping task names to nested dictionaries. Each nested dictionary
+ contains mappings for 'model', 'trainer', 'validator', and 'predictor' keys to their respective class
+ implementations for that task.
+
+ Examples:
+ >>> model = Model("yolo26n.pt")
+ >>> task_map = model.task_map
+ >>> detect_predictor = task_map["detect"]["predictor"]
+ >>> segment_trainer = task_map["segment"]["trainer"]
+ """
raise NotImplementedError("Please provide task map for your model!")
def eval(self):
+ """Sets the model to evaluation mode.
+
+ This method changes the model's mode to evaluation, which affects layers like dropout and batch normalization
+ that behave differently during training and evaluation. In evaluation mode, these layers use running statistics
+ rather than computing batch statistics, and dropout layers are disabled.
+
+ Returns:
+ (Model): The model instance with evaluation mode set.
+
+ Examples:
+ >>> model = YOLO("yolo26n.pt")
+ >>> model.eval()
+ >>> # Model is now in evaluation mode for inference
+ """
self.model.eval()
return self
def __getattr__(self, name):
- return self._modules["model"] if name == "model" else getattr(self.model, name)+ """Enable accessing model attributes directly through the Model class.
+
+ This method provides a way to access attributes of the underlying model directly through the Model class
+ instance. It first checks if the requested attribute is 'model', in which case it returns the model from
+ the module dictionary. Otherwise, it delegates the attribute lookup to the underlying model.
+
+ Args:
+ name (str): The name of the attribute to retrieve.
+
+ Returns:
+ (Any): The requested attribute value.
+
+ Raises:
+ AttributeError: If the requested attribute does not exist in the model.
+
+ Examples:
+ >>> model = YOLO("yolo26n.pt")
+ >>> print(model.stride) # Access model.stride attribute
+ >>> print(model.names) # Access model.names attribute
+ """
+ return self._modules["model"] if name == "model" else getattr(self.model, name)
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/engine/model.py |
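As a quick illustration of how the Model methods documented in the diff above compose in practice, here is a minimal, hedged sketch (it assumes the ultralytics package is installed and that the yolo26n.pt weights and coco8.yaml dataset named in the docstrings are available; it is not part of the original file):
# Minimal end-to-end sketch of the documented Model workflow.
from ultralytics import YOLO
model = YOLO("yolo26n.pt")  # load pretrained detection weights
model.info()  # layers, parameters, gradients, GFLOPs summary
results = model.predict("https://ultralytics.com/images/bus.jpg", conf=0.25)
for r in results:
    print(r.boxes.data)  # detection boxes, as in the predict() docstring example
metrics = model.val(data="coco8.yaml", imgsz=640)  # DetMetrics, see val() docstring
print(metrics.box.map)  # mAP50-95
onnx_path = model.export(format="onnx", dynamic=True)  # path to exported model, see export()
print(onnx_path)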
Add docstrings for better understanding | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import glob
import math
import os
import random
from copy import deepcopy
from multiprocessing.pool import ThreadPool
from pathlib import Path
from typing import Any
import cv2
import numpy as np
from torch.utils.data import Dataset
from ultralytics.data.utils import FORMATS_HELP_MSG, HELP_URL, IMG_FORMATS, check_file_speeds
from ultralytics.utils import DEFAULT_CFG, LOCAL_RANK, LOGGER, NUM_THREADS, TQDM
from ultralytics.utils.patches import imread
class BaseDataset(Dataset):
def __init__(
self,
img_path: str | list[str],
imgsz: int = 640,
cache: bool | str = False,
augment: bool = True,
hyp: dict[str, Any] = DEFAULT_CFG,
prefix: str = "",
rect: bool = False,
batch_size: int = 16,
stride: int = 32,
pad: float = 0.5,
single_cls: bool = False,
classes: list[int] | None = None,
fraction: float = 1.0,
channels: int = 3,
):
super().__init__()
self.img_path = img_path
self.imgsz = imgsz
self.augment = augment
self.single_cls = single_cls
self.prefix = prefix
self.fraction = fraction
self.channels = channels
self.cv2_flag = cv2.IMREAD_GRAYSCALE if channels == 1 else cv2.IMREAD_COLOR
self.im_files = self.get_img_files(self.img_path)
self.labels = self.get_labels()
self.update_labels(include_class=classes) # single_cls and include_class
self.ni = len(self.labels) # number of images
self.rect = rect
self.batch_size = batch_size
self.stride = stride
self.pad = pad
if self.rect:
assert self.batch_size is not None
self.set_rectangle()
# Buffer thread for mosaic images
self.buffer = [] # buffer size = batch size
self.max_buffer_length = min((self.ni, self.batch_size * 8, 1000)) if self.augment else 0
# Cache images (options are cache = True, False, None, "ram", "disk")
self.ims, self.im_hw0, self.im_hw = [None] * self.ni, [None] * self.ni, [None] * self.ni
self.npy_files = [Path(f).with_suffix(".npy") for f in self.im_files]
self.cache = cache.lower() if isinstance(cache, str) else "ram" if cache is True else None
if self.cache == "ram" and self.check_cache_ram():
if hyp.deterministic:
LOGGER.warning(
"cache='ram' may produce non-deterministic training results. "
"Consider cache='disk' as a deterministic alternative if your disk space allows."
)
self.cache_images()
elif self.cache == "disk" and self.check_cache_disk():
self.cache_images()
# Transforms
self.transforms = self.build_transforms(hyp=hyp)
def get_img_files(self, img_path: str | list[str]) -> list[str]:
try:
f = [] # image files
for p in img_path if isinstance(img_path, list) else [img_path]:
p = Path(p) # os-agnostic
if p.is_dir(): # dir
f += glob.glob(str(p / "**" / "*.*"), recursive=True)
# F = list(p.rglob('*.*')) # pathlib
elif p.is_file(): # file
with open(p, encoding="utf-8") as t:
t = t.read().strip().splitlines()
parent = str(p.parent) + os.sep
f += [x.replace("./", parent) if x.startswith("./") else x for x in t] # local to global path
# F += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib)
else:
raise FileNotFoundError(f"{self.prefix}{p} does not exist")
im_files = sorted(x.replace("/", os.sep) for x in f if x.rpartition(".")[-1].lower() in IMG_FORMATS)
# self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib
assert im_files, f"{self.prefix}No images found in {img_path}. {FORMATS_HELP_MSG}"
except Exception as e:
raise FileNotFoundError(f"{self.prefix}Error loading data from {img_path}\n{HELP_URL}") from e
if self.fraction < 1:
im_files = im_files[: round(len(im_files) * self.fraction)] # retain a fraction of the dataset
check_file_speeds(im_files, prefix=self.prefix) # check image read speeds
return im_files
def update_labels(self, include_class: list[int] | None) -> None:
include_class_array = np.array(include_class).reshape(1, -1)
for i in range(len(self.labels)):
if include_class is not None:
cls = self.labels[i]["cls"]
bboxes = self.labels[i]["bboxes"]
segments = self.labels[i]["segments"]
keypoints = self.labels[i]["keypoints"]
j = (cls == include_class_array).any(1)
self.labels[i]["cls"] = cls[j]
self.labels[i]["bboxes"] = bboxes[j]
if segments:
self.labels[i]["segments"] = [segments[si] for si, idx in enumerate(j) if idx]
if keypoints is not None:
self.labels[i]["keypoints"] = keypoints[j]
if self.single_cls:
self.labels[i]["cls"][:, 0] = 0
def load_image(self, i: int, rect_mode: bool = True) -> tuple[np.ndarray, tuple[int, int], tuple[int, int]]:
im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i]
if im is None: # not cached in RAM
if fn.exists(): # load npy
try:
im = np.load(fn)
except Exception as e:
LOGGER.warning(f"{self.prefix}Removing corrupt *.npy image file {fn} due to: {e}")
Path(fn).unlink(missing_ok=True)
im = imread(f, flags=self.cv2_flag) # BGR
else: # read image
im = imread(f, flags=self.cv2_flag) # BGR
if im is None:
raise FileNotFoundError(f"Image Not Found {f}")
h0, w0 = im.shape[:2] # orig hw
if rect_mode: # resize long side to imgsz while maintaining aspect ratio
r = self.imgsz / max(h0, w0) # ratio
if r != 1: # if sizes are not equal
w, h = (min(math.ceil(w0 * r), self.imgsz), min(math.ceil(h0 * r), self.imgsz))
im = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR)
elif not (h0 == w0 == self.imgsz): # resize by stretching image to square imgsz
im = cv2.resize(im, (self.imgsz, self.imgsz), interpolation=cv2.INTER_LINEAR)
if im.ndim == 2:
im = im[..., None]
# Add to buffer if training with augmentations
if self.augment:
self.ims[i], self.im_hw0[i], self.im_hw[i] = im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized
self.buffer.append(i)
if 1 < len(self.buffer) >= self.max_buffer_length: # prevent empty buffer
j = self.buffer.pop(0)
if self.cache != "ram":
self.ims[j], self.im_hw0[j], self.im_hw[j] = None, None, None
return im, (h0, w0), im.shape[:2]
return self.ims[i], self.im_hw0[i], self.im_hw[i]
def cache_images(self) -> None:
b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes
fcn, storage = (self.cache_images_to_disk, "Disk") if self.cache == "disk" else (self.load_image, "RAM")
with ThreadPool(NUM_THREADS) as pool:
results = pool.imap(fcn, range(self.ni))
pbar = TQDM(enumerate(results), total=self.ni, disable=LOCAL_RANK > 0)
for i, x in pbar:
if self.cache == "disk":
b += self.npy_files[i].stat().st_size
else: # 'ram'
self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i)
b += self.ims[i].nbytes
pbar.desc = f"{self.prefix}Caching images ({b / gb:.1f}GB {storage})"
pbar.close()
def cache_images_to_disk(self, i: int) -> None:
f = self.npy_files[i]
if not f.exists():
try:
np.save(f.as_posix(), imread(self.im_files[i], flags=self.cv2_flag), allow_pickle=False)
except Exception as e:
f.unlink(missing_ok=True)
LOGGER.warning(f"{self.prefix}WARNING ⚠️ Failed to cache image {f}: {e}")
def check_cache_disk(self, safety_margin: float = 0.5) -> bool:
import shutil
b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes
n = min(self.ni, 30) # extrapolate from 30 random images
for _ in range(n):
im_file = random.choice(self.im_files)
im = imread(im_file)
if im is None:
continue
b += im.nbytes
if not os.access(Path(im_file).parent, os.W_OK):
self.cache = None
LOGGER.warning(f"{self.prefix}Skipping caching images to disk, directory not writable")
return False
disk_required = b * self.ni / n * (1 + safety_margin) # bytes required to cache dataset to disk
total, _used, free = shutil.disk_usage(Path(self.im_files[0]).parent)
if disk_required > free:
self.cache = None
LOGGER.warning(
f"{self.prefix}{disk_required / gb:.1f}GB disk space required, "
f"with {int(safety_margin * 100)}% safety margin but only "
f"{free / gb:.1f}/{total / gb:.1f}GB free, not caching images to disk"
)
return False
return True
def check_cache_ram(self, safety_margin: float = 0.5) -> bool:
b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes
n = min(self.ni, 30) # extrapolate from 30 random images
for _ in range(n):
im = imread(random.choice(self.im_files)) # sample image
if im is None:
continue
ratio = self.imgsz / max(im.shape[0], im.shape[1]) # max(h, w) # ratio
b += im.nbytes * ratio**2
mem_required = b * self.ni / n * (1 + safety_margin) # GB required to cache dataset into RAM
mem = __import__("psutil").virtual_memory()
if mem_required > mem.available:
self.cache = None
LOGGER.warning(
f"{self.prefix}{mem_required / gb:.1f}GB RAM required to cache images "
f"with {int(safety_margin * 100)}% safety margin but only "
f"{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, not caching images"
)
return False
return True
def set_rectangle(self) -> None:
bi = np.floor(np.arange(self.ni) / self.batch_size).astype(int) # batch index
nb = bi[-1] + 1 # number of batches
s = np.array([x.pop("shape") for x in self.labels]) # hw
ar = s[:, 0] / s[:, 1] # aspect ratio
irect = ar.argsort()
self.im_files = [self.im_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * self.imgsz / self.stride + self.pad).astype(int) * self.stride
self.batch = bi # batch index of image
def __getitem__(self, index: int) -> dict[str, Any]:
return self.transforms(self.get_image_and_label(index))
def get_image_and_label(self, index: int) -> dict[str, Any]:
label = deepcopy(self.labels[index]) # requires deepcopy() https://github.com/ultralytics/ultralytics/pull/1948
label.pop("shape", None) # shape is for rect, remove it
label["img"], label["ori_shape"], label["resized_shape"] = self.load_image(index)
label["ratio_pad"] = (
label["resized_shape"][0] / label["ori_shape"][0],
label["resized_shape"][1] / label["ori_shape"][1],
) # for evaluation
if self.rect:
label["rect_shape"] = self.batch_shapes[self.batch[index]]
return self.update_labels_info(label)
def __len__(self) -> int:
return len(self.labels)
def update_labels_info(self, label: dict[str, Any]) -> dict[str, Any]:
return label
def build_transforms(self, hyp: dict[str, Any] | None = None):
raise NotImplementedError
def get_labels(self) -> list[dict[str, Any]]:
raise NotImplementedError | --- +++ @@ -21,6 +21,53 @@
class BaseDataset(Dataset):
+ """Base dataset class for loading and processing image data.
+
+ This class provides core functionality for loading images, caching, and preparing data for training and inference in
+ object detection tasks.
+
+ Attributes:
+ img_path (str | list[str]): Path to the folder containing images.
+ imgsz (int): Target image size for resizing.
+ augment (bool): Whether to apply data augmentation.
+ single_cls (bool): Whether to treat all objects as a single class.
+ prefix (str): Prefix to print in log messages.
+ fraction (float): Fraction of dataset to utilize.
+ channels (int): Number of channels in the images (1 for grayscale, 3 for color). Color images loaded with OpenCV
+ are in BGR channel order.
+ cv2_flag (int): OpenCV flag for reading images.
+ im_files (list[str]): List of image file paths.
+ labels (list[dict]): List of label data dictionaries.
+ ni (int): Number of images in the dataset.
+ rect (bool): Whether to use rectangular training.
+ batch_size (int): Size of batches.
+ stride (int): Stride used in the model.
+ pad (float): Padding value.
+ buffer (list): Buffer for mosaic images.
+ max_buffer_length (int): Maximum buffer size.
+ ims (list): List of loaded images.
+ im_hw0 (list): List of original image dimensions (h, w).
+ im_hw (list): List of resized image dimensions (h, w).
+ npy_files (list[Path]): List of numpy file paths.
+ cache (str | None): Cache setting ('ram', 'disk', or None for no caching).
+ transforms (callable): Image transformation function.
+ batch_shapes (np.ndarray): Batch shapes for rectangular training.
+ batch (np.ndarray): Batch index of each image.
+
+ Methods:
+ get_img_files: Read image files from the specified path.
+ update_labels: Update labels to include only specified classes.
+ load_image: Load an image from the dataset.
+ cache_images: Cache images to memory or disk.
+ cache_images_to_disk: Save an image as an *.npy file for faster loading.
+ check_cache_disk: Check image caching requirements vs available disk space.
+ check_cache_ram: Check image caching requirements vs available memory.
+ set_rectangle: Sort images by aspect ratio and set batch shapes for rectangular training.
+ get_image_and_label: Get and return label information from the dataset.
+ update_labels_info: Custom label format method to be implemented by subclasses.
+ build_transforms: Build transformation pipeline to be implemented by subclasses.
+ get_labels: Get labels method to be implemented by subclasses.
+ """
def __init__(
self,
@@ -39,6 +86,25 @@ fraction: float = 1.0,
channels: int = 3,
):
+ """Initialize BaseDataset with given configuration and options.
+
+ Args:
+ img_path (str | list[str]): Path to the folder containing images or list of image paths.
+ imgsz (int): Image size for resizing.
+ cache (bool | str): Cache images to RAM or disk during training.
+ augment (bool): If True, data augmentation is applied.
+ hyp (dict[str, Any]): Hyperparameters to apply data augmentation.
+ prefix (str): Prefix to print in log messages.
+ rect (bool): If True, rectangular training is used.
+ batch_size (int): Size of batches.
+ stride (int): Stride used in the model.
+ pad (float): Padding value.
+ single_cls (bool): If True, single class training is used.
+ classes (list[int], optional): List of included classes.
+ fraction (float): Fraction of dataset to utilize.
+ channels (int): Number of channels in the images (1 for grayscale, 3 for color). Color images loaded with
+ OpenCV are in BGR channel order.
+ """
super().__init__()
self.img_path = img_path
self.imgsz = imgsz
@@ -82,6 +148,17 @@ self.transforms = self.build_transforms(hyp=hyp)
def get_img_files(self, img_path: str | list[str]) -> list[str]:
+ """Read image files from the specified path.
+
+ Args:
+ img_path (str | list[str]): Path or list of paths to image directories or files.
+
+ Returns:
+ (list[str]): List of image file paths.
+
+ Raises:
+ FileNotFoundError: If no images are found or the path doesn't exist.
+ """
try:
f = [] # image files
for p in img_path if isinstance(img_path, list) else [img_path]:
@@ -108,6 +185,11 @@ return im_files
def update_labels(self, include_class: list[int] | None) -> None:
+ """Update labels to include only specified classes.
+
+ Args:
+ include_class (list[int], optional): List of classes to include. If None, all classes are included.
+ """
include_class_array = np.array(include_class).reshape(1, -1)
for i in range(len(self.labels)):
if include_class is not None:
@@ -126,6 +208,20 @@ self.labels[i]["cls"][:, 0] = 0
def load_image(self, i: int, rect_mode: bool = True) -> tuple[np.ndarray, tuple[int, int], tuple[int, int]]:
+ """Load an image from dataset index 'i'.
+
+ Args:
+ i (int): Index of the image to load.
+ rect_mode (bool): Whether to use rectangular resizing.
+
+ Returns:
+ im (np.ndarray): Loaded image as a NumPy array.
+ hw_original (tuple[int, int]): Original image dimensions in (height, width) format.
+ hw_resized (tuple[int, int]): Resized image dimensions in (height, width) format.
+
+ Raises:
+ FileNotFoundError: If the image file is not found.
+ """
im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i]
if im is None: # not cached in RAM
if fn.exists(): # load npy
@@ -165,6 +261,7 @@ return self.ims[i], self.im_hw0[i], self.im_hw[i]
def cache_images(self) -> None:
+ """Cache images to memory or disk for faster training."""
b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes
fcn, storage = (self.cache_images_to_disk, "Disk") if self.cache == "disk" else (self.load_image, "RAM")
with ThreadPool(NUM_THREADS) as pool:
@@ -180,6 +277,7 @@ pbar.close()
def cache_images_to_disk(self, i: int) -> None:
+ """Save an image as an *.npy file for faster loading."""
f = self.npy_files[i]
if not f.exists():
try:
@@ -189,6 +287,14 @@ LOGGER.warning(f"{self.prefix}WARNING ⚠️ Failed to cache image {f}: {e}")
def check_cache_disk(self, safety_margin: float = 0.5) -> bool:
+ """Check if there's enough disk space for caching images.
+
+ Args:
+ safety_margin (float): Safety margin factor for disk space calculation.
+
+ Returns:
+ (bool): True if there's enough disk space, False otherwise.
+ """
import shutil
b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes
@@ -216,6 +322,14 @@ return True
def check_cache_ram(self, safety_margin: float = 0.5) -> bool:
+ """Check if there's enough RAM for caching images.
+
+ Args:
+ safety_margin (float): Safety margin factor for RAM calculation.
+
+ Returns:
+ (bool): True if there's enough RAM, False otherwise.
+ """
b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes
n = min(self.ni, 30) # extrapolate from 30 random images
for _ in range(n):
@@ -237,6 +351,7 @@ return True
def set_rectangle(self) -> None:
+ """Sort images by aspect ratio and set batch shapes for rectangular training."""
bi = np.floor(np.arange(self.ni) / self.batch_size).astype(int) # batch index
nb = bi[-1] + 1 # number of batches
@@ -261,9 +376,18 @@ self.batch = bi # batch index of image
def __getitem__(self, index: int) -> dict[str, Any]:
+ """Return transformed label information for given index."""
return self.transforms(self.get_image_and_label(index))
def get_image_and_label(self, index: int) -> dict[str, Any]:
+ """Get and return label information from the dataset.
+
+ Args:
+ index (int): Index of the image to retrieve.
+
+ Returns:
+ (dict[str, Any]): Label dictionary with image and metadata.
+ """
label = deepcopy(self.labels[index]) # requires deepcopy() https://github.com/ultralytics/ultralytics/pull/1948
label.pop("shape", None) # shape is for rect, remove it
label["img"], label["ori_shape"], label["resized_shape"] = self.load_image(index)
@@ -276,13 +400,40 @@ return self.update_labels_info(label)
def __len__(self) -> int:
+ """Return the length of the labels list for the dataset."""
return len(self.labels)
def update_labels_info(self, label: dict[str, Any]) -> dict[str, Any]:
+ """Customize your label format here."""
return label
def build_transforms(self, hyp: dict[str, Any] | None = None):
+ """Users can customize augmentations here.
+
+ Examples:
+ >>> if self.augment:
+ ... # Training transforms
+ ... return Compose([])
+ >>> else:
+ ... # Val transforms
+ ... return Compose([])
+ """
raise NotImplementedError
def get_labels(self) -> list[dict[str, Any]]:
- raise NotImplementedError
+ """Users can customize their own format here.
+
+ Examples:
+ Ensure output is a dictionary with the following keys:
+ >>> dict(
+ ... im_file=im_file,
+ ... shape=shape, # format: (height, width)
+ ... cls=cls,
+ ... bboxes=bboxes, # xywh
+ ... segments=segments, # xy
+ ... keypoints=keypoints, # xy
+ ... normalized=True, # or False
+ ... bbox_format="xyxy", # or xywh, ltwh
+ ... )
+ """
+ raise NotImplementedError
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/data/base.py |
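The get_labels() and build_transforms() docstrings above define the subclass contract for BaseDataset. Below is a minimal, hedged sketch of a custom subclass that follows that contract; the fixed image shape, single class id, and identity transform are illustrative assumptions, not part of the original code:
# Hedged sketch of a BaseDataset subclass; label keys follow the get_labels() docstring above.
import numpy as np
from ultralytics.data.base import BaseDataset
class SingleBoxDataset(BaseDataset):
    def get_labels(self):
        labels = []
        for f in self.im_files:
            labels.append(
                dict(
                    im_file=f,
                    shape=(640, 640),  # (height, width), assumed known for every image
                    cls=np.zeros((1, 1), dtype=np.float32),  # one object of class 0 per image
                    bboxes=np.array([[0.5, 0.5, 0.2, 0.2]], dtype=np.float32),  # normalized xywh
                    segments=[],
                    keypoints=None,
                    normalized=True,
                    bbox_format="xywh",
                )
            )
        return labels
    def build_transforms(self, hyp=None):
        # Identity transform; real datasets compose augmentation pipelines here.
        return lambda label: label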
Expand my code with proper documentation strings | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import torch
from ultralytics.engine.results import Results
from ultralytics.models.yolo.detect.predict import DetectionPredictor
from ultralytics.utils import DEFAULT_CFG, ops
class OBBPredictor(DetectionPredictor):
def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks: dict | None = None):
super().__init__(cfg, overrides, _callbacks)
self.args.task = "obb"
def construct_result(self, pred, img, orig_img, img_path):
rboxes = torch.cat([pred[:, :4], pred[:, -1:]], dim=-1)
rboxes[:, :4] = ops.scale_boxes(img.shape[2:], rboxes[:, :4], orig_img.shape, xywh=True)
obb = torch.cat([rboxes, pred[:, 4:6]], dim=-1)
return Results(orig_img, path=img_path, names=self.model.names, obb=obb) | --- +++ @@ -10,13 +10,49 @@
class OBBPredictor(DetectionPredictor):
+ """A class extending the DetectionPredictor class for prediction based on an Oriented Bounding Box (OBB) model.
+
+ This predictor handles oriented bounding box detection tasks, processing images and returning results with rotated
+ bounding boxes.
+
+ Attributes:
+ args (namespace): Configuration arguments for the predictor.
+ model (torch.nn.Module): The loaded YOLO OBB model.
+
+ Examples:
+ >>> from ultralytics.utils import ASSETS
+ >>> from ultralytics.models.yolo.obb import OBBPredictor
+ >>> args = dict(model="yolo26n-obb.pt", source=ASSETS)
+ >>> predictor = OBBPredictor(overrides=args)
+ >>> predictor.predict_cli()
+ """
def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks: dict | None = None):
+ """Initialize OBBPredictor with optional model and data configuration overrides.
+
+ Args:
+ cfg (dict, optional): Default configuration for the predictor.
+ overrides (dict, optional): Configuration overrides that take precedence over the default config.
+ _callbacks (dict, optional): Dictionary of callback functions to be invoked during prediction.
+ """
super().__init__(cfg, overrides, _callbacks)
self.args.task = "obb"
def construct_result(self, pred, img, orig_img, img_path):
+ """Construct the result object from the prediction.
+
+ Args:
+ pred (torch.Tensor): The predicted bounding boxes, scores, and rotation angles with shape (N, 7) where the
+ last dimension contains [x, y, w, h, confidence, class_id, angle].
+ img (torch.Tensor): The image after preprocessing with shape (B, C, H, W).
+ orig_img (np.ndarray): The original image before preprocessing.
+ img_path (str): The path to the original image.
+
+ Returns:
+ (Results): The result object containing the original image, image path, class names, and oriented bounding
+ boxes.
+ """
rboxes = torch.cat([pred[:, :4], pred[:, -1:]], dim=-1)
rboxes[:, :4] = ops.scale_boxes(img.shape[2:], rboxes[:, :4], orig_img.shape, xywh=True)
obb = torch.cat([rboxes, pred[:, 4:6]], dim=-1)
- return Results(orig_img, path=img_path, names=self.model.names, obb=obb)
+ return Results(orig_img, path=img_path, names=self.model.names, obb=obb)
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/yolo/obb/predict.py |
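Beyond the predict_cli() example in the class docstring, the OBB results constructed above are typically consumed through the high-level YOLO API. A hedged usage sketch follows (it assumes yolo26n-obb.pt weights are available and uses the standard Results accessors for oriented boxes):
# Hedged sketch: reading the oriented boxes produced by construct_result().
from ultralytics import YOLO
model = YOLO("yolo26n-obb.pt")
results = model.predict("https://ultralytics.com/images/boats.jpg")
for r in results:
    if r.obb is not None:
        print(r.obb.xywhr)  # center x, center y, width, height, rotation
        print(r.obb.conf, r.obb.cls)  # confidences and class ids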
Generate consistent documentation across files | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
import torch
from ultralytics.data.augment import LetterBox
from ultralytics.engine.predictor import BasePredictor
from ultralytics.engine.results import Results
from ultralytics.utils import ops
class RTDETRPredictor(BasePredictor):
def postprocess(self, preds, img, orig_imgs):
if not isinstance(preds, (list, tuple)): # list for PyTorch inference but list[0] Tensor for export inference
preds = [preds, None]
nd = preds[0].shape[-1]
bboxes, scores = preds[0].split((4, nd - 4), dim=-1)
if not isinstance(orig_imgs, list): # input images are a torch.Tensor, not a list
orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)[..., ::-1]
results = []
for bbox, score, orig_img, img_path in zip(bboxes, scores, orig_imgs, self.batch[0]): # (300, 4)
bbox = ops.xywh2xyxy(bbox)
max_score, cls = score.max(-1, keepdim=True) # (300, 1)
idx = max_score.squeeze(-1) > self.args.conf # (300, )
if self.args.classes is not None:
idx = (cls == torch.tensor(self.args.classes, device=cls.device)).any(1) & idx
pred = torch.cat([bbox, max_score, cls], dim=-1)[idx] # filter
pred = pred[pred[:, 4].argsort(descending=True)][: self.args.max_det]
oh, ow = orig_img.shape[:2]
pred[..., [0, 2]] *= ow # scale x coordinates to original width
pred[..., [1, 3]] *= oh # scale y coordinates to original height
results.append(Results(orig_img, path=img_path, names=self.model.names, boxes=pred))
return results
def pre_transform(self, im):
letterbox = LetterBox(self.imgsz, auto=False, scale_fill=True)
return [letterbox(image=x) for x in im] | --- +++ @@ -9,8 +9,45 @@
class RTDETRPredictor(BasePredictor):
+ """RT-DETR (Real-Time Detection Transformer) Predictor extending the BasePredictor class for making predictions.
+
+ This class leverages Vision Transformers to provide real-time object detection while maintaining high accuracy. It
+ supports key features like efficient hybrid encoding and IoU-aware query selection.
+
+ Attributes:
+ imgsz (int): Image size for inference (must be square and scale-filled).
+ args (dict): Argument overrides for the predictor.
+ model (torch.nn.Module): The loaded RT-DETR model.
+ batch (list): Current batch of processed inputs.
+
+ Methods:
+ postprocess: Postprocess raw model predictions to generate bounding boxes and confidence scores.
+ pre_transform: Pre-transform input images before feeding them into the model for inference.
+
+ Examples:
+ >>> from ultralytics.utils import ASSETS
+ >>> from ultralytics.models.rtdetr import RTDETRPredictor
+ >>> args = dict(model="rtdetr-l.pt", source=ASSETS)
+ >>> predictor = RTDETRPredictor(overrides=args)
+ >>> predictor.predict_cli()
+ """
def postprocess(self, preds, img, orig_imgs):
+ """Postprocess the raw predictions from the model to generate bounding boxes and confidence scores.
+
+ The method filters detections based on confidence and class if specified in `self.args`. It converts model
+ predictions to Results objects containing properly scaled bounding boxes.
+
+ Args:
+ preds (list | tuple): List of [predictions, extra] from the model, where predictions contain bounding boxes
+ and scores.
+ img (torch.Tensor): Processed input images with shape (N, 3, H, W).
+ orig_imgs (list | torch.Tensor): Original, unprocessed images.
+
+ Returns:
+ (list[Results]): A list of Results objects containing the post-processed bounding boxes, confidence scores,
+ and class labels.
+ """
if not isinstance(preds, (list, tuple)): # list for PyTorch inference but list[0] Tensor for export inference
preds = [preds, None]
@@ -36,5 +73,15 @@ return results
def pre_transform(self, im):
+ """Pre-transform input images before feeding them into the model for inference.
+
+ The input images are letterboxed to ensure a square aspect ratio and scale-filled.
+
+ Args:
+ im (list[np.ndarray]): Input images of shape [(H, W, 3) x N].
+
+ Returns:
+ (list): List of pre-transformed images ready for model inference.
+ """
letterbox = LetterBox(self.imgsz, auto=False, scale_fill=True)
- return [letterbox(image=x) for x in im]
+ return [letterbox(image=x) for x in im]
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/rtdetr/predict.py |
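The RTDETRPredictor above is normally driven through the RTDETR model class rather than instantiated directly. A hedged usage sketch (assuming the rtdetr-l.pt weights from the docstring example are available or downloadable):
# Hedged sketch: high-level RT-DETR inference that uses RTDETRPredictor under the hood.
from ultralytics import RTDETR
model = RTDETR("rtdetr-l.pt")
results = model.predict("https://ultralytics.com/images/bus.jpg", conf=0.25)
for r in results:
    print(r.boxes.xyxy, r.boxes.conf, r.boxes.cls)  # boxes scaled to the original image by postprocess()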
Generate documentation strings for clarity | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
from __future__ import annotations
from copy import copy
import torch
import torch.nn as nn
from torch.nn.attention import SDPBackend, sdpa_kernel
from .necks import Sam3DualViTDetNeck
class SAM3VLBackbone(nn.Module):
def __init__(
self,
visual: Sam3DualViTDetNeck,
text,
compile_visual: bool = False,
act_ckpt_whole_vision_backbone: bool = False,
act_ckpt_whole_language_backbone: bool = False,
scalp=0,
):
super().__init__()
self.vision_backbone: Sam3DualViTDetNeck = torch.compile(visual) if compile_visual else visual
self.language_backbone = text
self.scalp = scalp
# allow running activation checkpointing on the entire vision and language backbones
self.act_ckpt_whole_vision_backbone = act_ckpt_whole_vision_backbone
self.act_ckpt_whole_language_backbone = act_ckpt_whole_language_backbone
def forward(
self,
samples: torch.Tensor,
captions: list[str],
input_boxes: torch.Tensor = None,
additional_text: list[str] | None = None,
):
output = self.forward_image(samples)
output.update(self.forward_text(captions, input_boxes, additional_text))
return output
def forward_image(self, samples: torch.Tensor):
# Forward through backbone
sam3_features, sam3_pos, sam2_features, sam2_pos = self.vision_backbone.forward(samples)
if self.scalp > 0:
# Discard the lowest resolution features
sam3_features, sam3_pos = (
sam3_features[: -self.scalp],
sam3_pos[: -self.scalp],
)
if sam2_features is not None and sam2_pos is not None:
sam2_features, sam2_pos = (
sam2_features[: -self.scalp],
sam2_pos[: -self.scalp],
)
sam2_output = None
if sam2_features is not None and sam2_pos is not None:
sam2_src = sam2_features[-1]
sam2_output = {
"vision_features": sam2_src,
"vision_pos_enc": sam2_pos,
"backbone_fpn": sam2_features,
}
sam3_src = sam3_features[-1]
return {
"vision_features": sam3_src,
"vision_pos_enc": sam3_pos,
"backbone_fpn": sam3_features,
"sam2_backbone_out": sam2_output,
}
def forward_image_sam2(self, samples: torch.Tensor):
xs = self.vision_backbone.trunk(samples)
x = xs[-1] # simpleFPN
assert self.vision_backbone.sam2_convs is not None, "SAM2 neck is not available."
sam2_features, sam2_pos = self.vision_backbone.sam_forward_feature_levels(x, self.vision_backbone.sam2_convs)
if self.scalp > 0:
# Discard the lowest resolution features
sam2_features, sam2_pos = (
sam2_features[: -self.scalp],
sam2_pos[: -self.scalp],
)
return {
"vision_features": sam2_features[-1],
"vision_pos_enc": sam2_pos,
"backbone_fpn": sam2_features,
}
def forward_text(self, captions, input_boxes=None, additional_text=None):
output = {}
# Forward through text_encoder
text_to_encode = copy(captions)
if additional_text is not None:
# if there are additional_text, we piggy-back them into this forward.
# They'll be used later for output alignment
text_to_encode += additional_text
with sdpa_kernel([SDPBackend.MATH, SDPBackend.EFFICIENT_ATTENTION, SDPBackend.FLASH_ATTENTION]):
text_attention_mask, text_memory, text_embeds = self.language_backbone(text_to_encode, input_boxes)
if additional_text is not None:
output["additional_text_features"] = text_memory[:, -len(additional_text) :]
output["additional_text_mask"] = text_attention_mask[-len(additional_text) :]
text_memory = text_memory[:, : len(captions)]
text_attention_mask = text_attention_mask[: len(captions)]
text_embeds = text_embeds[:, : len(captions)]
output["language_features"] = text_memory
output["language_mask"] = text_attention_mask
output["language_embeds"] = text_embeds # Text embeddings before forward to the encoder
return output
def set_imgsz(self, imgsz: list[int] = [1008, 1008]):
self.vision_backbone.set_imgsz(imgsz) | --- +++ @@ -2,6 +2,7 @@
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
+"""Provides utility to combine a vision backbone with a language backbone."""
from __future__ import annotations
@@ -15,6 +16,11 @@
class SAM3VLBackbone(nn.Module):
+ """This backbone combines a vision backbone and a language backbone without fusion. As such it is more of a
+ convenience wrapper to handle the two backbones together.
+
+ It adds support for activation checkpointing and compilation.
+ """
def __init__(
self,
@@ -25,6 +31,11 @@ act_ckpt_whole_language_backbone: bool = False,
scalp=0,
):
+ """Initialize the backbone combiner.
+
+ :param visual: The vision backbone to use
+ :param text: The text encoder to use
+ """
super().__init__()
self.vision_backbone: Sam3DualViTDetNeck = torch.compile(visual) if compile_visual else visual
self.language_backbone = text
@@ -40,11 +51,30 @@ input_boxes: torch.Tensor = None,
additional_text: list[str] | None = None,
):
+ """Forward pass of the backbone combiner.
+
+ :param samples: The input images
+ :param captions: The input captions
+ :param input_boxes: If the text contains place-holders for boxes, this
+ parameter contains the tensor containing their spatial features
+ :param additional_text: This can be used to encode some additional text
+ (different from the captions) in the same forward of the backbone
+ :return: Output dictionary with the following keys:
+ - vision_features: The output of the vision backbone
+ - language_features: The output of the language backbone
+ - language_mask: The attention mask of the language backbone
+ - vision_pos_enc: The positional encoding of the vision backbone
+ - (optional) additional_text_features: The output of the language
+ backbone for the additional text
+ - (optional) additional_text_mask: The attention mask of the
+ language backbone for the additional text
+ """
output = self.forward_image(samples)
output.update(self.forward_text(captions, input_boxes, additional_text))
return output
def forward_image(self, samples: torch.Tensor):
+ """Forward pass of the vision backbone and get both SAM3 and SAM2 features."""
# Forward through backbone
sam3_features, sam3_pos, sam2_features, sam2_pos = self.vision_backbone.forward(samples)
if self.scalp > 0:
@@ -78,6 +108,7 @@ }
def forward_image_sam2(self, samples: torch.Tensor):
+ """Forward pass of the vision backbone to get SAM2 features only."""
xs = self.vision_backbone.trunk(samples)
x = xs[-1] # simpleFPN
@@ -98,6 +129,7 @@ }
def forward_text(self, captions, input_boxes=None, additional_text=None):
+ """Forward pass of the text encoder."""
output = {}
# Forward through text_encoder
@@ -124,4 +156,5 @@ return output
def set_imgsz(self, imgsz: list[int] = [1008, 1008]):
- self.vision_backbone.set_imgsz(imgsz)
+ """Set the image size for the vision backbone."""
+ self.vision_backbone.set_imgsz(imgsz)
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/sam/sam3/vl_combiner.py |
Add docstrings following best practices | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import os
from pathlib import Path
from typing import Any
import numpy as np
import torch
import torch.distributed as dist
from ultralytics.data import build_dataloader, build_yolo_dataset, converter
from ultralytics.engine.validator import BaseValidator
from ultralytics.utils import LOGGER, RANK, nms, ops
from ultralytics.utils.checks import check_requirements
from ultralytics.utils.metrics import ConfusionMatrix, DetMetrics, box_iou
from ultralytics.utils.plotting import plot_images
class DetectionValidator(BaseValidator):
def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks: dict | None = None) -> None:
super().__init__(dataloader, save_dir, args, _callbacks)
self.is_coco = False
self.is_lvis = False
self.class_map = None
self.args.task = "detect"
self.iouv = torch.linspace(0.5, 0.95, 10) # IoU vector for mAP@0.5:0.95
self.niou = self.iouv.numel()
self.metrics = DetMetrics()
def preprocess(self, batch: dict[str, Any]) -> dict[str, Any]:
for k, v in batch.items():
if isinstance(v, torch.Tensor):
batch[k] = v.to(self.device, non_blocking=self.device.type == "cuda")
batch["img"] = (batch["img"].half() if self.args.half else batch["img"].float()) / 255
return batch
def init_metrics(self, model: torch.nn.Module) -> None:
val = self.data.get(self.args.split, "") # validation path
self.is_coco = (
isinstance(val, str)
and "coco" in val
and (val.endswith(f"{os.sep}val2017.txt") or val.endswith(f"{os.sep}test-dev2017.txt"))
) # is COCO
self.is_lvis = isinstance(val, str) and "lvis" in val and not self.is_coco # is LVIS
self.class_map = converter.coco80_to_coco91_class() if self.is_coco else list(range(1, len(model.names) + 1))
self.args.save_json |= self.args.val and (self.is_coco or self.is_lvis) and not self.training # run final val
self.names = model.names
self.nc = len(model.names)
self.end2end = getattr(model, "end2end", False)
self.seen = 0
self.jdict = []
self.metrics.names = model.names
self.confusion_matrix = ConfusionMatrix(names=model.names, save_matches=self.args.plots and self.args.visualize)
def get_desc(self) -> str:
return ("%22s" + "%11s" * 6) % ("Class", "Images", "Instances", "Box(P", "R", "mAP50", "mAP50-95)")
def postprocess(self, preds: torch.Tensor) -> list[dict[str, torch.Tensor]]:
outputs = nms.non_max_suppression(
preds,
self.args.conf,
self.args.iou,
nc=0 if self.args.task == "detect" else self.nc,
multi_label=True,
agnostic=self.args.single_cls or self.args.agnostic_nms,
max_det=self.args.max_det,
end2end=self.end2end,
rotated=self.args.task == "obb",
)
return [{"bboxes": x[:, :4], "conf": x[:, 4], "cls": x[:, 5], "extra": x[:, 6:]} for x in outputs]
def _prepare_batch(self, si: int, batch: dict[str, Any]) -> dict[str, Any]:
idx = batch["batch_idx"] == si
cls = batch["cls"][idx].squeeze(-1)
bbox = batch["bboxes"][idx]
ori_shape = batch["ori_shape"][si]
imgsz = batch["img"].shape[2:]
ratio_pad = batch["ratio_pad"][si]
if cls.shape[0]:
bbox = ops.xywh2xyxy(bbox) * torch.tensor(imgsz, device=self.device)[[1, 0, 1, 0]] # target boxes
return {
"cls": cls,
"bboxes": bbox,
"ori_shape": ori_shape,
"imgsz": imgsz,
"ratio_pad": ratio_pad,
"im_file": batch["im_file"][si],
}
def _prepare_pred(self, pred: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
if self.args.single_cls:
pred["cls"] *= 0
return pred
def update_metrics(self, preds: list[dict[str, torch.Tensor]], batch: dict[str, Any]) -> None:
for si, pred in enumerate(preds):
self.seen += 1
pbatch = self._prepare_batch(si, batch)
predn = self._prepare_pred(pred)
cls = pbatch["cls"].cpu().numpy()
no_pred = predn["cls"].shape[0] == 0
self.metrics.update_stats(
{
**self._process_batch(predn, pbatch),
"target_cls": cls,
"target_img": np.unique(cls),
"conf": np.zeros(0) if no_pred else predn["conf"].cpu().numpy(),
"pred_cls": np.zeros(0) if no_pred else predn["cls"].cpu().numpy(),
}
)
# Evaluate
if self.args.plots:
self.confusion_matrix.process_batch(predn, pbatch, conf=self.args.conf)
if self.args.visualize:
self.confusion_matrix.plot_matches(batch["img"][si], pbatch["im_file"], self.save_dir)
if no_pred:
continue
# Save
if self.args.save_json or self.args.save_txt:
predn_scaled = self.scale_preds(predn, pbatch)
if self.args.save_json:
self.pred_to_json(predn_scaled, pbatch)
if self.args.save_txt:
self.save_one_txt(
predn_scaled,
self.args.save_conf,
pbatch["ori_shape"],
self.save_dir / "labels" / f"{Path(pbatch['im_file']).stem}.txt",
)
def finalize_metrics(self) -> None:
if self.args.plots:
for normalize in True, False:
self.confusion_matrix.plot(save_dir=self.save_dir, normalize=normalize, on_plot=self.on_plot)
self.metrics.speed = self.speed
self.metrics.confusion_matrix = self.confusion_matrix
self.metrics.save_dir = self.save_dir
def gather_stats(self) -> None:
if RANK == 0:
gathered_stats = [None] * dist.get_world_size()
dist.gather_object(self.metrics.stats, gathered_stats, dst=0)
merged_stats = {key: [] for key in self.metrics.stats.keys()}
for stats_dict in gathered_stats:
for key in merged_stats:
merged_stats[key].extend(stats_dict[key])
gathered_jdict = [None] * dist.get_world_size()
dist.gather_object(self.jdict, gathered_jdict, dst=0)
self.jdict = []
for jdict in gathered_jdict:
self.jdict.extend(jdict)
self.metrics.stats = merged_stats
self.seen = len(self.dataloader.dataset) # total image count from dataset
elif RANK > 0:
dist.gather_object(self.metrics.stats, None, dst=0)
dist.gather_object(self.jdict, None, dst=0)
self.jdict = []
self.metrics.clear_stats()
def get_stats(self) -> dict[str, Any]:
self.metrics.process(save_dir=self.save_dir, plot=self.args.plots, on_plot=self.on_plot)
self.metrics.clear_stats()
return self.metrics.results_dict
def print_results(self) -> None:
pf = "%22s" + "%11i" * 2 + "%11.3g" * len(self.metrics.keys) # print format
LOGGER.info(pf % ("all", self.seen, self.metrics.nt_per_class.sum(), *self.metrics.mean_results()))
if self.metrics.nt_per_class.sum() == 0:
LOGGER.warning(f"no labels found in {self.args.task} set, cannot compute metrics without labels")
# Print results per class
if self.args.verbose and not self.training and self.nc > 1 and len(self.metrics.stats):
for i, c in enumerate(self.metrics.ap_class_index):
LOGGER.info(
pf
% (
self.names[c],
self.metrics.nt_per_image[c],
self.metrics.nt_per_class[c],
*self.metrics.class_result(i),
)
)
def _process_batch(self, preds: dict[str, torch.Tensor], batch: dict[str, Any]) -> dict[str, np.ndarray]:
if batch["cls"].shape[0] == 0 or preds["cls"].shape[0] == 0:
return {"tp": np.zeros((preds["cls"].shape[0], self.niou), dtype=bool)}
iou = box_iou(batch["bboxes"], preds["bboxes"])
return {"tp": self.match_predictions(preds["cls"], batch["cls"], iou).cpu().numpy()}
def build_dataset(self, img_path: str, mode: str = "val", batch: int | None = None) -> torch.utils.data.Dataset:
return build_yolo_dataset(self.args, img_path, batch, self.data, mode=mode, stride=self.stride)
def get_dataloader(self, dataset_path: str, batch_size: int) -> torch.utils.data.DataLoader:
dataset = self.build_dataset(dataset_path, batch=batch_size, mode="val")
return build_dataloader(
dataset,
batch_size,
self.args.workers,
shuffle=False,
rank=-1,
drop_last=self.args.compile,
pin_memory=self.training,
)
def plot_val_samples(self, batch: dict[str, Any], ni: int) -> None:
plot_images(
labels=batch,
paths=batch["im_file"],
fname=self.save_dir / f"val_batch{ni}_labels.jpg",
names=self.names,
on_plot=self.on_plot,
)
def plot_predictions(
self, batch: dict[str, Any], preds: list[dict[str, torch.Tensor]], ni: int, max_det: int | None = None
) -> None:
if not preds:
return
for i, pred in enumerate(preds):
pred["batch_idx"] = torch.ones_like(pred["conf"]) * i # add batch index to predictions
keys = preds[0].keys()
max_det = max_det or self.args.max_det
batched_preds = {k: torch.cat([x[k][:max_det] for x in preds], dim=0) for k in keys}
batched_preds["bboxes"] = ops.xyxy2xywh(batched_preds["bboxes"]) # convert to xywh format
plot_images(
images=batch["img"],
labels=batched_preds,
paths=batch["im_file"],
fname=self.save_dir / f"val_batch{ni}_pred.jpg",
names=self.names,
on_plot=self.on_plot,
) # pred
def save_one_txt(self, predn: dict[str, torch.Tensor], save_conf: bool, shape: tuple[int, int], file: Path) -> None:
from ultralytics.engine.results import Results
Results(
np.zeros((shape[0], shape[1]), dtype=np.uint8),
path=None,
names=self.names,
boxes=torch.cat([predn["bboxes"], predn["conf"].unsqueeze(-1), predn["cls"].unsqueeze(-1)], dim=1),
).save_txt(file, save_conf=save_conf)
def pred_to_json(self, predn: dict[str, torch.Tensor], pbatch: dict[str, Any]) -> None:
path = Path(pbatch["im_file"])
stem = path.stem
image_id = int(stem) if stem.isnumeric() else stem
box = ops.xyxy2xywh(predn["bboxes"]) # xywh
box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
for b, s, c in zip(box.tolist(), predn["conf"].tolist(), predn["cls"].tolist()):
self.jdict.append(
{
"image_id": image_id,
"file_name": path.name,
"category_id": self.class_map[int(c)],
"bbox": [round(x, 3) for x in b],
"score": round(s, 5),
}
)
def scale_preds(self, predn: dict[str, torch.Tensor], pbatch: dict[str, Any]) -> dict[str, torch.Tensor]:
return {
**predn,
"bboxes": ops.scale_boxes(
pbatch["imgsz"],
predn["bboxes"].clone(),
pbatch["ori_shape"],
ratio_pad=pbatch["ratio_pad"],
),
}
def eval_json(self, stats: dict[str, Any]) -> dict[str, Any]:
pred_json = self.save_dir / "predictions.json" # predictions
anno_json = (
self.data["path"]
/ "annotations"
/ ("instances_val2017.json" if self.is_coco else f"lvis_v1_{self.args.split}.json")
) # annotations
return self.coco_evaluate(stats, pred_json, anno_json)
def coco_evaluate(
self,
stats: dict[str, Any],
pred_json: str,
anno_json: str,
iou_types: str | list[str] = "bbox",
suffix: str | list[str] = "Box",
) -> dict[str, Any]:
if self.args.save_json and (self.is_coco or self.is_lvis) and len(self.jdict):
LOGGER.info(f"\nEvaluating faster-coco-eval mAP using {pred_json} and {anno_json}...")
try:
for x in pred_json, anno_json:
assert x.is_file(), f"{x} file not found"
iou_types = [iou_types] if isinstance(iou_types, str) else iou_types
suffix = [suffix] if isinstance(suffix, str) else suffix
check_requirements("faster-coco-eval>=1.6.7")
from faster_coco_eval import COCO, COCOeval_faster
anno = COCO(anno_json)
pred = anno.loadRes(pred_json)
for i, iou_type in enumerate(iou_types):
val = COCOeval_faster(
anno, pred, iouType=iou_type, lvis_style=self.is_lvis, print_function=LOGGER.info
)
val.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files] # images to eval
val.evaluate()
val.accumulate()
val.summarize()
# update mAP50-95 and mAP50
stats[f"metrics/mAP50({suffix[i][0]})"] = val.stats_as_dict["AP_50"]
stats[f"metrics/mAP50-95({suffix[i][0]})"] = val.stats_as_dict["AP_all"]
# record mAP for small, medium, large objects as well
stats["metrics/mAP_small(B)"] = val.stats_as_dict["AP_small"]
stats["metrics/mAP_medium(B)"] = val.stats_as_dict["AP_medium"]
stats["metrics/mAP_large(B)"] = val.stats_as_dict["AP_large"]
# update fitness
stats["fitness"] = 0.9 * val.stats_as_dict["AP_all"] + 0.1 * val.stats_as_dict["AP_50"]
if self.is_lvis:
stats[f"metrics/APr({suffix[i][0]})"] = val.stats_as_dict["APr"]
stats[f"metrics/APc({suffix[i][0]})"] = val.stats_as_dict["APc"]
stats[f"metrics/APf({suffix[i][0]})"] = val.stats_as_dict["APf"]
if self.is_lvis:
stats["fitness"] = stats["metrics/mAP50-95(B)"] # always use box mAP50-95 for fitness
except Exception as e:
LOGGER.warning(f"faster-coco-eval unable to run: {e}")
return stats | --- +++ @@ -19,8 +19,38 @@
class DetectionValidator(BaseValidator):
+ """A class extending the BaseValidator class for validation based on a detection model.
+
+ This class implements validation functionality specific to object detection tasks, including metrics calculation,
+ prediction processing, and visualization of results.
+
+ Attributes:
+ is_coco (bool): Whether the dataset is COCO.
+ is_lvis (bool): Whether the dataset is LVIS.
+ class_map (list[int]): Mapping from model class indices to dataset class indices.
+ metrics (DetMetrics): Object detection metrics calculator.
+ iouv (torch.Tensor): IoU thresholds for mAP calculation.
+ niou (int): Number of IoU thresholds.
+ lb (list[Any]): List for storing ground truth labels for hybrid saving.
+ jdict (list[dict[str, Any]]): List for storing JSON detection results.
+ stats (dict[str, list[torch.Tensor]]): Dictionary for storing statistics during validation.
+
+ Examples:
+ >>> from ultralytics.models.yolo.detect import DetectionValidator
+ >>> args = dict(model="yolo26n.pt", data="coco8.yaml")
+ >>> validator = DetectionValidator(args=args)
+ >>> validator()
+ """
def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks: dict | None = None) -> None:
+ """Initialize detection validator with necessary variables and settings.
+
+ Args:
+ dataloader (torch.utils.data.DataLoader, optional): DataLoader to use for validation.
+ save_dir (Path, optional): Directory to save results.
+ args (dict[str, Any], optional): Arguments for the validator.
+ _callbacks (dict, optional): Dictionary of callback functions.
+ """
super().__init__(dataloader, save_dir, args, _callbacks)
self.is_coco = False
self.is_lvis = False
@@ -31,6 +61,14 @@ self.metrics = DetMetrics()
def preprocess(self, batch: dict[str, Any]) -> dict[str, Any]:
+ """Preprocess batch of images for YOLO validation.
+
+ Args:
+ batch (dict[str, Any]): Batch containing images and annotations.
+
+ Returns:
+ (dict[str, Any]): Preprocessed batch.
+ """
for k, v in batch.items():
if isinstance(v, torch.Tensor):
batch[k] = v.to(self.device, non_blocking=self.device.type == "cuda")
@@ -38,6 +76,11 @@ return batch
def init_metrics(self, model: torch.nn.Module) -> None:
+ """Initialize evaluation metrics for YOLO detection validation.
+
+ Args:
+ model (torch.nn.Module): Model to validate.
+ """
val = self.data.get(self.args.split, "") # validation path
self.is_coco = (
isinstance(val, str)
@@ -56,9 +99,19 @@ self.confusion_matrix = ConfusionMatrix(names=model.names, save_matches=self.args.plots and self.args.visualize)
def get_desc(self) -> str:
+ """Return a formatted string summarizing class metrics of YOLO model."""
return ("%22s" + "%11s" * 6) % ("Class", "Images", "Instances", "Box(P", "R", "mAP50", "mAP50-95)")
def postprocess(self, preds: torch.Tensor) -> list[dict[str, torch.Tensor]]:
+ """Apply Non-maximum suppression to prediction outputs.
+
+ Args:
+ preds (torch.Tensor): Raw predictions from the model.
+
+ Returns:
+ (list[dict[str, torch.Tensor]]): Processed predictions after NMS, where each dict contains 'bboxes', 'conf',
+ 'cls', and 'extra' tensors.
+ """
outputs = nms.non_max_suppression(
preds,
self.args.conf,
@@ -73,6 +126,15 @@ return [{"bboxes": x[:, :4], "conf": x[:, 4], "cls": x[:, 5], "extra": x[:, 6:]} for x in outputs]
def _prepare_batch(self, si: int, batch: dict[str, Any]) -> dict[str, Any]:
+ """Prepare a batch of images and annotations for validation.
+
+ Args:
+ si (int): Sample index within the batch.
+ batch (dict[str, Any]): Batch data containing images and annotations.
+
+ Returns:
+ (dict[str, Any]): Prepared batch with processed annotations.
+ """
idx = batch["batch_idx"] == si
cls = batch["cls"][idx].squeeze(-1)
bbox = batch["bboxes"][idx]
@@ -91,11 +153,25 @@ }
def _prepare_pred(self, pred: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
+ """Prepare predictions for evaluation against ground truth.
+
+ Args:
+ pred (dict[str, torch.Tensor]): Post-processed predictions from the model.
+
+ Returns:
+ (dict[str, torch.Tensor]): Prepared predictions in native space.
+ """
if self.args.single_cls:
pred["cls"] *= 0
return pred
def update_metrics(self, preds: list[dict[str, torch.Tensor]], batch: dict[str, Any]) -> None:
+ """Update metrics with new predictions and ground truth.
+
+ Args:
+ preds (list[dict[str, torch.Tensor]]): List of predictions from the model.
+ batch (dict[str, Any]): Batch data containing ground truth.
+ """
for si, pred in enumerate(preds):
self.seen += 1
pbatch = self._prepare_batch(si, batch)
@@ -135,6 +211,7 @@ )
def finalize_metrics(self) -> None:
+ """Set final values for metrics speed and confusion matrix."""
if self.args.plots:
for normalize in True, False:
self.confusion_matrix.plot(save_dir=self.save_dir, normalize=normalize, on_plot=self.on_plot)
@@ -143,6 +220,7 @@ self.metrics.save_dir = self.save_dir
def gather_stats(self) -> None:
+ """Gather stats from all GPUs."""
if RANK == 0:
gathered_stats = [None] * dist.get_world_size()
dist.gather_object(self.metrics.stats, gathered_stats, dst=0)
@@ -164,11 +242,17 @@ self.metrics.clear_stats()
def get_stats(self) -> dict[str, Any]:
+ """Calculate and return metrics statistics.
+
+ Returns:
+ (dict[str, Any]): Dictionary containing metrics results.
+ """
self.metrics.process(save_dir=self.save_dir, plot=self.args.plots, on_plot=self.on_plot)
self.metrics.clear_stats()
return self.metrics.results_dict
def print_results(self) -> None:
+ """Print training/validation set metrics per class."""
pf = "%22s" + "%11i" * 2 + "%11.3g" * len(self.metrics.keys) # print format
LOGGER.info(pf % ("all", self.seen, self.metrics.nt_per_class.sum(), *self.metrics.mean_results()))
if self.metrics.nt_per_class.sum() == 0:
@@ -188,15 +272,44 @@ )
def _process_batch(self, preds: dict[str, torch.Tensor], batch: dict[str, Any]) -> dict[str, np.ndarray]:
+ """Return correct prediction matrix.
+
+ Args:
+ preds (dict[str, torch.Tensor]): Dictionary containing prediction data with 'bboxes' and 'cls' keys.
+ batch (dict[str, Any]): Batch dictionary containing ground truth data with 'bboxes' and 'cls' keys.
+
+ Returns:
+ (dict[str, np.ndarray]): Dictionary containing 'tp' key with correct prediction matrix of shape (N, 10) for
+ 10 IoU levels.
+ """
if batch["cls"].shape[0] == 0 or preds["cls"].shape[0] == 0:
return {"tp": np.zeros((preds["cls"].shape[0], self.niou), dtype=bool)}
iou = box_iou(batch["bboxes"], preds["bboxes"])
return {"tp": self.match_predictions(preds["cls"], batch["cls"], iou).cpu().numpy()}
def build_dataset(self, img_path: str, mode: str = "val", batch: int | None = None) -> torch.utils.data.Dataset:
+ """Build YOLO Dataset.
+
+ Args:
+ img_path (str): Path to the folder containing images.
+ mode (str): `train` mode or `val` mode, users are able to customize different augmentations for each mode.
+ batch (int, optional): Size of batches, this is for `rect`.
+
+ Returns:
+ (Dataset): YOLO dataset.
+ """
return build_yolo_dataset(self.args, img_path, batch, self.data, mode=mode, stride=self.stride)
def get_dataloader(self, dataset_path: str, batch_size: int) -> torch.utils.data.DataLoader:
+ """Construct and return dataloader.
+
+ Args:
+ dataset_path (str): Path to the dataset.
+ batch_size (int): Size of each batch.
+
+ Returns:
+ (torch.utils.data.DataLoader): DataLoader for validation.
+ """
dataset = self.build_dataset(dataset_path, batch=batch_size, mode="val")
return build_dataloader(
dataset,
@@ -209,6 +322,12 @@ )
def plot_val_samples(self, batch: dict[str, Any], ni: int) -> None:
+ """Plot validation image samples.
+
+ Args:
+ batch (dict[str, Any]): Batch containing images and annotations.
+ ni (int): Batch index.
+ """
plot_images(
labels=batch,
paths=batch["im_file"],
@@ -220,6 +339,14 @@ def plot_predictions(
self, batch: dict[str, Any], preds: list[dict[str, torch.Tensor]], ni: int, max_det: int | None = None
) -> None:
+ """Plot predicted bounding boxes on input images and save the result.
+
+ Args:
+ batch (dict[str, Any]): Batch containing images and annotations.
+ preds (list[dict[str, torch.Tensor]]): List of predictions from the model.
+ ni (int): Batch index.
+ max_det (int | None): Maximum number of detections to plot.
+ """
if not preds:
return
for i, pred in enumerate(preds):
@@ -238,6 +365,14 @@ ) # pred
def save_one_txt(self, predn: dict[str, torch.Tensor], save_conf: bool, shape: tuple[int, int], file: Path) -> None:
+ """Save YOLO detections to a txt file in normalized coordinates in a specific format.
+
+ Args:
+ predn (dict[str, torch.Tensor]): Dictionary containing predictions with keys 'bboxes', 'conf', and 'cls'.
+ save_conf (bool): Whether to save confidence scores.
+ shape (tuple[int, int]): Shape of the original image (height, width).
+ file (Path): File path to save the detections.
+ """
from ultralytics.engine.results import Results
Results(
@@ -248,6 +383,22 @@ ).save_txt(file, save_conf=save_conf)
def pred_to_json(self, predn: dict[str, torch.Tensor], pbatch: dict[str, Any]) -> None:
+ """Serialize YOLO predictions to COCO json format.
+
+ Args:
+ predn (dict[str, torch.Tensor]): Predictions dictionary containing 'bboxes', 'conf', and 'cls' keys with
+ bounding box coordinates, confidence scores, and class predictions.
+ pbatch (dict[str, Any]): Batch dictionary containing 'imgsz', 'ori_shape', 'ratio_pad', and 'im_file'.
+
+ Examples:
+ >>> result = {
+ ... "image_id": 42,
+ ... "file_name": "42.jpg",
+ ... "category_id": 18,
+ ... "bbox": [258.15, 41.29, 348.26, 243.78],
+ ... "score": 0.236,
+ ... }
+ """
path = Path(pbatch["im_file"])
stem = path.stem
image_id = int(stem) if stem.isnumeric() else stem
@@ -265,6 +416,7 @@ )
def scale_preds(self, predn: dict[str, torch.Tensor], pbatch: dict[str, Any]) -> dict[str, torch.Tensor]:
+ """Scales predictions to the original image size."""
return {
**predn,
"bboxes": ops.scale_boxes(
@@ -276,6 +428,14 @@ }
def eval_json(self, stats: dict[str, Any]) -> dict[str, Any]:
+ """Evaluate YOLO output in JSON format and return performance statistics.
+
+ Args:
+ stats (dict[str, Any]): Current statistics dictionary.
+
+ Returns:
+ (dict[str, Any]): Updated statistics dictionary with COCO/LVIS evaluation results.
+ """
pred_json = self.save_dir / "predictions.json" # predictions
anno_json = (
self.data["path"]
@@ -292,6 +452,24 @@ iou_types: str | list[str] = "bbox",
suffix: str | list[str] = "Box",
) -> dict[str, Any]:
+ """Evaluate COCO/LVIS metrics using faster-coco-eval library.
+
+ Performs evaluation using the faster-coco-eval library to compute mAP metrics for object detection. Updates the
+ provided stats dictionary with computed metrics including mAP50, mAP50-95, and LVIS-specific metrics if
+ applicable.
+
+ Args:
+ stats (dict[str, Any]): Dictionary to store computed metrics and statistics.
+ pred_json (str | Path): Path to JSON file containing predictions in COCO format.
+ anno_json (str | Path): Path to JSON file containing ground truth annotations in COCO format.
+ iou_types (str | list[str]): IoU type(s) for evaluation. Can be single string or list of strings. Common
+ values include "bbox", "segm", "keypoints". Defaults to "bbox".
+ suffix (str | list[str]): Suffix to append to metric names in stats dictionary. Should correspond to
+ iou_types if multiple types provided. Defaults to "Box".
+
+ Returns:
+ (dict[str, Any]): Updated stats dictionary containing the computed COCO/LVIS evaluation metrics.
+ """
if self.args.save_json and (self.is_coco or self.is_lvis) and len(self.jdict):
LOGGER.info(f"\nEvaluating faster-coco-eval mAP using {pred_json} and {anno_json}...")
try:
@@ -332,4 +510,4 @@ stats["fitness"] = stats["metrics/mAP50-95(B)"] # always use box mAP50-95 for fitness
except Exception as e:
LOGGER.warning(f"faster-coco-eval unable to run: {e}")
- return stats
+ return stats
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/yolo/detect/val.py |
Add docstrings to incomplete code | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import glob
import math
import os
import time
import urllib
from dataclasses import dataclass
from pathlib import Path
from threading import Thread
from typing import Any
import cv2
import numpy as np
import torch
from PIL import Image, ImageOps
from ultralytics.data.utils import FORMATS_HELP_MSG, IMG_FORMATS, VID_FORMATS
from ultralytics.utils import IS_COLAB, IS_KAGGLE, LOGGER, ops
from ultralytics.utils.checks import check_requirements
from ultralytics.utils.patches import imread
@dataclass
class SourceTypes:
stream: bool = False
screenshot: bool = False
from_img: bool = False
tensor: bool = False
class LoadStreams:
def __init__(self, sources: str = "file.streams", vid_stride: int = 1, buffer: bool = False, channels: int = 3):
torch.backends.cudnn.benchmark = True # faster for fixed-size inference
self.buffer = buffer # buffer input streams
self.running = True # running flag for Thread
self.mode = "stream"
self.vid_stride = vid_stride # video frame-rate stride
self.cv2_flag = cv2.IMREAD_GRAYSCALE if channels == 1 else cv2.IMREAD_COLOR # grayscale or color (BGR)
sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources]
n = len(sources)
self.bs = n
self.fps = [0] * n # frames per second
self.frames = [0] * n
self.threads = [None] * n
self.caps = [None] * n # video capture objects
self.imgs = [[] for _ in range(n)] # images
self.shape = [[] for _ in range(n)] # image shapes
self.sources = [ops.clean_str(x).replace(os.sep, "_") for x in sources] # clean source names for later
for i, s in enumerate(sources): # index, source
# Start thread to read frames from video stream
st = f"{i + 1}/{n}: {s}... "
if urllib.parse.urlparse(s).hostname in {"www.youtube.com", "youtube.com", "youtu.be"}: # YouTube video
# YouTube format i.e. 'https://www.youtube.com/watch?v=Jsn8D3aC840' or 'https://youtu.be/Jsn8D3aC840'
s = get_best_youtube_url(s)
s = int(s) if s.isnumeric() else s # i.e. s = '0' local webcam
if s == 0 and (IS_COLAB or IS_KAGGLE):
raise NotImplementedError(
"'source=0' webcam not supported in Colab and Kaggle notebooks. "
"Try running 'source=0' in a local environment."
)
self.caps[i] = cv2.VideoCapture(s) # store video capture object
if not self.caps[i].isOpened():
raise ConnectionError(f"{st}Failed to open {s}")
w = int(self.caps[i].get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(self.caps[i].get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = self.caps[i].get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan
self.frames[i] = max(int(self.caps[i].get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float(
"inf"
) # infinite stream fallback
self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30 # 30 FPS fallback
success, im = self.caps[i].read() # guarantee first frame
im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)[..., None] if self.cv2_flag == cv2.IMREAD_GRAYSCALE else im
if not success or im is None:
raise ConnectionError(f"{st}Failed to read images from {s}")
self.imgs[i].append(im)
self.shape[i] = im.shape
self.threads[i] = Thread(target=self.update, args=([i, self.caps[i], s]), daemon=True)
LOGGER.info(f"{st}Success ✅ ({self.frames[i]} frames of shape {w}x{h} at {self.fps[i]:.2f} FPS)")
self.threads[i].start()
LOGGER.info("") # newline
def update(self, i: int, cap: cv2.VideoCapture, stream: str):
n, f = 0, self.frames[i] # frame number, frame array
while self.running and cap.isOpened() and n < (f - 1):
if len(self.imgs[i]) < 30: # keep a <=30-image buffer
n += 1
cap.grab() # .read() = .grab() followed by .retrieve()
if n % self.vid_stride == 0:
success, im = cap.retrieve()
im = (
cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)[..., None] if self.cv2_flag == cv2.IMREAD_GRAYSCALE else im
)
if not success:
im = np.zeros(self.shape[i], dtype=np.uint8)
LOGGER.warning("Video stream unresponsive, please check your IP camera connection.")
cap.open(stream) # re-open stream if signal was lost
if self.buffer:
self.imgs[i].append(im)
else:
self.imgs[i] = [im]
else:
time.sleep(0.01) # wait until the buffer is empty
def close(self):
self.running = False # stop flag for Thread
for thread in self.threads:
if thread.is_alive():
thread.join(timeout=5) # Add timeout
for cap in self.caps: # Iterate through the stored VideoCapture objects
try:
cap.release() # release video capture
except Exception as e:
LOGGER.warning(f"Could not release VideoCapture object: {e}")
def __iter__(self):
self.count = -1
return self
def __next__(self) -> tuple[list[str], list[np.ndarray], list[str]]:
self.count += 1
images = []
for i, x in enumerate(self.imgs):
# Wait until a frame is available in each buffer
while not x:
if not self.threads[i].is_alive():
self.close()
raise StopIteration
time.sleep(1 / min(self.fps))
x = self.imgs[i]
if not x:
LOGGER.warning(f"Waiting for stream {i}")
# Get and remove the first frame from imgs buffer
if self.buffer:
images.append(x.pop(0))
# Get the last frame, and clear the rest from the imgs buffer
else:
images.append(x.pop(-1) if x else np.zeros(self.shape[i], dtype=np.uint8))
x.clear()
return self.sources, images, [""] * self.bs
def __len__(self) -> int:
return self.bs # 1E12 frames = 32 streams at 30 FPS for 30 years
class LoadScreenshots:
def __init__(self, source: str, channels: int = 3):
check_requirements("mss")
import mss
source, *params = source.split()
self.screen, left, top, width, height = 0, None, None, None, None # default to full screen 0
if len(params) == 1:
self.screen = int(params[0])
elif len(params) == 4:
left, top, width, height = (int(x) for x in params)
elif len(params) == 5:
self.screen, left, top, width, height = (int(x) for x in params)
self.mode = "stream"
self.frame = 0
self.sct = mss.mss()
self.bs = 1
self.fps = 30
self.cv2_flag = cv2.IMREAD_GRAYSCALE if channels == 1 else cv2.IMREAD_COLOR # grayscale or color (BGR)
# Parse monitor shape
monitor = self.sct.monitors[self.screen]
self.top = monitor["top"] if top is None else (monitor["top"] + top)
self.left = monitor["left"] if left is None else (monitor["left"] + left)
self.width = width or monitor["width"]
self.height = height or monitor["height"]
self.monitor = {"left": self.left, "top": self.top, "width": self.width, "height": self.height}
def __iter__(self):
return self
def __next__(self) -> tuple[list[str], list[np.ndarray], list[str]]:
im0 = np.asarray(self.sct.grab(self.monitor))[:, :, :3] # BGRA to BGR
im0 = cv2.cvtColor(im0, cv2.COLOR_BGR2GRAY)[..., None] if self.cv2_flag == cv2.IMREAD_GRAYSCALE else im0
s = f"screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: "
self.frame += 1
return [str(self.screen)], [im0], [s] # screen, img, string
class LoadImagesAndVideos:
def __init__(self, path: str | Path | list, batch: int = 1, vid_stride: int = 1, channels: int = 3):
parent = None
if isinstance(path, str) and Path(path).suffix in {".txt", ".csv"}: # txt/csv file with source paths
parent, content = Path(path).parent, Path(path).read_text()
path = content.splitlines() if Path(path).suffix == ".txt" else content.split(",") # list of sources
path = [p.strip() for p in path]
files = []
for p in sorted(path) if isinstance(path, (list, tuple)) else [path]:
a = str(Path(p).absolute()) # do not use .resolve() https://github.com/ultralytics/ultralytics/issues/2912
if "*" in a:
files.extend(sorted(glob.glob(a, recursive=True))) # glob
elif os.path.isdir(a):
files.extend(sorted(glob.glob(os.path.join(a, "*.*")))) # dir
elif os.path.isfile(a):
files.append(a) # files (absolute or relative to CWD)
elif parent and (parent / p).is_file():
files.append(str((parent / p).absolute())) # files (relative to *.txt file parent)
else:
raise FileNotFoundError(f"{p} does not exist")
# Define files as images or videos
images, videos = [], []
for f in files:
suffix = f.rpartition(".")[-1].lower() # Get file extension without the dot and lowercase
if suffix in IMG_FORMATS:
images.append(f)
elif suffix in VID_FORMATS:
videos.append(f)
ni, nv = len(images), len(videos)
self.files = images + videos
self.nf = ni + nv # number of files
self.ni = ni # number of images
self.video_flag = [False] * ni + [True] * nv
self.mode = "video" if ni == 0 else "image" # default to video if no images
self.vid_stride = vid_stride # video frame-rate stride
self.bs = batch
self.cv2_flag = cv2.IMREAD_GRAYSCALE if channels == 1 else cv2.IMREAD_COLOR # grayscale or color (BGR)
if any(videos):
self._new_video(videos[0]) # new video
else:
self.cap = None
if self.nf == 0:
raise FileNotFoundError(f"No images or videos found in {p}. {FORMATS_HELP_MSG}")
def __iter__(self):
self.count = 0
return self
def __next__(self) -> tuple[list[str], list[np.ndarray], list[str]]:
paths, imgs, info = [], [], []
while len(imgs) < self.bs:
if self.count >= self.nf: # end of file list
if imgs:
return paths, imgs, info # return last partial batch
else:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
self.mode = "video"
if not self.cap or not self.cap.isOpened():
self._new_video(path)
success = False
for _ in range(self.vid_stride):
success = self.cap.grab()
if not success:
break # end of video or failure
if success:
success, im0 = self.cap.retrieve()
im0 = (
cv2.cvtColor(im0, cv2.COLOR_BGR2GRAY)[..., None]
if self.cv2_flag == cv2.IMREAD_GRAYSCALE
else im0
)
if success:
self.frame += 1
paths.append(path)
imgs.append(im0)
info.append(f"video {self.count + 1}/{self.nf} (frame {self.frame}/{self.frames}) {path}: ")
if self.frame == self.frames: # end of video
self.count += 1
self.cap.release()
else:
# Move to the next file if the current video ended or failed to open
self.count += 1
if self.cap:
self.cap.release()
if self.count < self.nf:
self._new_video(self.files[self.count])
else:
# Handle image files
self.mode = "image"
im0 = imread(path, flags=self.cv2_flag) # BGR
if im0 is None:
LOGGER.warning(f"Image Read Error {path}")
else:
paths.append(path)
imgs.append(im0)
info.append(f"image {self.count + 1}/{self.nf} {path}: ")
self.count += 1 # move to the next file
if self.count >= self.ni: # end of image list
break
return paths, imgs, info
def _new_video(self, path: str):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.fps = int(self.cap.get(cv2.CAP_PROP_FPS))
if not self.cap.isOpened():
raise FileNotFoundError(f"Failed to open video {path}")
self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride)
def __len__(self) -> int:
return math.ceil(self.nf / self.bs) # number of batches
class LoadPilAndNumpy:
def __init__(self, im0: Image.Image | np.ndarray | list, channels: int = 3):
if not isinstance(im0, list):
im0 = [im0]
# use `image{i}.jpg` when Image.filename returns an empty path.
self.paths = [getattr(im, "filename", "") or f"image{i}.jpg" for i, im in enumerate(im0)]
pil_flag = "L" if channels == 1 else "RGB" # grayscale or RGB
self.im0 = [self._single_check(im, pil_flag) for im in im0]
self.mode = "image"
self.bs = len(self.im0)
@staticmethod
def _single_check(im: Image.Image | np.ndarray, flag: str = "RGB") -> np.ndarray:
assert isinstance(im, (Image.Image, np.ndarray)), f"Expected PIL/np.ndarray image type, but got {type(im)}"
if isinstance(im, Image.Image):
im = np.asarray(im.convert(flag))
# Add a new axis if grayscale; convert RGB -> BGR for OpenCV compatibility.
im = im[..., None] if flag == "L" else im[..., ::-1]
im = np.ascontiguousarray(im) # contiguous
elif im.ndim == 2: # grayscale in numpy form
im = im[..., None]
return im
def __len__(self) -> int:
return len(self.im0)
def __next__(self) -> tuple[list[str], list[np.ndarray], list[str]]:
if self.count == 1: # loop only once as it's batch inference
raise StopIteration
self.count += 1
return self.paths, self.im0, [""] * self.bs
def __iter__(self):
self.count = 0
return self
class LoadTensor:
def __init__(self, im0: torch.Tensor) -> None:
self.im0 = self._single_check(im0)
self.bs = self.im0.shape[0]
self.mode = "image"
self.paths = [getattr(im, "filename", f"image{i}.jpg") for i, im in enumerate(im0)]
@staticmethod
def _single_check(im: torch.Tensor, stride: int = 32) -> torch.Tensor:
s = (
f"torch.Tensor inputs should be BCHW i.e. shape(1, 3, 640, 640) "
f"divisible by stride {stride}. Input shape{tuple(im.shape)} is incompatible."
)
if len(im.shape) != 4:
if len(im.shape) != 3:
raise ValueError(s)
LOGGER.warning(s)
im = im.unsqueeze(0)
if im.shape[2] % stride or im.shape[3] % stride:
raise ValueError(s)
if im.max() > 1.0 + torch.finfo(im.dtype).eps: # torch.float32 eps is 1.2e-07
LOGGER.warning(
f"torch.Tensor inputs should be normalized 0.0-1.0 but max value is {im.max()}. Dividing input by 255."
)
im = im.float() / 255.0
return im
def __iter__(self):
self.count = 0
return self
def __next__(self) -> tuple[list[str], torch.Tensor, list[str]]:
if self.count == 1:
raise StopIteration
self.count += 1
return self.paths, self.im0, [""] * self.bs
def __len__(self) -> int:
return self.bs
def autocast_list(source: list[Any]) -> list[Image.Image | np.ndarray]:
files = []
for im in source:
if isinstance(im, (str, Path)): # filename or uri
files.append(
ImageOps.exif_transpose(Image.open(urllib.request.urlopen(im) if str(im).startswith("http") else im))
)
elif isinstance(im, (Image.Image, np.ndarray)): # PIL or np Image
files.append(im)
else:
raise TypeError(
f"type {type(im).__name__} is not a supported Ultralytics prediction source type. \n"
f"See https://docs.ultralytics.com/modes/predict for supported source types."
)
return files
def get_best_youtube_url(url: str, method: str = "pytube") -> str | None:
if method == "pytube":
# Switched from pytube to pytubefix to resolve https://github.com/pytube/pytube/issues/1954
check_requirements("pytubefix>=6.5.2")
from pytubefix import YouTube
streams = YouTube(url).streams.filter(file_extension="mp4", only_video=True)
streams = sorted(streams, key=lambda s: s.resolution, reverse=True) # sort streams by resolution
for stream in streams:
if stream.resolution and int(stream.resolution[:-1]) >= 1080: # check if resolution is at least 1080p
return stream.url
elif method == "pafy":
check_requirements(("pafy", "youtube_dl==2020.12.2"))
import pafy
return pafy.new(url).getbestvideo(preftype="mp4").url
elif method == "yt-dlp":
check_requirements("yt-dlp")
import yt_dlp
with yt_dlp.YoutubeDL({"quiet": True}) as ydl:
info_dict = ydl.extract_info(url, download=False) # extract info
for f in reversed(info_dict.get("formats", [])): # reversed because best is usually last
# Find a format with video codec, no audio, *.mp4 extension at least 1920x1080 size
good_size = (f.get("width") or 0) >= 1920 or (f.get("height") or 0) >= 1080
if good_size and f["vcodec"] != "none" and f["acodec"] == "none" and f["ext"] == "mp4":
return f.get("url")
# Define constants
LOADERS = (LoadStreams, LoadPilAndNumpy, LoadImagesAndVideos, LoadScreenshots) | --- +++ @@ -25,6 +25,24 @@
@dataclass
class SourceTypes:
+ """Class to represent various types of input sources for predictions.
+
+ This class uses dataclass to define boolean flags for different types of input sources that can be used for making
+ predictions with YOLO models.
+
+ Attributes:
+ stream (bool): Flag indicating if the input source is a video stream.
+ screenshot (bool): Flag indicating if the input source is a screenshot.
+ from_img (bool): Flag indicating if the input source is an image file.
+ tensor (bool): Flag indicating if the input source is a tensor.
+
+ Examples:
+ >>> source_types = SourceTypes(stream=True, screenshot=False, from_img=False)
+ >>> print(source_types.stream)
+ True
+ >>> print(source_types.from_img)
+ False
+ """
stream: bool = False
screenshot: bool = False
@@ -33,8 +51,55 @@
class LoadStreams:
+ """Stream Loader for various types of video streams.
+
+ Supports RTSP, RTMP, HTTP, and TCP streams. This class handles the loading and processing of multiple video streams
+ simultaneously, making it suitable for real-time video analysis tasks.
+
+ Attributes:
+ sources (list[str]): The source input paths or URLs for the video streams.
+ vid_stride (int): Video frame-rate stride.
+ buffer (bool): Whether to buffer input streams.
+ running (bool): Flag to indicate if the streaming thread is running.
+ mode (str): Set to 'stream' indicating real-time capture.
+ imgs (list[list[np.ndarray]]): List of image frames for each stream.
+ fps (list[float]): List of FPS for each stream.
+ frames (list[int]): List of total frames for each stream.
+ threads (list[Thread]): List of threads for each stream.
+ shape (list[tuple[int, int, int]]): List of shapes for each stream.
+ caps (list[cv2.VideoCapture]): List of cv2.VideoCapture objects for each stream.
+ bs (int): Batch size for processing.
+ cv2_flag (int): OpenCV flag for image reading (grayscale or color/BGR).
+
+ Methods:
+ update: Read stream frames in daemon thread.
+ close: Close stream loader and release resources.
+ __iter__: Returns an iterator object for the class.
+ __next__: Returns source paths, transformed, and original images for processing.
+ __len__: Return the length of the sources object.
+
+ Examples:
+ >>> stream_loader = LoadStreams("rtsp://example.com/stream1.mp4")
+ >>> for sources, imgs, _ in stream_loader:
+ ... # Process the images
+ ... pass
+ >>> stream_loader.close()
+
+ Notes:
+ - The class uses threading to efficiently load frames from multiple streams simultaneously.
+ - It automatically handles YouTube links, converting them to the best available stream URL.
+ - The class implements a buffer system to manage frame storage and retrieval.
+ """
def __init__(self, sources: str = "file.streams", vid_stride: int = 1, buffer: bool = False, channels: int = 3):
+ """Initialize stream loader for multiple video sources, supporting various stream types.
+
+ Args:
+ sources (str): Path to streams file or single stream URL.
+ vid_stride (int): Video frame-rate stride.
+ buffer (bool): Whether to buffer input streams.
+ channels (int): Number of image channels (1 for grayscale, 3 for color).
+ """
torch.backends.cudnn.benchmark = True # faster for fixed-size inference
self.buffer = buffer # buffer input streams
self.running = True # running flag for Thread
@@ -87,6 +152,7 @@ LOGGER.info("") # newline
def update(self, i: int, cap: cv2.VideoCapture, stream: str):
+ """Read stream frames in daemon thread and update image buffer."""
n, f = 0, self.frames[i] # frame number, frame array
while self.running and cap.isOpened() and n < (f - 1):
if len(self.imgs[i]) < 30: # keep a <=30-image buffer
@@ -109,6 +175,7 @@ time.sleep(0.01) # wait until the buffer is empty
def close(self):
+ """Terminate stream loader, stop threads, and release video capture resources."""
self.running = False # stop flag for Thread
for thread in self.threads:
if thread.is_alive():
@@ -120,10 +187,12 @@ LOGGER.warning(f"Could not release VideoCapture object: {e}")
def __iter__(self):
+ """Return an iterator object and reset the frame counter."""
self.count = -1
return self
def __next__(self) -> tuple[list[str], list[np.ndarray], list[str]]:
+ """Return the next batch of frames from multiple video streams for processing."""
self.count += 1
images = []
@@ -150,12 +219,47 @@ return self.sources, images, [""] * self.bs
def __len__(self) -> int:
+ """Return the number of video streams in the LoadStreams object."""
return self.bs # 1E12 frames = 32 streams at 30 FPS for 30 years
class LoadScreenshots:
+ """Ultralytics screenshot dataloader for capturing and processing screen images.
+
+ This class manages the loading of screenshot images for processing with YOLO. It is suitable for use with `yolo
+ predict source=screen`.
+
+ Attributes:
+ screen (int): The screen number to capture.
+ left (int): The left coordinate for screen capture area.
+ top (int): The top coordinate for screen capture area.
+ width (int): The width of the screen capture area.
+ height (int): The height of the screen capture area.
+ mode (str): Set to 'stream' indicating real-time capture.
+ frame (int): Counter for captured frames.
+ sct (mss.mss): Screen capture object from `mss` library.
+ bs (int): Batch size, set to 1.
+ fps (int): Frames per second, set to 30.
+ monitor (dict[str, int]): Monitor configuration details.
+ cv2_flag (int): OpenCV flag for image reading (grayscale or color/BGR).
+
+ Methods:
+ __iter__: Returns an iterator object.
+ __next__: Captures the next screenshot and returns it.
+
+ Examples:
+ >>> loader = LoadScreenshots("0 100 100 640 480") # screen 0, top-left (100,100), 640x480
+ >>> for sources, imgs, info in loader:
+ ... print(f"Captured frame: {imgs[0].shape}")
+ """
def __init__(self, source: str, channels: int = 3):
+ """Initialize screenshot capture with specified screen and region parameters.
+
+ Args:
+ source (str): Screen capture source string in format "screen_num left top width height".
+ channels (int): Number of image channels (1 for grayscale, 3 for color).
+ """
check_requirements("mss")
import mss
@@ -183,9 +287,11 @@ self.monitor = {"left": self.left, "top": self.top, "width": self.width, "height": self.height}
def __iter__(self):
+ """Return an iterator object for the screenshot capture."""
return self
def __next__(self) -> tuple[list[str], list[np.ndarray], list[str]]:
+ """Capture and return the next screenshot as a numpy array using the mss library."""
im0 = np.asarray(self.sct.grab(self.monitor))[:, :, :3] # BGRA to BGR
im0 = cv2.cvtColor(im0, cv2.COLOR_BGR2GRAY)[..., None] if self.cv2_flag == cv2.IMREAD_GRAYSCALE else im0
s = f"screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: "
@@ -195,8 +301,53 @@
class LoadImagesAndVideos:
+ """A class for loading and processing images and videos for YOLO object detection.
+
+ This class manages the loading and pre-processing of image and video data from various sources, including single
+ image files, video files, and lists of image and video paths.
+
+ Attributes:
+ files (list[str]): List of image and video file paths.
+ nf (int): Total number of files (images and videos).
+ video_flag (list[bool]): Flags indicating whether a file is a video (True) or an image (False).
+ mode (str): Current mode, 'image' or 'video'.
+ vid_stride (int): Stride for video frame-rate.
+ bs (int): Batch size.
+ cap (cv2.VideoCapture): Video capture object for OpenCV.
+ frame (int): Frame counter for video.
+ frames (int): Total number of frames in the video.
+ count (int): Counter for iteration, initialized at 0 during __iter__().
+ ni (int): Number of images.
+ cv2_flag (int): OpenCV flag for image reading (grayscale or color/BGR).
+
+ Methods:
+ __init__: Initialize the LoadImagesAndVideos object.
+ __iter__: Returns an iterator object for VideoStream or ImageFolder.
+ __next__: Returns the next batch of images or video frames along with their paths and metadata.
+ _new_video: Creates a new video capture object for the given path.
+ __len__: Returns the number of batches in the object.
+
+ Examples:
+ >>> loader = LoadImagesAndVideos("path/to/data", batch=32, vid_stride=1)
+ >>> for paths, imgs, info in loader:
+ ... # Process batch of images or video frames
+ ... pass
+
+ Notes:
+ - Supports various image formats including HEIC.
+ - Handles both local files and directories.
+ - Can read from a text file containing paths to images and videos.
+ """
def __init__(self, path: str | Path | list, batch: int = 1, vid_stride: int = 1, channels: int = 3):
+ """Initialize dataloader for images and videos, supporting various input formats.
+
+ Args:
+ path (str | Path | list): Path to images/videos, directory, or list of paths.
+ batch (int): Batch size for processing.
+ vid_stride (int): Video frame-rate stride.
+ channels (int): Number of image channels (1 for grayscale, 3 for color).
+ """
parent = None
if isinstance(path, str) and Path(path).suffix in {".txt", ".csv"}: # txt/csv file with source paths
parent, content = Path(path).parent, Path(path).read_text()
@@ -242,10 +393,12 @@ raise FileNotFoundError(f"No images or videos found in {p}. {FORMATS_HELP_MSG}")
def __iter__(self):
+ """Iterate through image/video files, yielding source paths, images, and metadata."""
self.count = 0
return self
def __next__(self) -> tuple[list[str], list[np.ndarray], list[str]]:
+ """Return the next batch of images or video frames with their paths and metadata."""
paths, imgs, info = [], [], []
while len(imgs) < self.bs:
if self.count >= self.nf: # end of file list
@@ -305,6 +458,7 @@ return paths, imgs, info
def _new_video(self, path: str):
+ """Create a new video capture object for the given path and initialize video-related attributes."""
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.fps = int(self.cap.get(cv2.CAP_PROP_FPS))
@@ -313,12 +467,43 @@ self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride)
def __len__(self) -> int:
+ """Return the number of batches in the dataset."""
return math.ceil(self.nf / self.bs) # number of batches
class LoadPilAndNumpy:
+ """Load images from PIL and Numpy arrays for batch processing.
+
+ This class manages loading and pre-processing of image data from both PIL and Numpy formats. It performs basic
+ validation and format conversion to ensure that the images are in the required format for downstream processing.
+
+ Attributes:
+ paths (list[str]): List of image paths or autogenerated filenames.
+ im0 (list[np.ndarray]): List of images stored as Numpy arrays.
+ mode (str): Type of data being processed, set to 'image'.
+ bs (int): Batch size, equivalent to the length of `im0`.
+
+ Methods:
+ _single_check: Validate and format a single image to a Numpy array.
+
+ Examples:
+ >>> from PIL import Image
+ >>> import numpy as np
+ >>> pil_img = Image.new("RGB", (100, 100))
+ >>> np_img = np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8)
+ >>> loader = LoadPilAndNumpy([pil_img, np_img])
+ >>> paths, images, _ = next(iter(loader))
+ >>> print(f"Loaded {len(images)} images")
+ Loaded 2 images
+ """
def __init__(self, im0: Image.Image | np.ndarray | list, channels: int = 3):
+ """Initialize a loader for PIL and Numpy images, converting inputs to a standardized format.
+
+ Args:
+ im0 (PIL.Image.Image | np.ndarray | list): Single image or list of images in PIL or numpy format.
+ channels (int): Number of image channels (1 for grayscale, 3 for color).
+ """
if not isinstance(im0, list):
im0 = [im0]
# use `image{i}.jpg` when Image.filename returns an empty path.
@@ -330,6 +515,12 @@
@staticmethod
def _single_check(im: Image.Image | np.ndarray, flag: str = "RGB") -> np.ndarray:
+ """Validate and format an image to a NumPy array.
+
+ Notes:
+ - PIL inputs are converted to NumPy and returned in OpenCV-compatible BGR order for color images.
+ - NumPy inputs are returned as-is (no channel-order conversion is applied).
+ """
assert isinstance(im, (Image.Image, np.ndarray)), f"Expected PIL/np.ndarray image type, but got {type(im)}"
if isinstance(im, Image.Image):
im = np.asarray(im.convert(flag))
@@ -341,22 +532,51 @@ return im
def __len__(self) -> int:
+ """Return the length of the 'im0' attribute, representing the number of loaded images."""
return len(self.im0)
def __next__(self) -> tuple[list[str], list[np.ndarray], list[str]]:
+ """Return the next batch of images, paths, and metadata for processing."""
if self.count == 1: # loop only once as it's batch inference
raise StopIteration
self.count += 1
return self.paths, self.im0, [""] * self.bs
def __iter__(self):
+ """Iterate through PIL/numpy images, yielding paths, raw images, and metadata for processing."""
self.count = 0
return self
class LoadTensor:
+ """A class for loading and processing tensor data for object detection tasks.
+
+ This class handles the loading and pre-processing of image data from PyTorch tensors, preparing them for further
+ processing in object detection pipelines.
+
+ Attributes:
+ im0 (torch.Tensor): The input tensor containing the image(s) with shape (B, C, H, W).
+ bs (int): Batch size, inferred from the shape of `im0`.
+ mode (str): Current processing mode, set to 'image'.
+ paths (list[str]): List of image paths or auto-generated filenames.
+
+ Methods:
+ _single_check: Validates and formats an input tensor.
+
+ Examples:
+ >>> import torch
+ >>> tensor = torch.rand(1, 3, 640, 640)
+ >>> loader = LoadTensor(tensor)
+ >>> paths, images, info = next(iter(loader))
+ >>> print(f"Processed {len(images)} images")
+ """
def __init__(self, im0: torch.Tensor) -> None:
+ """Initialize LoadTensor object for processing torch.Tensor image data.
+
+ Args:
+ im0 (torch.Tensor): Input tensor with shape (B, C, H, W).
+ """
self.im0 = self._single_check(im0)
self.bs = self.im0.shape[0]
self.mode = "image"
@@ -364,6 +584,7 @@
@staticmethod
def _single_check(im: torch.Tensor, stride: int = 32) -> torch.Tensor:
+ """Validate and format a single image tensor, ensuring correct shape and normalization."""
s = (
f"torch.Tensor inputs should be BCHW i.e. shape(1, 3, 640, 640) "
f"divisible by stride {stride}. Input shape{tuple(im.shape)} is incompatible."
@@ -384,20 +605,24 @@ return im
def __iter__(self):
+ """Yield an iterator object for iterating through tensor image data."""
self.count = 0
return self
def __next__(self) -> tuple[list[str], torch.Tensor, list[str]]:
+ """Yield the next batch of tensor images and metadata for processing."""
if self.count == 1:
raise StopIteration
self.count += 1
return self.paths, self.im0, [""] * self.bs
def __len__(self) -> int:
+ """Return the batch size of the tensor input."""
return self.bs
def autocast_list(source: list[Any]) -> list[Image.Image | np.ndarray]:
+ """Convert a list of sources into a list of numpy arrays or PIL images for Ultralytics prediction."""
files = []
for im in source:
if isinstance(im, (str, Path)): # filename or uri
@@ -416,6 +641,26 @@
def get_best_youtube_url(url: str, method: str = "pytube") -> str | None:
+ """Retrieve the URL of the best quality MP4 video stream from a given YouTube video.
+
+ Args:
+ url (str): The URL of the YouTube video.
+ method (str): The method to use for extracting video info. Options are "pytube", "pafy", and "yt-dlp".
+
+ Returns:
+ (str | None): The URL of the best quality MP4 video stream, or None if no suitable stream is found.
+
+ Examples:
+ >>> url = "https://www.youtube.com/watch?v=dQw4w9WgXcQ"
+ >>> best_url = get_best_youtube_url(url)
+ >>> print(best_url)
+ https://rr4---sn-q4flrnek.googlevideo.com/videoplayback?expire=...
+
+ Notes:
+ - Requires additional libraries based on the chosen method: pytubefix, pafy, or yt-dlp.
+ - The function prioritizes streams with at least 1080p resolution when available.
+ - For the "yt-dlp" method, it looks for formats with video codec, no audio, and *.mp4 extension.
+ """
if method == "pytube":
# Switched from pytube to pytubefix to resolve https://github.com/pytube/pytube/issues/1954
check_requirements("pytubefix>=6.5.2")
@@ -447,4 +692,4 @@
# Define constants
-LOADERS = (LoadStreams, LoadPilAndNumpy, LoadImagesAndVideos, LoadScreenshots)
+LOADERS = (LoadStreams, LoadPilAndNumpy, LoadImagesAndVideos, LoadScreenshots)
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/data/loaders.py |
Create documentation for each function signature | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import concurrent.futures
import statistics
import time
class GCPRegions:
def __init__(self):
self.regions = {
"asia-east1": (1, "Taiwan", "China"),
"asia-east2": (2, "Hong Kong", "China"),
"asia-northeast1": (1, "Tokyo", "Japan"),
"asia-northeast2": (1, "Osaka", "Japan"),
"asia-northeast3": (2, "Seoul", "South Korea"),
"asia-south1": (2, "Mumbai", "India"),
"asia-south2": (2, "Delhi", "India"),
"asia-southeast1": (2, "Jurong West", "Singapore"),
"asia-southeast2": (2, "Jakarta", "Indonesia"),
"australia-southeast1": (2, "Sydney", "Australia"),
"australia-southeast2": (2, "Melbourne", "Australia"),
"europe-central2": (2, "Warsaw", "Poland"),
"europe-north1": (1, "Hamina", "Finland"),
"europe-southwest1": (1, "Madrid", "Spain"),
"europe-west1": (1, "St. Ghislain", "Belgium"),
"europe-west10": (2, "Berlin", "Germany"),
"europe-west12": (2, "Turin", "Italy"),
"europe-west2": (2, "London", "United Kingdom"),
"europe-west3": (2, "Frankfurt", "Germany"),
"europe-west4": (1, "Eemshaven", "Netherlands"),
"europe-west6": (2, "Zurich", "Switzerland"),
"europe-west8": (1, "Milan", "Italy"),
"europe-west9": (1, "Paris", "France"),
"me-central1": (2, "Doha", "Qatar"),
"me-west1": (1, "Tel Aviv", "Israel"),
"northamerica-northeast1": (2, "Montreal", "Canada"),
"northamerica-northeast2": (2, "Toronto", "Canada"),
"southamerica-east1": (2, "São Paulo", "Brazil"),
"southamerica-west1": (2, "Santiago", "Chile"),
"us-central1": (1, "Iowa", "United States"),
"us-east1": (1, "South Carolina", "United States"),
"us-east4": (1, "Northern Virginia", "United States"),
"us-east5": (1, "Columbus", "United States"),
"us-south1": (1, "Dallas", "United States"),
"us-west1": (1, "Oregon", "United States"),
"us-west2": (2, "Los Angeles", "United States"),
"us-west3": (2, "Salt Lake City", "United States"),
"us-west4": (2, "Las Vegas", "United States"),
}
def tier1(self) -> list[str]:
return [region for region, info in self.regions.items() if info[0] == 1]
def tier2(self) -> list[str]:
return [region for region, info in self.regions.items() if info[0] == 2]
@staticmethod
def _ping_region(region: str, attempts: int = 1) -> tuple[str, float, float, float, float]:
import requests # scoped as slow import
url = f"https://{region}-docker.pkg.dev"
latencies = []
for _ in range(attempts):
try:
start_time = time.time()
_ = requests.head(url, timeout=5)
latency = (time.time() - start_time) * 1000 # Convert latency to milliseconds
if latency != float("inf"):
latencies.append(latency)
except requests.RequestException:
pass
if not latencies:
return region, float("inf"), float("inf"), float("inf"), float("inf")
std_dev = statistics.stdev(latencies) if len(latencies) > 1 else 0
return region, statistics.mean(latencies), std_dev, min(latencies), max(latencies)
def lowest_latency(
self,
top: int = 1,
verbose: bool = False,
tier: int | None = None,
attempts: int = 1,
) -> list[tuple[str, float, float, float, float]]:
if verbose:
print(f"Testing GCP regions for latency (with {attempts} {'retry' if attempts == 1 else 'attempts'})...")
regions_to_test = [k for k, v in self.regions.items() if v[0] == tier] if tier else list(self.regions.keys())
with concurrent.futures.ThreadPoolExecutor(max_workers=50) as executor:
results = list(executor.map(lambda r: self._ping_region(r, attempts), regions_to_test))
sorted_results = sorted(results, key=lambda x: x[1])
if verbose:
print(f"{'Region':<25} {'Location':<35} {'Tier':<5} Latency (ms)")
for region, mean, std, min_, max_ in sorted_results:
tier, city, country = self.regions[region]
location = f"{city}, {country}"
if mean == float("inf"):
print(f"{region:<25} {location:<35} {tier:<5} Timeout")
else:
print(f"{region:<25} {location:<35} {tier:<5} {mean:.0f} ± {std:.0f} ({min_:.0f} - {max_:.0f})")
print(f"\nLowest latency region{'s' if top > 1 else ''}:")
for region, mean, std, min_, max_ in sorted_results[:top]:
tier, city, country = self.regions[region]
location = f"{city}, {country}"
print(f"{region} ({location}, {mean:.0f} ± {std:.0f} ms ({min_:.0f} - {max_:.0f}))")
return sorted_results[:top]
# Usage example
if __name__ == "__main__":
regions = GCPRegions()
top_3_latency_tier1 = regions.lowest_latency(top=3, verbose=True, tier=1, attempts=3) | --- +++ @@ -8,8 +8,28 @@
class GCPRegions:
+ """A class for managing and analyzing Google Cloud Platform (GCP) regions.
+
+ This class provides functionality to initialize, categorize, and analyze GCP regions based on their geographical
+ location, tier classification, and network latency.
+
+ Attributes:
+ regions (dict[str, tuple[int, str, str]]): A dictionary of GCP regions with their tier, city, and country.
+
+ Methods:
+ tier1: Returns a list of tier 1 GCP regions.
+ tier2: Returns a list of tier 2 GCP regions.
+ lowest_latency: Determines the GCP region(s) with the lowest network latency.
+
+ Examples:
+ >>> from ultralytics.hub.google import GCPRegions
+ >>> regions = GCPRegions()
+ >>> lowest_latency_region = regions.lowest_latency(verbose=True, attempts=3)
+ >>> print(f"Lowest latency region: {lowest_latency_region[0][0]}")
+ """
def __init__(self):
+ """Initialize the GCPRegions class with predefined Google Cloud Platform regions and their details."""
self.regions = {
"asia-east1": (1, "Taiwan", "China"),
"asia-east2": (2, "Hong Kong", "China"),
@@ -52,13 +72,32 @@ }
def tier1(self) -> list[str]:
+ """Return a list of GCP regions classified as tier 1 based on predefined criteria."""
return [region for region, info in self.regions.items() if info[0] == 1]
def tier2(self) -> list[str]:
+ """Return a list of GCP regions classified as tier 2 based on predefined criteria."""
return [region for region, info in self.regions.items() if info[0] == 2]
@staticmethod
def _ping_region(region: str, attempts: int = 1) -> tuple[str, float, float, float, float]:
+ """Ping a specified GCP region and measure network latency statistics.
+
+ Args:
+ region (str): The GCP region identifier to ping (e.g., 'us-central1').
+ attempts (int, optional): Number of ping attempts to make for calculating statistics.
+
+ Returns:
+ region (str): The GCP region identifier that was pinged.
+ mean_latency (float): Mean latency in milliseconds, or infinity if all pings failed.
+ std_dev (float): Standard deviation of latencies in milliseconds, or infinity if all pings failed.
+ min_latency (float): Minimum latency in milliseconds, or infinity if all pings failed.
+ max_latency (float): Maximum latency in milliseconds, or infinity if all pings failed.
+
+ Examples:
+ >>> region, mean, std, min_lat, max_lat = GCPRegions._ping_region("us-central1", attempts=3)
+ >>> print(f"Region {region} has mean latency: {mean:.2f}ms")
+ """
import requests # scoped as slow import
url = f"https://{region}-docker.pkg.dev"
@@ -85,6 +124,23 @@ tier: int | None = None,
attempts: int = 1,
) -> list[tuple[str, float, float, float, float]]:
+ """Determine the GCP regions with the lowest latency based on ping tests.
+
+ Args:
+ top (int, optional): Number of top regions to return.
+ verbose (bool, optional): If True, prints detailed latency information for all tested regions.
+ tier (int | None, optional): Filter regions by tier (1 or 2). If None, all regions are tested.
+ attempts (int, optional): Number of ping attempts per region.
+
+ Returns:
+ (list[tuple[str, float, float, float, float]]): List of tuples containing region information and latency
+ statistics. Each tuple contains (region, mean_latency, std_dev, min_latency, max_latency).
+
+ Examples:
+ >>> regions = GCPRegions()
+ >>> results = regions.lowest_latency(top=3, verbose=True, tier=1, attempts=2)
+ >>> print(results[0][0]) # Print the name of the lowest latency region
+ """
if verbose:
print(f"Testing GCP regions for latency (with {attempts} {'retry' if attempts == 1 else 'attempts'})...")
@@ -115,4 +171,4 @@ # Usage example
if __name__ == "__main__":
regions = GCPRegions()
- top_3_latency_tier1 = regions.lowest_latency(top=3, verbose=True, tier=1, attempts=3)
+ top_3_latency_tier1 = regions.lowest_latency(top=3, verbose=True, tier=1, attempts=3)
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/hub/google/__init__.py |
Create docstrings for API functions | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
from __future__ import annotations
import numpy as np
import torch
from torch import nn
from torchvision.ops.roi_align import RoIAlign
from ultralytics.nn.modules.transformer import MLP
from ultralytics.nn.modules.utils import _get_clones, inverse_sigmoid
from ultralytics.utils.ops import xywh2xyxy
from .model_misc import gen_sineembed_for_position
class TransformerDecoderLayer(nn.Module):
def __init__(
self,
d_model: int,
dim_feedforward: int,
dropout: float,
cross_attention: nn.Module,
n_heads: int,
use_text_cross_attention: bool = False,
):
super().__init__()
# cross attention
self.cross_attn = cross_attention
self.dropout1 = nn.Dropout(dropout) if dropout > 0 else nn.Identity()
self.norm1 = nn.LayerNorm(d_model)
# cross attention text
self.use_text_cross_attention = use_text_cross_attention
if use_text_cross_attention:
self.ca_text = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
self.catext_dropout = nn.Dropout(dropout) if dropout > 0 else nn.Identity()
self.catext_norm = nn.LayerNorm(d_model)
# self attention
self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
self.dropout2 = nn.Dropout(dropout) if dropout > 0 else nn.Identity()
self.norm2 = nn.LayerNorm(d_model)
# ffn
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.activation = nn.ReLU()
self.dropout3 = nn.Dropout(dropout) if dropout > 0 else nn.Identity()
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.dropout4 = nn.Dropout(dropout) if dropout > 0 else nn.Identity()
self.norm3 = nn.LayerNorm(d_model)
@staticmethod
def with_pos_embed(tensor, pos):
return tensor if pos is None else tensor + pos
def forward_ffn(self, tgt):
tgt2 = self.linear2(self.dropout3(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout4(tgt2)
tgt = self.norm3(tgt)
return tgt
def forward(
self,
# for tgt
tgt: torch.Tensor, # nq, bs, d_model
tgt_query_pos: torch.Tensor = None, # pos for query. MLP(Sine(pos))
memory_text: torch.Tensor = None, # num_token, bs, d_model
text_attention_mask: torch.Tensor = None, # bs, num_token
# for memory
memory: torch.Tensor = None, # hw, bs, d_model
memory_key_padding_mask: torch.Tensor = None,
memory_pos: torch.Tensor = None, # pos for memory
# sa
self_attn_mask: torch.Tensor = None, # mask used for self-attention
cross_attn_mask: torch.Tensor = None, # mask used for cross-attention
# dac
dac=False,
dac_use_selfatt_ln=True,
presence_token=None,
# skip inside deformable attn
**kwargs, # additional kwargs for compatibility
):
# self attention
tgt, tgt_query_pos = self._apply_self_attention(
tgt, tgt_query_pos, dac, dac_use_selfatt_ln, presence_token, self_attn_mask
)
if self.use_text_cross_attention:
tgt2 = self.ca_text(
self.with_pos_embed(tgt, tgt_query_pos),
memory_text.to(tgt.dtype),
memory_text.to(tgt.dtype),
key_padding_mask=text_attention_mask,
)[0]
tgt = tgt + self.catext_dropout(tgt2)
tgt = self.catext_norm(tgt)
if presence_token is not None:
presence_token_mask = torch.zeros_like(cross_attn_mask[:, :1, :])
cross_attn_mask = torch.cat([presence_token_mask, cross_attn_mask], dim=1) # (bs*nheads, 1+nq, hw)
# Cross attention to image
tgt2 = self.cross_attn(
query=self.with_pos_embed(tgt, tgt_query_pos),
key=self.with_pos_embed(memory, memory_pos),
value=memory,
attn_mask=cross_attn_mask,
key_padding_mask=(memory_key_padding_mask.transpose(0, 1) if memory_key_padding_mask is not None else None),
need_weights=False,
)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
# ffn
tgt = self.forward_ffn(tgt.to(memory.dtype))
presence_token_out = None
if presence_token is not None:
presence_token_out = tgt[:1]
tgt = tgt[1:]
return tgt, presence_token_out
def _apply_self_attention(self, tgt, tgt_query_pos, dac, dac_use_selfatt_ln, presence_token, self_attn_mask):
if self.self_attn is None:
return tgt
if dac:
# Split queries for DAC (detect-and-classify)
assert tgt.shape[0] % 2 == 0, "DAC requires even number of queries"
num_o2o_queries = tgt.shape[0] // 2
tgt_o2o = tgt[:num_o2o_queries]
tgt_query_pos_o2o = tgt_query_pos[:num_o2o_queries]
tgt_o2m = tgt[num_o2o_queries:]
else:
tgt_o2o = tgt
tgt_query_pos_o2o = tgt_query_pos
# Handle presence token
if presence_token is not None:
tgt_o2o = torch.cat([presence_token, tgt_o2o], dim=0)
tgt_query_pos_o2o = torch.cat([torch.zeros_like(presence_token), tgt_query_pos_o2o], dim=0).to(
tgt_o2o.dtype
)
tgt_query_pos = torch.cat([torch.zeros_like(presence_token), tgt_query_pos], dim=0)
# Self-attention
q = k = self.with_pos_embed(tgt_o2o, tgt_query_pos_o2o)
tgt2 = self.self_attn(q, k, tgt_o2o, attn_mask=self_attn_mask)[0].to(tgt.dtype)
tgt_o2o = tgt_o2o + self.dropout2(tgt2)
# Recombine and normalize
if dac:
if not dac_use_selfatt_ln:
tgt_o2o = self.norm2(tgt_o2o)
tgt = torch.cat((tgt_o2o, tgt_o2m), dim=0)
if dac_use_selfatt_ln:
tgt = self.norm2(tgt)
else:
tgt = tgt_o2o
tgt = self.norm2(tgt)
return tgt, tgt_query_pos
class TransformerDecoder(nn.Module):
def __init__(
self,
d_model: int,
frozen: bool,
interaction_layer,
layer,
num_layers: int,
num_queries: int,
return_intermediate: bool,
box_refine: bool = False,
num_o2m_queries: int = 0,
dac: bool = False,
boxRPB: str = "none",
# Experimental: An object query for SAM 2 tasks
instance_query: bool = False,
# Defines the number of additional instance queries,
# 1 or 4 are the most likely for single vs multi mask support
num_instances: int = 1, # Irrelevant if instance_query is False
dac_use_selfatt_ln: bool = True,
use_act_checkpoint: bool = False,
compile_mode=None,
presence_token: bool = False,
clamp_presence_logits: bool = True,
clamp_presence_logit_max_val: float = 10.0,
use_normed_output_consistently: bool = True,
separate_box_head_instance: bool = False,
separate_norm_instance: bool = False,
):
super().__init__()
self.d_model = d_model
self.layers = _get_clones(layer, num_layers)
self.fine_layers = (
_get_clones(interaction_layer, num_layers) if interaction_layer is not None else [None] * num_layers
)
self.num_layers = num_layers
self.num_queries = num_queries
self.dac = dac
if dac:
self.num_o2m_queries = num_queries
tot_num_queries = num_queries
else:
self.num_o2m_queries = num_o2m_queries
tot_num_queries = num_queries + num_o2m_queries
self.norm = nn.LayerNorm(d_model)
self.return_intermediate = return_intermediate
self.bbox_embed = MLP(d_model, d_model, 4, 3)
self.query_embed = nn.Embedding(tot_num_queries, d_model)
self.instance_query_embed = None
self.instance_query_reference_points = None
self.use_instance_query = instance_query
self.num_instances = num_instances
self.use_normed_output_consistently = use_normed_output_consistently
self.instance_norm = nn.LayerNorm(d_model) if separate_norm_instance else None
self.instance_bbox_embed = None
if separate_box_head_instance:
self.instance_bbox_embed = MLP(d_model, d_model, 4, 3)
if instance_query:
self.instance_query_embed = nn.Embedding(num_instances, d_model)
self.box_refine = box_refine
if box_refine:
nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0)
nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0)
self.reference_points = nn.Embedding(num_queries, 4)
if instance_query:
self.instance_reference_points = nn.Embedding(num_instances, 4)
assert boxRPB in ["none", "log", "linear", "both"]
self.boxRPB = boxRPB
if boxRPB != "none":
try:
nheads = self.layers[0].cross_attn_image.num_heads
except AttributeError:
nheads = self.layers[0].cross_attn.num_heads
n_input = 4 if boxRPB == "both" else 2
self.boxRPB_embed_x = MLP(n_input, d_model, nheads, 2)
self.boxRPB_embed_y = MLP(n_input, d_model, nheads, 2)
self.compilable_cord_cache = None
self.compilable_stored_size = None
self.coord_cache = {}
self.roi_pooler = (
RoIAlign(output_size=7, spatial_scale=1, sampling_ratio=-1, aligned=True)
if interaction_layer is not None
else None
)
if frozen:
for p in self.parameters():
p.requires_grad_(False)
self.presence_token = None
self.clamp_presence_logits = clamp_presence_logits
self.clamp_presence_logit_max_val = clamp_presence_logit_max_val
if presence_token:
self.presence_token = nn.Embedding(1, d_model)
self.presence_token_head = MLP(d_model, d_model, 1, 3)
self.presence_token_out_norm = nn.LayerNorm(d_model)
self.ref_point_head = MLP(2 * self.d_model, self.d_model, self.d_model, 2)
self.dac_use_selfatt_ln = dac_use_selfatt_ln
self.use_act_checkpoint = use_act_checkpoint
nn.init.normal_(self.query_embed.weight.data)
if self.instance_query_embed is not None:
nn.init.normal_(self.instance_query_embed.weight.data)
assert self.roi_pooler is None
assert self.return_intermediate, "support return_intermediate only"
assert self.box_refine, "support box refine only"
self.compile_mode = compile_mode
self.compiled = False
# We defer compilation till after the first forward, to first warm-up the boxRPB cache
# assign layer index to each layer so that some layers can decide what to do
# based on which layer index they are (e.g. cross attention to memory bank only
# in selected layers)
for layer_idx, layer in enumerate(self.layers):
layer.layer_idx = layer_idx
@staticmethod
def _get_coords(H, W, device, dtype):
coords_h = torch.arange(0, H, dtype=dtype, device=device) / H
coords_w = torch.arange(0, W, dtype=dtype, device=device) / W
return coords_h, coords_w
def _get_rpb_matrix(self, reference_boxes, feat_size):
H, W = feat_size
boxes_xyxy = xywh2xyxy(reference_boxes).transpose(0, 1)
bs, num_queries, _ = boxes_xyxy.shape
if self.compilable_cord_cache is None:
self.compilable_cord_cache = self._get_coords(H, W, reference_boxes.device, reference_boxes.dtype)
self.compilable_stored_size = (H, W)
if torch.compiler.is_dynamo_compiling() or self.compilable_stored_size == (
H,
W,
):
# good, hitting the cache, will be compilable
coords_h, coords_w = self.compilable_cord_cache
else:
# cache miss, will create compilation issue
# In case we're not compiling, we'll still rely on the dict-based cache
if feat_size not in self.coord_cache:
self.coord_cache[feat_size] = self._get_coords(H, W, reference_boxes.device, reference_boxes.dtype)
coords_h, coords_w = self.coord_cache[feat_size]
assert coords_h.shape == (H,)
assert coords_w.shape == (W,)
deltas_y = coords_h.view(1, -1, 1) - boxes_xyxy.reshape(-1, 1, 4)[:, :, 1:4:2]
deltas_y = deltas_y.view(bs, num_queries, -1, 2)
deltas_x = coords_w.view(1, -1, 1) - boxes_xyxy.reshape(-1, 1, 4)[:, :, 0:3:2]
deltas_x = deltas_x.view(bs, num_queries, -1, 2)
if self.boxRPB in ["log", "both"]:
deltas_x_log = deltas_x * 8 # normalize to -8, 8
deltas_x_log = torch.sign(deltas_x_log) * torch.log2(torch.abs(deltas_x_log) + 1.0) / np.log2(8)
deltas_y_log = deltas_y * 8 # normalize to -8, 8
deltas_y_log = torch.sign(deltas_y_log) * torch.log2(torch.abs(deltas_y_log) + 1.0) / np.log2(8)
if self.boxRPB == "log":
deltas_x = deltas_x_log
deltas_y = deltas_y_log
else:
deltas_x = torch.cat([deltas_x, deltas_x_log], dim=-1)
deltas_y = torch.cat([deltas_y, deltas_y_log], dim=-1)
if self.training:
assert self.use_act_checkpoint, "activation ckpt not enabled in decoder"
deltas_x = self.boxRPB_embed_x(x=deltas_x) # bs, num_queries, W, n_heads
deltas_y = self.boxRPB_embed_y(x=deltas_y) # bs, num_queries, H, n_heads
if not torch.compiler.is_dynamo_compiling():
assert deltas_x.shape[:3] == (bs, num_queries, W)
assert deltas_y.shape[:3] == (bs, num_queries, H)
B = deltas_y.unsqueeze(3) + deltas_x.unsqueeze(2) # bs, num_queries, H, W, n_heads
if not torch.compiler.is_dynamo_compiling():
assert B.shape[:4] == (bs, num_queries, H, W)
B = B.flatten(2, 3) # bs, num_queries, H*W, n_heads
B = B.permute(0, 3, 1, 2) # bs, n_heads, num_queries, H*W
B = B.contiguous() # memeff attn likes ordered strides
if not torch.compiler.is_dynamo_compiling():
assert B.shape[2:] == (num_queries, H * W)
return B
def forward(
self,
tgt,
memory,
tgt_mask: torch.Tensor = None,
memory_mask: torch.Tensor = None,
memory_key_padding_mask: torch.Tensor = None,
pos: torch.Tensor = None,
reference_boxes: torch.Tensor = None, # num_queries, bs, 4
# for memory
spatial_shapes: torch.Tensor = None, # bs, num_levels, 2
valid_ratios: torch.Tensor = None,
# for text
memory_text: torch.Tensor = None,
text_attention_mask: torch.Tensor = None,
# if `apply_dac` is None, it will default to `self.dac`
apply_dac: bool | None = None,
is_instance_prompt=False,
decoder_extra_kwargs: dict | None = None,
# ROI memory bank
obj_roi_memory_feat=None,
obj_roi_memory_mask=None,
box_head_trk=None,
):
if memory_mask is not None:
assert self.boxRPB == "none", (
"inputting a memory_mask in the presence of boxRPB is unexpected/not implemented"
)
apply_dac = apply_dac if apply_dac is not None else self.dac
if apply_dac:
assert (tgt.shape[0] == self.num_queries) or (
self.use_instance_query and (tgt.shape[0] == self.instance_query_embed.num_embeddings)
)
tgt = tgt.repeat(2, 1, 1)
# note that we don't tile tgt_mask, since DAC doesn't
# use self-attention in o2m queries
if reference_boxes is not None:
assert (reference_boxes.shape[0] == self.num_queries) or (
self.use_instance_query and (reference_boxes.shape[0] == self.instance_query_embed.num_embeddings)
)
reference_boxes = reference_boxes.repeat(2, 1, 1)
bs = tgt.shape[1]
intermediate = []
intermediate_presence_logits = []
presence_feats = None
if self.box_refine:
if reference_boxes is None:
# In this case, we're in a one-stage model, so we generate the reference boxes
reference_boxes = self.reference_points.weight.unsqueeze(1)
reference_boxes = reference_boxes.repeat(2, bs, 1) if apply_dac else reference_boxes.repeat(1, bs, 1)
reference_boxes = reference_boxes.sigmoid()
intermediate_ref_boxes = [reference_boxes]
else:
reference_boxes = None
intermediate_ref_boxes = None
output = tgt
presence_out = None
if self.presence_token is not None and is_instance_prompt is False:
# expand to batch dim
presence_out = self.presence_token.weight[None].expand(1, bs, -1)
box_head = self.bbox_embed
if is_instance_prompt and self.instance_bbox_embed is not None:
box_head = self.instance_bbox_embed
out_norm = self.norm
if is_instance_prompt and self.instance_norm is not None:
out_norm = self.instance_norm
for layer_idx, layer in enumerate(self.layers):
reference_points_input = (
reference_boxes[:, :, None] * torch.cat([valid_ratios, valid_ratios], -1)[None, :]
) # nq, bs, nlevel, 4
query_sine_embed = gen_sineembed_for_position(
reference_points_input[:, :, 0, :], self.d_model
) # nq, bs, d_model*2
# conditional query
query_pos = self.ref_point_head(query_sine_embed) # nq, bs, d_model
if self.boxRPB != "none" and reference_boxes is not None:
assert spatial_shapes.shape[0] == 1, "only single scale support implemented"
memory_mask = self._get_rpb_matrix(
reference_boxes,
(spatial_shapes[0, 0], spatial_shapes[0, 1]),
)
memory_mask = memory_mask.flatten(0, 1) # (bs*n_heads, nq, H*W)
if self.training:
assert self.use_act_checkpoint, "Activation checkpointing not enabled in the decoder"
output, presence_out = layer(
tgt=output,
tgt_query_pos=query_pos,
memory_text=memory_text,
text_attention_mask=text_attention_mask,
memory=memory,
memory_key_padding_mask=memory_key_padding_mask,
memory_pos=pos,
self_attn_mask=tgt_mask,
cross_attn_mask=memory_mask,
dac=apply_dac,
dac_use_selfatt_ln=self.dac_use_selfatt_ln,
presence_token=presence_out,
**(decoder_extra_kwargs or {}),
# ROI memory bank
obj_roi_memory_feat=obj_roi_memory_feat,
obj_roi_memory_mask=obj_roi_memory_mask,
)
# iter update
if self.box_refine:
reference_before_sigmoid = inverse_sigmoid(reference_boxes)
if box_head_trk is None:
# delta_unsig = self.bbox_embed(output)
if not self.use_normed_output_consistently:
delta_unsig = box_head(output)
else:
delta_unsig = box_head(out_norm(output))
else:
# box_head_trk use a separate box head for tracking queries
Q_det = decoder_extra_kwargs["Q_det"]
assert output.size(0) >= Q_det
delta_unsig_det = self.bbox_embed(output[:Q_det])
delta_unsig_trk = box_head_trk(output[Q_det:])
delta_unsig = torch.cat([delta_unsig_det, delta_unsig_trk], dim=0)
outputs_unsig = delta_unsig + reference_before_sigmoid
new_reference_points = outputs_unsig.sigmoid()
reference_boxes = new_reference_points.detach()
if layer_idx != self.num_layers - 1:
intermediate_ref_boxes.append(new_reference_points)
else:
raise NotImplementedError("not implemented yet")
intermediate.append(out_norm(output))
if self.presence_token is not None and is_instance_prompt is False:
# norm, mlp head
intermediate_layer_presence_logits = self.presence_token_head(
self.presence_token_out_norm(presence_out)
).squeeze(-1)
# clamp to mitigate numerical issues
if self.clamp_presence_logits:
intermediate_layer_presence_logits = intermediate_layer_presence_logits.clamp(
min=-self.clamp_presence_logit_max_val,
max=self.clamp_presence_logit_max_val,
)
intermediate_presence_logits.append(intermediate_layer_presence_logits)
presence_feats = presence_out.clone()
if not self.compiled and self.compile_mode is not None:
self.forward = torch.compile(self.forward, mode=self.compile_mode, fullgraph=True)
self.compiled = True
return (
torch.stack(intermediate),
torch.stack(intermediate_ref_boxes),
(
torch.stack(intermediate_presence_logits)
if self.presence_token is not None and is_instance_prompt is False
else None
),
presence_feats,
) | --- +++ @@ -1,6 +1,10 @@ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
+"""
+Transformer decoder.
+Inspired by PyTorch's version; adds the pre-norm variant.
+"""
from __future__ import annotations
@@ -17,6 +21,7 @@
class TransformerDecoderLayer(nn.Module):
+ """TransformerDecoderLayer is made up of self-attn, cross-attn, and feedforward network (FFN)."""
def __init__(
self,
@@ -27,6 +32,7 @@ n_heads: int,
use_text_cross_attention: bool = False,
):
+ """Initialize the TransformerDecoderLayer."""
super().__init__()
# cross attention
self.cross_attn = cross_attention
@@ -55,9 +61,11 @@
@staticmethod
def with_pos_embed(tensor, pos):
+ """Add positional embedding to the tensor."""
return tensor if pos is None else tensor + pos
def forward_ffn(self, tgt):
+ """Feedforward network forward pass."""
tgt2 = self.linear2(self.dropout3(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout4(tgt2)
tgt = self.norm3(tgt)
@@ -84,6 +92,7 @@ # skip inside deformable attn
**kwargs, # additional kwargs for compatibility
):
+ """Forward pass of the TransformerDecoderLayer."""
# self attention
tgt, tgt_query_pos = self._apply_self_attention(
tgt, tgt_query_pos, dac, dac_use_selfatt_ln, presence_token, self_attn_mask
@@ -127,6 +136,7 @@ return tgt, presence_token_out
def _apply_self_attention(self, tgt, tgt_query_pos, dac, dac_use_selfatt_ln, presence_token, self_attn_mask):
+ """Apply self-attention with optional DAC splitting."""
if self.self_attn is None:
return tgt
@@ -169,6 +179,7 @@
class TransformerDecoder(nn.Module):
+ """Transformer Decoder consisting of multiple layers."""
def __init__(
self,
@@ -198,6 +209,7 @@ separate_box_head_instance: bool = False,
separate_norm_instance: bool = False,
):
+ """Initialize the TransformerDecoder."""
super().__init__()
self.d_model = d_model
self.layers = _get_clones(layer, num_layers)
@@ -294,11 +306,13 @@
@staticmethod
def _get_coords(H, W, device, dtype):
+ """Get normalized coordinates for height and width."""
coords_h = torch.arange(0, H, dtype=dtype, device=device) / H
coords_w = torch.arange(0, W, dtype=dtype, device=device) / W
return coords_h, coords_w
def _get_rpb_matrix(self, reference_boxes, feat_size):
+ """Get the relative position bias (RPB) matrix for box-relative position bias."""
H, W = feat_size
boxes_xyxy = xywh2xyxy(reference_boxes).transpose(0, 1)
bs, num_queries, _ = boxes_xyxy.shape
@@ -383,6 +397,7 @@ obj_roi_memory_mask=None,
box_head_trk=None,
):
+ """Forward pass of the TransformerDecoder."""
if memory_mask is not None:
assert self.boxRPB == "none", (
"inputting a memory_mask in the presence of boxRPB is unexpected/not implemented"
@@ -528,4 +543,4 @@ else None
),
presence_feats,
- )
+ )
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/sam/sam3/decoder.py |
Generate docstrings with examples | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
__version__ = "8.4.23"
import importlib
import os
from typing import TYPE_CHECKING
# Set ENV variables (place before imports)
if not os.environ.get("OMP_NUM_THREADS"):
os.environ["OMP_NUM_THREADS"] = "1" # default for reduced CPU utilization during training
from ultralytics.utils import ASSETS, SETTINGS
from ultralytics.utils.checks import check_yolo as checks
from ultralytics.utils.downloads import download
settings = SETTINGS
MODELS = ("YOLO", "YOLOWorld", "YOLOE", "NAS", "SAM", "FastSAM", "RTDETR")
__all__ = (
"__version__",
"ASSETS",
*MODELS,
"checks",
"download",
"settings",
)
if TYPE_CHECKING:
# Enable hints for type checkers
from ultralytics.models import YOLO, YOLOWorld, YOLOE, NAS, SAM, FastSAM, RTDETR # noqa
def __getattr__(name: str):
if name in MODELS:
return getattr(importlib.import_module("ultralytics.models"), name)
raise AttributeError(f"module {__name__} has no attribute {name}")
def __dir__():
return sorted(set(globals()) | set(MODELS))
if __name__ == "__main__":
print(__version__) | --- +++ @@ -33,14 +33,16 @@
def __getattr__(name: str):
+ """Lazy-import model classes on first access."""
if name in MODELS:
return getattr(importlib.import_module("ultralytics.models"), name)
raise AttributeError(f"module {__name__} has no attribute {name}")
def __dir__():
+ """Extend dir() to include lazily available model names for IDE autocompletion."""
return sorted(set(globals()) | set(MODELS))
if __name__ == "__main__":
- print(__version__)
+ print(__version__)
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/__init__.py |
Add structured docstrings to improve clarity | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import math
from typing import Any
import torch
import torch.nn.functional as F
def select_closest_cond_frames(frame_idx: int, cond_frame_outputs: dict[int, Any], max_cond_frame_num: int):
if max_cond_frame_num == -1 or len(cond_frame_outputs) <= max_cond_frame_num:
selected_outputs = cond_frame_outputs
unselected_outputs = {}
else:
assert max_cond_frame_num >= 2, "we should allow using 2+ conditioning frames"
selected_outputs = {}
# The closest conditioning frame before `frame_idx` (if any)
idx_before = max((t for t in cond_frame_outputs if t < frame_idx), default=None)
if idx_before is not None:
selected_outputs[idx_before] = cond_frame_outputs[idx_before]
# The closest conditioning frame after `frame_idx` (if any)
idx_after = min((t for t in cond_frame_outputs if t >= frame_idx), default=None)
if idx_after is not None:
selected_outputs[idx_after] = cond_frame_outputs[idx_after]
# Add other temporally closest conditioning frames until reaching a total
# of `max_cond_frame_num` conditioning frames.
num_remain = max_cond_frame_num - len(selected_outputs)
inds_remain = sorted(
(t for t in cond_frame_outputs if t not in selected_outputs),
key=lambda x: abs(x - frame_idx),
)[:num_remain]
selected_outputs.update((t, cond_frame_outputs[t]) for t in inds_remain)
unselected_outputs = {t: v for t, v in cond_frame_outputs.items() if t not in selected_outputs}
return selected_outputs, unselected_outputs
def get_1d_sine_pe(pos_inds: torch.Tensor, dim: int, temperature: float = 10000):
pe_dim = dim // 2
dim_t = torch.arange(pe_dim, dtype=pos_inds.dtype, device=pos_inds.device)
dim_t = temperature ** (2 * (dim_t // 2) / pe_dim)
pos_embed = pos_inds.unsqueeze(-1) / dim_t
pos_embed = torch.cat([pos_embed.sin(), pos_embed.cos()], dim=-1)
return pos_embed
def init_t_xy(end_x: int, end_y: int, scale: float = 1.0, offset: int = 0):
t = torch.arange(end_x * end_y, dtype=torch.float32)
t_x = (t % end_x).float()
t_y = torch.div(t, end_x, rounding_mode="floor").float()
return t_x * scale + offset, t_y * scale + offset
def compute_axial_cis(dim: int, end_x: int, end_y: int, theta: float = 10000.0, scale_pos: float = 1.0):
freqs_x = 1.0 / (theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim))
freqs_y = 1.0 / (theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim))
t_x, t_y = init_t_xy(end_x, end_y, scale=scale_pos)
freqs_x = torch.outer(t_x, freqs_x)
freqs_y = torch.outer(t_y, freqs_y)
freqs_cis_x = torch.polar(torch.ones_like(freqs_x), freqs_x)
freqs_cis_y = torch.polar(torch.ones_like(freqs_y), freqs_y)
return torch.cat([freqs_cis_x, freqs_cis_y], dim=-1)
def reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor):
ndim = x.ndim
assert ndim >= 2
assert freqs_cis.shape == (x.shape[-2], x.shape[-1])
shape = [d if i >= ndim - 2 else 1 for i, d in enumerate(x.shape)]
return freqs_cis.view(*shape)
def apply_rotary_enc(
xq: torch.Tensor,
xk: torch.Tensor,
freqs_cis: torch.Tensor,
repeat_freqs_k: bool = False,
):
xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2)) if xk.shape[-2] != 0 else None
freqs_cis = reshape_for_broadcast(freqs_cis, xq_)
xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3)
if xk_ is None:
# No keys to rotate, due to dropout
return xq_out.type_as(xq).to(xq.device), xk
# Repeat freqs along seq_len dim to match k seq_len
if repeat_freqs_k and (r := xk_.shape[-2] // xq_.shape[-2]) > 1:
# MPS doesn't support repeat on complex tensors, decompose to real representation
if freqs_cis.device.type == "mps":
freqs_cis = torch.view_as_real(freqs_cis)
freqs_cis = freqs_cis.repeat(*([1] * (freqs_cis.ndim - 3)), r, 1, 1)
freqs_cis = torch.view_as_complex(freqs_cis.contiguous())
else:
freqs_cis = freqs_cis.repeat(*([1] * (freqs_cis.ndim - 2)), r, 1)
xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3)
return xq_out.type_as(xq).to(xq.device), xk_out.type_as(xk).to(xk.device)
def window_partition(x: torch.Tensor, window_size: int):
B, H, W, C = x.shape
pad_h = (window_size - H % window_size) % window_size
pad_w = (window_size - W % window_size) % window_size
if pad_h > 0 or pad_w > 0:
x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
Hp, Wp = H + pad_h, W + pad_w
x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows, (Hp, Wp)
def window_unpartition(windows: torch.Tensor, window_size: int, pad_hw: tuple[int, int], hw: tuple[int, int]):
Hp, Wp = pad_hw
H, W = hw
B = windows.shape[0] // (Hp * Wp // window_size // window_size)
x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)
if Hp > H or Wp > W:
x = x[:, :H, :W, :].contiguous()
return x
def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
max_rel_dist = int(2 * max(q_size, k_size) - 1)
# Interpolate rel pos if needed.
if rel_pos.shape[0] != max_rel_dist:
# Interpolate rel pos.
rel_pos_resized = F.interpolate(
rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
size=max_rel_dist,
mode="linear",
)
rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)
else:
rel_pos_resized = rel_pos
# Scale the coords with short length if shapes for q and k are different.
q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)
k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)
relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)
return rel_pos_resized[relative_coords.long()]
def add_decomposed_rel_pos(
attn: torch.Tensor,
q: torch.Tensor,
rel_pos_h: torch.Tensor,
rel_pos_w: torch.Tensor,
q_size: tuple[int, int],
k_size: tuple[int, int],
) -> torch.Tensor:
q_h, q_w = q_size
k_h, k_w = k_size
Rh = get_rel_pos(q_h, k_h, rel_pos_h)
Rw = get_rel_pos(q_w, k_w, rel_pos_w)
B, _, dim = q.shape
r_q = q.reshape(B, q_h, q_w, dim)
rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh)
rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw)
attn = (attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]).view(
B, q_h * q_w, k_h * k_w
)
return attn
def get_abs_pos(
abs_pos: torch.Tensor,
has_cls_token: bool,
hw: tuple[int, int],
retain_cls_token: bool = False,
tiling: bool = False,
) -> torch.Tensor:
if retain_cls_token:
assert has_cls_token
h, w = hw
if has_cls_token:
cls_pos = abs_pos[:, :1]
abs_pos = abs_pos[:, 1:]
xy_num = abs_pos.shape[1]
size = int(math.sqrt(xy_num))
assert size * size == xy_num
if size != h or size != w:
new_abs_pos = abs_pos.reshape(1, size, size, -1).permute(0, 3, 1, 2)
if tiling:
new_abs_pos = new_abs_pos.tile([1, 1] + [x // y + 1 for x, y in zip((h, w), new_abs_pos.shape[2:])])[
:, :, :h, :w
]
else:
new_abs_pos = F.interpolate(
new_abs_pos,
size=(h, w),
mode="bicubic",
align_corners=False,
)
if not retain_cls_token:
return new_abs_pos.permute(0, 2, 3, 1)
else:
# add cls_token back, flatten spatial dims
assert has_cls_token
return torch.cat(
[cls_pos, new_abs_pos.permute(0, 2, 3, 1).reshape(1, h * w, -1)],
dim=1,
)
else:
if not retain_cls_token:
return abs_pos.reshape(1, h, w, -1)
else:
assert has_cls_token
return torch.cat([cls_pos, abs_pos], dim=1)
def concat_rel_pos(
q: torch.Tensor,
k: torch.Tensor,
q_hw: tuple[int, int],
k_hw: tuple[int, int],
rel_pos_h: torch.Tensor,
rel_pos_w: torch.Tensor,
rescale: bool = False,
relative_coords: torch.Tensor = None,
) -> tuple[torch.Tensor, torch.Tensor]:
q_h, q_w = q_hw
k_h, k_w = k_hw
assert (q_h == q_w) and (k_h == k_w), "only square inputs supported"
if relative_coords is not None:
Rh = rel_pos_h[relative_coords]
Rw = rel_pos_w[relative_coords]
else:
Rh = get_rel_pos(q_h, k_h, rel_pos_h)
Rw = get_rel_pos(q_w, k_w, rel_pos_w)
B, _, dim = q.shape
r_q = q.reshape(B, q_h, q_w, dim)
old_scale = dim**0.5
new_scale = (dim + k_h + k_w) ** 0.5 if rescale else old_scale # for sdpa
# attn will be divided by new_scale, but we want to divide q by old_scale
scale_ratio = new_scale / old_scale
rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh) * new_scale # (B, q_h, q_w, k_h)
rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw) * new_scale # (B, q_h, q_w, k_w)
eye_h = torch.eye(k_h, dtype=q.dtype, device=q.device)
eye_w = torch.eye(k_w, dtype=q.dtype, device=q.device)
eye_h = eye_h.view(1, k_h, 1, k_h).expand([B, k_h, k_w, k_h])
eye_w = eye_w.view(1, 1, k_w, k_w).expand([B, k_h, k_w, k_w])
q = torch.cat([r_q * scale_ratio, rel_h, rel_w], dim=-1).view(B, q_h * q_w, -1)
k = torch.cat([k.view(B, k_h, k_w, -1), eye_h, eye_w], dim=-1).view(B, k_h * k_w, -1)
return q, k | --- +++ @@ -10,6 +10,27 @@
def select_closest_cond_frames(frame_idx: int, cond_frame_outputs: dict[int, Any], max_cond_frame_num: int):
+ """Select the closest conditioning frames to a given frame index.
+
+ Args:
+ frame_idx (int): Current frame index.
+ cond_frame_outputs (dict[int, Any]): Dictionary of conditioning frame outputs keyed by frame indices.
+ max_cond_frame_num (int): Maximum number of conditioning frames to select.
+
+ Returns:
+ selected_outputs (dict[int, Any]): Selected items from cond_frame_outputs.
+ unselected_outputs (dict[int, Any]): Items not selected from cond_frame_outputs.
+
+ Examples:
+ >>> frame_idx = 5
+ >>> cond_frame_outputs = {1: "a", 3: "b", 7: "c", 9: "d"}
+ >>> max_cond_frame_num = 2
+ >>> selected, unselected = select_closest_cond_frames(frame_idx, cond_frame_outputs, max_cond_frame_num)
+ >>> print(selected)
+ {3: 'b', 7: 'c'}
+ >>> print(unselected)
+ {1: 'a', 9: 'd'}
+ """
if max_cond_frame_num == -1 or len(cond_frame_outputs) <= max_cond_frame_num:
selected_outputs = cond_frame_outputs
unselected_outputs = {}
@@ -41,6 +62,22 @@
def get_1d_sine_pe(pos_inds: torch.Tensor, dim: int, temperature: float = 10000):
+ """Generate 1D sinusoidal positional embeddings for given positions and dimensions.
+
+ Args:
+ pos_inds (torch.Tensor): Position indices for which to generate embeddings.
+ dim (int): Dimension of the positional embeddings. Should be an even number.
+ temperature (float, optional): Scaling factor for the frequency of the sinusoidal functions.
+
+ Returns:
+ (torch.Tensor): Sinusoidal positional embeddings with shape (pos_inds.shape, dim).
+
+ Examples:
+ >>> pos = torch.tensor([0, 1, 2, 3])
+ >>> embeddings = get_1d_sine_pe(pos, 128)
+ >>> embeddings.shape
+ torch.Size([4, 128])
+ """
pe_dim = dim // 2
dim_t = torch.arange(pe_dim, dtype=pos_inds.dtype, device=pos_inds.device)
dim_t = temperature ** (2 * (dim_t // 2) / pe_dim)
@@ -51,6 +88,28 @@
def init_t_xy(end_x: int, end_y: int, scale: float = 1.0, offset: int = 0):
+ """Initialize 1D and 2D coordinate tensors for a grid of specified dimensions.
+
+ This function creates coordinate tensors for a grid with dimensions end_x × end_y. It generates a linear index
+ tensor and corresponding x and y coordinate tensors.
+
+ Args:
+ end_x (int): Width of the grid (number of columns).
+ end_y (int): Height of the grid (number of rows).
+ scale (float): Scaling factor to apply to the coordinates.
+ offset (int): Offset to add to the coordinates.
+
+ Returns:
+ t_x (torch.Tensor): X-coordinates for each position, with shape (end_x * end_y).
+ t_y (torch.Tensor): Y-coordinates for each position, with shape (end_x * end_y).
+
+ Examples:
+ >>> t_x, t_y = init_t_xy(3, 2)
+ >>> print(t_x)
+ tensor([0., 1., 2., 0., 1., 2.])
+ >>> print(t_y)
+ tensor([0., 0., 0., 1., 1., 1.])
+ """
t = torch.arange(end_x * end_y, dtype=torch.float32)
t_x = (t % end_x).float()
t_y = torch.div(t, end_x, rounding_mode="floor").float()
@@ -58,6 +117,27 @@
def compute_axial_cis(dim: int, end_x: int, end_y: int, theta: float = 10000.0, scale_pos: float = 1.0):
+ """Compute axial complex exponential positional encodings for 2D spatial positions in a grid.
+
+ This function generates complex exponential positional encodings for a 2D grid of spatial positions, using separate
+ frequency components for the x and y dimensions.
+
+ Args:
+ dim (int): Dimension of the positional encoding.
+ end_x (int): Width of the 2D grid.
+ end_y (int): Height of the 2D grid.
+ theta (float, optional): Scaling factor for frequency computation.
+ scale_pos (float, optional): Scaling factor for position coordinates.
+
+ Returns:
+ (torch.Tensor): Complex exponential positional encodings with shape (end_x*end_y, dim//2).
+
+ Examples:
+ >>> dim, end_x, end_y = 128, 8, 8
+ >>> freqs_cis = compute_axial_cis(dim, end_x, end_y)
+ >>> freqs_cis.shape
+ torch.Size([64, 64])
+ """
freqs_x = 1.0 / (theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim))
freqs_y = 1.0 / (theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim))
@@ -70,6 +150,21 @@
def reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor):
+ """Reshape frequency tensor for broadcasting with input tensor.
+
+ Reshapes a frequency tensor to ensure dimensional compatibility for broadcasting with an input tensor. This function
+ is typically used in positional encoding operations.
+
+ Args:
+ freqs_cis (torch.Tensor): Frequency tensor with shape matching the last two dimensions of x.
+ x (torch.Tensor): Input tensor to broadcast with.
+
+ Returns:
+ (torch.Tensor): Reshaped frequency tensor ready for broadcasting with the input tensor.
+
+ Raises:
+ AssertionError: If the shape of freqs_cis doesn't match the last two dimensions of x.
+ """
ndim = x.ndim
assert ndim >= 2
assert freqs_cis.shape == (x.shape[-2], x.shape[-1])
@@ -83,6 +178,30 @@ freqs_cis: torch.Tensor,
repeat_freqs_k: bool = False,
):
+ """Apply rotary positional encoding to query and key tensors.
+
+ This function applies rotary positional encoding (RoPE) to query and key tensors using complex-valued frequency
+ components. RoPE is a technique that injects relative position information into self-attention mechanisms.
+
+ Args:
+ xq (torch.Tensor): Query tensor to encode with positional information.
+ xk (torch.Tensor): Key tensor to encode with positional information.
+ freqs_cis (torch.Tensor): Complex-valued frequency components for rotary encoding with shape matching the last
+ two dimensions of xq.
+ repeat_freqs_k (bool, optional): Whether to repeat frequency components along sequence length dimension to match
+ key sequence length.
+
+ Returns:
+ xq_out (torch.Tensor): Query tensor with rotary positional encoding applied.
+ xk_out (torch.Tensor): Key tensor with rotary positional encoding applied, or original xk if xk is empty.
+
+ Examples:
+ >>> import torch
+ >>> xq = torch.randn(2, 8, 16, 64) # [batch, heads, seq_len, dim]
+ >>> xk = torch.randn(2, 8, 16, 64)
+ >>> freqs_cis = compute_axial_cis(64, 4, 4) # For a 4x4 spatial grid with dim=64
+ >>> q_encoded, k_encoded = apply_rotary_enc(xq, xk, freqs_cis)
+ """
xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2)) if xk.shape[-2] != 0 else None
freqs_cis = reshape_for_broadcast(freqs_cis, xq_)
@@ -104,6 +223,22 @@
def window_partition(x: torch.Tensor, window_size: int):
+ """Partition input tensor into non-overlapping windows with padding if needed.
+
+ Args:
+ x (torch.Tensor): Input tensor with shape (B, H, W, C).
+ window_size (int): Size of each window.
+
+ Returns:
+ windows (torch.Tensor): Partitioned windows with shape (B * num_windows, window_size, window_size, C).
+ padded_h_w (tuple[int, int]): Padded height and width before partition.
+
+ Examples:
+ >>> x = torch.randn(1, 16, 16, 3)
+ >>> windows, (Hp, Wp) = window_partition(x, window_size=4)
+ >>> print(windows.shape, Hp, Wp)
+ torch.Size([16, 4, 4, 3]) 16 16
+ """
B, H, W, C = x.shape
pad_h = (window_size - H % window_size) % window_size
@@ -118,6 +253,31 @@
def window_unpartition(windows: torch.Tensor, window_size: int, pad_hw: tuple[int, int], hw: tuple[int, int]):
+ """Unpartition windowed sequences into original sequences and remove padding.
+
+ This function reverses the windowing process, reconstructing the original input from windowed segments and removing
+ any padding that was added during the windowing process.
+
+ Args:
+ windows (torch.Tensor): Input tensor of windowed sequences with shape (B * num_windows, window_size,
+ window_size, C), where B is the batch size, num_windows is the number of windows, window_size is the size of
+ each window, and C is the number of channels.
+ window_size (int): Size of each window.
+ pad_hw (tuple[int, int]): Padded height and width (Hp, Wp) of the input before windowing.
+ hw (tuple[int, int]): Original height and width (H, W) of the input before padding and windowing.
+
+ Returns:
+ (torch.Tensor): Unpartitioned sequences with shape (B, H, W, C), where B is the batch size, H and W are the
+ original height and width, and C is the number of channels.
+
+ Examples:
+ >>> windows = torch.rand(32, 8, 8, 64) # 32 windows of size 8x8 with 64 channels
+ >>> pad_hw = (16, 16) # Padded height and width
+ >>> hw = (15, 14) # Original height and width
+ >>> x = window_unpartition(windows, window_size=8, pad_hw=pad_hw, hw=hw)
+ >>> print(x.shape)
torch.Size([8, 15, 14, 64])
+ """
Hp, Wp = pad_hw
H, W = hw
B = windows.shape[0] // (Hp * Wp // window_size // window_size)
@@ -130,6 +290,24 @@
def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
+ """Extract relative positional embeddings based on query and key sizes.
+
+ Args:
+ q_size (int): Size of the query.
+ k_size (int): Size of the key.
+ rel_pos (torch.Tensor): Relative position embeddings with shape (L, C), where L is the maximum relative distance
+ and C is the embedding dimension.
+
+ Returns:
+ (torch.Tensor): Extracted positional embeddings according to relative positions, with shape (q_size, k_size, C).
+
+ Examples:
+ >>> q_size, k_size = 8, 16
+ >>> rel_pos = torch.randn(31, 64) # 31 = 2 * max(8, 16) - 1
+ >>> extracted_pos = get_rel_pos(q_size, k_size, rel_pos)
+ >>> print(extracted_pos.shape)
+ torch.Size([8, 16, 64])
+ """
max_rel_dist = int(2 * max(q_size, k_size) - 1)
# Interpolate rel pos if needed.
if rel_pos.shape[0] != max_rel_dist:
@@ -159,6 +337,38 @@ q_size: tuple[int, int],
k_size: tuple[int, int],
) -> torch.Tensor:
+ """Add decomposed Relative Positional Embeddings to the attention map.
+
+ This function calculates and applies decomposed Relative Positional Embeddings as described in the MVITv2
+ paper. It enhances the attention mechanism by incorporating spatial relationships between query and key
+ positions.
+
+ Args:
+ attn (torch.Tensor): Attention map with shape (B, q_h * q_w, k_h * k_w).
+ q (torch.Tensor): Query tensor in the attention layer with shape (B, q_h * q_w, C).
+ rel_pos_h (torch.Tensor): Relative position embeddings for height axis with shape (Lh, C).
+ rel_pos_w (torch.Tensor): Relative position embeddings for width axis with shape (Lw, C).
+ q_size (tuple[int, int]): Spatial sequence size of query q as (q_h, q_w).
+ k_size (tuple[int, int]): Spatial sequence size of key k as (k_h, k_w).
+
+ Returns:
+ (torch.Tensor): Updated attention map with added relative positional embeddings, shape (B, q_h * q_w, k_h *
+ k_w).
+
+ Examples:
+ >>> B, C, q_h, q_w, k_h, k_w = 1, 64, 8, 8, 8, 8
+ >>> attn = torch.rand(B, q_h * q_w, k_h * k_w)
+ >>> q = torch.rand(B, q_h * q_w, C)
+ >>> rel_pos_h = torch.rand(2 * max(q_h, k_h) - 1, C)
+ >>> rel_pos_w = torch.rand(2 * max(q_w, k_w) - 1, C)
+ >>> q_size, k_size = (q_h, q_w), (k_h, k_w)
+ >>> updated_attn = add_decomposed_rel_pos(attn, q, rel_pos_h, rel_pos_w, q_size, k_size)
+ >>> print(updated_attn.shape)
+ torch.Size([1, 64, 64])
+
+ References:
+ https://github.com/facebookresearch/mvit/blob/main/mvit/models/attention.py
+ """
q_h, q_w = q_size
k_h, k_w = k_size
Rh = get_rel_pos(q_h, k_h, rel_pos_h)
@@ -183,6 +393,20 @@ retain_cls_token: bool = False,
tiling: bool = False,
) -> torch.Tensor:
+ """Calculate absolute positional embeddings. If needed, resize embeddings and remove cls_token dimension for the
+ original embeddings.
+
+ Args:
+ abs_pos (torch.Tensor): Absolute positional embeddings with shape (1, num_position, C).
+ has_cls_token (bool): If true, has 1 embedding in abs_pos for cls token.
+ hw (tuple[int, int]): Size of input image tokens.
+ retain_cls_token (bool): Whether to retain the cls_token.
+ tiling (bool): Whether to tile the embeddings, *instead* of interpolation (a la abs_win).
+
+ Returns:
+ (torch.Tensor): Absolute positional embeddings after processing with shape (1, H, W, C) if retain_cls_token is
+ False, otherwise (1, 1+H*W, C).
+ """
if retain_cls_token:
assert has_cls_token
@@ -237,6 +461,22 @@ rescale: bool = False,
relative_coords: torch.Tensor = None,
) -> tuple[torch.Tensor, torch.Tensor]:
+ """Concatenate rel pos coeffs to the q & k tensors, so that qk^T is now effectively including rel pos biases.
+
+ Args:
+ q (torch.Tensor): Query tensor with shape (B, L_q, C).
+ k (torch.Tensor): Key tensor with shape (B, L_k, C).
+ q_hw (tuple[int, int]): Spatial size of query tensors as (height, width).
+ k_hw (tuple[int, int]): Spatial size of key tensors as (height, width).
+ rel_pos_h (torch.Tensor): Relative positional embeddings for the height axis.
+ rel_pos_w (torch.Tensor): Relative positional embeddings for the width axis.
+ rescale (bool): Whether to rescale for use with SDPA, which would scale by the wrong factor due to the concat.
+ relative_coords (torch.Tensor | None): Precomputed relative coords index tensor.
+
+ Returns:
+ q (torch.Tensor): Query tensor padded so that qk^T accounts for relative position biases.
+ k (torch.Tensor): Key tensor padded so that qk^T accounts for relative position biases.
+ """
q_h, q_w = q_hw
k_h, k_w = k_hw
@@ -269,4 +509,4 @@ q = torch.cat([r_q * scale_ratio, rel_h, rel_w], dim=-1).view(B, q_h * q_w, -1)
k = torch.cat([k.view(B, k_h, k_w, -1), eye_h, eye_w], dim=-1).view(B, k_h * k_w, -1)
- return q, k
+ return q, k
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/sam/modules/utils.py |
Write reusable docstrings | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from functools import partial
import torch
from ultralytics.utils.downloads import attempt_download_asset
from ultralytics.utils.patches import torch_load
from .modules.decoders import MaskDecoder
from .modules.encoders import FpnNeck, Hiera, ImageEncoder, ImageEncoderViT, MemoryEncoder, PromptEncoder
from .modules.memory_attention import MemoryAttention, MemoryAttentionLayer
from .modules.sam import SAM2Model, SAMModel
from .modules.tiny_encoder import TinyViT
from .modules.transformer import TwoWayTransformer
def _load_checkpoint(model, checkpoint):
if checkpoint is None:
return model
checkpoint = attempt_download_asset(checkpoint)
with open(checkpoint, "rb") as f:
state_dict = torch_load(f)
# Handle nested "model" key
if "model" in state_dict and isinstance(state_dict["model"], dict):
state_dict = state_dict["model"]
model.load_state_dict(state_dict)
return model
def build_sam_vit_h(checkpoint=None):
return _build_sam(
encoder_embed_dim=1280,
encoder_depth=32,
encoder_num_heads=16,
encoder_global_attn_indexes=[7, 15, 23, 31],
checkpoint=checkpoint,
)
def build_sam_vit_l(checkpoint=None):
return _build_sam(
encoder_embed_dim=1024,
encoder_depth=24,
encoder_num_heads=16,
encoder_global_attn_indexes=[5, 11, 17, 23],
checkpoint=checkpoint,
)
def build_sam_vit_b(checkpoint=None):
return _build_sam(
encoder_embed_dim=768,
encoder_depth=12,
encoder_num_heads=12,
encoder_global_attn_indexes=[2, 5, 8, 11],
checkpoint=checkpoint,
)
def build_mobile_sam(checkpoint=None):
return _build_sam(
encoder_embed_dim=[64, 128, 160, 320],
encoder_depth=[2, 2, 6, 2],
encoder_num_heads=[2, 4, 5, 10],
encoder_global_attn_indexes=None,
mobile_sam=True,
checkpoint=checkpoint,
)
def build_sam2_t(checkpoint=None):
return _build_sam2(
encoder_embed_dim=96,
encoder_stages=[1, 2, 7, 2],
encoder_num_heads=1,
encoder_global_att_blocks=[5, 7, 9],
encoder_window_spec=[8, 4, 14, 7],
encoder_backbone_channel_list=[768, 384, 192, 96],
checkpoint=checkpoint,
)
def build_sam2_s(checkpoint=None):
return _build_sam2(
encoder_embed_dim=96,
encoder_stages=[1, 2, 11, 2],
encoder_num_heads=1,
encoder_global_att_blocks=[7, 10, 13],
encoder_window_spec=[8, 4, 14, 7],
encoder_backbone_channel_list=[768, 384, 192, 96],
checkpoint=checkpoint,
)
def build_sam2_b(checkpoint=None):
return _build_sam2(
encoder_embed_dim=112,
encoder_stages=[2, 3, 16, 3],
encoder_num_heads=2,
encoder_global_att_blocks=[12, 16, 20],
encoder_window_spec=[8, 4, 14, 7],
encoder_window_spatial_size=[14, 14],
encoder_backbone_channel_list=[896, 448, 224, 112],
checkpoint=checkpoint,
)
def build_sam2_l(checkpoint=None):
return _build_sam2(
encoder_embed_dim=144,
encoder_stages=[2, 6, 36, 4],
encoder_num_heads=2,
encoder_global_att_blocks=[23, 33, 43],
encoder_window_spec=[8, 4, 16, 8],
encoder_backbone_channel_list=[1152, 576, 288, 144],
checkpoint=checkpoint,
)
def _build_sam(
encoder_embed_dim,
encoder_depth,
encoder_num_heads,
encoder_global_attn_indexes,
checkpoint=None,
mobile_sam=False,
):
prompt_embed_dim = 256
image_size = 1024
vit_patch_size = 16
image_embedding_size = image_size // vit_patch_size
image_encoder = (
TinyViT(
img_size=1024,
in_chans=3,
num_classes=1000,
embed_dims=encoder_embed_dim,
depths=encoder_depth,
num_heads=encoder_num_heads,
window_sizes=[7, 7, 14, 7],
mlp_ratio=4.0,
drop_rate=0.0,
drop_path_rate=0.0,
use_checkpoint=False,
mbconv_expand_ratio=4.0,
local_conv_size=3,
layer_lr_decay=0.8,
)
if mobile_sam
else ImageEncoderViT(
depth=encoder_depth,
embed_dim=encoder_embed_dim,
img_size=image_size,
mlp_ratio=4,
norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
num_heads=encoder_num_heads,
patch_size=vit_patch_size,
qkv_bias=True,
use_rel_pos=True,
global_attn_indexes=encoder_global_attn_indexes,
window_size=14,
out_chans=prompt_embed_dim,
)
)
sam = SAMModel(
image_encoder=image_encoder,
prompt_encoder=PromptEncoder(
embed_dim=prompt_embed_dim,
image_embedding_size=(image_embedding_size, image_embedding_size),
input_image_size=(image_size, image_size),
mask_in_chans=16,
),
mask_decoder=MaskDecoder(
num_multimask_outputs=3,
transformer=TwoWayTransformer(
depth=2,
embedding_dim=prompt_embed_dim,
mlp_dim=2048,
num_heads=8,
),
transformer_dim=prompt_embed_dim,
iou_head_depth=3,
iou_head_hidden_dim=256,
),
pixel_mean=[123.675, 116.28, 103.53],
pixel_std=[58.395, 57.12, 57.375],
)
if checkpoint is not None:
sam = _load_checkpoint(sam, checkpoint)
sam.eval()
return sam
def _build_sam2(
encoder_embed_dim=1280,
encoder_stages=(2, 6, 36, 4),
encoder_num_heads=2,
encoder_global_att_blocks=(7, 15, 23, 31),
encoder_backbone_channel_list=(1152, 576, 288, 144),
encoder_window_spatial_size=(7, 7),
encoder_window_spec=(8, 4, 16, 8),
checkpoint=None,
):
image_encoder = ImageEncoder(
trunk=Hiera(
embed_dim=encoder_embed_dim,
num_heads=encoder_num_heads,
stages=encoder_stages,
global_att_blocks=encoder_global_att_blocks,
window_pos_embed_bkg_spatial_size=encoder_window_spatial_size,
window_spec=encoder_window_spec,
),
neck=FpnNeck(
d_model=256,
backbone_channel_list=encoder_backbone_channel_list,
fpn_top_down_levels=[2, 3],
fpn_interp_model="nearest",
),
scalp=1,
)
memory_attention = MemoryAttention(d_model=256, pos_enc_at_input=True, num_layers=4, layer=MemoryAttentionLayer())
memory_encoder = MemoryEncoder(out_dim=64)
is_sam2_1 = checkpoint is not None and "sam2.1" in checkpoint
sam2 = SAM2Model(
image_encoder=image_encoder,
memory_attention=memory_attention,
memory_encoder=memory_encoder,
num_maskmem=7,
image_size=1024,
sigmoid_scale_for_mem_enc=20.0,
sigmoid_bias_for_mem_enc=-10.0,
use_mask_input_as_output_without_sam=True,
directly_add_no_mem_embed=True,
use_high_res_features_in_sam=True,
multimask_output_in_sam=True,
iou_prediction_use_sigmoid=True,
use_obj_ptrs_in_encoder=True,
add_tpos_enc_to_obj_ptrs=True,
only_obj_ptrs_in_the_past_for_eval=True,
pred_obj_scores=True,
pred_obj_scores_mlp=True,
fixed_no_obj_ptr=True,
multimask_output_for_tracking=True,
use_multimask_token_for_obj_ptr=True,
multimask_min_pt_num=0,
multimask_max_pt_num=1,
use_mlp_for_obj_ptr_proj=True,
compile_image_encoder=False,
no_obj_embed_spatial=is_sam2_1,
proj_tpos_enc_in_obj_ptrs=is_sam2_1,
use_signed_tpos_enc_to_obj_ptrs=is_sam2_1,
sam_mask_decoder_extra_args=dict(
dynamic_multimask_via_stability=True,
dynamic_multimask_stability_delta=0.05,
dynamic_multimask_stability_thresh=0.98,
),
)
if checkpoint is not None:
sam2 = _load_checkpoint(sam2, checkpoint)
sam2.eval()
return sam2
sam_model_map = {
"sam_h.pt": build_sam_vit_h,
"sam_l.pt": build_sam_vit_l,
"sam_b.pt": build_sam_vit_b,
"mobile_sam.pt": build_mobile_sam,
"sam2_t.pt": build_sam2_t,
"sam2_s.pt": build_sam2_s,
"sam2_b.pt": build_sam2_b,
"sam2_l.pt": build_sam2_l,
"sam2.1_t.pt": build_sam2_t,
"sam2.1_s.pt": build_sam2_s,
"sam2.1_b.pt": build_sam2_b,
"sam2.1_l.pt": build_sam2_l,
}
def build_sam(ckpt="sam_b.pt"):
model_builder = None
ckpt = str(ckpt) # to allow Path ckpt types
for k in sam_model_map.keys():
if ckpt.endswith(k):
model_builder = sam_model_map.get(k)
if not model_builder:
raise FileNotFoundError(f"{ckpt} is not a supported SAM model. Available models are: \n {sam_model_map.keys()}")
return model_builder(ckpt) | --- +++ @@ -22,6 +22,7 @@
def _load_checkpoint(model, checkpoint):
+ """Load checkpoint into model from file path."""
if checkpoint is None:
return model
@@ -36,6 +37,7 @@
def build_sam_vit_h(checkpoint=None):
+ """Build and return a Segment Anything Model (SAM) h-size model with specified encoder parameters."""
return _build_sam(
encoder_embed_dim=1280,
encoder_depth=32,
@@ -46,6 +48,7 @@
def build_sam_vit_l(checkpoint=None):
+ """Build and return a Segment Anything Model (SAM) l-size model with specified encoder parameters."""
return _build_sam(
encoder_embed_dim=1024,
encoder_depth=24,
@@ -56,6 +59,7 @@
def build_sam_vit_b(checkpoint=None):
+ """Build and return a Segment Anything Model (SAM) b-size model with specified encoder parameters."""
return _build_sam(
encoder_embed_dim=768,
encoder_depth=12,
@@ -66,6 +70,7 @@
def build_mobile_sam(checkpoint=None):
+ """Build and return a Mobile Segment Anything Model (Mobile-SAM) for efficient image segmentation."""
return _build_sam(
encoder_embed_dim=[64, 128, 160, 320],
encoder_depth=[2, 2, 6, 2],
@@ -77,6 +82,7 @@
def build_sam2_t(checkpoint=None):
+ """Build and return a Segment Anything Model 2 (SAM2) tiny-size model with specified architecture parameters."""
return _build_sam2(
encoder_embed_dim=96,
encoder_stages=[1, 2, 7, 2],
@@ -89,6 +95,7 @@
def build_sam2_s(checkpoint=None):
+ """Build and return a small-size Segment Anything Model 2 (SAM2) with specified architecture parameters."""
return _build_sam2(
encoder_embed_dim=96,
encoder_stages=[1, 2, 11, 2],
@@ -101,6 +108,7 @@
def build_sam2_b(checkpoint=None):
+ """Build and return a Segment Anything Model 2 (SAM2) base-size model with specified architecture parameters."""
return _build_sam2(
encoder_embed_dim=112,
encoder_stages=[2, 3, 16, 3],
@@ -114,6 +122,7 @@
def build_sam2_l(checkpoint=None):
+ """Build and return a large-size Segment Anything Model 2 (SAM2) with specified architecture parameters."""
return _build_sam2(
encoder_embed_dim=144,
encoder_stages=[2, 6, 36, 4],
@@ -133,6 +142,23 @@ checkpoint=None,
mobile_sam=False,
):
+ """Build a Segment Anything Model (SAM) with specified encoder parameters.
+
+ Args:
+ encoder_embed_dim (int | list[int]): Embedding dimension for the encoder.
+ encoder_depth (int | list[int]): Depth of the encoder.
+ encoder_num_heads (int | list[int]): Number of attention heads in the encoder.
+ encoder_global_attn_indexes (list[int] | None): Indexes for global attention in the encoder.
+ checkpoint (str | None, optional): Path to the model checkpoint file.
+ mobile_sam (bool, optional): Whether to build a Mobile-SAM model.
+
+ Returns:
+ (SAMModel): A Segment Anything Model instance with the specified architecture.
+
+ Examples:
+ >>> sam = _build_sam(768, 12, 12, [2, 5, 8, 11])
+ >>> sam = _build_sam([64, 128, 160, 320], [2, 2, 6, 2], [2, 4, 5, 10], None, mobile_sam=True)
+ """
prompt_embed_dim = 256
image_size = 1024
vit_patch_size = 16
@@ -209,6 +235,25 @@ encoder_window_spec=(8, 4, 16, 8),
checkpoint=None,
):
+ """Build and return a Segment Anything Model 2 (SAM2) with specified architecture parameters.
+
+ Args:
+ encoder_embed_dim (int, optional): Embedding dimension for the encoder.
+ encoder_stages (list[int], optional): Number of blocks in each stage of the encoder.
+ encoder_num_heads (int, optional): Number of attention heads in the encoder.
+ encoder_global_att_blocks (list[int], optional): Indices of global attention blocks in the encoder.
+ encoder_backbone_channel_list (list[int], optional): Channel dimensions for each level of the encoder backbone.
+ encoder_window_spatial_size (list[int], optional): Spatial size of the window for position embeddings.
+ encoder_window_spec (list[int], optional): Window specifications for each stage of the encoder.
+ checkpoint (str | None, optional): Path to the checkpoint file for loading pre-trained weights.
+
+ Returns:
+ (SAM2Model): A configured and initialized SAM2 model.
+
+ Examples:
+ >>> sam2_model = _build_sam2(encoder_embed_dim=96, encoder_stages=[1, 2, 7, 2])
+ >>> sam2_model.eval()
+ """
image_encoder = ImageEncoder(
trunk=Hiera(
embed_dim=encoder_embed_dim,
@@ -288,6 +333,26 @@
def build_sam(ckpt="sam_b.pt"):
+ """Build and return a Segment Anything Model (SAM) based on the provided checkpoint.
+
+ Args:
+ ckpt (str | Path, optional): Path to the checkpoint file or name of a pre-defined SAM model.
+
+ Returns:
+ (SAMModel | SAM2Model): A configured and initialized SAM or SAM2 model instance.
+
+ Raises:
+ FileNotFoundError: If the provided checkpoint is not a supported SAM model.
+
+ Examples:
+ >>> sam_model = build_sam("sam_b.pt")
+ >>> sam_model = build_sam("path/to/custom_checkpoint.pt")
+
+ Notes:
+ Supported pre-defined models include:
+ - SAM: 'sam_h.pt', 'sam_l.pt', 'sam_b.pt', 'mobile_sam.pt'
+        - SAM2: 'sam2_t.pt', 'sam2_s.pt', 'sam2_b.pt', 'sam2_l.pt'; SAM2.1: 'sam2.1_t.pt', 'sam2.1_s.pt', 'sam2.1_b.pt', 'sam2.1_l.pt'
+ """
model_builder = None
ckpt = str(ckpt) # to allow Path ckpt types
for k in sam_model_map.keys():
@@ -297,4 +362,4 @@ if not model_builder:
raise FileNotFoundError(f"{ckpt} is not a supported SAM model. Available models are: \n {sam_model_map.keys()}")
-    return model_builder(ckpt)
+    return model_builder(ckpt)
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/sam/build.py |
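A minimal usage sketch for the builder row above, hedged: the import path below and the availability of these checkpoint files are assumptions, not something this row confirms. Because build_sam matches by filename suffix, a custom path resolves as long as its name ends with a known key:

from ultralytics.models.sam.build import build_sam  # assumed import path for this sketch

model = build_sam("sam_b.pt")                 # exact name resolves to build_sam_vit_b
model2 = build_sam("weights/my_sam2.1_l.pt")  # suffix match resolves to build_sam2_l

Since the lookup loop does not break on the first hit, the last matching key would win if suffixes ever overlapped; none of the current keys is a suffix of another, so the mapping stays unambiguous.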
Create docstrings for each class method | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import copy
import math
from functools import partial
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from ultralytics.nn.modules import MLP, LayerNorm2d, MLPBlock
from .transformer import Attention, TwoWayAttentionBlock, TwoWayTransformer
from .utils import add_decomposed_rel_pos, apply_rotary_enc, compute_axial_cis, window_partition, window_unpartition
class DropPath(nn.Module):
def __init__(self, drop_prob: float = 0.0, scale_by_keep: bool = True):
super().__init__()
self.drop_prob = drop_prob
self.scale_by_keep = scale_by_keep
def forward(self, x: Tensor) -> Tensor:
if self.drop_prob == 0.0 or not self.training:
return x
keep_prob = 1 - self.drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
if keep_prob > 0.0 and self.scale_by_keep:
random_tensor.div_(keep_prob)
return x * random_tensor
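# --- Illustrative aside, not part of the Ultralytics source ---
# DropPath zeroes whole samples rather than individual activations, and the
# divide-by-keep_prob rescales survivors so the expected output matches the
# input. A quick sanity check under these assumptions:
def _demo_drop_path() -> None:
    dp = DropPath(drop_prob=0.5)
    dp.train()  # stochastic depth is a no-op in eval mode
    y = dp(torch.ones(10_000, 8))
    # About half the rows are all-zero and the rest are scaled to ~2.0,
    # so the batch mean stays near 1.0
    assert abs(y.mean().item() - 1.0) < 0.1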
class MaskDownSampler(nn.Module):
def __init__(
self,
embed_dim: int = 256,
kernel_size: int = 4,
stride: int = 4,
padding: int = 0,
total_stride: int = 16,
activation: type[nn.Module] = nn.GELU,
interpol_size: tuple[int, int] | None = None,
):
super().__init__()
num_layers = int(math.log2(total_stride) // math.log2(stride))
assert stride**num_layers == total_stride
self.encoder = nn.Sequential()
mask_in_chans, mask_out_chans = 1, 1
for _ in range(num_layers):
mask_out_chans = mask_in_chans * (stride**2)
self.encoder.append(
nn.Conv2d(
mask_in_chans,
mask_out_chans,
kernel_size=kernel_size,
stride=stride,
padding=padding,
)
)
self.encoder.append(LayerNorm2d(mask_out_chans))
self.encoder.append(activation())
mask_in_chans = mask_out_chans
self.encoder.append(nn.Conv2d(mask_out_chans, embed_dim, kernel_size=1))
self.interpol_size = interpol_size
if self.interpol_size is not None:
assert isinstance(self.interpol_size, (list, tuple)), (
f"Unsupported type {type(self.interpol_size)}. Should be a list or tuple."
)
self.interpol_size = list(interpol_size)
assert len(self.interpol_size) == 2
def forward(self, x: Tensor) -> Tensor:
if self.interpol_size is not None and self.interpol_size != list(x.shape[-2:]):
x = F.interpolate(
x.float(),
size=self.interpol_size,
align_corners=False,
mode="bilinear",
antialias=True,
).to(x.dtype)
return self.encoder(x)
class CXBlock(nn.Module):
def __init__(
self,
dim: int,
kernel_size: int = 7,
padding: int = 3,
drop_path: float = 0.0,
layer_scale_init_value: float = 1e-6,
use_dwconv: bool = True,
):
super().__init__()
self.dwconv = nn.Conv2d(
dim,
dim,
kernel_size=kernel_size,
padding=padding,
groups=dim if use_dwconv else 1,
) # depthwise conv
self.norm = LayerNorm2d(dim, eps=1e-6)
self.pwconv1 = nn.Linear(dim, 4 * dim) # pointwise/1x1 convs, implemented with linear layers
self.act = nn.GELU()
self.pwconv2 = nn.Linear(4 * dim, dim)
self.gamma = (
nn.Parameter(layer_scale_init_value * torch.ones(dim), requires_grad=True)
if layer_scale_init_value > 0
else None
)
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
def forward(self, x: Tensor) -> Tensor:
input = x
x = self.dwconv(x)
x = self.norm(x)
x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C)
x = self.pwconv1(x)
x = self.act(x)
x = self.pwconv2(x)
if self.gamma is not None:
x = self.gamma * x
x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)
x = input + self.drop_path(x)
return x
class Fuser(nn.Module):
def __init__(self, layer: nn.Module, num_layers: int, dim: int | None = None, input_projection: bool = False):
super().__init__()
self.proj = nn.Identity()
self.layers = nn.ModuleList([copy.deepcopy(layer) for _ in range(num_layers)])
if input_projection:
assert dim is not None
self.proj = nn.Conv2d(dim, dim, kernel_size=1)
def forward(self, x: Tensor) -> Tensor:
x = self.proj(x)
for layer in self.layers:
x = layer(x)
return x
class SAM2TwoWayAttentionBlock(TwoWayAttentionBlock):
def __init__(
self,
embedding_dim: int,
num_heads: int,
mlp_dim: int = 2048,
activation: type[nn.Module] = nn.ReLU,
attention_downsample_rate: int = 2,
skip_first_layer_pe: bool = False,
) -> None:
super().__init__(embedding_dim, num_heads, mlp_dim, activation, attention_downsample_rate, skip_first_layer_pe)
self.mlp = MLP(embedding_dim, mlp_dim, embedding_dim, num_layers=2, act=activation)
class SAM2TwoWayTransformer(TwoWayTransformer):
def __init__(
self,
depth: int,
embedding_dim: int,
num_heads: int,
mlp_dim: int,
activation: type[nn.Module] = nn.ReLU,
attention_downsample_rate: int = 2,
) -> None:
super().__init__(depth, embedding_dim, num_heads, mlp_dim, activation, attention_downsample_rate)
self.layers = nn.ModuleList()
for i in range(depth):
self.layers.append(
SAM2TwoWayAttentionBlock(
embedding_dim=embedding_dim,
num_heads=num_heads,
mlp_dim=mlp_dim,
activation=activation,
attention_downsample_rate=attention_downsample_rate,
skip_first_layer_pe=(i == 0),
)
)
class RoPEAttention(Attention):
def __init__(
self,
*args,
rope_theta: float = 10000.0,
rope_k_repeat: bool = False,
feat_sizes: tuple[int, int] = (32, 32), # [w, h] for stride 16 feats at 512 resolution
**kwargs,
):
super().__init__(*args, **kwargs)
self.compute_cis = partial(compute_axial_cis, dim=self.internal_dim // self.num_heads, theta=rope_theta)
freqs_cis = self.compute_cis(end_x=feat_sizes[0], end_y=feat_sizes[1])
self.freqs_cis = freqs_cis
self.rope_k_repeat = rope_k_repeat # repeat q rope to match k length, needed for cross-attention to memories
def forward(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, num_k_exclude_rope: int = 0) -> torch.Tensor:
q = self.q_proj(q)
k = self.k_proj(k)
v = self.v_proj(v)
# Separate into heads
q = self._separate_heads(q, self.num_heads)
k = self._separate_heads(k, self.num_heads)
v = self._separate_heads(v, self.num_heads)
# Apply rotary position encoding
w = h = math.sqrt(q.shape[-2])
self.freqs_cis = self.freqs_cis.to(q.device)
if self.freqs_cis.shape[0] != q.shape[-2]:
self.freqs_cis = self.compute_cis(end_x=w, end_y=h).to(q.device)
if q.shape[-2] != k.shape[-2]:
assert self.rope_k_repeat
num_k_rope = k.size(-2) - num_k_exclude_rope
q, k[:, :, :num_k_rope] = apply_rotary_enc(
q,
k[:, :, :num_k_rope],
freqs_cis=self.freqs_cis,
repeat_freqs_k=self.rope_k_repeat,
)
# Attention
out = F.scaled_dot_product_attention(q, k, v)
out = self._recombine_heads(out)
out = self.out_proj(out)
return out
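# --- Illustrative aside, not part of the Ultralytics source ---
# RoPEAttention keeps the base Attention call interface (the
# (embedding_dim, num_heads) constructor signature is assumed here); the
# rotary encoding is applied between the q/k projections and scaled
# dot-product attention. A shape-level sketch with the default
# feat_sizes=(32, 32), i.e. 1024 tokens:
def _demo_rope_attention() -> None:
    attn = RoPEAttention(embedding_dim=256, num_heads=8)
    tokens = torch.randn(1, 32 * 32, 256)
    out = attn(tokens, tokens, tokens)  # self-attention: q = k = v
    assert out.shape == (1, 1024, 256)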
def do_pool(x: torch.Tensor, pool: nn.Module, norm: nn.Module = None) -> torch.Tensor:
if pool is None:
return x
# (B, H, W, C) -> (B, C, H, W)
x = x.permute(0, 3, 1, 2)
x = pool(x)
# (B, C, H', W') -> (B, H', W', C)
x = x.permute(0, 2, 3, 1)
if norm:
x = norm(x)
return x
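# --- Illustrative aside, not part of the Ultralytics source ---
# do_pool expects channels-last input and round-trips through channels-first
# for the pooling op:
def _demo_do_pool() -> None:
    x = torch.randn(1, 8, 8, 32)  # (B, H, W, C) as used by the Hiera-style blocks
    assert do_pool(x, nn.MaxPool2d(kernel_size=2, stride=2)).shape == (1, 4, 4, 32)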
class MultiScaleAttention(nn.Module):
def __init__(
self,
dim: int,
dim_out: int,
num_heads: int,
q_pool: nn.Module = None,
):
super().__init__()
self.dim = dim
self.dim_out = dim_out
self.num_heads = num_heads
head_dim = dim_out // num_heads
self.scale = head_dim**-0.5
self.q_pool = q_pool
self.qkv = nn.Linear(dim, dim_out * 3)
self.proj = nn.Linear(dim_out, dim_out)
def forward(self, x: torch.Tensor) -> torch.Tensor:
B, H, W, _ = x.shape
# qkv with shape (B, H * W, 3, nHead, C)
qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1)
# q, k, v with shape (B, H * W, nheads, C)
q, k, v = torch.unbind(qkv, 2)
# Q pooling (for downsample at stage changes)
if self.q_pool:
q = do_pool(q.reshape(B, H, W, -1), self.q_pool)
H, W = q.shape[1:3] # downsampled shape
q = q.reshape(B, H * W, self.num_heads, -1)
# Torch's SDPA expects [B, nheads, H*W, C] so we transpose
x = F.scaled_dot_product_attention(
q.transpose(1, 2),
k.transpose(1, 2),
v.transpose(1, 2),
)
# Transpose back
x = x.transpose(1, 2)
x = x.reshape(B, H, W, -1)
x = self.proj(x)
return x
class MultiScaleBlock(nn.Module):
def __init__(
self,
dim: int,
dim_out: int,
num_heads: int,
mlp_ratio: float = 4.0,
drop_path: float = 0.0,
norm_layer: nn.Module | str = "LayerNorm",
q_stride: tuple[int, int] | None = None,
act_layer: type[nn.Module] = nn.GELU,
window_size: int = 0,
):
super().__init__()
if isinstance(norm_layer, str):
norm_layer = partial(getattr(nn, norm_layer), eps=1e-6)
self.dim = dim
self.dim_out = dim_out
self.norm1 = norm_layer(dim)
self.window_size = window_size
self.pool, self.q_stride = None, q_stride
if self.q_stride:
self.pool = nn.MaxPool2d(kernel_size=q_stride, stride=q_stride, ceil_mode=False)
self.attn = MultiScaleAttention(
dim,
dim_out,
num_heads=num_heads,
q_pool=self.pool,
)
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim_out)
self.mlp = MLP(
dim_out,
int(dim_out * mlp_ratio),
dim_out,
num_layers=2,
act=act_layer,
)
if dim != dim_out:
self.proj = nn.Linear(dim, dim_out)
def forward(self, x: torch.Tensor) -> torch.Tensor:
shortcut = x # B, H, W, C
x = self.norm1(x)
# Skip connection
if self.dim != self.dim_out:
shortcut = do_pool(self.proj(x), self.pool)
# Window partition
window_size = self.window_size
if window_size > 0:
H, W = x.shape[1], x.shape[2]
x, pad_hw = window_partition(x, window_size)
# Window Attention + Q Pooling (if stage change)
x = self.attn(x)
if self.q_stride:
# Shapes have changed due to Q pooling
window_size = self.window_size // self.q_stride[0]
H, W = shortcut.shape[1:3]
pad_h = (window_size - H % window_size) % window_size
pad_w = (window_size - W % window_size) % window_size
pad_hw = (H + pad_h, W + pad_w)
# Reverse window partition
if self.window_size > 0:
x = window_unpartition(x, window_size, pad_hw, (H, W))
x = shortcut + self.drop_path(x)
# MLP
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
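# --- Illustrative aside, not part of the Ultralytics source ---
# With q_stride set, the query pooling halves the spatial resolution at a
# stage change while the projection widens the channels:
def _demo_multiscale_block() -> None:
    blk = MultiScaleBlock(dim=96, dim_out=192, num_heads=2, q_stride=(2, 2), window_size=8)
    out = blk(torch.randn(1, 64, 64, 96))
    assert out.shape == (1, 32, 32, 192)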
class PositionEmbeddingSine(nn.Module):
def __init__(
self,
num_pos_feats: int,
temperature: int = 10000,
normalize: bool = True,
scale: float | None = None,
):
super().__init__()
assert num_pos_feats % 2 == 0, "Expecting even model width"
self.num_pos_feats = num_pos_feats // 2
self.temperature = temperature
self.normalize = normalize
if scale is not None and not normalize:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
self.scale = scale
self.cache = {}
def _encode_xy(self, x: torch.Tensor, y: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
assert len(x) == len(y) and x.ndim == y.ndim == 1
x_embed = x * self.scale
y_embed = y * self.scale
dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
pos_x = x_embed[:, None] / dim_t
pos_y = y_embed[:, None] / dim_t
pos_x = torch.stack((pos_x[:, 0::2].sin(), pos_x[:, 1::2].cos()), dim=2).flatten(1)
pos_y = torch.stack((pos_y[:, 0::2].sin(), pos_y[:, 1::2].cos()), dim=2).flatten(1)
return pos_x, pos_y
@torch.no_grad()
def encode_boxes(self, x: torch.Tensor, y: torch.Tensor, w: torch.Tensor, h: torch.Tensor) -> torch.Tensor:
pos_x, pos_y = self._encode_xy(x, y)
return torch.cat((pos_y, pos_x, h[:, None], w[:, None]), dim=1)
encode = encode_boxes # Backwards compatibility
@torch.no_grad()
def encode_points(self, x: torch.Tensor, y: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
(bx, nx), (by, ny), (bl, nl) = x.shape, y.shape, labels.shape
assert bx == by and nx == ny and bx == bl and nx == nl
pos_x, pos_y = self._encode_xy(x.flatten(), y.flatten())
pos_x, pos_y = pos_x.reshape(bx, nx, -1), pos_y.reshape(by, ny, -1)
return torch.cat((pos_y, pos_x, labels[:, :, None]), dim=2)
@torch.no_grad()
def forward(self, x: torch.Tensor) -> Tensor:
cache_key = (x.shape[-2], x.shape[-1])
if cache_key in self.cache:
return self.cache[cache_key][None].repeat(x.shape[0], 1, 1, 1)
y_embed = (
torch.arange(1, x.shape[-2] + 1, dtype=torch.float32, device=x.device)
.view(1, -1, 1)
.repeat(x.shape[0], 1, x.shape[-1])
)
x_embed = (
torch.arange(1, x.shape[-1] + 1, dtype=torch.float32, device=x.device)
.view(1, 1, -1)
.repeat(x.shape[0], x.shape[-2], 1)
)
if self.normalize:
eps = 1e-6
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
self.cache[cache_key] = pos[0]
return pos
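# --- Illustrative aside, not part of the Ultralytics source ---
# The output channel count equals the num_pos_feats constructor argument, and
# embeddings are cached per spatial size:
def _demo_position_embedding_sine() -> None:
    pe = PositionEmbeddingSine(num_pos_feats=128)
    emb = pe(torch.randn(2, 3, 64, 64))
    assert emb.shape == (2, 128, 64, 64)
    assert (64, 64) in pe.cache  # later calls at this size reuse the cache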
class PositionEmbeddingRandom(nn.Module):
def __init__(self, num_pos_feats: int = 64, scale: float | None = None) -> None:
super().__init__()
if scale is None or scale <= 0.0:
scale = 1.0
self.register_buffer("positional_encoding_gaussian_matrix", scale * torch.randn((2, num_pos_feats)))
# Set non-deterministic for forward() error 'cumsum_cuda_kernel does not have a deterministic implementation'
torch.use_deterministic_algorithms(False)
torch.backends.cudnn.deterministic = False
def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor:
# Assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape
coords = 2 * coords - 1
coords = coords @ self.positional_encoding_gaussian_matrix
coords = 2 * np.pi * coords
# Outputs d_1 x ... x d_n x C shape
return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1)
def forward(self, size: tuple[int, int]) -> torch.Tensor:
h, w = size
grid = torch.ones(
(h, w),
device=self.positional_encoding_gaussian_matrix.device,
dtype=self.positional_encoding_gaussian_matrix.dtype,
)
y_embed = grid.cumsum(dim=0) - 0.5
x_embed = grid.cumsum(dim=1) - 0.5
y_embed = y_embed / h
x_embed = x_embed / w
pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1))
return pe.permute(2, 0, 1) # C x H x W
def forward_with_coords(self, coords_input: torch.Tensor, image_size: tuple[int, int]) -> torch.Tensor:
coords = coords_input.clone()
coords[:, :, 0] = coords[:, :, 0] / image_size[1]
coords[:, :, 1] = coords[:, :, 1] / image_size[0]
return self._pe_encoding(coords) # B x N x C
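# --- Illustrative aside, not part of the Ultralytics source ---
# forward_with_coords normalizes pixel coordinates by (W, H) before encoding,
# so point prompts can be passed in image space:
def _demo_position_embedding_random() -> None:
    pe = PositionEmbeddingRandom(num_pos_feats=64)
    points = torch.rand(1, 5, 2) * torch.tensor([640.0, 480.0])  # (x, y) in pixels
    out = pe.forward_with_coords(points, image_size=(480, 640))  # (H, W)
    assert out.shape == (1, 5, 128)  # sin and cos halves give 2 * num_pos_feats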
class Block(nn.Module):
def __init__(
self,
dim: int,
num_heads: int,
mlp_ratio: float = 4.0,
qkv_bias: bool = True,
norm_layer: type[nn.Module] = nn.LayerNorm,
act_layer: type[nn.Module] = nn.GELU,
use_rel_pos: bool = False,
rel_pos_zero_init: bool = True,
window_size: int = 0,
input_size: tuple[int, int] | None = None,
) -> None:
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = REAttention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
use_rel_pos=use_rel_pos,
rel_pos_zero_init=rel_pos_zero_init,
input_size=input_size if window_size == 0 else (window_size, window_size),
)
self.norm2 = norm_layer(dim)
self.mlp = MLPBlock(embedding_dim=dim, mlp_dim=int(dim * mlp_ratio), act=act_layer)
self.window_size = window_size
def forward(self, x: torch.Tensor) -> torch.Tensor:
shortcut = x
x = self.norm1(x)
# Window partition
if self.window_size > 0:
H, W = x.shape[1], x.shape[2]
x, pad_hw = window_partition(x, self.window_size)
x = self.attn(x)
# Reverse window partition
if self.window_size > 0:
x = window_unpartition(x, self.window_size, pad_hw, (H, W))
x = shortcut + x
return x + self.mlp(self.norm2(x))
class REAttention(nn.Module):
def __init__(
self,
dim: int,
num_heads: int = 8,
qkv_bias: bool = True,
use_rel_pos: bool = False,
rel_pos_zero_init: bool = True,
input_size: tuple[int, int] | None = None,
) -> None:
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim**-0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.proj = nn.Linear(dim, dim)
self.use_rel_pos = use_rel_pos
if self.use_rel_pos:
assert input_size is not None, "Input size must be provided if using relative positional encoding."
# Initialize relative positional embeddings
self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))
self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))
def forward(self, x: torch.Tensor) -> torch.Tensor:
B, H, W, _ = x.shape
# qkv with shape (3, B, nHead, H * W, C)
qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
# q, k, v with shape (B * nHead, H * W, C)
q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0)
attn = (q * self.scale) @ k.transpose(-2, -1)
if self.use_rel_pos:
attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W))
attn = attn.softmax(dim=-1)
x = (attn @ v).view(B, self.num_heads, H, W, -1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1)
return self.proj(x)
class PatchEmbed(nn.Module):
def __init__(
self,
kernel_size: tuple[int, int] = (16, 16),
stride: tuple[int, int] = (16, 16),
padding: tuple[int, int] = (0, 0),
in_chans: int = 3,
embed_dim: int = 768,
bias: bool = True,
) -> None:
super().__init__()
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.proj(x).permute(0, 2, 3, 1) # B C H W -> B H W C | --- +++ @@ -17,13 +17,29 @@
class DropPath(nn.Module):
+ """Implements stochastic depth regularization for neural networks during training.
+
+ Attributes:
+ drop_prob (float): Probability of dropping a path during training.
+ scale_by_keep (bool): Whether to scale the output by the keep probability.
+
+ Methods:
+ forward: Applies stochastic depth to input tensor during training, with optional scaling.
+
+ Examples:
+ >>> drop_path = DropPath(drop_prob=0.2, scale_by_keep=True)
+ >>> x = torch.randn(32, 64, 224, 224)
+ >>> output = drop_path(x)
+ """
def __init__(self, drop_prob: float = 0.0, scale_by_keep: bool = True):
+ """Initialize DropPath module for stochastic depth regularization during training."""
super().__init__()
self.drop_prob = drop_prob
self.scale_by_keep = scale_by_keep
def forward(self, x: Tensor) -> Tensor:
+ """Apply stochastic depth to input tensor during training, with optional scaling."""
if self.drop_prob == 0.0 or not self.training:
return x
keep_prob = 1 - self.drop_prob
@@ -35,6 +51,25 @@
class MaskDownSampler(nn.Module):
+ """A mask downsampling and embedding module for efficient processing of input masks.
+
+ This class implements a mask downsampler that progressively reduces the spatial dimensions of input masks while
+ expanding their channel dimensions using convolutional layers, layer normalization, and activation functions.
+
+ Attributes:
+ encoder (nn.Sequential): A sequential container of convolutional layers, layer normalization, and activation
+ functions for downsampling and embedding masks.
+
+ Methods:
+ forward: Downsamples and encodes input mask to embed_dim channels.
+
+ Examples:
+ >>> mask_downsampler = MaskDownSampler(embed_dim=256, kernel_size=4, stride=4, padding=0, total_stride=16)
+ >>> input_mask = torch.randn(1, 1, 256, 256)
+ >>> output = mask_downsampler(input_mask)
+ >>> print(output.shape)
+ torch.Size([1, 256, 16, 16])
+ """
def __init__(
self,
@@ -46,6 +81,7 @@ activation: type[nn.Module] = nn.GELU,
interpol_size: tuple[int, int] | None = None,
):
+ """Initialize a mask downsampler module for progressive downsampling and channel expansion."""
super().__init__()
num_layers = int(math.log2(total_stride) // math.log2(stride))
assert stride**num_layers == total_stride
@@ -76,6 +112,7 @@ assert len(self.interpol_size) == 2
def forward(self, x: Tensor) -> Tensor:
+ """Downsample and encode input mask to embed_dim channels using convolutional layers and LayerNorm2d."""
if self.interpol_size is not None and self.interpol_size != list(x.shape[-2:]):
x = F.interpolate(
x.float(),
@@ -88,6 +125,31 @@
class CXBlock(nn.Module):
+ """ConvNeXt Block for efficient feature extraction in convolutional neural networks.
+
+ This block implements a modified version of the ConvNeXt architecture, offering improved performance and flexibility
+ in feature extraction.
+
+ Attributes:
+ dwconv (nn.Conv2d): Depthwise or standard 2D convolution layer.
+ norm (LayerNorm2d): Layer normalization applied to channels.
+ pwconv1 (nn.Linear): First pointwise convolution implemented as a linear layer.
+ act (nn.GELU): GELU activation function.
+ pwconv2 (nn.Linear): Second pointwise convolution implemented as a linear layer.
+ gamma (nn.Parameter | None): Learnable scale parameter for layer scaling.
+ drop_path (nn.Module): DropPath layer for stochastic depth regularization.
+
+ Methods:
+ forward: Processes the input tensor through the ConvNeXt block.
+
+ Examples:
+ >>> import torch
+ >>> x = torch.randn(1, 64, 56, 56)
+ >>> block = CXBlock(dim=64, kernel_size=7, padding=3)
+ >>> output = block(x)
+ >>> print(output.shape)
+ torch.Size([1, 64, 56, 56])
+ """
def __init__(
self,
@@ -98,6 +160,19 @@ layer_scale_init_value: float = 1e-6,
use_dwconv: bool = True,
):
+ """Initialize a ConvNeXt Block for efficient feature extraction in convolutional neural networks.
+
+ This block implements a modified version of the ConvNeXt architecture, offering improved performance and
+ flexibility in feature extraction.
+
+ Args:
+ dim (int): Number of input channels.
+ kernel_size (int): Size of the convolutional kernel.
+ padding (int): Padding size for the convolution.
+ drop_path (float): Stochastic depth rate.
+ layer_scale_init_value (float): Initial value for Layer Scale.
+ use_dwconv (bool): Whether to use depthwise convolution.
+ """
super().__init__()
self.dwconv = nn.Conv2d(
dim,
@@ -118,6 +193,7 @@ self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
def forward(self, x: Tensor) -> Tensor:
+ """Apply ConvNeXt block operations to input tensor, including convolutions and residual connection."""
input = x
x = self.dwconv(x)
x = self.norm(x)
@@ -134,8 +210,37 @@
class Fuser(nn.Module):
+ """A module for fusing features through multiple layers of a neural network.
+
+ This class applies a series of identical layers to an input tensor, optionally projecting the input first.
+
+ Attributes:
+ proj (nn.Module): An optional input projection layer. Identity if no projection is needed.
+ layers (nn.ModuleList): A list of identical layers to be applied sequentially.
+
+ Methods:
+ forward: Applies the fuser to an input tensor.
+
+ Examples:
+ >>> layer = CXBlock(dim=256)
+ >>> fuser = Fuser(layer, num_layers=3, dim=256, input_projection=True)
+ >>> x = torch.randn(1, 256, 32, 32)
+ >>> output = fuser(x)
+ >>> print(output.shape)
+ torch.Size([1, 256, 32, 32])
+ """
def __init__(self, layer: nn.Module, num_layers: int, dim: int | None = None, input_projection: bool = False):
+ """Initialize the Fuser module for feature fusion through multiple layers.
+
+ This module creates a sequence of identical layers and optionally applies an input projection.
+
+ Args:
+ layer (nn.Module): The layer to be replicated in the fuser.
+ num_layers (int): The number of times to replicate the layer.
+ dim (int | None): The dimension for input projection, if used.
+ input_projection (bool): Whether to use input projection.
+ """
super().__init__()
self.proj = nn.Identity()
self.layers = nn.ModuleList([copy.deepcopy(layer) for _ in range(num_layers)])
@@ -145,6 +250,7 @@ self.proj = nn.Conv2d(dim, dim, kernel_size=1)
def forward(self, x: Tensor) -> Tensor:
+ """Apply a series of layers to the input tensor, optionally projecting it first."""
x = self.proj(x)
for layer in self.layers:
x = layer(x)
@@ -152,6 +258,32 @@
class SAM2TwoWayAttentionBlock(TwoWayAttentionBlock):
+ """A two-way attention block for performing self-attention and cross-attention in both directions.
+
+ This block extends the TwoWayAttentionBlock and consists of four main components: self-attention on sparse inputs,
+ cross-attention from sparse to dense inputs, an MLP block on sparse inputs, and cross-attention from dense to sparse
+ inputs.
+
+ Attributes:
+ self_attn (Attention): Self-attention layer for queries.
+ norm1 (nn.LayerNorm): Layer normalization after the first attention block.
+ cross_attn_token_to_image (Attention): Cross-attention layer from queries to keys.
+ norm2 (nn.LayerNorm): Layer normalization after the second attention block.
+ mlp (MLP): MLP block for transforming query embeddings.
+ norm3 (nn.LayerNorm): Layer normalization after the MLP block.
+ norm4 (nn.LayerNorm): Layer normalization after the third attention block.
+ cross_attn_image_to_token (Attention): Cross-attention layer from keys to queries.
+ skip_first_layer_pe (bool): Flag to skip positional encoding in the first layer.
+
+ Methods:
+ forward: Processes input through the attention blocks and MLP.
+
+ Examples:
+ >>> block = SAM2TwoWayAttentionBlock(embedding_dim=256, num_heads=8)
+        >>> queries, keys = torch.randn(1, 100, 256), torch.randn(1, 256, 256)
+        >>> query_pe, key_pe = torch.randn(1, 100, 256), torch.randn(1, 256, 256)
+        >>> queries_out, keys_out = block(queries, keys, query_pe, key_pe)
+ """
def __init__(
self,
@@ -162,11 +294,51 @@ attention_downsample_rate: int = 2,
skip_first_layer_pe: bool = False,
) -> None:
+ """Initialize a SAM2TwoWayAttentionBlock for performing self-attention and cross-attention in two directions.
+
+ This block extends the TwoWayAttentionBlock and consists of four main components: self-attention on sparse
+ inputs, cross-attention from sparse to dense inputs, an MLP block on sparse inputs, and cross-attention from
+ dense to sparse inputs.
+
+ Args:
+ embedding_dim (int): The channel dimension of the embeddings.
+ num_heads (int): The number of heads in the attention layers.
+ mlp_dim (int): The hidden dimension of the MLP block.
+ activation (type[nn.Module]): The activation function of the MLP block.
+ attention_downsample_rate (int): The downsample rate for attention computations.
+ skip_first_layer_pe (bool): Whether to skip the positional encoding in the first layer.
+ """
super().__init__(embedding_dim, num_heads, mlp_dim, activation, attention_downsample_rate, skip_first_layer_pe)
self.mlp = MLP(embedding_dim, mlp_dim, embedding_dim, num_layers=2, act=activation)
class SAM2TwoWayTransformer(TwoWayTransformer):
+ """A Two-Way Transformer module for simultaneous attention to image and query points.
+
+ This class extends the TwoWayTransformer, implementing a specialized transformer decoder that attends to an input
+ image using queries with supplied positional embeddings. It is particularly useful for tasks like object detection,
+ image segmentation, and point cloud processing.
+
+ Attributes:
+ depth (int): Number of layers in the transformer.
+ embedding_dim (int): Channel dimension for input embeddings.
+ num_heads (int): Number of heads for multihead attention.
+ mlp_dim (int): Internal channel dimension for the MLP block.
+ layers (nn.ModuleList): List of SAM2TwoWayAttentionBlock layers comprising the transformer.
+ final_attn_token_to_image (Attention): Final attention layer from queries to image.
+ norm_final_attn (nn.LayerNorm): Layer normalization applied to final queries.
+
+ Methods:
+ forward: Processes input image embeddings and query embeddings through the transformer.
+
+ Examples:
+ >>> transformer = SAM2TwoWayTransformer(depth=5, embedding_dim=256, num_heads=8, mlp_dim=2048)
+ >>> image_embedding = torch.randn(1, 256, 64, 64)
+        >>> query_embedding = torch.randn(1, 100, 256)
+        >>> queries, keys = transformer(image_embedding, image_embedding, query_embedding)  # (image, image_pe, points)
+        >>> print(queries.shape, keys.shape)
+        torch.Size([1, 100, 256]) torch.Size([1, 4096, 256])
+ """
def __init__(
self,
@@ -177,6 +349,19 @@ activation: type[nn.Module] = nn.ReLU,
attention_downsample_rate: int = 2,
) -> None:
+ """Initialize a SAM2TwoWayTransformer instance.
+
+ This transformer decoder attends to an input image using queries with supplied positional embeddings. It is
+ designed for tasks like object detection, image segmentation, and point cloud processing.
+
+ Args:
+ depth (int): Number of layers in the transformer.
+ embedding_dim (int): Channel dimension for the input embeddings.
+ num_heads (int): Number of heads for multihead attention. Must divide embedding_dim.
+ mlp_dim (int): Channel dimension internal to the MLP block.
+ activation (type[nn.Module]): Activation function to use in the MLP block.
+ attention_downsample_rate (int): Downsampling rate for attention computations.
+ """
super().__init__(depth, embedding_dim, num_heads, mlp_dim, activation, attention_downsample_rate)
self.layers = nn.ModuleList()
for i in range(depth):
@@ -193,6 +378,28 @@
class RoPEAttention(Attention):
+ """Implements rotary position encoding for attention mechanisms in transformer architectures.
+
+ This class extends the base Attention class by incorporating Rotary Position Encoding (RoPE) to enhance the
+ positional awareness of the attention mechanism.
+
+ Attributes:
+ compute_cis (Callable): Function to compute axial complex numbers for rotary encoding.
+ freqs_cis (torch.Tensor): Precomputed frequency tensor for rotary encoding.
+ rope_k_repeat (bool): Flag to repeat query RoPE to match key length for cross-attention to memories.
+
+ Methods:
+ forward: Applies rotary position encoding and computes attention between query, key, and value tensors.
+
+ Examples:
+ >>> rope_attn = RoPEAttention(embedding_dim=256, num_heads=8, rope_theta=10000.0, feat_sizes=(32, 32))
+ >>> q = torch.randn(1, 1024, 256)
+ >>> k = torch.randn(1, 1024, 256)
+ >>> v = torch.randn(1, 1024, 256)
+ >>> output = rope_attn(q, k, v)
+ >>> print(output.shape)
+ torch.Size([1, 1024, 256])
+ """
def __init__(
self,
@@ -202,6 +409,7 @@ feat_sizes: tuple[int, int] = (32, 32), # [w, h] for stride 16 feats at 512 resolution
**kwargs,
):
+ """Initialize RoPEAttention with rotary position encoding for enhanced positional awareness."""
super().__init__(*args, **kwargs)
self.compute_cis = partial(compute_axial_cis, dim=self.internal_dim // self.num_heads, theta=rope_theta)
@@ -210,6 +418,7 @@ self.rope_k_repeat = rope_k_repeat # repeat q rope to match k length, needed for cross-attention to memories
def forward(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, num_k_exclude_rope: int = 0) -> torch.Tensor:
+ """Apply rotary position encoding and compute attention between query, key, and value tensors."""
q = self.q_proj(q)
k = self.k_proj(k)
v = self.v_proj(v)
@@ -245,6 +454,7 @@
def do_pool(x: torch.Tensor, pool: nn.Module, norm: nn.Module = None) -> torch.Tensor:
+ """Apply pooling and optional normalization to a tensor, handling spatial dimension permutations."""
if pool is None:
return x
# (B, H, W, C) -> (B, C, H, W)
@@ -259,6 +469,33 @@
class MultiScaleAttention(nn.Module):
+ """Implements multiscale self-attention with optional query pooling for efficient feature extraction.
+
+ This class provides a flexible implementation of multiscale attention, allowing for optional downsampling of query
+ features through pooling. It's designed to enhance the model's ability to capture multiscale information in visual
+ tasks.
+
+ Attributes:
+ dim (int): Input dimension of the feature map.
+ dim_out (int): Output dimension of the attention module.
+ num_heads (int): Number of attention heads.
+ scale (float): Scaling factor for dot-product attention.
+ q_pool (nn.Module | None): Optional pooling module for query features.
+ qkv (nn.Linear): Linear projection for query, key, and value.
+ proj (nn.Linear): Output projection.
+
+ Methods:
+ forward: Applies multiscale attention to the input tensor.
+
+ Examples:
+ >>> import torch
+ >>> from torch import nn
+ >>> x = torch.randn(1, 64, 64, 256)
+ >>> msa = MultiScaleAttention(dim=256, dim_out=256, num_heads=8)
+ >>> output = msa(x)
+ >>> print(output.shape)
+ torch.Size([1, 64, 64, 256])
+ """
def __init__(
self,
@@ -267,6 +504,7 @@ num_heads: int,
q_pool: nn.Module = None,
):
+ """Initialize multiscale attention with optional query pooling for efficient feature extraction."""
super().__init__()
self.dim = dim
@@ -281,6 +519,7 @@ self.proj = nn.Linear(dim_out, dim_out)
def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Apply multiscale attention with optional query pooling to extract multiscale features."""
B, H, W, _ = x.shape
# qkv with shape (B, H * W, 3, nHead, C)
qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1)
@@ -309,6 +548,34 @@
class MultiScaleBlock(nn.Module):
+ """A multiscale attention block with window partitioning and query pooling for efficient vision transformers.
+
+ This class implements a multiscale attention mechanism with optional window partitioning and downsampling, designed
+ for use in vision transformer architectures.
+
+ Attributes:
+ dim (int): Input dimension of the block.
+ dim_out (int): Output dimension of the block.
+ norm1 (nn.Module): First normalization layer.
+ window_size (int): Size of the window for partitioning.
+ pool (nn.Module | None): Pooling layer for query downsampling.
+ q_stride (tuple[int, int] | None): Stride for query pooling.
+ attn (MultiScaleAttention): Multi-scale attention module.
+ drop_path (nn.Module): Drop path layer for regularization.
+ norm2 (nn.Module): Second normalization layer.
+ mlp (MLP): Multi-layer perceptron module.
+ proj (nn.Linear | None): Projection layer for dimension mismatch.
+
+ Methods:
+ forward: Processes input tensor through the multiscale block.
+
+ Examples:
+ >>> block = MultiScaleBlock(dim=256, dim_out=512, num_heads=8, window_size=7)
+ >>> x = torch.randn(1, 56, 56, 256)
+ >>> output = block(x)
+ >>> print(output.shape)
+        torch.Size([1, 56, 56, 512])
+ """
def __init__(
self,
@@ -322,6 +589,7 @@ act_layer: type[nn.Module] = nn.GELU,
window_size: int = 0,
):
+ """Initialize a multiscale attention block with window partitioning and optional query pooling."""
super().__init__()
if isinstance(norm_layer, str):
@@ -358,6 +626,7 @@ self.proj = nn.Linear(dim, dim_out)
def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Process input through multiscale attention and MLP, with optional windowing and downsampling."""
shortcut = x # B, H, W, C
x = self.norm1(x)
@@ -393,6 +662,31 @@
class PositionEmbeddingSine(nn.Module):
+ """A module for generating sinusoidal positional embeddings for 2D inputs like images.
+
+ This class implements sinusoidal position encoding for 2D spatial positions, which can be used in transformer-based
+ models for computer vision tasks.
+
+ Attributes:
+ num_pos_feats (int): Number of positional features (half of the embedding dimension).
+ temperature (int): Temperature parameter for the sinusoidal functions.
+ normalize (bool): Whether to normalize the positional embeddings.
+ scale (float): Scaling factor for the embeddings when normalize is True.
+ cache (dict): Cache for storing precomputed embeddings.
+
+ Methods:
+ _encode_xy: Encodes 2D positions using sine and cosine functions.
+ encode_boxes: Encodes box coordinates and dimensions into positional embeddings.
+ encode_points: Encodes 2D point coordinates with sinusoidal positional embeddings.
+ forward: Generates sinusoidal position embeddings for 2D inputs.
+
+ Examples:
+ >>> pos_emb = PositionEmbeddingSine(num_pos_feats=128)
+ >>> x = torch.randn(1, 3, 224, 224)
+ >>> embeddings = pos_emb(x)
+ >>> print(embeddings.shape)
+        torch.Size([1, 128, 224, 224])
+ """
def __init__(
self,
@@ -401,6 +695,7 @@ normalize: bool = True,
scale: float | None = None,
):
+ """Initialize sinusoidal position embeddings for 2D image inputs."""
super().__init__()
assert num_pos_feats % 2 == 0, "Expecting even model width"
self.num_pos_feats = num_pos_feats // 2
@@ -415,6 +710,7 @@ self.cache = {}
def _encode_xy(self, x: torch.Tensor, y: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
+ """Encode 2D positions using sine/cosine functions for transformer positional embeddings."""
assert len(x) == len(y) and x.ndim == y.ndim == 1
x_embed = x * self.scale
y_embed = y * self.scale
@@ -430,6 +726,7 @@
@torch.no_grad()
def encode_boxes(self, x: torch.Tensor, y: torch.Tensor, w: torch.Tensor, h: torch.Tensor) -> torch.Tensor:
+ """Encode box coordinates and dimensions into positional embeddings for detection."""
pos_x, pos_y = self._encode_xy(x, y)
return torch.cat((pos_y, pos_x, h[:, None], w[:, None]), dim=1)
@@ -437,6 +734,7 @@
@torch.no_grad()
def encode_points(self, x: torch.Tensor, y: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
+ """Encode 2D points with sinusoidal embeddings and append labels."""
(bx, nx), (by, ny), (bl, nl) = x.shape, y.shape, labels.shape
assert bx == by and nx == ny and bx == bl and nx == nl
pos_x, pos_y = self._encode_xy(x.flatten(), y.flatten())
@@ -445,6 +743,7 @@
@torch.no_grad()
def forward(self, x: torch.Tensor) -> Tensor:
+ """Generate sinusoidal position embeddings for 2D inputs like images."""
cache_key = (x.shape[-2], x.shape[-1])
if cache_key in self.cache:
return self.cache[cache_key][None].repeat(x.shape[0], 1, 1, 1)
@@ -477,8 +776,29 @@
class PositionEmbeddingRandom(nn.Module):
+ """Positional encoding using random spatial frequencies.
+
+ This class generates positional embeddings for input coordinates using random spatial frequencies. It is
+ particularly useful for transformer-based models that require position information.
+
+ Attributes:
+ positional_encoding_gaussian_matrix (torch.Tensor): A buffer containing random values for encoding.
+
+ Methods:
+ _pe_encoding: Positionally encodes points that are normalized to [0,1].
+ forward: Generates positional encoding for a grid of the specified size.
+ forward_with_coords: Positionally encodes points that are not normalized to [0,1].
+
+ Examples:
+ >>> pe = PositionEmbeddingRandom(num_pos_feats=64)
+ >>> size = (32, 32)
+ >>> encoding = pe(size)
+ >>> print(encoding.shape)
+ torch.Size([128, 32, 32])
+ """
def __init__(self, num_pos_feats: int = 64, scale: float | None = None) -> None:
+ """Initialize random spatial frequency position embedding for transformers."""
super().__init__()
if scale is None or scale <= 0.0:
scale = 1.0
@@ -489,6 +809,7 @@ torch.backends.cudnn.deterministic = False
def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor:
+ """Encode normalized [0,1] coordinates using random spatial frequencies."""
# Assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape
coords = 2 * coords - 1
coords = coords @ self.positional_encoding_gaussian_matrix
@@ -497,6 +818,7 @@ return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1)
def forward(self, size: tuple[int, int]) -> torch.Tensor:
+ """Generate positional encoding for a grid using random spatial frequencies."""
h, w = size
grid = torch.ones(
(h, w),
@@ -512,6 +834,7 @@ return pe.permute(2, 0, 1) # C x H x W
def forward_with_coords(self, coords_input: torch.Tensor, image_size: tuple[int, int]) -> torch.Tensor:
+ """Positionally encode input coordinates, normalizing them to [0,1] based on the given image size."""
coords = coords_input.clone()
coords[:, :, 0] = coords[:, :, 0] / image_size[1]
coords[:, :, 1] = coords[:, :, 1] / image_size[0]
@@ -519,6 +842,30 @@
class Block(nn.Module):
+ """Transformer block with support for window attention and residual propagation.
+
+ This class implements a transformer block that can use either global or windowed self-attention, followed by a
+ feed-forward network. It supports relative positional embeddings and is designed for use in vision transformer
+ architectures.
+
+ Attributes:
+ norm1 (nn.Module): First normalization layer.
+ attn (REAttention): Self-attention layer with optional relative positional encoding.
+ norm2 (nn.Module): Second normalization layer.
+ mlp (MLPBlock): Multi-layer perceptron block.
+ window_size (int): Size of attention window. If 0, global attention is used.
+
+ Methods:
+ forward: Processes input through the transformer block.
+
+ Examples:
+ >>> import torch
+ >>> block = Block(dim=256, num_heads=8, window_size=7)
+ >>> x = torch.randn(1, 56, 56, 256)
+ >>> output = block(x)
+ >>> print(output.shape)
+ torch.Size([1, 56, 56, 256])
+ """
def __init__(
self,
@@ -533,6 +880,24 @@ window_size: int = 0,
input_size: tuple[int, int] | None = None,
) -> None:
+ """Initialize a transformer block with optional window attention and relative positional embeddings.
+
+ This constructor sets up a transformer block that can use either global or windowed self-attention, followed by
+ a feed-forward network. It supports relative positional embeddings and is designed for use in vision transformer
+ architectures.
+
+ Args:
+ dim (int): Number of input channels.
+ num_heads (int): Number of attention heads in the self-attention layer.
+ mlp_ratio (float): Ratio of mlp hidden dimension to embedding dimension.
+ qkv_bias (bool): If True, adds a learnable bias to query, key, value projections.
+ norm_layer (type[nn.Module]): Type of normalization layer to use.
+ act_layer (type[nn.Module]): Type of activation function to use in the MLP block.
+ use_rel_pos (bool): If True, uses relative positional embeddings in attention.
+ rel_pos_zero_init (bool): If True, initializes relative positional parameters to zero.
+ window_size (int): Size of attention window. If 0, uses global attention.
+ input_size (tuple[int, int] | None): Input resolution for calculating relative positional parameter size.
+ """
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = REAttention(
@@ -550,6 +915,7 @@ self.window_size = window_size
def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Process input through transformer block with optional windowed self-attention and residual connection."""
shortcut = x
x = self.norm1(x)
# Window partition
@@ -567,6 +933,30 @@
class REAttention(nn.Module):
+ """Relative Position Attention module for efficient self-attention in transformer architectures.
+
+ This class implements a multi-head attention mechanism with relative positional embeddings, designed for use in
+ vision transformer models.
+
+ Attributes:
+ num_heads (int): Number of attention heads.
+ scale (float): Scaling factor for attention computation.
+ qkv (nn.Linear): Linear projection for query, key, and value.
+ proj (nn.Linear): Output projection layer.
+ use_rel_pos (bool): Whether to use relative positional embeddings.
+ rel_pos_h (nn.Parameter): Relative positional embeddings for height dimension.
+ rel_pos_w (nn.Parameter): Relative positional embeddings for width dimension.
+
+ Methods:
+ forward: Applies multi-head attention with optional relative positional encoding to input tensor.
+
+ Examples:
+ >>> attention = REAttention(dim=256, num_heads=8, input_size=(32, 32))
+ >>> x = torch.randn(1, 32, 32, 256)
+ >>> output = attention(x)
+ >>> print(output.shape)
+ torch.Size([1, 32, 32, 256])
+ """
def __init__(
self,
@@ -577,6 +967,20 @@ rel_pos_zero_init: bool = True,
input_size: tuple[int, int] | None = None,
) -> None:
+ """Initialize a Relative Position Attention module for transformer-based architectures.
+
+ This module implements multi-head attention with optional relative positional encodings, designed specifically
+ for vision tasks in transformer models.
+
+ Args:
+ dim (int): Number of input channels.
+ num_heads (int): Number of attention heads.
+ qkv_bias (bool): If True, adds a learnable bias to query, key, value projections.
+ use_rel_pos (bool): If True, uses relative positional encodings.
+ rel_pos_zero_init (bool): If True, initializes relative positional parameters to zero.
+ input_size (tuple[int, int] | None): Input resolution for calculating relative positional parameter size.
+ Required if use_rel_pos is True.
+ """
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
@@ -593,6 +997,7 @@ self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))
def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Apply multi-head attention with optional relative positional encoding to input tensor."""
B, H, W, _ = x.shape
# qkv with shape (3, B, nHead, H * W, C)
qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
@@ -610,6 +1015,25 @@
class PatchEmbed(nn.Module):
+ """Image to Patch Embedding module for vision transformer architectures.
+
+ This module converts an input image into a sequence of patch embeddings using a convolutional layer. It is commonly
+ used as the first layer in vision transformer architectures to transform image data into a suitable format for
+ subsequent transformer blocks.
+
+ Attributes:
+ proj (nn.Conv2d): Convolutional layer for projecting image patches to embeddings.
+
+ Methods:
+ forward: Applies patch embedding to the input tensor.
+
+ Examples:
+ >>> patch_embed = PatchEmbed(kernel_size=(16, 16), stride=(16, 16), in_chans=3, embed_dim=768)
+ >>> x = torch.randn(1, 3, 224, 224)
+ >>> output = patch_embed(x)
+ >>> print(output.shape)
+        torch.Size([1, 14, 14, 768])
+ """
def __init__(
self,
@@ -620,9 +1044,23 @@ embed_dim: int = 768,
bias: bool = True,
) -> None:
+ """Initialize the PatchEmbed module for converting image patches to embeddings.
+
+ This module is typically used as the first layer in vision transformer architectures to transform image data
+ into a suitable format for subsequent transformer blocks.
+
+ Args:
+ kernel_size (tuple[int, int]): Size of the convolutional kernel for patch extraction.
+ stride (tuple[int, int]): Stride of the convolutional operation.
+ padding (tuple[int, int]): Padding applied to the input before convolution.
+ in_chans (int): Number of input image channels.
+ embed_dim (int): Dimensionality of the output patch embeddings.
+ bias (bool): Whether to include a bias term in the convolutional layer.
+ """
super().__init__()
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias)
def forward(self, x: torch.Tensor) -> torch.Tensor:
-        return self.proj(x).permute(0, 2, 3, 1)  # B C H W -> B H W C
+        """Compute patch embedding by applying convolution and transposing resulting tensor."""
+ return self.proj(x).permute(0, 2, 3, 1) # B C H W -> B H W C
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/sam/modules/blocks.py |
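As a side note on the row above, here is a self-contained sketch of the textbook 1D sinusoidal encoding that PositionEmbeddingSine extends to two spatial axes; this is illustrative only and not the Ultralytics implementation:

import torch

def sine_pe_1d(num_positions: int, channels: int, temperature: float = 10000.0) -> torch.Tensor:
    # Half the channels carry sin, half carry cos, so channels must be even
    pos = torch.arange(num_positions, dtype=torch.float32)[:, None]  # (N, 1)
    dim = torch.arange(channels // 2, dtype=torch.float32)[None, :]  # (1, C/2)
    angles = pos / temperature ** (2 * dim / channels)               # (N, C/2)
    return torch.cat([angles.sin(), angles.cos()], dim=-1)           # (N, C)

pe = sine_pe_1d(64, 128)
assert pe.shape == (64, 128)

The 2D variant applies the same construction independently to the y and x grids and concatenates the halves, which is why its output channel count equals the num_pos_feats constructor argument.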
Add minimal docstrings for each function | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from ultralytics.engine.model import Model
from ultralytics.nn.tasks import RTDETRDetectionModel
from ultralytics.utils.torch_utils import TORCH_1_11
from .predict import RTDETRPredictor
from .train import RTDETRTrainer
from .val import RTDETRValidator
class RTDETR(Model):
def __init__(self, model: str = "rtdetr-l.pt") -> None:
assert TORCH_1_11, "RTDETR requires torch>=1.11"
super().__init__(model=model, task="detect")
@property
def task_map(self) -> dict:
return {
"detect": {
"predictor": RTDETRPredictor,
"validator": RTDETRValidator,
"trainer": RTDETRTrainer,
"model": RTDETRDetectionModel,
}
} | --- +++ @@ -1,4 +1,13 @@ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""
+Interface for Baidu's RT-DETR, a Vision Transformer-based real-time object detector.
+
+RT-DETR offers real-time performance and high accuracy, excelling in accelerated backends like CUDA with TensorRT.
+It features an efficient hybrid encoder and IoU-aware query selection for enhanced detection accuracy.
+
+References:
+ https://arxiv.org/pdf/2304.08069.pdf
+"""
from ultralytics.engine.model import Model
from ultralytics.nn.tasks import RTDETRDetectionModel
@@ -10,13 +19,40 @@
class RTDETR(Model):
+ """Interface for Baidu's RT-DETR model, a Vision Transformer-based real-time object detector.
+
+ This model provides real-time performance with high accuracy. It supports efficient hybrid encoding, IoU-aware query
+ selection, and adaptable inference speed.
+
+ Attributes:
+ model (str): Path to the pre-trained model.
+
+ Methods:
+ task_map: Return a task map for RT-DETR, associating tasks with corresponding Ultralytics classes.
+
+ Examples:
+ Initialize RT-DETR with a pre-trained model
+ >>> from ultralytics import RTDETR
+ >>> model = RTDETR("rtdetr-l.pt")
+ >>> results = model("image.jpg")
+ """
def __init__(self, model: str = "rtdetr-l.pt") -> None:
+ """Initialize the RT-DETR model with the given pre-trained model file.
+
+ Args:
+ model (str): Path to the pre-trained model. Supports .pt, .yaml, and .yml formats.
+ """
assert TORCH_1_11, "RTDETR requires torch>=1.11"
super().__init__(model=model, task="detect")
@property
def task_map(self) -> dict:
+ """Return a task map for RT-DETR, associating tasks with corresponding Ultralytics classes.
+
+ Returns:
+ (dict): A dictionary mapping task names to Ultralytics task classes for the RT-DETR model.
+ """
return {
"detect": {
"predictor": RTDETRPredictor,
@@ -24,4 +60,4 @@ "trainer": RTDETRTrainer,
"model": RTDETRDetectionModel,
}
-    }
+    }
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/rtdetr/model.py |
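A minimal usage sketch for the RT-DETR row above, hedged: it assumes the ultralytics package is installed, torch>=1.11 is available, and the rtdetr-l.pt asset and sample image URL are reachable:

from ultralytics import RTDETR

model = RTDETR("rtdetr-l.pt")  # downloads pretrained weights if not cached
results = model("https://ultralytics.com/images/bus.jpg")  # assumed sample image URL
print(results[0].boxes)  # detection boxes for the first image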
Create docstrings for each class method | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from copy import deepcopy
from functools import lru_cache
from pathlib import Path
from typing import Any
import numpy as np
import torch
from ultralytics.data.augment import LetterBox
from ultralytics.utils import LOGGER, DataExportMixin, SimpleClass, ops
from ultralytics.utils.plotting import Annotator, colors, save_one_box
class BaseTensor(SimpleClass):
def __init__(self, data: torch.Tensor | np.ndarray, orig_shape: tuple[int, int]) -> None:
assert isinstance(data, (torch.Tensor, np.ndarray)), "data must be torch.Tensor or np.ndarray"
self.data = data
self.orig_shape = orig_shape
@property
def shape(self) -> tuple[int, ...]:
return self.data.shape
def cpu(self):
return self if isinstance(self.data, np.ndarray) else self.__class__(self.data.cpu(), self.orig_shape)
def numpy(self):
return self if isinstance(self.data, np.ndarray) else self.__class__(self.data.numpy(), self.orig_shape)
def cuda(self):
return self.__class__(torch.as_tensor(self.data).cuda(), self.orig_shape)
def to(self, *args, **kwargs):
return self.__class__(torch.as_tensor(self.data).to(*args, **kwargs), self.orig_shape)
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, idx):
return self.__class__(self.data[idx], self.orig_shape)
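# --- Illustrative aside, not part of the Ultralytics source ---
# BaseTensor wraps either backend; numpy-backed wrappers short-circuit device
# moves while torch-backed ones return a new wrapper:
def _demo_base_tensor() -> None:
    bt = BaseTensor(torch.zeros(3, 4), orig_shape=(480, 640))
    assert bt.shape == (3, 4) and len(bt) == 3
    nb = bt.numpy()
    assert isinstance(nb.data, np.ndarray) and nb.cpu() is nb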
class Results(SimpleClass, DataExportMixin):
def __init__(
self,
orig_img: np.ndarray,
path: str,
names: dict[int, str],
boxes: torch.Tensor | None = None,
masks: torch.Tensor | None = None,
probs: torch.Tensor | None = None,
keypoints: torch.Tensor | None = None,
obb: torch.Tensor | None = None,
speed: dict[str, float] | None = None,
) -> None:
self.orig_img = orig_img
self.orig_shape = orig_img.shape[:2]
self.boxes = Boxes(boxes, self.orig_shape) if boxes is not None else None # native size boxes
self.masks = Masks(masks, self.orig_shape) if masks is not None else None # native size or imgsz masks
self.probs = Probs(probs) if probs is not None else None
self.keypoints = Keypoints(keypoints, self.orig_shape) if keypoints is not None else None
self.obb = OBB(obb, self.orig_shape) if obb is not None else None
self.speed = speed if speed is not None else {"preprocess": None, "inference": None, "postprocess": None}
self.names = names
self.path = path
self.save_dir = None
self._keys = "boxes", "masks", "probs", "keypoints", "obb"
def __getitem__(self, idx):
return self._apply("__getitem__", idx)
def __len__(self) -> int:
for k in self._keys:
v = getattr(self, k)
if v is not None:
return len(v)
def update(
self,
boxes: torch.Tensor | None = None,
masks: torch.Tensor | None = None,
probs: torch.Tensor | None = None,
obb: torch.Tensor | None = None,
keypoints: torch.Tensor | None = None,
):
if boxes is not None:
self.boxes = Boxes(ops.clip_boxes(boxes, self.orig_shape), self.orig_shape)
if masks is not None:
self.masks = Masks(masks, self.orig_shape)
if probs is not None:
self.probs = probs
if obb is not None:
self.obb = OBB(obb, self.orig_shape)
if keypoints is not None:
self.keypoints = Keypoints(keypoints, self.orig_shape)
def _apply(self, fn: str, *args, **kwargs):
r = self.new()
for k in self._keys:
v = getattr(self, k)
if v is not None:
setattr(r, k, getattr(v, fn)(*args, **kwargs))
return r
def cpu(self):
return self._apply("cpu")
def numpy(self):
return self._apply("numpy")
def cuda(self):
return self._apply("cuda")
def to(self, *args, **kwargs):
return self._apply("to", *args, **kwargs)
def new(self):
return Results(orig_img=self.orig_img, path=self.path, names=self.names, speed=self.speed)
def plot(
self,
conf: bool = True,
line_width: float | None = None,
font_size: float | None = None,
font: str = "Arial.ttf",
pil: bool = False,
img: np.ndarray | None = None,
im_gpu: torch.Tensor | None = None,
kpt_radius: int = 5,
kpt_line: bool = True,
labels: bool = True,
boxes: bool = True,
masks: bool = True,
probs: bool = True,
show: bool = False,
save: bool = False,
filename: str | None = None,
color_mode: str = "class",
txt_color: tuple[int, int, int] = (255, 255, 255),
) -> np.ndarray:
assert color_mode in {"instance", "class"}, f"Expected color_mode='instance' or 'class', not {color_mode}."
if img is None and isinstance(self.orig_img, torch.Tensor):
img = (self.orig_img[0].detach().permute(1, 2, 0).contiguous() * 255).byte().cpu().numpy()
names = self.names
is_obb = self.obb is not None
pred_boxes, show_boxes = self.obb if is_obb else self.boxes, boxes
pred_masks, show_masks = self.masks, masks
pred_probs, show_probs = self.probs, probs
annotator = Annotator(
deepcopy(self.orig_img if img is None else img),
line_width,
font_size,
font,
pil or (pred_probs is not None and show_probs), # Classify tasks default to pil=True
example=names,
)
# Plot Segment results
if pred_masks and show_masks:
if im_gpu is None:
img = LetterBox(pred_masks.shape[1:])(image=annotator.result())
im_gpu = (
torch.as_tensor(img, dtype=torch.float16, device=pred_masks.data.device)
.permute(2, 0, 1)
.flip(0)
.contiguous()
/ 255
)
idx = (
pred_boxes.id
if pred_boxes.is_track and color_mode == "instance"
else pred_boxes.cls
if pred_boxes and color_mode == "class"
else reversed(range(len(pred_masks)))
)
annotator.masks(pred_masks.data, colors=[colors(x, True) for x in idx], im_gpu=im_gpu)
# Plot Detect results
if pred_boxes is not None and show_boxes:
for i, d in enumerate(reversed(pred_boxes)):
c, d_conf, id = int(d.cls), float(d.conf) if conf else None, int(d.id.item()) if d.is_track else None
name = ("" if id is None else f"id:{id} ") + names[c]
label = (f"{name} {d_conf:.2f}" if conf else name) if labels else None
box = d.xyxyxyxy.squeeze() if is_obb else d.xyxy.squeeze()
annotator.box_label(
box,
label,
color=colors(
c
if color_mode == "class"
else id
if id is not None
else i
if color_mode == "instance"
else None,
True,
),
)
# Plot Classify results
if pred_probs is not None and show_probs:
text = "\n".join(f"{names[j] if names else j} {pred_probs.data[j]:.2f}" for j in pred_probs.top5)
x = round(self.orig_shape[0] * 0.03)
annotator.text([x, x], text, txt_color=txt_color, box_color=(64, 64, 64, 128)) # RGBA box
# Plot Pose results
if self.keypoints is not None:
for i, k in enumerate(reversed(self.keypoints.data)):
annotator.kpts(
k,
self.orig_shape,
radius=kpt_radius,
kpt_line=kpt_line,
kpt_color=colors(i, True) if color_mode == "instance" else None,
)
# Show results
if show:
annotator.show(self.path)
# Save results
if save:
annotator.save(filename or f"results_{Path(self.path).name}")
return annotator.result(pil)
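# Typical call (illustrative): im = result.plot(line_width=2) returns a BGR np.ndarray,
# or a PIL Image (RGB) when pil=True.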
def show(self, *args, **kwargs):
self.plot(show=True, *args, **kwargs)
def save(self, filename: str | None = None, *args, **kwargs) -> str:
if not filename:
filename = f"results_{Path(self.path).name}"
Path(filename).absolute().parent.mkdir(parents=True, exist_ok=True)
self.plot(save=True, filename=filename, *args, **kwargs)
return filename
def verbose(self) -> str:
boxes = self.obb if self.obb is not None else self.boxes
if len(self) == 0:
return "" if self.probs is not None else "(no detections), "
if self.probs is not None:
return f"{', '.join(f'{self.names[j]} {self.probs.data[j]:.2f}' for j in self.probs.top5)}, "
if boxes:
counts = boxes.cls.int().bincount()
return "".join(f"{n} {self.names[i]}{'s' * (n > 1)}, " for i, n in enumerate(counts) if n > 0)
def save_txt(self, txt_file: str | Path, save_conf: bool = False) -> str:
is_obb = self.obb is not None
boxes = self.obb if is_obb else self.boxes
masks = self.masks
probs = self.probs
kpts = self.keypoints
texts = []
if probs is not None:
# Classify
[texts.append(f"{probs.data[j]:.2f} {self.names[j]}") for j in probs.top5]
elif boxes:
# Detect/segment/pose
for j, d in enumerate(boxes):
c, conf, id = int(d.cls), float(d.conf), int(d.id.item()) if d.is_track else None
line = (c, *(d.xyxyxyxyn.view(-1) if is_obb else d.xywhn.view(-1)))
if masks:
seg = masks[j].xyn[0].copy().reshape(-1) # reversed mask.xyn, (n,2) to (n*2)
line = (c, *seg)
if kpts is not None:
kpt = torch.cat((kpts[j].xyn, kpts[j].conf[..., None]), 2) if kpts[j].has_visible else kpts[j].xyn
line += (*kpt.reshape(-1).tolist(),)
line += (conf,) * save_conf + (() if id is None else (id,))
texts.append(("%g " * len(line)).rstrip() % line)
if texts:
Path(txt_file).parent.mkdir(parents=True, exist_ok=True) # make directory
with open(txt_file, "a", encoding="utf-8") as f:
f.writelines(text + "\n" for text in texts)
return str(txt_file)
def save_crop(self, save_dir: str | Path, file_name: str | Path = Path("im.jpg")):
if self.probs is not None:
LOGGER.warning("Classify task does not support `save_crop`.")
return
if self.obb is not None:
LOGGER.warning("OBB task does not support `save_crop`.")
return
for d in self.boxes:
save_one_box(
d.xyxy,
self.orig_img.copy(),
file=Path(save_dir) / self.names[int(d.cls)] / Path(file_name).with_suffix(".jpg"),
BGR=True,
)
def summary(self, normalize: bool = False, decimals: int = 5) -> list[dict[str, Any]]:
# Create list of detection dictionaries
results = []
if self.probs is not None:
# Return top 5 classification results
for class_id, conf in zip(self.probs.top5, self.probs.top5conf.tolist()):
class_id = int(class_id)
results.append(
{
"name": self.names[class_id],
"class": class_id,
"confidence": round(conf, decimals),
}
)
return results
is_obb = self.obb is not None
data = self.obb if is_obb else self.boxes
h, w = self.orig_shape if normalize else (1, 1)
for i, row in enumerate(data): # xyxy, track_id if tracking, conf, class_id
class_id, conf = int(row.cls), round(row.conf.item(), decimals)
box = (row.xyxyxyxy if is_obb else row.xyxy).squeeze().reshape(-1, 2).tolist()
xy = {}
for j, b in enumerate(box):
xy[f"x{j + 1}"] = round(b[0] / w, decimals)
xy[f"y{j + 1}"] = round(b[1] / h, decimals)
result = {"name": self.names[class_id], "class": class_id, "confidence": conf, "box": xy}
if data.is_track:
result["track_id"] = int(row.id.item()) # track ID
if self.masks:
result["segments"] = {
"x": (self.masks.xy[i][:, 0] / w).round(decimals).tolist(),
"y": (self.masks.xy[i][:, 1] / h).round(decimals).tolist(),
}
if self.keypoints is not None:
kpt = self.keypoints[i]
if kpt.has_visible:
x, y, visible = kpt.data[0].cpu().unbind(dim=1)
else:
x, y = kpt.data[0].cpu().unbind(dim=1)
result["keypoints"] = {
"x": (x / w).numpy().round(decimals).tolist(),
"y": (y / h).numpy().round(decimals).tolist(),
}
if kpt.has_visible:
result["keypoints"]["visible"] = visible.numpy().round(decimals).tolist()
results.append(result)
return results
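# End-to-end sketch (assumes a local model file and image; names are illustrative):
#   results = YOLO("yolo26n.pt")("image.jpg")
#   r = results[0].cpu()              # move all result tensors off the GPU
#   r.save("annotated.jpg")           # plot detections and save the annotated image
#   rows = r.summary(normalize=True)  # per-detection dicts with [0, 1] coordinates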
class Boxes(BaseTensor):
def __init__(self, boxes: torch.Tensor | np.ndarray, orig_shape: tuple[int, int]) -> None:
if boxes.ndim == 1:
boxes = boxes[None, :]
n = boxes.shape[-1]
assert n in {6, 7}, f"expected 6 or 7 values but got {n}" # xyxy, track_id, conf, cls
super().__init__(boxes, orig_shape)
self.is_track = n == 7
self.orig_shape = orig_shape
@property
def xyxy(self) -> torch.Tensor | np.ndarray:
return self.data[:, :4]
@property
def conf(self) -> torch.Tensor | np.ndarray:
return self.data[:, -2]
@property
def cls(self) -> torch.Tensor | np.ndarray:
return self.data[:, -1]
@property
def id(self) -> torch.Tensor | np.ndarray | None:
return self.data[:, -3] if self.is_track else None
@property
@lru_cache(maxsize=2)
def xywh(self) -> torch.Tensor | np.ndarray:
return ops.xyxy2xywh(self.xyxy)
@property
@lru_cache(maxsize=2)
def xyxyn(self) -> torch.Tensor | np.ndarray:
xyxy = self.xyxy.clone() if isinstance(self.xyxy, torch.Tensor) else np.copy(self.xyxy)
xyxy[..., [0, 2]] /= self.orig_shape[1]
xyxy[..., [1, 3]] /= self.orig_shape[0]
return xyxy
@property
@lru_cache(maxsize=2)
def xywhn(self) -> torch.Tensor | np.ndarray:
xywh = ops.xyxy2xywh(self.xyxy)
xywh[..., [0, 2]] /= self.orig_shape[1]
xywh[..., [1, 3]] /= self.orig_shape[0]
return xywh
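# Sketch: a Boxes row is [x1, y1, x2, y2, (optional track_id,) conf, cls]; normalized views
# divide x by image width and y by image height. Values below are hypothetical:
#   b = Boxes(torch.tensor([[100.0, 50.0, 150.0, 100.0, 0.9, 0.0]]), orig_shape=(480, 640))
#   b.xywh   # tensor([[125., 75., 50., 50.]])
#   b.xywhn  # tensor([[0.1953, 0.1562, 0.0781, 0.1042]])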
class Masks(BaseTensor):
def __init__(self, masks: torch.Tensor | np.ndarray, orig_shape: tuple[int, int]) -> None:
if masks.ndim == 2:
masks = masks[None, :]
super().__init__(masks, orig_shape)
@property
@lru_cache(maxsize=1)
def xyn(self) -> list[np.ndarray]:
return [
ops.scale_coords(self.data.shape[1:], x, self.orig_shape, normalize=True)
for x in ops.masks2segments(self.data)
]
@property
@lru_cache(maxsize=1)
def xy(self) -> list[np.ndarray]:
return [
ops.scale_coords(self.data.shape[1:], x, self.orig_shape, normalize=False)
for x in ops.masks2segments(self.data)
]
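# Shape-only sketch (dummy masks): .xy and .xyn trace each mask into one polygon per
# instance, in pixel and normalized coordinates respectively:
#   m = Masks(torch.zeros(2, 160, 160), orig_shape=(720, 1280))
#   len(m.xy) == len(m.xyn) == 2  # one segment array per mask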
class Keypoints(BaseTensor):
def __init__(self, keypoints: torch.Tensor | np.ndarray, orig_shape: tuple[int, int]) -> None:
if keypoints.ndim == 2:
keypoints = keypoints[None, :]
super().__init__(keypoints, orig_shape)
self.has_visible = self.data.shape[-1] == 3
@property
@lru_cache(maxsize=1)
def xy(self) -> torch.Tensor | np.ndarray:
return self.data[..., :2]
@property
@lru_cache(maxsize=1)
def xyn(self) -> torch.Tensor | np.ndarray:
xy = self.xy.clone() if isinstance(self.xy, torch.Tensor) else np.copy(self.xy)
xy[..., 0] /= self.orig_shape[1]
xy[..., 1] /= self.orig_shape[0]
return xy
@property
@lru_cache(maxsize=1)
def conf(self) -> torch.Tensor | np.ndarray | None:
return self.data[..., 2] if self.has_visible else None
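# Sketch (random data): keypoints are (num_objects, num_keypoints, 2 or 3); a third channel,
# when present, holds per-keypoint confidence:
#   k = Keypoints(torch.rand(1, 17, 3), orig_shape=(480, 640))
#   k.xy.shape    # torch.Size([1, 17, 2])
#   k.conf.shape  # torch.Size([1, 17]); conf is None when only x, y are stored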
class Probs(BaseTensor):
def __init__(self, probs: torch.Tensor | np.ndarray, orig_shape: tuple[int, int] | None = None) -> None:
super().__init__(probs, orig_shape)
@property
@lru_cache(maxsize=1)
def top1(self) -> int:
return int(self.data.argmax())
@property
@lru_cache(maxsize=1)
def top5(self) -> list[int]:
return (-self.data).argsort(0)[:5].tolist() # this way works with both torch and numpy.
@property
@lru_cache(maxsize=1)
def top1conf(self) -> torch.Tensor | np.ndarray:
return self.data[self.top1]
@property
@lru_cache(maxsize=1)
def top5conf(self) -> torch.Tensor | np.ndarray:
return self.data[self.top5]
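# Sketch (made-up probabilities): Probs exposes argmax/argsort views over a 1D class vector:
#   p = Probs(torch.tensor([0.1, 0.3, 0.6]))
#   p.top1, p.top5          # 2, [2, 1, 0]
#   p.top1conf, p.top5conf  # tensor(0.6000), tensor([0.6000, 0.3000, 0.1000])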
class OBB(BaseTensor):
def __init__(self, boxes: torch.Tensor | np.ndarray, orig_shape: tuple[int, int]) -> None:
if boxes.ndim == 1:
boxes = boxes[None, :]
n = boxes.shape[-1]
assert n in {7, 8}, f"expected 7 or 8 values but got {n}" # xywh, rotation, track_id, conf, cls
super().__init__(boxes, orig_shape)
self.is_track = n == 8
self.orig_shape = orig_shape
@property
def xywhr(self) -> torch.Tensor | np.ndarray:
return self.data[:, :5]
@property
def conf(self) -> torch.Tensor | np.ndarray:
return self.data[:, -2]
@property
def cls(self) -> torch.Tensor | np.ndarray:
return self.data[:, -1]
@property
def id(self) -> torch.Tensor | np.ndarray | None:
return self.data[:, -3] if self.is_track else None
@property
@lru_cache(maxsize=2)
def xyxyxyxy(self) -> torch.Tensor | np.ndarray:
return ops.xywhr2xyxyxyxy(self.xywhr)
@property
@lru_cache(maxsize=2)
def xyxyxyxyn(self) -> torch.Tensor | np.ndarray:
xyxyxyxyn = self.xyxyxyxy.clone() if isinstance(self.xyxyxyxy, torch.Tensor) else np.copy(self.xyxyxyxy)
xyxyxyxyn[..., 0] /= self.orig_shape[1]
xyxyxyxyn[..., 1] /= self.orig_shape[0]
return xyxyxyxyn
@property
@lru_cache(maxsize=2)
def xyxy(self) -> torch.Tensor | np.ndarray:
x = self.xyxyxyxy[..., 0]
y = self.xyxyxyxy[..., 1]
return (
torch.stack([x.amin(1), y.amin(1), x.amax(1), y.amax(1)], -1)
if isinstance(x, torch.Tensor)
else np.stack([x.min(1), y.min(1), x.max(1), y.max(1)], -1)
) | --- +++ @@ -1,4 +1,9 @@ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""
+Ultralytics Results, Boxes, Masks, Keypoints, Probs, and OBB classes for handling inference results.
+
+Usage: See https://docs.ultralytics.com/modes/predict/
+"""
from __future__ import annotations
@@ -16,36 +21,204 @@
class BaseTensor(SimpleClass):
+ """Base tensor class with additional methods for easy manipulation and device handling.
+
+ This class provides a foundation for tensor-like objects with device management capabilities, supporting both
+ PyTorch tensors and NumPy arrays. It includes methods for moving data between devices and converting between tensor
+ types.
+
+ Attributes:
+ data (torch.Tensor | np.ndarray): Prediction data such as bounding boxes, masks, or keypoints.
+ orig_shape (tuple[int, int]): Original shape of the image, typically in the format (height, width).
+
+ Methods:
+ cpu: Return a copy of the tensor stored in CPU memory.
+ numpy: Return a copy of the tensor as a numpy array.
+ cuda: Move the tensor to GPU memory, returning a new instance if necessary.
+ to: Return a copy of the tensor with the specified device and dtype.
+
+ Examples:
+ >>> import torch
+ >>> data = torch.tensor([[1, 2, 3], [4, 5, 6]])
+ >>> orig_shape = (720, 1280)
+ >>> base_tensor = BaseTensor(data, orig_shape)
+ >>> cpu_tensor = base_tensor.cpu()
+ >>> numpy_array = base_tensor.numpy()
+ >>> gpu_tensor = base_tensor.cuda()
+ """
def __init__(self, data: torch.Tensor | np.ndarray, orig_shape: tuple[int, int]) -> None:
+ """Initialize BaseTensor with prediction data and the original shape of the image.
+
+ Args:
+ data (torch.Tensor | np.ndarray): Prediction data such as bounding boxes, masks, or keypoints.
+ orig_shape (tuple[int, int]): Original shape of the image in (height, width) format.
+ """
assert isinstance(data, (torch.Tensor, np.ndarray)), "data must be torch.Tensor or np.ndarray"
self.data = data
self.orig_shape = orig_shape
@property
def shape(self) -> tuple[int, ...]:
+ """Return the shape of the underlying data tensor.
+
+ Returns:
+ (tuple[int, ...]): The shape of the data tensor.
+
+ Examples:
+ >>> data = torch.rand(100, 4)
+ >>> base_tensor = BaseTensor(data, orig_shape=(720, 1280))
+ >>> print(base_tensor.shape)
+ torch.Size([100, 4])
+ """
return self.data.shape
def cpu(self):
+ """Return a copy of the tensor stored in CPU memory.
+
+ Returns:
+ (BaseTensor): A new BaseTensor object with the data tensor moved to CPU memory.
+
+ Examples:
+ >>> data = torch.tensor([[1, 2, 3], [4, 5, 6]]).cuda()
+ >>> base_tensor = BaseTensor(data, orig_shape=(720, 1280))
+ >>> cpu_tensor = base_tensor.cpu()
+ >>> isinstance(cpu_tensor, BaseTensor)
+ True
+ >>> cpu_tensor.data.device
+ device(type='cpu')
+ """
return self if isinstance(self.data, np.ndarray) else self.__class__(self.data.cpu(), self.orig_shape)
def numpy(self):
+ """Return a copy of this object with its data converted to a NumPy array.
+
+ Returns:
+ (BaseTensor): A new instance with `data` as a NumPy array.
+
+ Examples:
+ >>> data = torch.tensor([[1, 2, 3], [4, 5, 6]])
+ >>> orig_shape = (720, 1280)
+ >>> base_tensor = BaseTensor(data, orig_shape)
+ >>> numpy_tensor = base_tensor.numpy()
+ >>> print(type(numpy_tensor.data))
+ <class 'numpy.ndarray'>
+ """
return self if isinstance(self.data, np.ndarray) else self.__class__(self.data.numpy(), self.orig_shape)
def cuda(self):
+ """Move the tensor to GPU memory.
+
+ Returns:
+ (BaseTensor): A new BaseTensor instance with the data moved to GPU memory.
+
+ Examples:
+ >>> import torch
+ >>> from ultralytics.engine.results import BaseTensor
+ >>> data = torch.tensor([[1, 2, 3], [4, 5, 6]])
+ >>> base_tensor = BaseTensor(data, orig_shape=(720, 1280))
+ >>> gpu_tensor = base_tensor.cuda()
+ >>> print(gpu_tensor.data.device)
+ cuda:0
+ """
return self.__class__(torch.as_tensor(self.data).cuda(), self.orig_shape)
def to(self, *args, **kwargs):
+ """Return a copy of the tensor with the specified device and dtype.
+
+ Args:
+ *args (Any): Variable length argument list to be passed to torch.Tensor.to().
+ **kwargs (Any): Arbitrary keyword arguments to be passed to torch.Tensor.to().
+
+ Returns:
+ (BaseTensor): A new BaseTensor instance with the data moved to the specified device and/or dtype.
+
+ Examples:
+ >>> base_tensor = BaseTensor(torch.randn(3, 4), orig_shape=(480, 640))
+ >>> cuda_tensor = base_tensor.to("cuda")
+ >>> float16_tensor = base_tensor.to(dtype=torch.float16)
+ """
return self.__class__(torch.as_tensor(self.data).to(*args, **kwargs), self.orig_shape)
def __len__(self) -> int:
+ """Return the length of the underlying data tensor.
+
+ Returns:
+ (int): The number of elements in the first dimension of the data tensor.
+
+ Examples:
+ >>> data = torch.tensor([[1, 2, 3], [4, 5, 6]])
+ >>> base_tensor = BaseTensor(data, orig_shape=(720, 1280))
+ >>> len(base_tensor)
+ 2
+ """
return len(self.data)
def __getitem__(self, idx):
+ """Return a new BaseTensor instance containing the specified indexed elements of the data tensor.
+
+ Args:
+ idx (int | list[int] | torch.Tensor): Index or indices to select from the data tensor.
+
+ Returns:
+ (BaseTensor): A new BaseTensor instance containing the indexed data.
+
+ Examples:
+ >>> data = torch.tensor([[1, 2, 3], [4, 5, 6]])
+ >>> base_tensor = BaseTensor(data, orig_shape=(720, 1280))
+ >>> result = base_tensor[0] # Select the first row
+ >>> print(result.data)
+ tensor([1, 2, 3])
+ """
return self.__class__(self.data[idx], self.orig_shape)
class Results(SimpleClass, DataExportMixin):
+ """A class for storing and manipulating inference results.
+
+ This class provides comprehensive functionality for handling inference results from various Ultralytics models,
+ including detection, segmentation, classification, and pose estimation. It supports visualization, data export, and
+ various coordinate transformations.
+
+ Attributes:
+ orig_img (np.ndarray): The original image as a numpy array.
+ orig_shape (tuple[int, int]): Original image shape in (height, width) format.
+ boxes (Boxes | None): Detected bounding boxes.
+ masks (Masks | None): Segmentation masks.
+ probs (Probs | None): Classification probabilities.
+ keypoints (Keypoints | None): Detected keypoints.
+ obb (OBB | None): Oriented bounding boxes.
+ speed (dict): Dictionary containing inference speed information.
+ names (dict): Dictionary mapping class indices to class names.
+ path (str): Path to the input image file.
+ save_dir (str | None): Directory to save results.
+
+ Methods:
+ update: Update the Results object with new detection data.
+ cpu: Return a copy of the Results object with all tensors moved to CPU memory.
+ numpy: Convert all tensors in the Results object to numpy arrays.
+ cuda: Move all tensors in the Results object to GPU memory.
+ to: Move all tensors to the specified device and dtype.
+ new: Create a new Results object with the same image, path, names, and speed attributes.
+ plot: Plot detection results on an input BGR image.
+ show: Display the image with annotated inference results.
+ save: Save annotated inference results image to file.
+ verbose: Return a log string for each task in the results.
+ save_txt: Save detection results to a text file.
+ save_crop: Save cropped detection images to specified directory.
+ summary: Convert inference results to a summarized dictionary.
+ to_df: Convert detection results to a Polars DataFrame.
+ to_json: Convert detection results to JSON format.
+ to_csv: Convert detection results to a CSV format.
+
+ Examples:
+ >>> results = model("path/to/image.jpg")
+ >>> result = results[0] # Get the first result
+ >>> boxes = result.boxes # Get the boxes for the first result
+ >>> masks = result.masks # Get the masks for the first result
+ >>> for result in results:
+ ... result.plot() # Plot detection results
+ """
def __init__(
self,
@@ -59,6 +232,26 @@ obb: torch.Tensor | None = None,
speed: dict[str, float] | None = None,
) -> None:
+ """Initialize the Results class for storing and manipulating inference results.
+
+ Args:
+ orig_img (np.ndarray): The original image as a numpy array.
+ path (str): The path to the image file.
+ names (dict): A dictionary of class names.
+ boxes (torch.Tensor | None): A 2D tensor of bounding box coordinates for each detection.
+ masks (torch.Tensor | None): A 3D tensor of detection masks, where each mask is a binary image.
+ probs (torch.Tensor | None): A 1D tensor of probabilities of each class for classification task.
+ keypoints (torch.Tensor | None): A 2D tensor of keypoint coordinates for each detection.
+ obb (torch.Tensor | None): A 2D tensor of oriented bounding box coordinates for each detection.
+ speed (dict | None): A dictionary containing preprocess, inference, and postprocess speeds (ms/image).
+
+ Notes:
+ For the default pose model, keypoint indices for human body pose estimation are:
+ 0: Nose, 1: Left Eye, 2: Right Eye, 3: Left Ear, 4: Right Ear
+ 5: Left Shoulder, 6: Right Shoulder, 7: Left Elbow, 8: Right Elbow
+ 9: Left Wrist, 10: Right Wrist, 11: Left Hip, 12: Right Hip
+ 13: Left Knee, 14: Right Knee, 15: Left Ankle, 16: Right Ankle
+ """
self.orig_img = orig_img
self.orig_shape = orig_img.shape[:2]
self.boxes = Boxes(boxes, self.orig_shape) if boxes is not None else None # native size boxes
@@ -73,9 +266,33 @@ self._keys = "boxes", "masks", "probs", "keypoints", "obb"
def __getitem__(self, idx):
+ """Return a Results object for a specific index of inference results.
+
+ Args:
+ idx (int | slice): Index or slice to retrieve from the Results object.
+
+ Returns:
+ (Results): A new Results object containing the specified subset of inference results.
+
+ Examples:
+ >>> results = model("path/to/image.jpg") # Perform inference
+ >>> single_result = results[0] # Get the first result
+ >>> subset_results = results[1:4] # Get a slice of results
+ """
return self._apply("__getitem__", idx)
def __len__(self) -> int:
+ """Return the number of detections in the Results object.
+
+ Returns:
+ (int): The number of detections, determined by the length of the first non-None attribute among boxes,
+ masks, probs, keypoints, and obb.
+
+ Examples:
+ >>> results = Results(orig_img, path, names, boxes=torch.rand(5, 6))
+ >>> len(results)
+ 5
+ """
for k in self._keys:
v = getattr(self, k)
if v is not None:
@@ -89,6 +306,24 @@ obb: torch.Tensor | None = None,
keypoints: torch.Tensor | None = None,
):
+ """Update the Results object with new detection data.
+
+ This method allows updating the boxes, masks, keypoints, probabilities, and oriented bounding boxes (OBB) of
+ the Results object. It ensures that boxes are clipped to the original image shape.
+
+ Args:
+ boxes (torch.Tensor | None): A tensor of shape (N, 6) containing bounding box coordinates and confidence
+ scores. The format is (x1, y1, x2, y2, conf, class).
+ masks (torch.Tensor | None): A tensor of shape (N, H, W) containing segmentation masks.
+ probs (torch.Tensor | None): A tensor of shape (num_classes,) containing class probabilities.
+ obb (torch.Tensor | None): A tensor of shape (N, 7) or (N, 8) containing oriented bounding box coordinates.
+ keypoints (torch.Tensor | None): A tensor of shape (N, K, 3) containing keypoints, where K=17 for persons.
+
+ Examples:
+ >>> results = model("image.jpg")
+ >>> new_boxes = torch.tensor([[100, 100, 200, 200, 0.9, 0]])
+ >>> results[0].update(boxes=new_boxes)
+ """
if boxes is not None:
self.boxes = Boxes(ops.clip_boxes(boxes, self.orig_shape), self.orig_shape)
if masks is not None:
@@ -101,6 +336,24 @@ self.keypoints = Keypoints(keypoints, self.orig_shape)
def _apply(self, fn: str, *args, **kwargs):
+ """Apply a function to all non-empty attributes and return a new Results object with modified attributes.
+
+ This method is internally called by methods like .to(), .cuda(), .cpu(), etc.
+
+ Args:
+ fn (str): The name of the function to apply.
+ *args (Any): Variable length argument list to pass to the function.
+ **kwargs (Any): Arbitrary keyword arguments to pass to the function.
+
+ Returns:
+ (Results): A new Results object with attributes modified by the applied function.
+
+ Examples:
+ >>> results = model("path/to/image.jpg")
+ >>> for result in results:
+ ... result_cuda = result.cuda()
+ ... result_cpu = result.cpu()
+ """
r = self.new()
for k in self._keys:
v = getattr(self, k)
@@ -109,18 +362,81 @@ return r
def cpu(self):
+ """Return a copy of the Results object with all its tensors moved to CPU memory.
+
+ This method creates a new Results object with all tensor attributes (boxes, masks, probs, keypoints, obb)
+ transferred to CPU memory. It's useful for moving data from GPU to CPU for further processing or saving.
+
+ Returns:
+ (Results): A new Results object with all tensor attributes on CPU memory.
+
+ Examples:
+ >>> results = model("path/to/image.jpg") # Perform inference
+ >>> cpu_result = results[0].cpu() # Move the first result to CPU
+ >>> print(cpu_result.boxes.data.device) # Output: cpu
+ """
return self._apply("cpu")
def numpy(self):
+ """Convert all tensors in the Results object to numpy arrays.
+
+ Returns:
+ (Results): A new Results object with all tensors converted to numpy arrays.
+
+ Examples:
+ >>> results = model("path/to/image.jpg")
+ >>> numpy_result = results[0].numpy()
+ >>> type(numpy_result.boxes.data)
+ <class 'numpy.ndarray'>
+
+ Notes:
+ This method creates a new Results object, leaving the original unchanged. It's useful for
+ interoperability with numpy-based libraries or when CPU-based operations are required.
+ """
return self._apply("numpy")
def cuda(self):
+ """Move all tensors in the Results object to GPU memory.
+
+ Returns:
+ (Results): A new Results object with all tensors moved to CUDA device.
+
+ Examples:
+ >>> results = model("path/to/image.jpg")
+ >>> cuda_results = results[0].cuda() # Move first result to GPU
+ >>> for result in results:
+ ... result_cuda = result.cuda() # Move each result to GPU
+ """
return self._apply("cuda")
def to(self, *args, **kwargs):
+ """Move all tensors in the Results object to the specified device and dtype.
+
+ Args:
+ *args (Any): Variable length argument list to be passed to torch.Tensor.to().
+ **kwargs (Any): Arbitrary keyword arguments to be passed to torch.Tensor.to().
+
+ Returns:
+ (Results): A new Results object with all tensors moved to the specified device and dtype.
+
+ Examples:
+ >>> results = model("path/to/image.jpg")
+ >>> result_cuda = results[0].to("cuda") # Move first result to GPU
+ >>> result_cpu = results[0].to("cpu") # Move first result to CPU
+ >>> result_half = results[0].to(dtype=torch.float16) # Convert first result to half precision
+ """
return self._apply("to", *args, **kwargs)
def new(self):
+ """Create a new Results object with the same image, path, names, and speed attributes.
+
+ Returns:
+ (Results): A new Results object with copied attributes from the original instance.
+
+ Examples:
+ >>> results = model("path/to/image.jpg")
+ >>> new_result = results[0].new()
+ """
return Results(orig_img=self.orig_img, path=self.path, names=self.names, speed=self.speed)
def plot(
@@ -144,6 +460,37 @@ color_mode: str = "class",
txt_color: tuple[int, int, int] = (255, 255, 255),
) -> np.ndarray:
+ """Plot detection results on an input BGR image.
+
+ Args:
+ conf (bool): Whether to plot detection confidence scores.
+ line_width (float | None): Line width of bounding boxes. If None, scaled to image size.
+ font_size (float | None): Font size for text. If None, scaled to image size.
+ font (str): Font to use for text.
+ pil (bool): Whether to return the image as a PIL Image.
+ img (np.ndarray | None): Image to plot on. If None, uses original image.
+ im_gpu (torch.Tensor | None): Normalized image on GPU for faster mask plotting.
+ kpt_radius (int): Radius of drawn keypoints.
+ kpt_line (bool): Whether to draw lines connecting keypoints.
+ labels (bool): Whether to plot labels of bounding boxes.
+ boxes (bool): Whether to plot bounding boxes.
+ masks (bool): Whether to plot masks.
+ probs (bool): Whether to plot classification probabilities.
+ show (bool): Whether to display the annotated image.
+ save (bool): Whether to save the annotated image.
+ filename (str | None): Filename to save image if save is True.
+ color_mode (str): Specify the color mode, e.g., 'instance' or 'class'.
+ txt_color (tuple[int, int, int]): Text color in BGR format for classification output.
+
+ Returns:
+ (np.ndarray | PIL.Image.Image): Annotated image as a NumPy array (BGR) or PIL image (RGB) if `pil=True`.
+
+ Examples:
+ >>> results = model("image.jpg")
+ >>> for result in results:
+ ... im = result.plot()
+ ... im.show()
+ """
assert color_mode in {"instance", "class"}, f"Expected color_mode='instance' or 'class', not {color_mode}."
if img is None and isinstance(self.orig_img, torch.Tensor):
img = (self.orig_img[0].detach().permute(1, 2, 0).contiguous() * 255).byte().cpu().numpy()
@@ -232,9 +579,48 @@ return annotator.result(pil)
def show(self, *args, **kwargs):
+ """Display the image with annotated inference results.
+
+ This method plots the detection results on the original image and displays it. It's a convenient way to
+ visualize the model's predictions directly.
+
+ Args:
+ *args (Any): Variable length argument list to be passed to the `plot()` method.
+ **kwargs (Any): Arbitrary keyword arguments to be passed to the `plot()` method.
+
+ Examples:
+ >>> results = model("path/to/image.jpg")
+ >>> results[0].show() # Display the first result
+ >>> for result in results:
+ ... result.show() # Display all results
+ """
self.plot(show=True, *args, **kwargs)
def save(self, filename: str | None = None, *args, **kwargs) -> str:
+ """Save annotated inference results image to file.
+
+ This method plots the detection results on the original image and saves the annotated image to a file. It
+ utilizes the `plot` method to generate the annotated image and then saves it to the specified filename.
+
+ Args:
+ filename (str | None): The filename to save the annotated image. If None, a default filename is generated
+ based on the original image path.
+ *args (Any): Variable length argument list to be passed to the `plot` method.
+ **kwargs (Any): Arbitrary keyword arguments to be passed to the `plot` method.
+
+ Returns:
+ (str): The filename where the image was saved.
+
+ Examples:
+ >>> results = model("path/to/image.jpg")
+ >>> for result in results:
+ ... result.save("annotated_image.jpg")
+ >>> # Or with custom plot arguments
+ >>> for result in results:
+ ... result.save("annotated_image.jpg", conf=False, line_width=2)
+ >>> # Directory will be created automatically if it does not exist
+ >>> result.save("path/to/annotated_image.jpg")
+ """
if not filename:
filename = f"results_{Path(self.path).name}"
Path(filename).absolute().parent.mkdir(parents=True, exist_ok=True)
@@ -242,6 +628,27 @@ return filename
def verbose(self) -> str:
+ """Return a log string for each task in the results, detailing detection and classification outcomes.
+
+ This method generates a human-readable string summarizing the detection and classification results. It includes
+ the number of detections for each class and the top probabilities for classification tasks.
+
+ Returns:
+ (str): A formatted string containing a summary of the results. For detection tasks, it includes the number
+ of detections per class. For classification tasks, it includes the top 5 class probabilities.
+
+ Examples:
+ >>> results = model("path/to/image.jpg")
+ >>> for result in results:
+ ... print(result.verbose())
+ 2 persons, 1 car, 3 traffic lights,
+ dog 0.92, cat 0.78, horse 0.64,
+
+ Notes:
+ - If there are no detections, the method returns "(no detections), " for detection tasks.
+ - For classification tasks, it returns the top 5 class probabilities and their corresponding class names.
+ - The returned string is comma-separated and ends with a comma and a space.
+ """
boxes = self.obb if self.obb is not None else self.boxes
if len(self) == 0:
return "" if self.probs is not None else "(no detections), "
@@ -252,6 +659,31 @@ return "".join(f"{n} {self.names[i]}{'s' * (n > 1)}, " for i, n in enumerate(counts) if n > 0)
def save_txt(self, txt_file: str | Path, save_conf: bool = False) -> str:
+ """Save detection results to a text file.
+
+ Args:
+ txt_file (str | Path): Path to the output text file.
+ save_conf (bool): Whether to include confidence scores in the output.
+
+ Returns:
+ (str): Path to the saved text file.
+
+ Examples:
+ >>> from ultralytics import YOLO
+ >>> model = YOLO("yolo26n.pt")
+ >>> results = model("path/to/image.jpg")
+ >>> for result in results:
+ ... result.save_txt("output.txt")
+
+ Notes:
+ - The file will contain one line per detection or classification with the following structure:
+ - For detections: `class x_center y_center width height [confidence] [track_id]`
+ - For classifications: `confidence class_name`
+ - For masks and keypoints, the specific formats will vary accordingly.
+ - The function will create the output directory if it does not exist.
+ - If save_conf is False, the confidence scores will be excluded from the output.
+ - Existing contents of the file will not be overwritten; new results will be appended.
+ """
is_obb = self.obb is not None
boxes = self.obb if is_obb else self.boxes
masks = self.masks
@@ -283,6 +715,26 @@ return str(txt_file)
def save_crop(self, save_dir: str | Path, file_name: str | Path = Path("im.jpg")):
+ """Save cropped detection images to specified directory.
+
+ This method saves cropped images of detected objects to a specified directory. Each crop is saved in a
+ subdirectory named after the object's class, with the filename based on the input file_name.
+
+ Args:
+ save_dir (str | Path): Directory path where cropped images will be saved.
+ file_name (str | Path): Base filename for the saved cropped images.
+
+ Examples:
+ >>> results = model("path/to/image.jpg")
+ >>> for result in results:
+ ... result.save_crop(save_dir="path/to/crops", file_name="detection")
+
+ Notes:
+ - This method does not support Classify or Oriented Bounding Box (OBB) tasks.
+ - Crops are saved as 'save_dir/class_name/file_name.jpg'.
+ - The method will create necessary subdirectories if they don't exist.
+ - Original image is copied before cropping to avoid modifying the original.
+ """
if self.probs is not None:
LOGGER.warning("Classify task does not support `save_crop`.")
return
@@ -298,6 +750,28 @@ )
def summary(self, normalize: bool = False, decimals: int = 5) -> list[dict[str, Any]]:
+ """Convert inference results to a summarized dictionary with optional normalization for box coordinates.
+
+ This method creates a list of detection dictionaries, each containing information about a single detection or
+ classification result. For classification tasks, it returns the top 5 classes and their
+ confidences. For detection tasks, it includes class information, bounding box coordinates, and
+ optionally mask segments and keypoints.
+
+ Args:
+ normalize (bool): Whether to normalize bounding box coordinates by image dimensions.
+ decimals (int): Number of decimal places to round the output values to.
+
+ Returns:
+ (list[dict[str, Any]]): A list of dictionaries, each containing summarized information for a single
+ detection or classification result. The structure of each dictionary varies based on the task type
+ (classification or detection) and available information (boxes, masks, keypoints).
+
+ Examples:
+ >>> results = model("image.jpg")
+ >>> for result in results:
+ ... summary = result.summary()
+ ... print(summary)
+ """
# Create list of detection dictionaries
results = []
if self.probs is not None:
@@ -349,8 +823,53 @@
class Boxes(BaseTensor):
+ """A class for managing and manipulating detection boxes.
+
+ This class provides comprehensive functionality for handling detection boxes, including their coordinates,
+ confidence scores, class labels, and optional tracking IDs. It supports various box formats and offers methods for
+ easy manipulation and conversion between different coordinate systems.
+
+ Attributes:
+ data (torch.Tensor | np.ndarray): The raw tensor containing detection boxes and associated data.
+ orig_shape (tuple[int, int]): The original image dimensions (height, width).
+ is_track (bool): Indicates whether tracking IDs are included in the box data.
+ xyxy (torch.Tensor | np.ndarray): Boxes in [x1, y1, x2, y2] format.
+ conf (torch.Tensor | np.ndarray): Confidence scores for each box.
+ cls (torch.Tensor | np.ndarray): Class labels for each box.
+ id (torch.Tensor | None): Tracking IDs for each box (if available).
+ xywh (torch.Tensor | np.ndarray): Boxes in [x, y, width, height] format.
+ xyxyn (torch.Tensor | np.ndarray): Normalized [x1, y1, x2, y2] boxes relative to orig_shape.
+ xywhn (torch.Tensor | np.ndarray): Normalized [x, y, width, height] boxes relative to orig_shape.
+
+ Methods:
+ cpu: Return a copy of the object with all tensors on CPU memory.
+ numpy: Return a copy of the object with all tensors as numpy arrays.
+ cuda: Return a copy of the object with all tensors on GPU memory.
+ to: Return a copy of the object with tensors on specified device and dtype.
+
+ Examples:
+ >>> import torch
+ >>> boxes_data = torch.tensor([[100, 50, 150, 100, 0.9, 0], [200, 150, 300, 250, 0.8, 1]])
+ >>> orig_shape = (480, 640) # height, width
+ >>> boxes = Boxes(boxes_data, orig_shape)
+ >>> print(boxes.xyxy)
+ >>> print(boxes.conf)
+ >>> print(boxes.cls)
+ >>> print(boxes.xywhn)
+ """
def __init__(self, boxes: torch.Tensor | np.ndarray, orig_shape: tuple[int, int]) -> None:
+ """Initialize the Boxes class with detection box data and the original image shape.
+
+ This class manages detection boxes, providing easy access and manipulation of box coordinates, confidence
+ scores, class identifiers, and optional tracking IDs. It supports multiple formats for box coordinates,
+ including both absolute and normalized forms.
+
+ Args:
+ boxes (torch.Tensor | np.ndarray): A tensor or numpy array with detection boxes of shape (num_boxes, 6) or
+ (num_boxes, 7). Columns should contain [x1, y1, x2, y2, (optional) track_id, confidence, class].
+ orig_shape (tuple[int, int]): The original image shape as (height, width). Used for normalization.
+ """
if boxes.ndim == 1:
boxes = boxes[None, :]
n = boxes.shape[-1]
@@ -361,28 +880,115 @@
@property
def xyxy(self) -> torch.Tensor | np.ndarray:
+ """Return bounding boxes in [x1, y1, x2, y2] format.
+
+ Returns:
+ (torch.Tensor | np.ndarray): A tensor or numpy array of shape (n, 4) containing bounding box coordinates in
+ [x1, y1, x2, y2] format, where n is the number of boxes.
+
+ Examples:
+ >>> results = model("image.jpg")
+ >>> boxes = results[0].boxes
+ >>> xyxy = boxes.xyxy
+ >>> print(xyxy)
+ """
return self.data[:, :4]
@property
def conf(self) -> torch.Tensor | np.ndarray:
+ """Return the confidence scores for each detection box.
+
+ Returns:
+ (torch.Tensor | np.ndarray): A 1D tensor or array containing confidence scores for each detection, with
+ shape (N,) where N is the number of detections.
+
+ Examples:
+ >>> boxes = Boxes(torch.tensor([[10, 20, 30, 40, 0.9, 0]]), orig_shape=(100, 100))
+ >>> conf_scores = boxes.conf
+ >>> print(conf_scores)
+ tensor([0.9000])
+ """
return self.data[:, -2]
@property
def cls(self) -> torch.Tensor | np.ndarray:
+ """Return the class ID tensor representing category predictions for each bounding box.
+
+ Returns:
+ (torch.Tensor | np.ndarray): A tensor or numpy array containing the class IDs for each detection box. The
+ shape is (N,), where N is the number of boxes.
+
+ Examples:
+ >>> results = model("image.jpg")
+ >>> boxes = results[0].boxes
+ >>> class_ids = boxes.cls
+ >>> print(class_ids) # tensor([0., 2., 1.])
+ """
return self.data[:, -1]
@property
def id(self) -> torch.Tensor | np.ndarray | None:
+ """Return the tracking IDs for each detection box if available.
+
+ Returns:
+ (torch.Tensor | np.ndarray | None): A tensor or array containing tracking IDs for each box if tracking is
+ enabled, otherwise None. Shape is (N,) where N is the number of boxes.
+
+ Examples:
+ >>> results = model.track("path/to/video.mp4")
+ >>> for result in results:
+ ... boxes = result.boxes
+ ... if boxes.is_track:
+ ... track_ids = boxes.id
+ ... print(f"Tracking IDs: {track_ids}")
+ ... else:
+ ... print("Tracking is not enabled for these boxes.")
+
+ Notes:
+ - This property is only available when tracking is enabled (i.e., when `is_track` is True).
+ - The tracking IDs are typically used to associate detections across multiple frames in video analysis.
+ """
return self.data[:, -3] if self.is_track else None
@property
@lru_cache(maxsize=2)
def xywh(self) -> torch.Tensor | np.ndarray:
+ """Convert bounding boxes from [x1, y1, x2, y2] format to [x, y, width, height] format.
+
+ Returns:
+ (torch.Tensor | np.ndarray): Boxes in [x_center, y_center, width, height] format, where (x_center,
+ y_center) is the center point of each bounding box and (width, height) are its dimensions. The returned
+ tensor has shape (N, 4), where N is the number of boxes.
+
+ Examples:
+ >>> boxes = Boxes(
+ ... torch.tensor([[100, 50, 150, 100, 0.9, 0], [200, 150, 300, 250, 0.8, 1]]), orig_shape=(480, 640)
+ ... )
+ >>> xywh = boxes.xywh
+ >>> print(xywh)
+ tensor([[125.0000, 75.0000, 50.0000, 50.0000],
+ [250.0000, 200.0000, 100.0000, 100.0000]])
+ """
return ops.xyxy2xywh(self.xyxy)
@property
@lru_cache(maxsize=2)
def xyxyn(self) -> torch.Tensor | np.ndarray:
+ """Return normalized bounding box coordinates relative to the original image size.
+
+ This property calculates and returns the bounding box coordinates in [x1, y1, x2, y2] format, normalized to the
+ range [0, 1] based on the original image dimensions.
+
+ Returns:
+ (torch.Tensor | np.ndarray): Normalized bounding box coordinates with shape (N, 4), where N is the number of
+ boxes. Each row contains [x1, y1, x2, y2] values normalized to [0, 1].
+
+ Examples:
+ >>> boxes = Boxes(torch.tensor([[100, 50, 300, 400, 0.9, 0]]), orig_shape=(480, 640))
+ >>> normalized = boxes.xyxyn
+ >>> print(normalized)
+ tensor([[0.1562, 0.1042, 0.4688, 0.8333]])
+ """
xyxy = self.xyxy.clone() if isinstance(self.xyxy, torch.Tensor) else np.copy(self.xyxy)
xyxy[..., [0, 2]] /= self.orig_shape[1]
xyxy[..., [1, 3]] /= self.orig_shape[0]
@@ -391,6 +997,22 @@ @property
@lru_cache(maxsize=2)
def xywhn(self) -> torch.Tensor | np.ndarray:
+ """Return normalized bounding boxes in [x, y, width, height] format.
+
+ This property calculates and returns the normalized bounding box coordinates in the format [x_center, y_center,
+ width, height], where all values are relative to the original image dimensions.
+
+ Returns:
+ (torch.Tensor | np.ndarray): Normalized bounding boxes with shape (N, 4), where N is the number of boxes.
+ Each row contains [x_center, y_center, width, height] values normalized to [0, 1] based on the original
+ image dimensions.
+
+ Examples:
+ >>> boxes = Boxes(torch.tensor([[100, 50, 150, 100, 0.9, 0]]), orig_shape=(480, 640))
+ >>> normalized = boxes.xywhn
+ >>> print(normalized)
+ tensor([[0.1953, 0.1562, 0.0781, 0.1042]])
+ """
xywh = ops.xyxy2xywh(self.xyxy)
xywh[..., [0, 2]] /= self.orig_shape[1]
xywh[..., [1, 3]] /= self.orig_shape[0]
@@ -398,8 +1020,38 @@
class Masks(BaseTensor):
+ """A class for storing and manipulating detection masks.
+
+ This class extends BaseTensor and provides functionality for handling segmentation masks, including methods for
+ converting between pixel and normalized coordinates.
+
+ Attributes:
+ data (torch.Tensor | np.ndarray): The raw tensor or array containing mask data.
+ orig_shape (tuple[int, int]): Original image shape in (height, width) format.
+ xy (list[np.ndarray]): A list of segments in pixel coordinates.
+ xyn (list[np.ndarray]): A list of normalized segments.
+
+ Methods:
+ cpu: Return a copy of the Masks object with the mask tensor on CPU memory.
+ numpy: Return a copy of the Masks object with the mask tensor as a numpy array.
+ cuda: Return a copy of the Masks object with the mask tensor on GPU memory.
+ to: Return a copy of the Masks object with the mask tensor on specified device and dtype.
+
+ Examples:
+ >>> masks_data = torch.rand(1, 160, 160)
+ >>> orig_shape = (720, 1280)
+ >>> masks = Masks(masks_data, orig_shape)
+ >>> pixel_coords = masks.xy
+ >>> normalized_coords = masks.xyn
+ """
def __init__(self, masks: torch.Tensor | np.ndarray, orig_shape: tuple[int, int]) -> None:
+ """Initialize the Masks class with detection mask data and the original image shape.
+
+ Args:
+ masks (torch.Tensor | np.ndarray): Detection masks with shape (num_masks, height, width).
+ orig_shape (tuple[int, int]): The original image shape as (height, width). Used for normalization.
+ """
if masks.ndim == 2:
masks = masks[None, :]
super().__init__(masks, orig_shape)
@@ -407,6 +1059,22 @@ @property
@lru_cache(maxsize=1)
def xyn(self) -> list[np.ndarray]:
+ """Return normalized xy-coordinates of the segmentation masks.
+
+ This property calculates and caches the normalized xy-coordinates of the segmentation masks. The coordinates are
+ normalized relative to the original image shape.
+
+ Returns:
+ (list[np.ndarray]): A list of numpy arrays, where each array contains the normalized xy-coordinates of a
+ single segmentation mask. Each array has shape (N, 2), where N is the number of points in the
+ mask contour.
+
+ Examples:
+ >>> results = model("image.jpg")
+ >>> masks = results[0].masks
+ >>> normalized_coords = masks.xyn
+ >>> print(normalized_coords[0]) # Normalized coordinates of the first mask
+ """
return [
ops.scale_coords(self.data.shape[1:], x, self.orig_shape, normalize=True)
for x in ops.masks2segments(self.data)
@@ -415,6 +1083,22 @@ @property
@lru_cache(maxsize=1)
def xy(self) -> list[np.ndarray]:
+ """Return the [x, y] pixel coordinates for each segment in the mask tensor.
+
+ This property calculates and returns a list of pixel coordinates for each segmentation mask in the Masks object.
+ The coordinates are scaled to match the original image dimensions.
+
+ Returns:
+ (list[np.ndarray]): A list of numpy arrays, where each array contains the [x, y] pixel coordinates for a
+ single segmentation mask. Each array has shape (N, 2), where N is the number of points in the segment.
+
+ Examples:
+ >>> results = model("image.jpg")
+ >>> masks = results[0].masks
+ >>> xy_coords = masks.xy
+ >>> print(len(xy_coords)) # Number of masks
+ >>> print(xy_coords[0].shape) # Shape of first mask's coordinates
+ """
return [
ops.scale_coords(self.data.shape[1:], x, self.orig_shape, normalize=False)
for x in ops.masks2segments(self.data)
@@ -422,8 +1106,47 @@
class Keypoints(BaseTensor):
+ """A class for storing and manipulating detection keypoints.
+
+ This class encapsulates functionality for handling keypoint data, including coordinate manipulation, normalization,
+ and confidence values. It supports keypoint detection results with optional visibility information.
+
+ Attributes:
+ data (torch.Tensor): The raw tensor containing keypoint data.
+ orig_shape (tuple[int, int]): The original image dimensions (height, width).
+ has_visible (bool): Indicates whether visibility information is available for keypoints.
+ xy (torch.Tensor): Keypoint coordinates in [x, y] format.
+ xyn (torch.Tensor): Normalized keypoint coordinates in [x, y] format, relative to orig_shape.
+ conf (torch.Tensor | None): Confidence values for each keypoint, if available.
+
+ Methods:
+ cpu: Return a copy of the keypoints tensor on CPU memory.
+ numpy: Return a copy of the keypoints tensor as a numpy array.
+ cuda: Return a copy of the keypoints tensor on GPU memory.
+ to: Return a copy of the keypoints tensor with specified device and dtype.
+
+ Examples:
+ >>> import torch
+ >>> from ultralytics.engine.results import Keypoints
+ >>> keypoints_data = torch.rand(1, 17, 3) # 1 detection, 17 keypoints, (x, y, conf)
+ >>> orig_shape = (480, 640) # Original image shape (height, width)
+ >>> keypoints = Keypoints(keypoints_data, orig_shape)
+ >>> print(keypoints.xy.shape) # Access xy coordinates
+ >>> print(keypoints.conf) # Access confidence values
+ >>> keypoints_cpu = keypoints.cpu() # Move keypoints to CPU
+ """
def __init__(self, keypoints: torch.Tensor | np.ndarray, orig_shape: tuple[int, int]) -> None:
+ """Initialize the Keypoints object with detection keypoints and original image dimensions.
+
+ This method processes the input keypoints tensor, handling both 2D and 3D formats.
+
+ Args:
+ keypoints (torch.Tensor | np.ndarray): A tensor or array containing keypoint data. Shape can be either:
+ - (num_objects, num_keypoints, 2) for x, y coordinates only
+ - (num_objects, num_keypoints, 3) for x, y coordinates and confidence scores
+ orig_shape (tuple[int, int]): The original image dimensions (height, width).
+ """
if keypoints.ndim == 2:
keypoints = keypoints[None, :]
super().__init__(keypoints, orig_shape)
@@ -432,11 +1155,41 @@ @property
@lru_cache(maxsize=1)
def xy(self) -> torch.Tensor | np.ndarray:
+ """Return x, y coordinates of keypoints.
+
+ Returns:
+ (torch.Tensor | np.ndarray): A tensor or array containing the x, y coordinates of keypoints with shape (N,
+ K, 2), where N is the number of detections and K is the number of keypoints per detection.
+
+ Examples:
+ >>> results = model("image.jpg")
+ >>> keypoints = results[0].keypoints
+ >>> xy = keypoints.xy
+ >>> print(xy.shape) # (N, K, 2)
+ >>> print(xy[0]) # x, y coordinates of keypoints for first detection
+
+ Notes:
+ - The returned coordinates are in pixel units relative to the original image dimensions.
+ - This property uses LRU caching to improve performance on repeated access.
+ """
return self.data[..., :2]
@property
@lru_cache(maxsize=1)
def xyn(self) -> torch.Tensor | np.ndarray:
+ """Return normalized coordinates (x, y) of keypoints relative to the original image size.
+
+ Returns:
+ (torch.Tensor | np.ndarray): A tensor or array of shape (N, K, 2) containing normalized keypoint
+ coordinates, where N is the number of instances, K is the number of keypoints, and the last dimension
+ contains [x, y] values in the range [0, 1].
+
+ Examples:
+ >>> keypoints = Keypoints(torch.rand(1, 17, 2), orig_shape=(480, 640))
+ >>> normalized_kpts = keypoints.xyn
+ >>> print(normalized_kpts.shape)
+ torch.Size([1, 17, 2])
+ """
xy = self.xy.clone() if isinstance(self.xy, torch.Tensor) else np.copy(self.xy)
xy[..., 0] /= self.orig_shape[1]
xy[..., 1] /= self.orig_shape[0]
@@ -445,38 +1198,185 @@ @property
@lru_cache(maxsize=1)
def conf(self) -> torch.Tensor | np.ndarray | None:
+ """Return confidence values for each keypoint.
+
+ Returns:
+ (torch.Tensor | np.ndarray | None): A tensor or array containing confidence scores for each keypoint if
+ available, otherwise None. Shape is (num_detections, num_keypoints), since 2D inputs are promoted to a
+ batch of one during initialization.
+
+ Examples:
+ >>> keypoints = Keypoints(torch.rand(1, 17, 3), orig_shape=(640, 640)) # 1 detection, 17 keypoints
+ >>> conf = keypoints.conf
+ >>> print(conf.shape) # torch.Size([1, 17])
+ """
return self.data[..., 2] if self.has_visible else None
class Probs(BaseTensor):
+ """A class for storing and manipulating classification probabilities.
+
+ This class extends BaseTensor and provides methods for accessing and manipulating classification probabilities,
+ including top-1 and top-5 predictions.
+
+ Attributes:
+ data (torch.Tensor | np.ndarray): The raw tensor or array containing classification probabilities.
+ orig_shape (tuple[int, int] | None): The original image shape as (height, width). Not used in this class.
+ top1 (int): Index of the class with the highest probability.
+ top5 (list[int]): Indices of the top 5 classes by probability.
+ top1conf (torch.Tensor | np.ndarray): Confidence score of the top 1 class.
+ top5conf (torch.Tensor | np.ndarray): Confidence scores of the top 5 classes.
+
+ Methods:
+ cpu: Return a copy of the probabilities tensor on CPU memory.
+ numpy: Return a copy of the probabilities tensor as a numpy array.
+ cuda: Return a copy of the probabilities tensor on GPU memory.
+ to: Return a copy of the probabilities tensor with specified device and dtype.
+
+ Examples:
+ >>> probs = torch.tensor([0.1, 0.3, 0.6])
+ >>> p = Probs(probs)
+ >>> print(p.top1)
+ 2
+ >>> print(p.top5)
+ [2, 1, 0]
+ >>> print(p.top1conf)
+ tensor(0.6000)
+ >>> print(p.top5conf)
+ tensor([0.6000, 0.3000, 0.1000])
+ """
def __init__(self, probs: torch.Tensor | np.ndarray, orig_shape: tuple[int, int] | None = None) -> None:
+ """Initialize the Probs class with classification probabilities.
+
+ This class stores and manages classification probabilities, providing easy access to top predictions and their
+ confidences.
+
+ Args:
+ probs (torch.Tensor | np.ndarray): A 1D tensor or array of classification probabilities.
+ orig_shape (tuple[int, int] | None): The original image shape as (height, width). Not used in this class but
+ kept for consistency with other result classes.
+ """
super().__init__(probs, orig_shape)
@property
@lru_cache(maxsize=1)
def top1(self) -> int:
+ """Return the index of the class with the highest probability.
+
+ Returns:
+ (int): Index of the class with the highest probability.
+
+ Examples:
+ >>> probs = Probs(torch.tensor([0.1, 0.3, 0.6]))
+ >>> probs.top1
+ 2
+ """
return int(self.data.argmax())
@property
@lru_cache(maxsize=1)
def top5(self) -> list[int]:
+ """Return the indices of the top 5 class probabilities.
+
+ Returns:
+ (list[int]): A list containing the indices of the top 5 class probabilities, sorted in descending order.
+
+ Examples:
+ >>> probs = Probs(torch.tensor([0.1, 0.2, 0.3, 0.4, 0.5]))
+ >>> print(probs.top5)
+ [4, 3, 2, 1, 0]
+ """
return (-self.data).argsort(0)[:5].tolist() # this way works with both torch and numpy.
@property
@lru_cache(maxsize=1)
def top1conf(self) -> torch.Tensor | np.ndarray:
+ """Return the confidence score of the highest probability class.
+
+ This property retrieves the confidence score (probability) of the class with the highest predicted probability
+ from the classification results.
+
+ Returns:
+ (torch.Tensor | np.ndarray): A tensor containing the confidence score of the top 1 class.
+
+ Examples:
+ >>> results = model("image.jpg") # classify an image
+ >>> probs = results[0].probs # get classification probabilities
+ >>> top1_confidence = probs.top1conf # get confidence of top 1 class
+ >>> print(f"Top 1 class confidence: {top1_confidence.item():.4f}")
+ """
return self.data[self.top1]
@property
@lru_cache(maxsize=1)
def top5conf(self) -> torch.Tensor | np.ndarray:
+ """Return confidence scores for the top 5 classification predictions.
+
+ This property retrieves the confidence scores corresponding to the top 5 class probabilities predicted by the
+ model. It provides a quick way to access the most likely class predictions along with their associated
+ confidence levels.
+
+ Returns:
+ (torch.Tensor | np.ndarray): A tensor or array containing the confidence scores for the top 5 predicted
+ classes, sorted in descending order of probability.
+
+ Examples:
+ >>> results = model("image.jpg")
+ >>> probs = results[0].probs
+ >>> top5_conf = probs.top5conf
+ >>> print(top5_conf) # Prints confidence scores for top 5 classes
+ """
return self.data[self.top5]
class OBB(BaseTensor):
+ """A class for storing and manipulating Oriented Bounding Boxes (OBB).
+
+ This class provides functionality to handle oriented bounding boxes, including conversion between different formats,
+ normalization, and access to various properties of the boxes. It supports both tracking and non-tracking scenarios.
+
+ Attributes:
+ data (torch.Tensor): The raw OBB tensor containing box coordinates and associated data.
+ orig_shape (tuple[int, int]): Original image size as (height, width).
+ is_track (bool): Indicates whether tracking IDs are included in the box data.
+ xywhr (torch.Tensor | np.ndarray): Boxes in [x_center, y_center, width, height, rotation] format.
+ conf (torch.Tensor | np.ndarray): Confidence scores for each box.
+ cls (torch.Tensor | np.ndarray): Class labels for each box.
+ id (torch.Tensor | np.ndarray): Tracking IDs for each box, if available.
+ xyxyxyxy (torch.Tensor | np.ndarray): Boxes in 8-point [x1, y1, x2, y2, x3, y3, x4, y4] format.
+ xyxyxyxyn (torch.Tensor | np.ndarray): Normalized 8-point coordinates relative to orig_shape.
+ xyxy (torch.Tensor | np.ndarray): Axis-aligned bounding boxes in [x1, y1, x2, y2] format.
+
+ Methods:
+ cpu: Return a copy of the OBB object with all tensors on CPU memory.
+ numpy: Return a copy of the OBB object with all tensors as numpy arrays.
+ cuda: Return a copy of the OBB object with all tensors on GPU memory.
+ to: Return a copy of the OBB object with tensors on specified device and dtype.
+
+ Examples:
+ >>> boxes = torch.tensor([[100, 50, 150, 100, 30, 0.9, 0]]) # xywhr, conf, cls
+ >>> obb = OBB(boxes, orig_shape=(480, 640))
+ >>> print(obb.xyxyxyxy)
+ >>> print(obb.conf)
+ >>> print(obb.cls)
+ """
def __init__(self, boxes: torch.Tensor | np.ndarray, orig_shape: tuple[int, int]) -> None:
+ """Initialize an OBB (Oriented Bounding Box) instance with oriented bounding box data and original image shape.
+
+ This class stores and manipulates Oriented Bounding Boxes (OBB) for object detection tasks. It provides various
+ properties and methods to access and transform the OBB data.
+
+ Args:
+ boxes (torch.Tensor | np.ndarray): A tensor or numpy array containing the detection boxes, with shape
+ (num_boxes, 7) or (num_boxes, 8). The first five columns are [x_center, y_center, width, height,
+ rotation]; the last two are confidence and class, and an optional track ID column precedes them.
+ orig_shape (tuple[int, int]): Original image size, in the format (height, width).
+
+ Raises:
+ AssertionError: If the number of values per box is not 7 or 8.
+ """
if boxes.ndim == 1:
boxes = boxes[None, :]
n = boxes.shape[-1]
@@ -487,28 +1387,109 @@
@property
def xywhr(self) -> torch.Tensor | np.ndarray:
+ """Return boxes in [x_center, y_center, width, height, rotation] format.
+
+ Returns:
+ (torch.Tensor | np.ndarray): A tensor or numpy array containing the oriented bounding boxes with format
+ [x_center, y_center, width, height, rotation]. The shape is (N, 5) where N is the number of boxes.
+
+ Examples:
+ >>> results = model("image.jpg")
+ >>> obb = results[0].obb
+ >>> xywhr = obb.xywhr
+ >>> print(xywhr.shape)
+ torch.Size([3, 5])
+ """
return self.data[:, :5]
@property
def conf(self) -> torch.Tensor | np.ndarray:
+ """Return the confidence scores for Oriented Bounding Boxes (OBBs).
+
+ This property retrieves the confidence values associated with each OBB detection. The confidence score
+ represents the model's certainty in the detection.
+
+ Returns:
+ (torch.Tensor | np.ndarray): A tensor or numpy array of shape (N,) containing confidence scores for N
+ detections, where each score is in the range [0, 1].
+
+ Examples:
+ >>> results = model("image.jpg")
+ >>> obb_result = results[0].obb
+ >>> confidence_scores = obb_result.conf
+ >>> print(confidence_scores)
+ """
return self.data[:, -2]
@property
def cls(self) -> torch.Tensor | np.ndarray:
+ """Return the class values of the oriented bounding boxes.
+
+ Returns:
+ (torch.Tensor | np.ndarray): A tensor or numpy array containing the class values for each oriented bounding
+ box. The shape is (N,), where N is the number of boxes.
+
+ Examples:
+ >>> results = model("image.jpg")
+ >>> result = results[0]
+ >>> obb = result.obb
+ >>> class_values = obb.cls
+ >>> print(class_values)
+ """
return self.data[:, -1]
@property
def id(self) -> torch.Tensor | np.ndarray | None:
+ """Return the tracking IDs of the oriented bounding boxes (if available).
+
+ Returns:
+ (torch.Tensor | np.ndarray | None): A tensor or numpy array containing the tracking IDs for each oriented
+ bounding box. Returns None if tracking IDs are not available.
+
+ Examples:
+ >>> results = model.track("video.mp4")  # Run tracking so boxes carry IDs
+ >>> for result in results:
+ ... if result.obb is not None:
+ ... track_ids = result.obb.id
+ ... if track_ids is not None:
+ ... print(f"Tracking IDs: {track_ids}")
+ """
return self.data[:, -3] if self.is_track else None
@property
@lru_cache(maxsize=2)
def xyxyxyxy(self) -> torch.Tensor | np.ndarray:
+ """Convert OBB format to 8-point (xyxyxyxy) coordinate format for rotated bounding boxes.
+
+ Returns:
+ (torch.Tensor | np.ndarray): Rotated bounding boxes in xyxyxyxy format with shape (N, 4, 2), where N is the
+ number of boxes. Each box is represented by 4 points (x, y), starting from the top-left corner and
+ moving clockwise.
+
+ Examples:
+ >>> obb = OBB(torch.tensor([[100, 100, 50, 30, 0.5, 0.9, 0]]), orig_shape=(640, 640))
+ >>> xyxyxyxy = obb.xyxyxyxy
+ >>> print(xyxyxyxy.shape)
+ torch.Size([1, 4, 2])
+ """
return ops.xywhr2xyxyxyxy(self.xywhr)
@property
@lru_cache(maxsize=2)
def xyxyxyxyn(self) -> torch.Tensor | np.ndarray:
+ """Convert rotated bounding boxes to normalized xyxyxyxy format.
+
+ Returns:
+ (torch.Tensor | np.ndarray): Normalized rotated bounding boxes in xyxyxyxy format with shape (N, 4, 2),
+ where N is the number of boxes. Each box is represented by 4 points (x, y), normalized relative to the
+ original image dimensions.
+
+ Examples:
+ >>> obb = OBB(torch.rand(10, 7), orig_shape=(640, 480)) # 10 random OBBs
+ >>> normalized_boxes = obb.xyxyxyxyn
+ >>> print(normalized_boxes.shape)
+ torch.Size([10, 4, 2])
+ """
xyxyxyxyn = self.xyxyxyxy.clone() if isinstance(self.xyxyxyxy, torch.Tensor) else np.copy(self.xyxyxyxy)
xyxyxyxyn[..., 0] /= self.orig_shape[1]
xyxyxyxyn[..., 1] /= self.orig_shape[0]
@@ -517,10 +1498,36 @@ @property
@lru_cache(maxsize=2)
def xyxy(self) -> torch.Tensor | np.ndarray:
+ """Convert oriented bounding boxes (OBB) to axis-aligned bounding boxes in xyxy format.
+
+ This property calculates the minimal enclosing rectangle for each oriented bounding box and returns it in xyxy
+ format (x1, y1, x2, y2). This is useful for operations that require axis-aligned bounding boxes, such as IoU
+ calculation with non-rotated boxes.
+
+ Returns:
+ (torch.Tensor | np.ndarray): Axis-aligned bounding boxes in xyxy format with shape (N, 4), where N is the
+ number of boxes. Each row contains [x1, y1, x2, y2] coordinates.
+
+ Examples:
+ >>> import torch
+ >>> from ultralytics import YOLO
+ >>> model = YOLO("yolo26n-obb.pt")
+ >>> results = model("path/to/image.jpg")
+ >>> for result in results:
+ ... obb = result.obb
+ ... if obb is not None:
+ ... xyxy_boxes = obb.xyxy
+ ... print(xyxy_boxes.shape) # (N, 4)
+
+ Notes:
+ - This method approximates the OBB by its minimal enclosing rectangle.
+ - The returned format is compatible with standard object detection metrics and visualization tools.
+ - The property uses caching to improve performance for repeated access.
+ """
x = self.xyxyxyxy[..., 0]
y = self.xyxyxyxy[..., 1]
return (
torch.stack([x.amin(1), y.amin(1), x.amax(1), y.amax(1)], -1)
if isinstance(x, torch.Tensor)
else np.stack([x.min(1), y.min(1), x.max(1), y.max(1)], -1)
- )
+ )
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/engine/results.py |
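The xyxyxyxy and xyxy conversions documented above can be reproduced outside the library; a minimal NumPy sketch (using a hypothetical standalone helper rather than ultralytics.utils.ops.xywhr2xyxyxyxy) of how rotated xywhr boxes become 4 corner points and then a minimal enclosing axis-aligned box:
import numpy as np

def xywhr_to_corners(xywhr: np.ndarray) -> np.ndarray:
    # (N, 5) [x_center, y_center, width, height, rotation_radians] -> (N, 4, 2) corner points
    ctr, w, h, r = xywhr[:, :2], xywhr[:, 2:3], xywhr[:, 3:4], xywhr[:, 4:5]
    cos, sin = np.cos(r), np.sin(r)
    vec1 = np.concatenate([w / 2 * cos, w / 2 * sin], axis=-1)   # half-extent along rotated width axis
    vec2 = np.concatenate([-h / 2 * sin, h / 2 * cos], axis=-1)  # half-extent along rotated height axis
    return np.stack([ctr + vec1 + vec2, ctr + vec1 - vec2, ctr - vec1 - vec2, ctr - vec1 + vec2], axis=1)

corners = xywhr_to_corners(np.array([[100.0, 100.0, 50.0, 30.0, 0.5]]))
x, y = corners[..., 0], corners[..., 1]
xyxy = np.stack([x.min(1), y.min(1), x.max(1), y.max(1)], -1)  # minimal enclosing axis-aligned box
print(corners.shape, xyxy.shape)  # (1, 4, 2) (1, 4)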
Add detailed docstrings explaining each function | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import json
import os
import random
import subprocess
import time
import zipfile
from multiprocessing.pool import ThreadPool
from pathlib import Path
from tarfile import is_tarfile
from typing import Any
import cv2
import numpy as np
from PIL import Image, ImageOps
from ultralytics.nn.autobackend import check_class_names
from ultralytics.utils import (
ASSETS_URL,
DATASETS_DIR,
LOGGER,
NUM_THREADS,
ROOT,
SETTINGS_FILE,
TQDM,
YAML,
clean_url,
colorstr,
emojis,
is_dir_writeable,
)
from ultralytics.utils.checks import check_file, check_font, is_ascii
from ultralytics.utils.downloads import download, safe_download, unzip_file
from ultralytics.utils.ops import segments2boxes
HELP_URL = "See https://docs.ultralytics.com/datasets for dataset formatting guidance."
IMG_FORMATS = {
"avif",
"bmp",
"dng",
"heic",
"heif",
"jp2",
"jpeg",
"jpeg2000",
"jpg",
"mpo",
"png",
"tif",
"tiff",
"webp",
}
VID_FORMATS = {"asf", "avi", "gif", "m4v", "mkv", "mov", "mp4", "mpeg", "mpg", "ts", "wmv", "webm"} # videos
FORMATS_HELP_MSG = f"Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}"
def img2label_paths(img_paths: list[str]) -> list[str]:
sa, sb = f"{os.sep}images{os.sep}", f"{os.sep}labels{os.sep}" # /images/, /labels/ substrings
return [sb.join(x.rsplit(sa, 1)).rsplit(".", 1)[0] + ".txt" for x in img_paths]
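# A minimal illustration (hypothetical paths, POSIX separators assumed): only the last "/images/" path
# component is rewritten and the extension is swapped for ".txt", e.g.
#   img2label_paths(["/data/coco8/images/train/im0.jpg"]) -> ["/data/coco8/labels/train/im0.txt"]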
def check_file_speeds(
files: list[str], threshold_ms: float = 10, threshold_mb: float = 50, max_files: int = 5, prefix: str = ""
):
if not files:
LOGGER.warning(f"{prefix}Image speed checks: No files to check")
return
# Sample files (max 5)
files = random.sample(files, min(max_files, len(files)))
# Test ping (stat time)
ping_times = []
file_sizes = []
read_speeds = []
for f in files:
try:
# Measure ping (stat call)
start = time.perf_counter()
file_size = os.stat(f).st_size
ping_times.append((time.perf_counter() - start) * 1000) # ms
file_sizes.append(file_size)
# Measure read speed
start = time.perf_counter()
with open(f, "rb") as file_obj:
_ = file_obj.read()
read_time = time.perf_counter() - start
if read_time > 0: # Avoid division by zero
read_speeds.append(file_size / (1 << 20) / read_time) # MB/s
except Exception:
pass
if not ping_times:
LOGGER.warning(f"{prefix}Image speed checks: failed to access files")
return
# Calculate stats with uncertainties
avg_ping = np.mean(ping_times)
std_ping = np.std(ping_times, ddof=1) if len(ping_times) > 1 else 0
size_msg = f", size: {np.mean(file_sizes) / (1 << 10):.1f} KB"
ping_msg = f"ping: {avg_ping:.1f}±{std_ping:.1f} ms"
if read_speeds:
avg_speed = np.mean(read_speeds)
std_speed = np.std(read_speeds, ddof=1) if len(read_speeds) > 1 else 0
speed_msg = f", read: {avg_speed:.1f}±{std_speed:.1f} MB/s"
else:
avg_speed = float("inf")  # no successful reads were measured; rely on the ping check only
speed_msg = ""
if avg_ping < threshold_ms or avg_speed < threshold_mb:
LOGGER.info(f"{prefix}Fast image access ✅ ({ping_msg}{speed_msg}{size_msg})")
else:
LOGGER.warning(
f"{prefix}Slow image access detected ({ping_msg}{speed_msg}{size_msg}). "
f"Use local storage instead of remote/mounted storage for better performance. "
f"See https://docs.ultralytics.com/guides/model-training-tips/"
)
def get_hash(paths: list[str]) -> str:
size = 0
for p in paths:
try:
size += os.stat(p).st_size
except OSError:
continue
h = __import__("hashlib").sha256(str(size).encode()) # hash sizes
h.update("".join(paths).encode()) # hash paths
return h.hexdigest() # return hash
def exif_size(img: Image.Image) -> tuple[int, int]:
s = img.size # (width, height)
if img.format == "JPEG": # only support JPEG images
try:
if exif := img.getexif():
rotation = exif.get(274, None) # the EXIF key for the orientation tag is 274
if rotation in {6, 8}: # rotation 270 or 90
s = s[1], s[0]
except Exception:
pass
return s
def verify_image(args: tuple) -> tuple:
(im_file, cls), prefix = args
# Number (found, corrupt), message
nf, nc, msg = 0, 0, ""
try:
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
shape = (shape[1], shape[0]) # hw
assert (shape[0] > 9) & (shape[1] > 9), f"image size {shape} <10 pixels"
assert im.format.lower() in IMG_FORMATS, f"Invalid image format {im.format}. {FORMATS_HELP_MSG}"
if im.format.lower() in {"jpg", "jpeg"}:
with open(im_file, "rb") as f:
f.seek(-2, 2)
if f.read() != b"\xff\xd9": # corrupt JPEG
ImageOps.exif_transpose(Image.open(im_file)).save(im_file, "JPEG", subsampling=0, quality=100)
msg = f"{prefix}{im_file}: corrupt JPEG restored and saved"
nf = 1
except Exception as e:
nc = 1
msg = f"{prefix}{im_file}: ignoring corrupt image/label: {e}"
return (im_file, cls), nf, nc, msg
def verify_image_label(args: tuple) -> list:
im_file, lb_file, prefix, keypoint, num_cls, nkpt, ndim, single_cls = args
# Number (missing, found, empty, corrupt), message, segments, keypoints
nm, nf, ne, nc, msg, segments, keypoints = 0, 0, 0, 0, "", [], None
try:
# Verify images
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
shape = (shape[1], shape[0]) # hw
assert (shape[0] > 9) & (shape[1] > 9), f"image size {shape} <10 pixels"
assert im.format.lower() in IMG_FORMATS, f"invalid image format {im.format}. {FORMATS_HELP_MSG}"
if im.format.lower() in {"jpg", "jpeg"}:
with open(im_file, "rb") as f:
f.seek(-2, 2)
if f.read() != b"\xff\xd9": # corrupt JPEG
ImageOps.exif_transpose(Image.open(im_file)).save(im_file, "JPEG", subsampling=0, quality=100)
msg = f"{prefix}{im_file}: corrupt JPEG restored and saved"
# Verify labels
if os.path.isfile(lb_file):
nf = 1 # label found
with open(lb_file, encoding="utf-8") as f:
lb = [x.split() for x in f.read().strip().splitlines() if len(x)]
if any(len(x) > 6 for x in lb) and (not keypoint): # is segment
classes = np.array([x[0] for x in lb], dtype=np.float32)
segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb] # (cls, xy1...)
lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh)
lb = np.array(lb, dtype=np.float32)
if nl := len(lb):
if keypoint:
assert lb.shape[1] == (5 + nkpt * ndim), f"labels require {(5 + nkpt * ndim)} columns each"
points = lb[:, 5:].reshape(-1, ndim)[:, :2]
else:
assert lb.shape[1] == 5, f"labels require 5 columns, {lb.shape[1]} columns detected"
points = lb[:, 1:]
# Coordinate points check with 1% tolerance
assert points.max() <= 1.01, f"non-normalized or out of bounds coordinates {points[points > 1.01]}"
assert lb.min() >= -0.01, f"negative class labels or coordinate {lb[lb < -0.01]}"
# All labels
max_cls = 0 if single_cls else lb[:, 0].max() # max label count
assert max_cls < num_cls, (
f"Label class {int(max_cls)} exceeds dataset class count {num_cls}. "
f"Possible class labels are 0-{num_cls - 1}"
)
_, i = np.unique(lb, axis=0, return_index=True)
if len(i) < nl: # duplicate row check
lb = lb[i] # remove duplicates
if segments:
segments = [segments[x] for x in i]
msg = f"{prefix}{im_file}: {nl - len(i)} duplicate labels removed"
else:
ne = 1 # label empty
lb = np.zeros((0, (5 + nkpt * ndim) if keypoint else 5), dtype=np.float32)
else:
nm = 1 # label missing
lb = np.zeros((0, (5 + nkpt * ndim) if keypoint else 5), dtype=np.float32)
if keypoint:
keypoints = lb[:, 5:].reshape(-1, nkpt, ndim)
if ndim == 2:
kpt_mask = np.where((keypoints[..., 0] < 0) | (keypoints[..., 1] < 0), 0.0, 1.0).astype(np.float32)
keypoints = np.concatenate([keypoints, kpt_mask[..., None]], axis=-1) # (nl, nkpt, 3)
lb = lb[:, :5]
return im_file, lb, shape, segments, keypoints, nm, nf, ne, nc, msg
except Exception as e:
nc = 1
msg = f"{prefix}{im_file}: ignoring corrupt image/label: {e}"
return [None, None, None, None, None, nm, nf, ne, nc, msg]
def visualize_image_annotations(image_path: str, txt_path: str, label_map: dict[int, str]):
import matplotlib.pyplot as plt
from ultralytics.utils.plotting import colors
img = np.array(Image.open(image_path))
img_height, img_width = img.shape[:2]
annotations = []
with open(txt_path, encoding="utf-8") as file:
for line in file:
class_id, x_center, y_center, width, height = map(float, line.split())
x = (x_center - width / 2) * img_width
y = (y_center - height / 2) * img_height
w = width * img_width
h = height * img_height
annotations.append((x, y, w, h, int(class_id)))
_, ax = plt.subplots(1) # Plot the image and annotations
for x, y, w, h, label in annotations:
color = tuple(c / 255 for c in colors(label, False)) # Get and normalize an RGB color for Matplotlib
rect = plt.Rectangle((x, y), w, h, linewidth=2, edgecolor=color, facecolor="none") # Create a rectangle
ax.add_patch(rect)
luminance = 0.2126 * color[0] + 0.7152 * color[1] + 0.0722 * color[2] # Formula for luminance
ax.text(x, y - 5, label_map[label], color="white" if luminance < 0.5 else "black", backgroundcolor=color)
ax.imshow(img)
plt.show()
def polygon2mask(
imgsz: tuple[int, int], polygons: list[np.ndarray], color: int = 1, downsample_ratio: int = 1
) -> np.ndarray:
mask = np.zeros(imgsz, dtype=np.uint8)
polygons = np.asarray(polygons, dtype=np.int32)
polygons = polygons.reshape((polygons.shape[0], -1, 2))
cv2.fillPoly(mask, polygons, color=color)
nh, nw = (imgsz[0] // downsample_ratio, imgsz[1] // downsample_ratio)
# Note: fillPoly first then resize is trying to keep the same loss calculation method when mask-ratio=1
return cv2.resize(mask, (nw, nh))
def polygons2masks(
imgsz: tuple[int, int], polygons: list[np.ndarray], color: int, downsample_ratio: int = 1
) -> np.ndarray:
return np.array([polygon2mask(imgsz, [x.reshape(-1)], color, downsample_ratio) for x in polygons])
def polygons2masks_overlap(
imgsz: tuple[int, int], segments: list[np.ndarray], downsample_ratio: int = 1
) -> tuple[np.ndarray, np.ndarray]:
masks = np.zeros(
(imgsz[0] // downsample_ratio, imgsz[1] // downsample_ratio),
dtype=np.int32 if len(segments) > 255 else np.uint8,
)
areas = []
ms = []
for segment in segments:
mask = polygon2mask(
imgsz,
[segment.reshape(-1)],
downsample_ratio=downsample_ratio,
color=1,
)
ms.append(mask.astype(masks.dtype))
areas.append(mask.sum())
areas = np.asarray(areas)
index = np.argsort(-areas)
ms = np.array(ms)[index]
for i in range(len(segments)):
mask = ms[i] * (i + 1)
masks = masks + mask
masks = np.clip(masks, a_min=0, a_max=i + 1)
return masks, index
def find_dataset_yaml(path: Path) -> Path:
files = list(path.glob("*.yaml")) or list(path.rglob("*.yaml")) # try root level first and then recursive
assert files, f"No YAML file found in '{path.resolve()}'"
if len(files) > 1:
files = [f for f in files if f.stem == path.stem] # prefer *.yaml files that match
assert len(files) == 1, f"Expected 1 YAML file in '{path.resolve()}', but found {len(files)}.\n{files}"
return files[0]
def check_det_dataset(dataset: str, autodownload: bool = True) -> dict[str, Any]:
file = check_file(dataset)
# Download (optional)
extract_dir = ""
if zipfile.is_zipfile(file) or is_tarfile(file):
new_dir = safe_download(file, dir=DATASETS_DIR, unzip=True, delete=False)
file = find_dataset_yaml(DATASETS_DIR / new_dir)
extract_dir, autodownload = file.parent, False
# Read YAML
data = YAML.load(file, append_filename=True) # dictionary
# Checks
for k in "train", "val":
if k not in data:
if k != "val" or "validation" not in data:
raise SyntaxError(
emojis(f"{dataset} '{k}:' key missing ❌.\n'train' and 'val' are required in all data YAMLs.")
)
LOGGER.warning("renaming data YAML 'validation' key to 'val' to match YOLO format.")
data["val"] = data.pop("validation") # replace 'validation' key with 'val' key
if "names" not in data and "nc" not in data:
raise SyntaxError(emojis(f"{dataset} key missing ❌.\n either 'names' or 'nc' are required in all data YAMLs."))
if "names" in data and "nc" in data and len(data["names"]) != data["nc"]:
raise SyntaxError(emojis(f"{dataset} 'names' length {len(data['names'])} and 'nc: {data['nc']}' must match."))
if "names" not in data:
data["names"] = [f"class_{i}" for i in range(data["nc"])]
else:
data["nc"] = len(data["names"])
data["names"] = check_class_names(data["names"])
data["channels"] = data.get("channels", 3) # get image channels, default to 3
# Resolve paths
path = Path(extract_dir or data.get("path") or Path(data.get("yaml_file", "")).parent) # dataset root
if not path.exists() and not path.is_absolute():
path = (DATASETS_DIR / path).resolve() # path relative to DATASETS_DIR
# Set paths
data["path"] = path # download scripts
for k in "train", "val", "test", "minival":
if data.get(k): # prepend path
if isinstance(data[k], str):
x = (path / data[k]).resolve()
if not x.exists() and data[k].startswith("../"):
x = (path / data[k][3:]).resolve()
data[k] = str(x)
else:
data[k] = [str((path / x).resolve()) for x in data[k]]
# Parse YAML
val, s = (data.get(x) for x in ("val", "download"))
if val:
val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path
if not all(x.exists() for x in val):
name = clean_url(dataset) # dataset name with URL auth stripped
LOGGER.info("")
m = f"Dataset '{name}' images not found, missing path '{next(x for x in val if not x.exists())}'"
if s and autodownload:
LOGGER.warning(m)
else:
m += f"\nNote dataset download directory is '{DATASETS_DIR}'. You can update this in '{SETTINGS_FILE}'"
raise FileNotFoundError(m)
t = time.time()
r = None # success
if s.startswith("http") and s.endswith(".zip"): # URL
safe_download(url=s, dir=DATASETS_DIR, delete=True)
elif s.startswith("bash "): # bash script
LOGGER.info(f"Running {s} ...")
subprocess.run(s.split(), check=True)
else: # python script
exec(s, {"yaml": data})
dt = f"({round(time.time() - t, 1)}s)"
s = f"success ✅ {dt}, saved to {colorstr('bold', DATASETS_DIR)}" if r in {0, None} else f"failure {dt} ❌"
LOGGER.info(f"Dataset download {s}\n")
check_font("Arial.ttf" if is_ascii(data["names"]) else "Arial.Unicode.ttf") # download fonts
return data # dictionary
def check_cls_dataset(dataset: str | Path, split: str = "") -> dict[str, Any]:
# Download (optional if dataset=https://file.zip is passed directly)
if str(dataset).startswith(("http:/", "https:/")):
dataset = safe_download(dataset, dir=DATASETS_DIR, unzip=True, delete=False)
elif str(dataset).endswith((".zip", ".tar", ".gz")):
file = check_file(dataset)
dataset = safe_download(file, dir=DATASETS_DIR, unzip=True, delete=False)
dataset = Path(dataset)
data_dir = (dataset if dataset.is_dir() else (DATASETS_DIR / dataset)).resolve()
if not data_dir.is_dir():
if data_dir.suffix != "":
raise ValueError(
f'Classification datasets must be a directory (data="path/to/dir") not a file (data="{dataset}"), '
"See https://docs.ultralytics.com/datasets/classify/"
)
LOGGER.info("")
LOGGER.warning(f"Dataset not found, missing path {data_dir}, attempting download...")
t = time.time()
if str(dataset) == "imagenet":
subprocess.run(["bash", str(ROOT / "data/scripts/get_imagenet.sh")], check=True)
else:
download(f"{ASSETS_URL}/{dataset}.zip", dir=data_dir.parent)
LOGGER.info(f"Dataset download success ✅ ({time.time() - t:.1f}s), saved to {colorstr('bold', data_dir)}\n")
train_set = data_dir / "train"
if not train_set.is_dir():
LOGGER.warning(f"Dataset 'split=train' not found at {train_set}")
if image_files := list(data_dir.rglob("*.jpg")) + list(data_dir.rglob("*.png")):
from ultralytics.data.split import split_classify_dataset
LOGGER.info(f"Found {len(image_files)} images in subdirectories. Attempting to split...")
data_dir = split_classify_dataset(data_dir, train_ratio=0.8)
train_set = data_dir / "train"
else:
LOGGER.error(f"No images found in {data_dir} or its subdirectories.")
val_set = (
data_dir / "val"
if (data_dir / "val").exists()
else data_dir / "validation"
if (data_dir / "validation").exists()
else data_dir / "valid"
if (data_dir / "valid").exists()
else None
) # data/test or data/val
test_set = data_dir / "test" if (data_dir / "test").exists() else None # data/val or data/test
if split == "val" and not val_set:
LOGGER.warning("Dataset 'split=val' not found, using 'split=test' instead.")
val_set = test_set
elif split == "test" and not test_set:
LOGGER.warning("Dataset 'split=test' not found, using 'split=val' instead.")
test_set = val_set
nc = len([x for x in (data_dir / "train").glob("*") if x.is_dir()]) # number of classes
names = [x.name for x in (data_dir / "train").iterdir() if x.is_dir()] # class names list
names = dict(enumerate(sorted(names)))
# Print to console
for k, v in {"train": train_set, "val": val_set, "test": test_set}.items():
prefix = f"{colorstr(f'{k}:')} {v}..."
if v is None:
LOGGER.info(prefix)
else:
files = [path for path in v.rglob("*.*") if path.suffix[1:].lower() in IMG_FORMATS]
nf = len(files) # number of files
nd = len({file.parent for file in files}) # number of directories
if nf == 0:
if k == "train":
raise FileNotFoundError(f"{dataset} '{k}:' no training images found")
else:
LOGGER.warning(f"{prefix} found {nf} images in {nd} classes (no images found)")
elif nd != nc:
LOGGER.error(f"{prefix} found {nf} images in {nd} classes (requires {nc} classes, not {nd})")
else:
LOGGER.info(f"{prefix} found {nf} images in {nd} classes ✅ ")
return {"train": train_set, "val": val_set, "test": test_set, "nc": nc, "names": names, "channels": 3}
class HUBDatasetStats:
def __init__(self, path: str = "coco8.yaml", task: str = "detect", autodownload: bool = False):
path = Path(path).resolve()
LOGGER.info(f"Starting HUB dataset checks for {path}....")
self.task = task # detect, segment, pose, classify, obb
if self.task == "classify":
unzip_dir = unzip_file(path)
data = check_cls_dataset(unzip_dir)
data["path"] = unzip_dir
else: # detect, segment, pose, obb
_, data_dir, yaml_path = self._unzip(Path(path))
try:
# Load YAML with checks
data = YAML.load(yaml_path)
data["path"] = "" # strip path since YAML should be in dataset root for all HUB datasets
YAML.save(yaml_path, data)
data = check_det_dataset(yaml_path, autodownload) # dict
data["path"] = data_dir # YAML path should be set to '' (relative) or parent (absolute)
except Exception as e:
raise Exception("error/HUB/dataset_stats/init") from e
self.hub_dir = Path(f"{data['path']}-hub")
self.im_dir = self.hub_dir / "images"
self.stats = {"nc": len(data["names"]), "names": list(data["names"].values())} # statistics dictionary
self.data = data
@staticmethod
def _unzip(path: Path) -> tuple[bool, str, Path]:
if not str(path).endswith(".zip"): # path is data.yaml
return False, None, path
unzip_dir = unzip_file(path, path=path.parent)
assert unzip_dir.is_dir(), (
f"Error unzipping {path}, {unzip_dir} not found. path/to/abc.zip MUST unzip to path/to/abc/"
)
return True, str(unzip_dir), find_dataset_yaml(unzip_dir) # zipped, data_dir, yaml_path
def _hub_ops(self, f: str):
compress_one_image(f, self.im_dir / Path(f).name) # save to dataset-hub
def get_json(self, save: bool = False, verbose: bool = False) -> dict:
def _round(labels):
if self.task == "detect":
coordinates = labels["bboxes"]
elif self.task in {"segment", "obb"}: # Segment and OBB use segments. OBB segments are normalized xyxyxyxy
coordinates = [x.flatten() for x in labels["segments"]]
elif self.task == "pose":
n, nk, nd = labels["keypoints"].shape
coordinates = np.concatenate((labels["bboxes"], labels["keypoints"].reshape(n, nk * nd)), 1)
else:
raise ValueError(f"Undefined dataset task={self.task}.")
zipped = zip(labels["cls"], coordinates)
return [[int(c[0]), *(round(float(x), 4) for x in points)] for c, points in zipped]
for split in "train", "val", "test":
self.stats[split] = None # predefine
path = self.data.get(split)
# Check split
if path is None: # no split
continue
files = [f for f in Path(path).rglob("*.*") if f.suffix[1:].lower() in IMG_FORMATS] # image files in split
if not files: # no images
continue
# Get dataset statistics
if self.task == "classify":
from torchvision.datasets import ImageFolder # scope for faster 'import ultralytics'
dataset = ImageFolder(self.data[split])
x = np.zeros(len(dataset.classes)).astype(int)
for im in dataset.imgs:
x[im[1]] += 1
self.stats[split] = {
"instance_stats": {"total": len(dataset), "per_class": x.tolist()},
"image_stats": {"total": len(dataset), "unlabelled": 0, "per_class": x.tolist()},
"labels": [{Path(k).name: v} for k, v in dataset.imgs],
}
else:
from ultralytics.data import YOLODataset
dataset = YOLODataset(img_path=self.data[split], data=self.data, task=self.task)
x = np.array(
[
np.bincount(label["cls"].astype(int).flatten(), minlength=self.data["nc"])
for label in TQDM(dataset.labels, total=len(dataset), desc="Statistics")
]
) # shape(128x80)
self.stats[split] = {
"instance_stats": {"total": int(x.sum()), "per_class": x.sum(0).tolist()},
"image_stats": {
"total": len(dataset),
"unlabelled": int(np.all(x == 0, 1).sum()),
"per_class": (x > 0).sum(0).tolist(),
},
"labels": [{Path(k).name: _round(v)} for k, v in zip(dataset.im_files, dataset.labels)],
}
# Save, print and return
if save:
self.hub_dir.mkdir(parents=True, exist_ok=True) # makes dataset-hub/
stats_path = self.hub_dir / "stats.json"
LOGGER.info(f"Saving {stats_path.resolve()}...")
with open(stats_path, "w", encoding="utf-8") as f:
json.dump(self.stats, f) # save stats.json
if verbose:
LOGGER.info(json.dumps(self.stats, indent=2, sort_keys=False))
return self.stats
def process_images(self) -> Path:
from ultralytics.data import YOLODataset # ClassificationDataset
self.im_dir.mkdir(parents=True, exist_ok=True) # makes dataset-hub/images/
for split in "train", "val", "test":
if self.data.get(split) is None:
continue
dataset = YOLODataset(img_path=self.data[split], data=self.data)
with ThreadPool(NUM_THREADS) as pool:
for _ in TQDM(pool.imap(self._hub_ops, dataset.im_files), total=len(dataset), desc=f"{split} images"):
pass
LOGGER.info(f"Done. All images saved to {self.im_dir}")
return self.im_dir
def compress_one_image(f: str, f_new: str | None = None, max_dim: int = 1920, quality: int = 50):
try: # use PIL
Image.MAX_IMAGE_PIXELS = None # Fix DecompressionBombError, allow optimization of image > ~178.9 million pixels
im = Image.open(f)
if im.mode in {"RGBA", "LA"}: # Convert to RGB if needed (for JPEG)
im = im.convert("RGB")
r = max_dim / max(im.height, im.width) # ratio
if r < 1.0: # image too large
im = im.resize((int(im.width * r), int(im.height * r)))
im.save(f_new or f, "JPEG", quality=quality, optimize=True) # save
except Exception as e: # use OpenCV
LOGGER.warning(f"HUB ops PIL failure {f}: {e}")
im = cv2.imread(f)
im_height, im_width = im.shape[:2]
r = max_dim / max(im_height, im_width) # ratio
if r < 1.0: # image too large
im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_AREA)
cv2.imwrite(str(f_new or f), im)
def load_dataset_cache_file(path: Path) -> dict:
import gc
gc.disable() # reduce pickle load time https://github.com/ultralytics/ultralytics/pull/1585
cache = np.load(str(path), allow_pickle=True).item() # load dict
gc.enable()
return cache
def save_dataset_cache_file(prefix: str, path: Path, x: dict, version: str):
x["version"] = version # add cache version
if is_dir_writeable(path.parent):
if path.exists():
path.unlink() # remove *.cache file if exists
try:
with open(str(path), "wb") as file: # context manager here fixes windows async np.save bug
np.save(file, x)
LOGGER.info(f"{prefix}New cache created: {path}")
except Exception as e:
Path(path).unlink(missing_ok=True) # remove partially written file
LOGGER.warning(f"{prefix}WARNING ⚠️ Failed to save cache to {path}: {e}")
else:
LOGGER.warning(f"{prefix}Cache directory {path.parent} is not writable, cache not saved.") | --- +++ @@ -58,6 +58,7 @@
def img2label_paths(img_paths: list[str]) -> list[str]:
+ """Convert image paths to label paths by replacing 'images' with 'labels' and extension with '.txt'."""
sa, sb = f"{os.sep}images{os.sep}", f"{os.sep}labels{os.sep}" # /images/, /labels/ substrings
return [sb.join(x.rsplit(sa, 1)).rsplit(".", 1)[0] + ".txt" for x in img_paths]
@@ -65,6 +66,23 @@ def check_file_speeds(
files: list[str], threshold_ms: float = 10, threshold_mb: float = 50, max_files: int = 5, prefix: str = ""
):
+ """Check dataset file access speed and provide performance feedback.
+
+ This function tests the access speed of dataset files by measuring ping (stat call) time and read speed. It samples
+ up to `max_files` files from the provided list and warns if access times exceed the threshold.
+
+ Args:
+ files (list[str]): List of file paths to check for access speed.
+ threshold_ms (float, optional): Threshold in milliseconds for ping time warnings.
+ threshold_mb (float, optional): Threshold in megabytes per second for read speed warnings.
+ max_files (int, optional): The maximum number of files to check.
+ prefix (str, optional): Prefix string to add to log messages.
+
+ Examples:
+ >>> from pathlib import Path
+ >>> image_files = list(Path("dataset/images").glob("*.jpg"))
+ >>> check_file_speeds(image_files, threshold_ms=15)
+ """
if not files:
LOGGER.warning(f"{prefix}Image speed checks: No files to check")
return
@@ -123,6 +141,7 @@
def get_hash(paths: list[str]) -> str:
+ """Return a single hash value of a list of paths (files or dirs)."""
size = 0
for p in paths:
try:
@@ -135,6 +154,7 @@
def exif_size(img: Image.Image) -> tuple[int, int]:
+ """Return exif-corrected PIL size."""
s = img.size # (width, height)
if img.format == "JPEG": # only support JPEG images
try:
@@ -148,6 +168,7 @@
def verify_image(args: tuple) -> tuple:
+ """Verify one image."""
(im_file, cls), prefix = args
# Number (found, corrupt), message
nf, nc, msg = 0, 0, ""
@@ -172,6 +193,7 @@
def verify_image_label(args: tuple) -> list:
+ """Verify one image-label pair."""
im_file, lb_file, prefix, keypoint, num_cls, nkpt, ndim, single_cls = args
# Number (missing, found, empty, corrupt), message, segments, keypoints
nm, nf, ne, nc, msg, segments, keypoints = 0, 0, 0, 0, "", [], None
@@ -243,6 +265,22 @@
def visualize_image_annotations(image_path: str, txt_path: str, label_map: dict[int, str]):
+ """Visualize YOLO annotations (bounding boxes and class labels) on an image.
+
+ This function reads an image and its corresponding annotation file in YOLO format, then draws bounding boxes around
+ detected objects and labels them with their respective class names. The bounding box colors are assigned based on
+ the class ID, and the text color is dynamically adjusted for readability, depending on the background color's
+ luminance.
+
+ Args:
+ image_path (str): Path to the image file to annotate. The file must be readable by PIL.
+ txt_path (str): Path to the annotation file in YOLO format, which should contain one line per object.
+ label_map (dict[int, str]): A dictionary that maps class IDs (integers) to class labels (strings).
+
+ Examples:
+ >>> label_map = {0: "cat", 1: "dog", 2: "bird"} # Should include all annotated classes
+ >>> visualize_image_annotations("path/to/image.jpg", "path/to/annotations.txt", label_map)
+ """
import matplotlib.pyplot as plt
from ultralytics.utils.plotting import colors
@@ -272,6 +310,18 @@ def polygon2mask(
imgsz: tuple[int, int], polygons: list[np.ndarray], color: int = 1, downsample_ratio: int = 1
) -> np.ndarray:
+ """Convert a list of polygons to a binary mask of the specified image size.
+
+ Args:
+ imgsz (tuple[int, int]): The size of the image as (height, width).
+ polygons (list[np.ndarray]): A list of polygons. Each polygon is a 1D array of coordinates with length M, where
+ M % 2 = 0 (alternating x, y values).
+ color (int, optional): The color value to fill in the polygons on the mask.
+ downsample_ratio (int, optional): Factor by which to downsample the mask.
+
+ Returns:
+ (np.ndarray): A binary mask of the specified image size with the polygons filled in.
+ """
mask = np.zeros(imgsz, dtype=np.uint8)
polygons = np.asarray(polygons, dtype=np.int32)
polygons = polygons.reshape((polygons.shape[0], -1, 2))
@@ -284,12 +334,25 @@ def polygons2masks(
imgsz: tuple[int, int], polygons: list[np.ndarray], color: int, downsample_ratio: int = 1
) -> np.ndarray:
+ """Convert a list of polygons to a set of binary masks of the specified image size.
+
+ Args:
+ imgsz (tuple[int, int]): The size of the image as (height, width).
+ polygons (list[np.ndarray]): A list of polygons. Each polygon is an array of coordinates that can be reshaped to
+ (-1, 2) as (x, y) point pairs.
+ color (int): The color value to fill in the polygons on the masks.
+ downsample_ratio (int, optional): Factor by which to downsample each mask.
+
+ Returns:
+ (np.ndarray): A set of binary masks of the specified image size with the polygons filled in.
+ """
return np.array([polygon2mask(imgsz, [x.reshape(-1)], color, downsample_ratio) for x in polygons])
def polygons2masks_overlap(
imgsz: tuple[int, int], segments: list[np.ndarray], downsample_ratio: int = 1
) -> tuple[np.ndarray, np.ndarray]:
+ """Return a downsampled overlap mask and sorted area indices."""
masks = np.zeros(
(imgsz[0] // downsample_ratio, imgsz[1] // downsample_ratio),
dtype=np.int32 if len(segments) > 255 else np.uint8,
@@ -316,6 +379,17 @@
def find_dataset_yaml(path: Path) -> Path:
+ """Find and return the YAML file associated with a Detect, Segment or Pose dataset.
+
+ This function searches for a YAML file at the root level of the provided directory first, and if not found, it
+ performs a recursive search. It prefers YAML files that have the same stem as the provided path.
+
+ Args:
+ path (Path): The directory path to search for the YAML file.
+
+ Returns:
+ (Path): The path of the found YAML file.
+ """
files = list(path.glob("*.yaml")) or list(path.rglob("*.yaml")) # try root level first and then recursive
assert files, f"No YAML file found in '{path.resolve()}'"
if len(files) > 1:
@@ -325,6 +399,19 @@
def check_det_dataset(dataset: str, autodownload: bool = True) -> dict[str, Any]:
+ """Download, verify, and/or unzip a dataset if not found locally.
+
+ This function checks the availability of a specified dataset, and if not found, it can optionally download and
+ unzip the dataset. It then reads and parses the accompanying YAML data, ensuring key requirements are met and
+ resolving paths related to the dataset.
+
+ Args:
+ dataset (str): Path to the dataset or dataset descriptor (like a YAML file).
+ autodownload (bool, optional): Whether to automatically download the dataset if not found.
+
+ Returns:
+ (dict[str, Any]): Parsed dataset information and paths.
+ """
file = check_file(dataset)
# Download (optional)
@@ -406,6 +493,24 @@
def check_cls_dataset(dataset: str | Path, split: str = "") -> dict[str, Any]:
+ """Check a classification dataset such as Imagenet.
+
+ This function accepts a `dataset` name and attempts to retrieve the corresponding dataset information. If the
+ dataset is not found locally, it attempts to download the dataset from the internet and save it locally.
+
+ Args:
+ dataset (str | Path): The name of the dataset.
+ split (str, optional): The split of the dataset. Either 'val', 'test', or ''.
+
+ Returns:
+ (dict[str, Any]): A dictionary containing the following keys:
+
+ - 'train' (Path): The directory path containing the training set of the dataset.
+ - 'val' (Path): The directory path containing the validation set of the dataset.
+ - 'test' (Path): The directory path containing the test set of the dataset.
+ - 'nc' (int): The number of classes in the dataset.
+ - 'names' (dict[int, str]): A dictionary of class names in the dataset.
+ """
# Download (optional if dataset=https://file.zip is passed directly)
if str(dataset).startswith(("http:/", "https:/")):
dataset = safe_download(dataset, dir=DATASETS_DIR, unzip=True, delete=False)
@@ -484,8 +589,41 @@
class HUBDatasetStats:
+ """A class for generating HUB dataset JSON and `-hub` dataset directory.
+
+ Args:
+ path (str): Path to data.yaml or data.zip (with data.yaml inside data.zip).
+ task (str): Dataset task. Options are 'detect', 'segment', 'pose', 'classify', 'obb'.
+ autodownload (bool): Attempt to download dataset if not found locally.
+
+ Attributes:
+ task (str): Dataset task type.
+ hub_dir (Path): Directory path for HUB dataset files.
+ im_dir (Path): Directory path for compressed images.
+ stats (dict): Statistics dictionary containing dataset information.
+ data (dict): Dataset configuration data.
+
+ Methods:
+ get_json: Return dataset JSON for Ultralytics HUB.
+ process_images: Compress images for Ultralytics HUB.
+
+ Examples:
+ >>> from ultralytics.data.utils import HUBDatasetStats
+ >>> stats = HUBDatasetStats("path/to/coco8.zip", task="detect") # detect dataset
+ >>> stats = HUBDatasetStats("path/to/coco8-seg.zip", task="segment") # segment dataset
+ >>> stats = HUBDatasetStats("path/to/coco8-pose.zip", task="pose") # pose dataset
+ >>> stats = HUBDatasetStats("path/to/dota8.zip", task="obb") # OBB dataset
+ >>> stats = HUBDatasetStats("path/to/imagenet10.zip", task="classify") # classification dataset
+ >>> stats.get_json(save=True)
+ >>> stats.process_images()
+
+ Notes:
+ Download *.zip files from https://github.com/ultralytics/hub/tree/main/example_datasets
+ i.e. https://github.com/ultralytics/hub/raw/main/example_datasets/coco8.zip for coco8.zip.
+ """
def __init__(self, path: str = "coco8.yaml", task: str = "detect", autodownload: bool = False):
+ """Initialize class."""
path = Path(path).resolve()
LOGGER.info(f"Starting HUB dataset checks for {path}....")
@@ -513,6 +651,7 @@
@staticmethod
def _unzip(path: Path) -> tuple[bool, str, Path]:
+ """Unzip data.zip."""
if not str(path).endswith(".zip"): # path is data.yaml
return False, None, path
unzip_dir = unzip_file(path, path=path.parent)
@@ -522,11 +661,14 @@ return True, str(unzip_dir), find_dataset_yaml(unzip_dir) # zipped, data_dir, yaml_path
def _hub_ops(self, f: str):
+ """Save a compressed image for HUB previews."""
compress_one_image(f, self.im_dir / Path(f).name) # save to dataset-hub
def get_json(self, save: bool = False, verbose: bool = False) -> dict:
+ """Return dataset JSON for Ultralytics HUB."""
def _round(labels):
+ """Update labels to integer class and 4 decimal place floats."""
if self.task == "detect":
coordinates = labels["bboxes"]
elif self.task in {"segment", "obb"}: # Segment and OBB use segments. OBB segments are normalized xyxyxyxy
@@ -597,6 +739,7 @@ return self.stats
def process_images(self) -> Path:
+ """Compress images for Ultralytics HUB."""
from ultralytics.data import YOLODataset # ClassificationDataset
self.im_dir.mkdir(parents=True, exist_ok=True) # makes dataset-hub/images/
@@ -612,6 +755,22 @@
def compress_one_image(f: str, f_new: str | None = None, max_dim: int = 1920, quality: int = 50):
+ """Compress a single image file to reduced size while preserving its aspect ratio and quality using either the
+ Python Imaging Library (PIL) or OpenCV library. If the input image is smaller than the maximum dimension, it
+ will not be resized.
+
+ Args:
+ f (str): The path to the input image file.
+ f_new (str, optional): The path to the output image file. If not specified, the input file will be overwritten.
+ max_dim (int, optional): The maximum dimension (width or height) of the output image.
+ quality (int, optional): The image compression quality as a percentage.
+
+ Examples:
+ >>> from pathlib import Path
+ >>> from ultralytics.data.utils import compress_one_image
+ >>> for f in Path("path/to/dataset").rglob("*.jpg"):
+ ...     compress_one_image(f)
+ """
try: # use PIL
Image.MAX_IMAGE_PIXELS = None # Fix DecompressionBombError, allow optimization of image > ~178.9 million pixels
im = Image.open(f)
@@ -632,6 +791,7 @@
def load_dataset_cache_file(path: Path) -> dict:
+ """Load an Ultralytics *.cache dictionary from path."""
import gc
gc.disable() # reduce pickle load time https://github.com/ultralytics/ultralytics/pull/1585
@@ -641,6 +801,7 @@
def save_dataset_cache_file(prefix: str, path: Path, x: dict, version: str):
+ """Save an Ultralytics dataset *.cache dictionary x to path."""
x["version"] = version # add cache version
if is_dir_writeable(path.parent):
if path.exists():
@@ -653,4 +814,4 @@ Path(path).unlink(missing_ok=True) # remove partially written file
LOGGER.warning(f"{prefix}WARNING ⚠️ Failed to save cache to {path}: {e}")
else:
- LOGGER.warning(f"{prefix}Cache directory {path.parent} is not writable, cache not saved.")+ LOGGER.warning(f"{prefix}Cache directory {path.parent} is not writable, cache not saved.")
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/data/utils.py |
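The polygon mask helpers documented above can be exercised on a tiny synthetic example; a minimal sketch, assuming the ultralytics package (and its cv2/numpy dependencies) is installed:
import numpy as np
from ultralytics.data.utils import polygon2mask, polygons2masks_overlap

# Two pixel-space polygons: a large square and a smaller square overlapping its corner
big = np.array([[10, 10], [90, 10], [90, 90], [10, 90]], dtype=np.float32)
small = np.array([[60, 60], [120, 60], [120, 120], [60, 120]], dtype=np.float32)

mask = polygon2mask((128, 128), [big.reshape(-1)], color=1, downsample_ratio=1)
print(mask.shape, int(mask.max()))  # (128, 128) 1

overlap, index = polygons2masks_overlap((128, 128), [small, big], downsample_ratio=1)
# index sorts segments by descending area; pixel values hold the 1-based position in that order,
# so where segments overlap the later (smaller) segment's value wins after clipping
print(sorted(np.unique(overlap).tolist()), index.tolist())  # [0, 1, 2] [1, 0]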
Document this code for team use | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import ast
import html
import re
import subprocess
import textwrap
from collections import defaultdict
from collections.abc import Iterable
from dataclasses import dataclass, field
from pathlib import Path
from typing import Literal
from ultralytics.utils import LOGGER
from ultralytics.utils.tqdm import TQDM
# Constants
FILE = Path(__file__).resolve()
REPO_ROOT = FILE.parents[1]
PACKAGE_DIR = REPO_ROOT / "ultralytics"
REFERENCE_DIR = PACKAGE_DIR.parent / "docs/en/reference"
GITHUB_REPO = "ultralytics/ultralytics"
SIGNATURE_LINE_LENGTH = 120
# Use Font Awesome brand GitHub icon (CSS already loaded via mkdocs.yml and HTML head)
GITHUB_ICON = '<i class="fa-brands fa-github" aria-hidden="true" style="margin-right:6px;"></i>'
MKDOCS_YAML = PACKAGE_DIR.parent / "mkdocs.yml"
INCLUDE_SPECIAL_METHODS = {
"__call__",
"__dir__",
"__enter__",
"__exit__",
"__aenter__",
"__aexit__",
"__getitem__",
"__iter__",
"__len__",
"__next__",
"__getattr__",
}
PROPERTY_DECORATORS = {"property", "cached_property"}
CLASS_DEF_RE = re.compile(r"(?:^|\n)class\s(\w+)(?:\(|:)")
FUNC_DEF_RE = re.compile(r"(?:^|\n)(?:async\s+)?def\s(\w+)\(")
SECTION_ENTRY_RE = re.compile(r"([\w*]+)\s*(?:\(([^)]+)\))?:\s*(.*)")
RETURNS_RE = re.compile(r"([^:]+):\s*(.*)")
@dataclass
class ParameterDoc:
name: str
type: str | None
description: str
default: str | None = None
@dataclass
class ReturnDoc:
type: str | None
description: str
@dataclass
class ParsedDocstring:
summary: str = ""
description: str = ""
params: list[ParameterDoc] = field(default_factory=list)
attributes: list[ParameterDoc] = field(default_factory=list)
returns: list[ReturnDoc] = field(default_factory=list)
yields: list[ReturnDoc] = field(default_factory=list)
raises: list[ParameterDoc] = field(default_factory=list)
notes: list[str] = field(default_factory=list)
examples: list[str] = field(default_factory=list)
@dataclass
class DocItem:
name: str
qualname: str
kind: Literal["class", "function", "method", "property"]
signature: str
doc: ParsedDocstring
signature_params: list[ParameterDoc]
lineno: int
end_lineno: int
bases: list[str] = field(default_factory=list)
children: list[DocItem] = field(default_factory=list)
module_path: str = ""
source: str = ""
@dataclass
class DocumentedModule:
path: Path
module_path: str
classes: list[DocItem]
functions: list[DocItem]
# --------------------------------------------------------------------------------------------- #
# Placeholder (legacy) generation for mkdocstrings-style stubs
# --------------------------------------------------------------------------------------------- #
def extract_classes_and_functions(filepath: Path) -> tuple[list[str], list[str]]:
content = filepath.read_text()
classes = CLASS_DEF_RE.findall(content)
functions = FUNC_DEF_RE.findall(content)
return classes, functions
def create_placeholder_markdown(py_filepath: Path, module_path: str, classes: list[str], functions: list[str]) -> Path:
md_filepath = REFERENCE_DIR / py_filepath.relative_to(PACKAGE_DIR).with_suffix(".md")
exists = md_filepath.exists()
header_content = ""
if exists:
current = md_filepath.read_text()
if current.startswith("---"):
parts = current.split("---", 2)
if len(parts) > 2:
header_content = f"---{parts[1]}---\n\n"
if not header_content:
header_content = "---\ndescription: TODO ADD DESCRIPTION\nkeywords: TODO ADD KEYWORDS\n---\n\n"
module_path_dots = module_path
module_path_fs = module_path.replace(".", "/")
url = f"https://github.com/{GITHUB_REPO}/blob/main/{module_path_fs}.py"
pretty = url.replace("__init__.py", "\\_\\_init\\_\\_.py")
title_content = f"# Reference for `{module_path_fs}.py`\n\n" + contribution_admonition(
pretty, url, kind="success", title="Improvements"
)
md_content = ["<br>\n\n"]
md_content.extend(f"## ::: {module_path_dots}.{cls}\n\n<br><br><hr><br>\n\n" for cls in classes)
md_content.extend(f"## ::: {module_path_dots}.{func}\n\n<br><br><hr><br>\n\n" for func in functions)
if md_content[-1:]:
md_content[-1] = md_content[-1].replace("<hr><br>\n\n", "")
md_filepath.parent.mkdir(parents=True, exist_ok=True)
md_filepath.write_text(header_content + title_content + "".join(md_content) + "\n")
return _relative_to_workspace(md_filepath)
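# Illustrative expectation (hypothetical inputs, assuming the REFERENCE_DIR layout above): for
# module_path "ultralytics.data.utils" with classes ["HUBDatasetStats"] and functions ["img2label_paths"],
# the stub written to docs/en/reference/data/utils.md contains entries like
#   ## ::: ultralytics.data.utils.HUBDatasetStats
# separated by "<br><br><hr><br>" dividers, below the GitHub contribution admonition.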
def _get_source(src: str, node: ast.AST) -> str:
segment = ast.get_source_segment(src, node)
if segment:
return segment
try:
return ast.unparse(node)
except Exception:
return ""
def _format_annotation(annotation: ast.AST | None, src: str) -> str | None:
if annotation is None:
return None
text = _get_source(src, annotation).strip()
return " ".join(text.split()) if text else None
def _format_default(default: ast.AST | None, src: str) -> str | None:
if default is None:
return None
text = _get_source(src, default).strip()
return " ".join(text.split()) if text else None
def _format_parameter(arg: ast.arg, default: ast.AST | None, src: str) -> str:
annotation = _format_annotation(arg.annotation, src)
rendered = arg.arg
if annotation:
rendered += f": {annotation}"
default_value = _format_default(default, src)
if default_value is not None:
rendered += f" = {default_value}"
return rendered
def collect_signature_parameters(args: ast.arguments, src: str, *, skip_self: bool = True) -> list[ParameterDoc]:
params: list[ParameterDoc] = []
def add_param(arg: ast.arg, default_value: ast.AST | None = None):
name = arg.arg
if skip_self and name in {"self", "cls"}:
return
params.append(
ParameterDoc(
name=name,
type=_format_annotation(arg.annotation, src),
description="",
default=_format_default(default_value, src),
)
)
posonly = list(getattr(args, "posonlyargs", []))
regular = list(getattr(args, "args", []))
defaults = list(getattr(args, "defaults", []))
total_regular = len(posonly) + len(regular)
default_offset = total_regular - len(defaults)
combined = posonly + regular
for idx, arg in enumerate(combined):
default = defaults[idx - default_offset] if idx >= default_offset else None
add_param(arg, default)
vararg = getattr(args, "vararg", None)
if vararg:
add_param(vararg)
params[-1].name = f"*{params[-1].name}"
kwonly = list(getattr(args, "kwonlyargs", []))
kw_defaults = list(getattr(args, "kw_defaults", []))
for kwarg, default in zip(kwonly, kw_defaults):
add_param(kwarg, default)
kwarg = getattr(args, "kwarg", None)
if kwarg:
add_param(kwarg)
params[-1].name = f"**{params[-1].name}"
return params
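# Illustrative expectation (hypothetical function, assuming the collector above): for
#   def f(self, a, *args, b=1, **kw): ...
# called with skip_self=True, the collected parameter names are ["a", "*args", "b", "**kw"], with only
# "b" carrying the default "1".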
def format_signature(
node: ast.AST, src: str, *, is_class: bool = False, is_async: bool = False, display_name: str | None = None
) -> str:
if not isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
return ""
if isinstance(node, ast.ClassDef):
init_method = next(
(n for n in node.body if isinstance(n, (ast.FunctionDef, ast.AsyncFunctionDef)) and n.name == "__init__"),
None,
)
args = (
init_method.args
if init_method
else ast.arguments(
posonlyargs=[], args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[]
)
)
else:
args = node.args
name = display_name or getattr(node, "name", "")
params: list[str] = []
posonly = list(getattr(args, "posonlyargs", []))
regular = list(getattr(args, "args", []))
defaults = list(getattr(args, "defaults", []))
total_regular = len(posonly) + len(regular)
default_offset = total_regular - len(defaults)
combined = posonly + regular
for idx, arg in enumerate(combined):
default = defaults[idx - default_offset] if idx >= default_offset else None
params.append(_format_parameter(arg, default, src))
if posonly and idx == len(posonly) - 1:
params.append("/")
vararg = getattr(args, "vararg", None)
if vararg:
rendered = _format_parameter(vararg, None, src)
params.append(f"*{rendered}")
kwonly = list(getattr(args, "kwonlyargs", []))
kw_defaults = list(getattr(args, "kw_defaults", []))
if kwonly:
if not vararg:
params.append("*")
for kwarg, default in zip(kwonly, kw_defaults):
params.append(_format_parameter(kwarg, default, src))
kwarg = getattr(args, "kwarg", None)
if kwarg:
rendered = _format_parameter(kwarg, None, src)
params.append(f"**{rendered}")
return_annotation = (
_format_annotation(node.returns, src)
if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)) and node.returns
else None
)
prefix = "" if is_class else ("async def " if is_async else "def ")
signature = f"{prefix}{name}({', '.join(params)})"
if return_annotation:
signature += f" -> {return_annotation}"
if len(signature) <= SIGNATURE_LINE_LENGTH or not params:
return signature
raw_signature = _get_definition_signature(node, src)
return raw_signature or signature
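# Illustrative expectation (hypothetical function, assuming the helpers above): for
#   def add(a: int, b: int = 0) -> int: ...
# format_signature renders "def add(a: int, b: int = 0) -> int"; signatures longer than
# SIGNATURE_LINE_LENGTH characters fall back to the raw source header via _get_definition_signature.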
def _split_section_entries(lines: list[str]) -> list[list[str]]:
entries: list[list[str]] = []
current: list[str] = []
base_indent: int | None = None
for raw_line in lines:
if not raw_line.strip():
if current:
current.append("")
continue
indent = len(raw_line) - len(raw_line.lstrip(" "))
if base_indent is None:
base_indent = indent
if indent <= base_indent and current:
entries.append(current)
current = [raw_line]
else:
current.append(raw_line)
if current:
entries.append(current)
return entries
def _parse_named_entries(lines: list[str]) -> list[ParameterDoc]:
entries = []
for block in _split_section_entries(lines):
text = textwrap.dedent("\n".join(block)).strip()
if not text:
continue
first_line, *rest = text.splitlines()
match = SECTION_ENTRY_RE.match(first_line)
if match:
name, type_hint, desc = match.groups()
description = " ".join(desc.split())
if rest:
description = f"{description}\n" + "\n".join(rest)
entries.append(ParameterDoc(name=name, type=type_hint, description=_normalize_text(description)))
else:
entries.append(ParameterDoc(name=text, type=None, description=""))
return entries
def _parse_returns(lines: list[str]) -> list[ReturnDoc]:
entries = []
for block in _split_section_entries(lines):
text = textwrap.dedent("\n".join(block)).strip()
if not text:
continue
match = RETURNS_RE.match(text)
if match:
type_hint, desc = match.groups()
cleaned_type = type_hint.strip()
if cleaned_type.startswith("(") and cleaned_type.endswith(")"):
cleaned_type = cleaned_type[1:-1].strip()
entries.append(ReturnDoc(type=cleaned_type, description=_normalize_text(desc.strip())))
else:
entries.append(ReturnDoc(type=None, description=_normalize_text(text)))
return entries
SECTION_ALIASES = {
"args": "params",
"arguments": "params",
"parameters": "params",
"params": "params",
"returns": "returns",
"return": "returns",
"yields": "yields",
"yield": "yields",
"raises": "raises",
"exceptions": "raises",
"exception": "raises",
"attributes": "attributes",
"attr": "attributes",
"examples": "examples",
"example": "examples",
"notes": "notes",
"note": "notes",
"methods": "methods",
}
def _normalize_text(text: str) -> str:
if not text:
return ""
# Check if text contains Markdown structures that need line preservation
if any(marker in text for marker in ("|", "!!!", "```", "\n#", "\n- ", "\n* ", "\n1. ", "\n ")):
# Preserve Markdown formatting - just strip trailing whitespace from lines
return "\n".join(line.rstrip() for line in text.splitlines()).strip()
# Simple text - collapse single newlines within paragraphs
paragraphs: list[str] = []
current: list[str] = []
for line in text.splitlines():
stripped = line.strip()
if not stripped:
if current:
paragraphs.append(" ".join(current))
current = []
continue
current.append(stripped)
if current:
paragraphs.append(" ".join(current))
return "\n\n".join(paragraphs)
def parse_google_docstring(docstring: str | None) -> ParsedDocstring:
if not docstring:
return ParsedDocstring()
lines = textwrap.dedent(docstring).splitlines()
while lines and not lines[0].strip():
lines.pop(0)
if not lines:
return ParsedDocstring()
summary = _normalize_text(lines[0].strip())
body = lines[1:]
sections: defaultdict[str, list[str]] = defaultdict(list)
current = "description"
for line in body:
stripped = line.strip()
key = SECTION_ALIASES.get(stripped.rstrip(":").lower())
if key and stripped.endswith(":"):
current = key
continue
if current != "methods": # ignore "Methods:" sections; methods are rendered from AST
sections[current].append(line)
description = "\n".join(sections.pop("description", [])).strip("\n")
description = _normalize_text(description)
return ParsedDocstring(
summary=summary,
description=description,
params=_parse_named_entries(sections.get("params", [])),
attributes=_parse_named_entries(sections.get("attributes", [])),
returns=_parse_returns(sections.get("returns", [])),
yields=_parse_returns(sections.get("yields", [])),
raises=_parse_named_entries(sections.get("raises", [])),
notes=[textwrap.dedent("\n".join(sections.get("notes", []))).strip()] if sections.get("notes") else [],
examples=[textwrap.dedent("\n".join(sections.get("examples", []))).strip()] if sections.get("examples") else [],
)
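# Illustrative expectation (hypothetical docstring, assuming the parser above): for
#   """Sum two ints.
#
#   Args:
#       a (int): First value.
#
#   Returns:
#       (int): The sum.
#   """
# parse_google_docstring returns summary="Sum two ints.", params=[ParameterDoc("a", "int", "First value.")],
# and returns=[ReturnDoc("int", "The sum.")], with the remaining sections left empty.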
def merge_docstrings(base: ParsedDocstring, extra: ParsedDocstring, ignore_summary: bool = True) -> ParsedDocstring:
# Keep existing class docs; append init docs only when they introduce new entries (class takes priority).
def _merge_unique(base_items, extra_items, key):
seen = {key(item) for item in base_items}
base_items.extend(item for item in extra_items if key(item) not in seen)
return base_items
if not base.summary and extra.summary and not ignore_summary:
base.summary = extra.summary
if extra.description:
base.description = "\n\n".join(filter(None, [base.description, extra.description]))
_merge_unique(base.params, extra.params, lambda p: (p.name, p.type, p.description, p.default))
_merge_unique(base.attributes, extra.attributes, lambda p: (p.name, p.type, p.description, p.default))
_merge_unique(base.returns, extra.returns, lambda r: (r.type, r.description))
_merge_unique(base.yields, extra.yields, lambda r: (r.type, r.description))
_merge_unique(base.raises, extra.raises, lambda r: (r.name, r.type, r.description, r.default))
_merge_unique(base.notes, extra.notes, lambda n: n.strip())
_merge_unique(base.examples, extra.examples, lambda e: e.strip())
return base
def _should_document(name: str, *, allow_private: bool = False) -> bool:
if name in INCLUDE_SPECIAL_METHODS:
return True
if name.startswith("_"):
return allow_private
return True
def _collect_source_block(src: str, node: ast.AST, end_line: int | None = None) -> str:
if not hasattr(node, "lineno") or not hasattr(node, "end_lineno"):
return ""
lines = src.splitlines()
# Include decorators by starting from the first decorator line if present
decorator_lines = [getattr(d, "lineno", node.lineno) for d in getattr(node, "decorator_list", [])]
start_line = min([*decorator_lines, node.lineno]) if decorator_lines else node.lineno
start = max(start_line - 1, 0)
end = end_line or getattr(node, "end_lineno", node.lineno)
snippet = "\n".join(lines[start:end])
return textwrap.dedent(snippet).rstrip()
def _get_definition_signature(node: ast.AST, src: str) -> str:
if not hasattr(node, "lineno"):
return ""
lines = src.splitlines()[node.lineno - 1 :]
collected: list[str] = []
for line in lines:
stripped = line.strip()
if not stripped:
continue
collected.append(line)
if stripped.endswith(":"):
break
header = textwrap.dedent("\n".join(collected)).rstrip()
return header[:-1].rstrip() if header.endswith(":") else header
def parse_function(
node: ast.FunctionDef | ast.AsyncFunctionDef,
module_path: str,
src: str,
*,
parent: str | None = None,
allow_private: bool = False,
) -> DocItem | None:
raw_docstring = ast.get_docstring(node)
if not _should_document(node.name, allow_private=allow_private) and not raw_docstring:
return None
is_async = isinstance(node, ast.AsyncFunctionDef)
doc = parse_google_docstring(raw_docstring)
qualname = f"{module_path}.{node.name}" if not parent else f"{parent}.{node.name}"
decorators = {_get_source(src, d).split(".")[-1] for d in node.decorator_list}
kind: Literal["function", "method", "property"] = "method" if parent else "function"
if decorators & PROPERTY_DECORATORS:
kind = "property"
signature_params = collect_signature_parameters(node.args, src, skip_self=bool(parent))
return DocItem(
name=node.name,
qualname=qualname,
kind=kind,
signature=format_signature(node, src, is_async=is_async),
doc=doc,
signature_params=signature_params,
lineno=node.lineno,
end_lineno=node.end_lineno or node.lineno,
bases=[],
children=[],
module_path=module_path,
source=_collect_source_block(src, node),
)
def parse_class(node: ast.ClassDef, module_path: str, src: str) -> DocItem:
class_doc = parse_google_docstring(ast.get_docstring(node))
init_node: ast.FunctionDef | ast.AsyncFunctionDef | None = next(
(n for n in node.body if isinstance(n, (ast.FunctionDef, ast.AsyncFunctionDef)) and n.name == "__init__"),
None,
)
signature_params: list[ParameterDoc] = []
if init_node:
init_doc = parse_google_docstring(ast.get_docstring(init_node))
class_doc = merge_docstrings(class_doc, init_doc, ignore_summary=True)
signature_params = collect_signature_parameters(init_node.args, src, skip_self=True)
bases = [_get_source(src, b) for b in node.bases] if node.bases else []
signature_node = init_node or node
class_signature = format_signature(signature_node, src, is_class=True, display_name=node.name)
methods: list[DocItem] = []
for child in node.body:
if isinstance(child, (ast.FunctionDef, ast.AsyncFunctionDef)) and child is not init_node:
method_doc = parse_function(child, module_path, src, parent=f"{module_path}.{node.name}")
if method_doc:
methods.append(method_doc)
return DocItem(
name=node.name,
qualname=f"{module_path}.{node.name}",
kind="class",
signature=class_signature,
doc=class_doc,
signature_params=signature_params,
lineno=node.lineno,
end_lineno=node.end_lineno or node.lineno,
bases=bases,
children=methods,
module_path=module_path,
source=_collect_source_block(src, node, end_line=init_node.end_lineno if init_node else node.lineno),
)
def parse_module(py_filepath: Path) -> DocumentedModule | None:
try:
src = py_filepath.read_text(encoding="utf-8")
except Exception:
return None
try:
tree = ast.parse(src)
except SyntaxError:
return None
module_path = (
f"{PACKAGE_DIR.name}.{py_filepath.relative_to(PACKAGE_DIR).with_suffix('').as_posix().replace('/', '.')}"
)
classes: list[DocItem] = []
functions: list[DocItem] = []
for node in tree.body:
if isinstance(node, ast.ClassDef):
classes.append(parse_class(node, module_path, src))
elif isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
func = parse_function(node, module_path, src, parent=None)
if func:
functions.append(func)
return DocumentedModule(path=py_filepath, module_path=module_path, classes=classes, functions=functions)
def _render_section(title: str, entries: Iterable[str], level: int) -> str:
entries = list(entries)
if not entries:
return ""
heading = f"{'#' * level} {title}\n"
body = "\n".join(entries).rstrip()
return f"{heading}{body}\n\n"
def _render_table(headers: list[str], rows: list[list[str]], level: int, title: str | None = None) -> str:
if not rows:
return ""
def _clean_cell(value: str | None) -> str:
if value is None:
return ""
return str(value).replace("\n", "<br>").strip()
rows = [[_clean_cell(c) for c in row] for row in rows]
table_lines = ["| " + " | ".join(headers) + " |", "| " + " | ".join("---" for _ in headers) + " |"]
for row in rows:
table_lines.append("| " + " | ".join(row) + " |")
heading = f"{'#' * level} {title}\n" if title else ""
return f"{heading}" + "\n".join(table_lines) + "\n\n"
def _code_fence(source: str, lang: str = "python") -> str:
return f"```{lang}\n{source}\n```"
def _merge_params(doc_params: list[ParameterDoc], signature_params: list[ParameterDoc]) -> list[ParameterDoc]:
sig_map = {p.name.lstrip("*"): p for p in signature_params}
merged: list[ParameterDoc] = []
seen = set()
for dp in doc_params:
sig = sig_map.get(dp.name.lstrip("*"))
merged.append(
ParameterDoc(
name=dp.name,
type=dp.type or (sig.type if sig else None),
description=dp.description,
default=sig.default if sig else None,
)
)
seen.add(dp.name.lstrip("*"))
for name, sig in sig_map.items():
if name in seen:
continue
merged.append(sig)
return merged
DEFAULT_SECTION_ORDER = ["args", "returns", "examples", "notes", "attributes", "yields", "raises"]
SUMMARY_BADGE_MAP = {"Classes": "class", "Properties": "property", "Methods": "method", "Functions": "function"}
_missing_type_warnings: list[str] = []
def contribution_admonition(pretty: str, url: str, *, kind: str = "note", title: str | None = None) -> str:
label = f' "{title}"' if title else ""
body = (
f"This page is sourced from [{pretty}]({url}). Have an improvement or example to add? "
f"Open a [Pull Request](https://docs.ultralytics.com/help/contributing/) — thank you! 🙏"
)
return f"!!! {kind}{label}\n\n {body}\n\n"
def _relative_to_workspace(path: Path) -> Path:
try:
return path.relative_to(PACKAGE_DIR.parent)
except ValueError:
return path
def render_source_panel(item: DocItem, module_url: str, module_path: str) -> str:
if not item.source:
return ""
source_url = f"{module_url}#L{item.lineno}-L{item.end_lineno}"
summary = f"Source code in <code>{html.escape(module_path)}.py</code>"
return (
"<details>\n"
f"<summary>{summary}</summary>\n\n"
f'<a href="{source_url}">{GITHUB_ICON}View on GitHub</a>\n'
f"{_code_fence(item.source)}\n"
"</details>\n"
)
def render_docstring(
doc: ParsedDocstring,
level: int,
signature_params: list[ParameterDoc] | None = None,
section_order: list[str] | None = None,
extra_sections: dict[str, str] | None = None,
) -> str:
parts: list[str] = []
if doc.summary:
parts.append(doc.summary)
if doc.description:
parts.append(doc.description)
sig_params = signature_params or []
merged_params = _merge_params(doc.params, sig_params)
sections: dict[str, str] = {}
if merged_params:
rows = []
for p in merged_params:
default_val = f"`{p.default}`" if p.default not in (None, "") else "*required*"
rows.append(
[
f"`{p.name}`",
f"`{p.type}`" if p.type else "",
p.description.strip() if p.description else "",
default_val,
]
)
table = _render_table(["Name", "Type", "Description", "Default"], rows, level, title=None)
sections["args"] = f"**Args**\n\n{table}"
if doc.returns:
rows = []
for r in doc.returns:
rows.append([f"`{r.type}`" if r.type else "", r.description])
table = _render_table(["Type", "Description"], rows, level, title=None)
sections["returns"] = f"**Returns**\n\n{table}"
if doc.examples:
code_block = "\n\n".join(f"```python\n{example.strip()}\n```" for example in doc.examples if example.strip())
if code_block:
sections["examples"] = f"**Examples**\n\n{code_block}\n\n"
if doc.notes:
note_text = "\n\n".join(doc.notes).strip()
indented = textwrap.indent(note_text, " ")
sections["notes"] = f'!!! note "Notes"\n\n{indented}\n\n'
if doc.attributes:
rows = []
for a in doc.attributes:
rows.append(
[f"`{a.name}`", f"`{a.type}`" if a.type else "", a.description.strip() if a.description else ""]
)
table = _render_table(["Name", "Type", "Description"], rows, level, title=None)
sections["attributes"] = f"**Attributes**\n\n{table}"
if doc.yields:
rows = []
for r in doc.yields:
rows.append([f"`{r.type}`" if r.type else "", r.description])
table = _render_table(["Type", "Description"], rows, level, title=None)
sections["yields"] = f"**Yields**\n\n{table}"
if doc.raises:
rows = []
for e in doc.raises:
type_cell = e.type or e.name
rows.append([f"`{type_cell}`" if type_cell else "", e.description or ""])
table = _render_table(["Type", "Description"], rows, level, title=None)
sections["raises"] = f"**Raises**\n\n{table}"
if extra_sections:
sections.update({k: v for k, v in extra_sections.items() if v})
# Ensure section order contains unique entries to avoid duplicate renders (e.g., classes injecting "examples")
order = list(dict.fromkeys(section_order or DEFAULT_SECTION_ORDER))
ordered_sections: list[str] = []
seen = set()
for key in order:
section = sections.get(key)
if section:
ordered_sections.append(section)
seen.add(key)
for key, section in sections.items():
if key not in seen:
ordered_sections.append(section)
parts.extend(filter(None, ordered_sections))
return "\n\n".join([p.rstrip() for p in parts if p]).strip() + ("\n\n" if parts else "")
def item_anchor(item: DocItem) -> str:
return item.qualname
def display_qualname(item: DocItem) -> str:
return item.qualname.replace(".__init__.", ".")
def render_summary_tabs(module: DocumentedModule) -> str:
tab_entries: list[tuple[str, list[str]]] = []
if module.classes:
tab_entries.append(
(
"Classes",
[f"- [`{cls.name}`](#{item_anchor(cls)})" for cls in module.classes],
)
)
property_links = []
method_links = []
for cls in module.classes:
for child in cls.children:
if child.kind == "property":
property_links.append(f"- [`{cls.name}.{child.name}`](#{item_anchor(child)})")
for child in cls.children:
if child.kind == "method":
method_links.append(f"- [`{cls.name}.{child.name}`](#{item_anchor(child)})")
if property_links:
tab_entries.append(("Properties", property_links))
if method_links:
tab_entries.append(("Methods", method_links))
if module.functions:
tab_entries.append(
(
"Functions",
[f"- [`{func.name}`](#{item_anchor(func)})" for func in module.functions],
)
)
if not tab_entries:
return ""
lines = ['!!! abstract "Summary"\n']
for label, bullets in tab_entries:
badge_class = SUMMARY_BADGE_MAP.get(label, label.lower())
label_badge = f'<span class="doc-kind doc-kind-{badge_class}">{label}</span>'
lines.append(f' === "{label_badge}"\n')
lines.append("\n".join(f" {line}" for line in bullets))
lines.append("") # Blank line after each tab block
return "\n".join(lines).rstrip() + "\n\n"
def render_item(item: DocItem, module_url: str, module_path: str, level: int = 2) -> str:
anchor = item_anchor(item)
title_prefix = item.kind.capitalize()
anchor_id = anchor.replace("_", r"\_") # escape underscores so attr_list keeps them in the id
heading = f"{'#' * level} {title_prefix} `{display_qualname(item)}` {{#{anchor_id}}}"
signature_block = f"```python\n{item.signature}\n```\n"
parts = [heading, signature_block]
if item.bases:
bases = ", ".join(f"`{b}`" for b in item.bases)
parts.append(f"**Bases:** {bases}\n")
# Check for parameters missing type annotations in both signature and docstring
if item.signature_params and item.doc.params:
merged = _merge_params(item.doc.params, item.signature_params)
missing = [p.name for p in merged if not p.type]
if missing:
_missing_type_warnings.append(f"{item.qualname}: {', '.join(missing)}")
if item.kind == "class":
method_section = None
if item.children:
props = [c for c in item.children if c.kind == "property"]
methods = [c for c in item.children if c.kind == "method"]
methods.sort(key=lambda m: (not m.name.startswith("__"), m.name))
rows = []
for child in props + methods:
summary = child.doc.summary or (
_normalize_text(child.doc.description).split("\n\n")[0] if child.doc.description else ""
)
rows.append([f"[`{child.name}`](#{item_anchor(child)})", summary.strip()])
if rows:
table = _render_table(["Name", "Description"], rows, level + 1, title=None)
method_section = f"**Methods**\n\n{table}"
order = ["args", "attributes", "methods", "examples", *DEFAULT_SECTION_ORDER]
rendered = render_docstring(
item.doc,
level + 1,
signature_params=item.signature_params,
section_order=order,
extra_sections={"methods": method_section} if method_section else None,
)
parts.append(rendered)
else:
parts.append(render_docstring(item.doc, level + 1, signature_params=item.signature_params))
if item.kind == "class" and item.source:
parts.append(render_source_panel(item, module_url, module_path))
if item.children:
props = [c for c in item.children if c.kind == "property"]
methods = [c for c in item.children if c.kind == "method"]
methods.sort(key=lambda m: (not m.name.startswith("__"), m.name))
ordered_children = props + methods
parts.append("<br>\n")
for idx, child in enumerate(ordered_children):
parts.append(render_item(child, module_url, module_path, level + 1))
if idx != len(ordered_children) - 1:
parts.append("<br>\n")
if item.source and item.kind != "class":
parts.append(render_source_panel(item, module_url, module_path))
return "\n\n".join(p.rstrip() for p in parts if p).rstrip() + "\n\n"
def render_module_markdown(module: DocumentedModule) -> str:
module_path = module.module_path.replace(".", "/")
module_url = f"https://github.com/{GITHUB_REPO}/blob/main/{module_path}.py"
content: list[str] = ["<br>\n"]
summary_tabs = render_summary_tabs(module)
if summary_tabs:
content.append(summary_tabs)
sections: list[str] = []
for idx, cls in enumerate(module.classes):
sections.append(render_item(cls, module_url, module_path, level=2))
if idx != len(module.classes) - 1 or module.functions:
sections.append("<br><br><hr><br>\n")
for idx, func in enumerate(module.functions):
sections.append(render_item(func, module_url, module_path, level=2))
if idx != len(module.functions) - 1:
sections.append("<br><br><hr><br>\n")
content.extend(sections)
return "\n".join(content).rstrip() + "\n\n<br><br>\n"
def create_markdown(module: DocumentedModule) -> Path:
md_filepath = REFERENCE_DIR / module.path.relative_to(PACKAGE_DIR).with_suffix(".md")
exists = md_filepath.exists()
header_content = ""
if exists:
for part in md_filepath.read_text().split("---"):
if "description:" in part or "comments:" in part:
header_content += f"---{part}---\n\n"
if not header_content:
header_content = "---\ndescription: TODO ADD DESCRIPTION\nkeywords: TODO ADD KEYWORDS\n---\n\n"
module_path_fs = module.module_path.replace(".", "/")
url = f"https://github.com/{GITHUB_REPO}/blob/main/{module_path_fs}.py"
pretty = url.replace("__init__.py", "\\_\\_init\\_\\_.py") # Properly display __init__.py filenames
title_content = f"# Reference for `{module_path_fs}.py`\n\n" + contribution_admonition(
pretty, url, kind="success", title="Improvements"
)
md_filepath.parent.mkdir(parents=True, exist_ok=True)
md_filepath.write_text(header_content + title_content + render_module_markdown(module))
if not exists:
subprocess.run(["git", "add", "-f", str(md_filepath)], check=True, cwd=REPO_ROOT)
return _relative_to_workspace(md_filepath)
def nested_dict():
return defaultdict(nested_dict)
def sort_nested_dict(d: dict) -> dict:
return {k: sort_nested_dict(v) if isinstance(v, dict) else v for k, v in sorted(d.items())}
def create_nav_menu_yaml(nav_items: list[str]) -> str:
nav_tree = nested_dict()
for item_str in nav_items:
item = Path(item_str)
parts = item.parts
current_level = nav_tree["reference"]
for part in parts[2:-1]: # Skip docs/reference and filename
current_level = current_level[part]
current_level[parts[-1].replace(".md", "")] = item
def _dict_to_yaml(d, level=0):
yaml_str = ""
indent = " " * level
for k, v in sorted(d.items()):
if isinstance(v, dict):
yaml_str += f"{indent}- {k}:\n{_dict_to_yaml(v, level + 1)}"
else:
yaml_str += f"{indent}- {k}: {str(v).replace('docs/en/', '')}\n"
return yaml_str
reference_yaml = _dict_to_yaml(sort_nested_dict(nav_tree))
LOGGER.info(f"Scan complete, generated reference section with {len(reference_yaml.splitlines())} lines")
return reference_yaml
def extract_document_paths(yaml_section: str) -> list[str]:
paths = []
# Match all paths that appear after a colon in the YAML
path_matches = re.findall(r":\s*([^\s][^:\n]*?)(?:\n|$)", yaml_section)
for path in path_matches:
# Clean up the path
path = path.strip()
if path and not path.startswith("-") and not path.endswith(":"):
paths.append(path)
return sorted(paths)
def update_mkdocs_file(reference_yaml: str) -> None:
mkdocs_content = MKDOCS_YAML.read_text()
# Find the top-level Reference section
ref_pattern = r"(\n - Reference:[\s\S]*?)(?=\n - \w|$)"
ref_match = re.search(ref_pattern, mkdocs_content)
# Build new section with proper indentation
new_section_lines = ["\n - Reference:"]
new_section_lines.extend(
f" {line}"
for line in reference_yaml.splitlines()
if line.strip() != "- reference:" # Skip redundant header
)
new_ref_section = "\n".join(new_section_lines) + "\n"
if ref_match:
# We found an existing Reference section
ref_section = ref_match.group(1)
LOGGER.info(f"Found existing top-level Reference section ({len(ref_section)} chars)")
# Compare only document paths
existing_paths = extract_document_paths(ref_section)
new_paths = extract_document_paths(new_ref_section)
# Check if the document paths are the same (ignoring structure or formatting differences)
if len(existing_paths) == len(new_paths) and set(existing_paths) == set(new_paths):
LOGGER.info(f"No changes detected in document paths ({len(existing_paths)} items). Skipping update.")
return
LOGGER.info(f"Changes detected: {len(new_paths)} document paths vs {len(existing_paths)} existing")
# Update content
new_content = mkdocs_content.replace(ref_section, new_ref_section)
MKDOCS_YAML.write_text(new_content)
try:
result = subprocess.run(
["npx", "prettier", "--write", str(MKDOCS_YAML)], capture_output=True, text=True, cwd=PACKAGE_DIR.parent
)
if result.returncode != 0:
LOGGER.warning(f"prettier formatting failed: {result.stderr.strip()}")
except FileNotFoundError:
LOGGER.warning("prettier not found (install Node.js or run 'npm i -g prettier'), skipping YAML formatting")
LOGGER.info(f"Updated Reference section in {MKDOCS_YAML}")
elif help_match := re.search(r"(\n - Help:)", mkdocs_content):
# No existing Reference section, we need to add it
help_section = help_match.group(1)
# Insert before Help section
new_content = mkdocs_content.replace(help_section, f"{new_ref_section}{help_section}")
MKDOCS_YAML.write_text(new_content)
LOGGER.info(f"Added new Reference section before Help in {MKDOCS_YAML}")
else:
LOGGER.warning("Could not find a suitable location to add Reference section")
def _finalize_reference(nav_items: list[str], update_nav: bool, created: int, created_label: str) -> list[str]:
if update_nav:
update_mkdocs_file(create_nav_menu_yaml(nav_items))
if created:
LOGGER.info(f"Created {created} new {created_label}")
return nav_items
def build_reference(update_nav: bool = True) -> list[str]:
return build_reference_placeholders(update_nav=update_nav)
def build_reference_placeholders(update_nav: bool = True) -> list[str]:
nav_items: list[str] = []
created = 0
orphans = set(REFERENCE_DIR.rglob("*.md"))
for py_filepath in TQDM(list(PACKAGE_DIR.rglob("*.py")), desc="Building reference stubs", unit="file"):
classes, functions = extract_classes_and_functions(py_filepath)
if not classes and not functions:
continue
module_path = (
f"{PACKAGE_DIR.name}.{py_filepath.relative_to(PACKAGE_DIR).with_suffix('').as_posix().replace('/', '.')}"
)
md_filepath = REFERENCE_DIR / py_filepath.relative_to(PACKAGE_DIR).with_suffix(".md")
exists = md_filepath.exists()
orphans.discard(md_filepath)
md_rel = create_placeholder_markdown(py_filepath, module_path, classes, functions)
nav_items.append(str(md_rel))
if not exists:
created += 1
for orphan in orphans:
orphan.unlink()
if update_nav:
update_mkdocs_file(create_nav_menu_yaml(nav_items))
if created:
LOGGER.info(f"Created {created} new reference stub files")
return nav_items
def build_reference_docs(update_nav: bool = False) -> list[str]:
_missing_type_warnings.clear()
nav_items: list[str] = []
created = 0
desc = f"Docstrings {GITHUB_REPO or PACKAGE_DIR.name}"
for py_filepath in TQDM(list(PACKAGE_DIR.rglob("*.py")), desc=desc, unit="file"):
md_target = REFERENCE_DIR / py_filepath.relative_to(PACKAGE_DIR).with_suffix(".md")
exists_before = md_target.exists()
module = parse_module(py_filepath)
if not module or (not module.classes and not module.functions):
continue
md_rel_filepath = create_markdown(module)
if not exists_before:
created += 1
nav_items.append(str(md_rel_filepath))
if update_nav:
update_mkdocs_file(create_nav_menu_yaml(nav_items))
if created:
LOGGER.info(f"Created {created} new reference files")
if _missing_type_warnings:
LOGGER.warning(f"{len(_missing_type_warnings)} functions/methods have parameters missing type annotations:")
for warning in _missing_type_warnings:
LOGGER.warning(f" - {warning}")
raise ValueError(
f"{len(_missing_type_warnings)} parameters missing types in both signature and docstring. "
f"Add type annotations to the function signature or (type) in the docstring Args section."
)
return nav_items
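# Illustrative note (hedged, not exhaustive): the missing-type check above passes when a
# parameter's type is recoverable from either source, e.g.
#     def load(path: str) -> dict: ...      # type annotated in the signature
# or, in the Google-style docstring Args section:
#     Args:
#         path (str): File to read.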
def build_reference_for(
package_dir: Path, reference_dir: Path, github_repo: str, update_nav: bool = False
) -> list[str]:
global PACKAGE_DIR, REFERENCE_DIR, GITHUB_REPO
prev = (PACKAGE_DIR, REFERENCE_DIR, GITHUB_REPO)
try:
PACKAGE_DIR, REFERENCE_DIR, GITHUB_REPO = package_dir, reference_dir, github_repo
return build_reference_docs(update_nav=update_nav)
finally:
PACKAGE_DIR, REFERENCE_DIR, GITHUB_REPO = prev
def main():
build_reference(update_nav=True)
if __name__ == "__main__":
main() | ---
+++
@@ -1,4 +1,12 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""
+Helper file to build Ultralytics Docs reference section.
+
+This script recursively walks through the ultralytics directory and builds a MkDocs reference section of *.md files
+composed of classes and functions, and also creates a navigation menu for use in mkdocs.yaml.
+
+Note: Must be run from repository root directory. Do not run from docs directory.
+"""
from __future__ import annotations
@@ -49,6 +57,7 @@
@dataclass
class ParameterDoc:
+ """Structured documentation for parameters, attributes, and exceptions."""
name: str
type: str | None
@@ -58,6 +67,7 @@
@dataclass
class ReturnDoc:
+ """Structured documentation for return and yield values."""
type: str | None
description: str
@@ -65,6 +75,7 @@
@dataclass
class ParsedDocstring:
+ """Normalized representation of a Google-style docstring."""
summary: str = ""
description: str = ""
@@ -79,6 +90,7 @@
@dataclass
class DocItem:
+ """Represents a documented symbol (class, function, method, or property)."""
name: str
qualname: str
@@ -96,6 +108,7 @@
@dataclass
class DocumentedModule:
+ """Container for all documented items within a Python module."""
path: Path
module_path: str
@@ -109,6 +122,7 @@
def extract_classes_and_functions(filepath: Path) -> tuple[list[str], list[str]]:
+ """Extract top-level class and (a)sync function names from a Python file."""
content = filepath.read_text()
classes = CLASS_DEF_RE.findall(content)
functions = FUNC_DEF_RE.findall(content)
@@ -116,6 +130,7 @@
def create_placeholder_markdown(py_filepath: Path, module_path: str, classes: list[str], functions: list[str]) -> Path:
+ """Create a minimal Markdown stub used by mkdocstrings."""
md_filepath = REFERENCE_DIR / py_filepath.relative_to(PACKAGE_DIR).with_suffix(".md")
exists = md_filepath.exists()
@@ -151,6 +166,7 @@
def _get_source(src: str, node: ast.AST) -> str:
+ """Return the source segment for an AST node with safe fallbacks."""
segment = ast.get_source_segment(src, node)
if segment:
return segment
@@ -161,6 +177,7 @@
def _format_annotation(annotation: ast.AST | None, src: str) -> str | None:
+ """Format a type annotation into a compact string."""
if annotation is None:
return None
text = _get_source(src, annotation).strip()
@@ -168,6 +185,7 @@
def _format_default(default: ast.AST | None, src: str) -> str | None:
+ """Format a default value expression for display."""
if default is None:
return None
text = _get_source(src, default).strip()
@@ -175,6 +193,7 @@
def _format_parameter(arg: ast.arg, default: ast.AST | None, src: str) -> str:
+ """Render a single parameter with annotation and default value."""
annotation = _format_annotation(arg.annotation, src)
rendered = arg.arg
if annotation:
@@ -186,9 +205,11 @@
def collect_signature_parameters(args: ast.arguments, src: str, *, skip_self: bool = True) -> list[ParameterDoc]:
+ """Collect parameters from an ast.arguments object with types and defaults."""
params: list[ParameterDoc] = []
def add_param(arg: ast.arg, default_value: ast.AST | None = None):
+ """Append a parameter entry, optionally skipping self/cls."""
name = arg.arg
if skip_self and name in {"self", "cls"}:
return
@@ -233,6 +254,7 @@ def format_signature(
node: ast.AST, src: str, *, is_class: bool = False, is_async: bool = False, display_name: str | None = None
) -> str:
+ """Build a readable signature string for classes, functions, and methods."""
if not isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
return ""
@@ -303,6 +325,7 @@
def _split_section_entries(lines: list[str]) -> list[list[str]]:
+ """Split a docstring section into entries based on indentation."""
entries: list[list[str]] = []
current: list[str] = []
base_indent: int | None = None
@@ -326,6 +349,7 @@
def _parse_named_entries(lines: list[str]) -> list[ParameterDoc]:
+ """Parse Args/Attributes/Raises style sections."""
entries = []
for block in _split_section_entries(lines):
text = textwrap.dedent("\n".join(block)).strip()
@@ -345,6 +369,7 @@
def _parse_returns(lines: list[str]) -> list[ReturnDoc]:
+ """Parse Returns/Yields sections."""
entries = []
for block in _split_section_entries(lines):
text = textwrap.dedent("\n".join(block)).strip()
@@ -385,6 +410,7 @@
def _normalize_text(text: str) -> str:
+ """Normalize text while preserving Markdown structures like tables, admonitions, and code blocks."""
if not text:
return ""
# Check if text contains Markdown structures that need line preservation
@@ -408,6 +434,7 @@
def parse_google_docstring(docstring: str | None) -> ParsedDocstring:
+ """Parse a Google-style docstring into structured data."""
if not docstring:
return ParsedDocstring()
@@ -448,6 +475,7 @@
def merge_docstrings(base: ParsedDocstring, extra: ParsedDocstring, ignore_summary: bool = True) -> ParsedDocstring:
+ """Merge init docstring content into a class docstring."""
# Keep existing class docs; append init docs only when they introduce new entries (class takes priority).
def _merge_unique(base_items, extra_items, key):
@@ -470,6 +498,7 @@
def _should_document(name: str, *, allow_private: bool = False) -> bool:
+ """Decide whether to include a symbol based on its name."""
if name in INCLUDE_SPECIAL_METHODS:
return True
if name.startswith("_"):
@@ -478,6 +507,7 @@
def _collect_source_block(src: str, node: ast.AST, end_line: int | None = None) -> str:
+ """Return a dedented source snippet for the given node up to an optional end line."""
if not hasattr(node, "lineno") or not hasattr(node, "end_lineno"):
return ""
lines = src.splitlines()
@@ -491,6 +521,7 @@
def _get_definition_signature(node: ast.AST, src: str) -> str:
+ """Return the original multi-line definition signature from source if available."""
if not hasattr(node, "lineno"):
return ""
lines = src.splitlines()[node.lineno - 1 :]
@@ -514,6 +545,7 @@ parent: str | None = None,
allow_private: bool = False,
) -> DocItem | None:
+ """Parse a function or method node into a DocItem."""
raw_docstring = ast.get_docstring(node)
if not _should_document(node.name, allow_private=allow_private) and not raw_docstring:
return None
@@ -545,6 +577,7 @@
def parse_class(node: ast.ClassDef, module_path: str, src: str) -> DocItem:
+ """Parse a class node, merging __init__ docs and collecting methods."""
class_doc = parse_google_docstring(ast.get_docstring(node))
init_node: ast.FunctionDef | ast.AsyncFunctionDef | None = next(
@@ -585,6 +618,7 @@
def parse_module(py_filepath: Path) -> DocumentedModule | None:
+ """Parse a Python module into structured documentation objects."""
try:
src = py_filepath.read_text(encoding="utf-8")
except Exception:
@@ -612,6 +646,7 @@
def _render_section(title: str, entries: Iterable[str], level: int) -> str:
+ """Render a section with a given heading level."""
entries = list(entries)
if not entries:
return ""
@@ -621,10 +656,12 @@
def _render_table(headers: list[str], rows: list[list[str]], level: int, title: str | None = None) -> str:
+ """Render a Markdown table with an optional heading."""
if not rows:
return ""
def _clean_cell(value: str | None) -> str:
+ """Normalize table cell values for Markdown output."""
if value is None:
return ""
return str(value).replace("\n", "<br>").strip()
@@ -638,10 +675,12 @@
def _code_fence(source: str, lang: str = "python") -> str:
+ """Return a fenced code block with optional language for highlighting."""
return f"```{lang}\n{source}\n```"
def _merge_params(doc_params: list[ParameterDoc], signature_params: list[ParameterDoc]) -> list[ParameterDoc]:
+ """Merge docstring params with signature params to include defaults/types."""
sig_map = {p.name.lstrip("*"): p for p in signature_params}
merged: list[ParameterDoc] = []
@@ -672,6 +711,7 @@
def contribution_admonition(pretty: str, url: str, *, kind: str = "note", title: str | None = None) -> str:
+ """Return a standardized contribution call-to-action admonition."""
label = f' "{title}"' if title else ""
body = (
f"This page is sourced from [{pretty}]({url}). Have an improvement or example to add? "
@@ -681,6 +721,7 @@
def _relative_to_workspace(path: Path) -> Path:
+ """Return path relative to workspace root when possible."""
try:
return path.relative_to(PACKAGE_DIR.parent)
except ValueError:
@@ -688,6 +729,7 @@
def render_source_panel(item: DocItem, module_url: str, module_path: str) -> str:
+ """Render a collapsible source panel with a GitHub link."""
if not item.source:
return ""
source_url = f"{module_url}#L{item.lineno}-L{item.end_lineno}"
@@ -708,6 +750,7 @@ section_order: list[str] | None = None,
extra_sections: dict[str, str] | None = None,
) -> str:
+ """Convert a ParsedDocstring into Markdown with tables similar to mkdocstrings."""
parts: list[str] = []
if doc.summary:
parts.append(doc.summary)
@@ -797,14 +840,17 @@
def item_anchor(item: DocItem) -> str:
+ """Create a stable anchor for a documented item."""
return item.qualname
def display_qualname(item: DocItem) -> str:
+ """Return a cleaned, fully-qualified name for display (strip __init__ noise)."""
return item.qualname.replace(".__init__.", ".")
def render_summary_tabs(module: DocumentedModule) -> str:
+ """Render a tabbed summary of classes, methods, and functions for quick navigation."""
tab_entries: list[tuple[str, list[str]]] = []
if module.classes:
@@ -851,6 +897,7 @@
def render_item(item: DocItem, module_url: str, module_path: str, level: int = 2) -> str:
+ """Render a class, function, or method to Markdown."""
anchor = item_anchor(item)
title_prefix = item.kind.capitalize()
anchor_id = anchor.replace("_", r"\_") # escape underscores so attr_list keeps them in the id
@@ -921,6 +968,7 @@
def render_module_markdown(module: DocumentedModule) -> str:
+ """Render the full module reference content."""
module_path = module.module_path.replace(".", "/")
module_url = f"https://github.com/{GITHUB_REPO}/blob/main/{module_path}.py"
content: list[str] = ["<br>\n"]
@@ -944,6 +992,7 @@
def create_markdown(module: DocumentedModule) -> Path:
+ """Create a Markdown file containing the API reference for the given Python module."""
md_filepath = REFERENCE_DIR / module.path.relative_to(PACKAGE_DIR).with_suffix(".md")
exists = md_filepath.exists()
@@ -973,14 +1022,17 @@
def nested_dict():
+ """Create and return a nested defaultdict."""
return defaultdict(nested_dict)
def sort_nested_dict(d: dict) -> dict:
+ """Sort a nested dictionary recursively."""
return {k: sort_nested_dict(v) if isinstance(v, dict) else v for k, v in sorted(d.items())}
def create_nav_menu_yaml(nav_items: list[str]) -> str:
+ """Create and return a YAML string for the navigation menu."""
nav_tree = nested_dict()
for item_str in nav_items:
@@ -992,6 +1044,7 @@ current_level[parts[-1].replace(".md", "")] = item
def _dict_to_yaml(d, level=0):
+ """Convert a nested dictionary to a YAML-formatted string with indentation."""
yaml_str = ""
indent = " " * level
for k, v in sorted(d.items()):
@@ -1007,6 +1060,7 @@
def extract_document_paths(yaml_section: str) -> list[str]:
+ """Extract document paths from a YAML section, ignoring formatting and structure."""
paths = []
# Match all paths that appear after a colon in the YAML
path_matches = re.findall(r":\s*([^\s][^:\n]*?)(?:\n|$)", yaml_section)
@@ -1019,6 +1073,7 @@
def update_mkdocs_file(reference_yaml: str) -> None:
+ """Update the mkdocs.yaml file with the new reference section only if changes in document paths are detected."""
mkdocs_content = MKDOCS_YAML.read_text()
# Find the top-level Reference section
@@ -1074,6 +1129,7 @@
def _finalize_reference(nav_items: list[str], update_nav: bool, created: int, created_label: str) -> list[str]:
+ """Optionally sync navigation and print creation summary."""
if update_nav:
update_mkdocs_file(create_nav_menu_yaml(nav_items))
if created:
@@ -1082,10 +1138,12 @@
def build_reference(update_nav: bool = True) -> list[str]:
+ """Create placeholder reference files (legacy mkdocstrings flow)."""
return build_reference_placeholders(update_nav=update_nav)
def build_reference_placeholders(update_nav: bool = True) -> list[str]:
+ """Create minimal placeholder reference files (mkdocstrings-style) and optionally update nav."""
nav_items: list[str] = []
created = 0
orphans = set(REFERENCE_DIR.rglob("*.md"))
@@ -1114,6 +1172,7 @@
def build_reference_docs(update_nav: bool = False) -> list[str]:
+ """Render full docstring-based reference content."""
_missing_type_warnings.clear()
nav_items: list[str] = []
created = 0
@@ -1148,6 +1207,7 @@ def build_reference_for(
package_dir: Path, reference_dir: Path, github_repo: str, update_nav: bool = False
) -> list[str]:
+ """Temporarily switch package context to build reference docs for another project."""
global PACKAGE_DIR, REFERENCE_DIR, GITHUB_REPO
prev = (PACKAGE_DIR, REFERENCE_DIR, GITHUB_REPO)
try:
@@ -1158,8 +1218,9 @@
def main():
+ """CLI entrypoint."""
build_reference(update_nav=True)
if __name__ == "__main__":
- main()
+ main()
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/docs/build_reference.py |
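A minimal sketch of exercising the Google-docstring parser defined in the code cell of this row; the docs/build_reference.py location, running from the repository root, and loading the module by file path are assumptions made only for illustration.

import importlib.util
from pathlib import Path

# Load the script shown above by path (assumed location: docs/build_reference.py).
spec = importlib.util.spec_from_file_location("build_reference", Path("docs/build_reference.py"))
build_reference = importlib.util.module_from_spec(spec)
spec.loader.exec_module(build_reference)  # top level only defines helpers and constants

# Parse a small Google-style docstring of the kind the diff above adds.
doc = build_reference.parse_google_docstring(
    '''Merge init docstring content into a class docstring.

    Args:
        base (ParsedDocstring): Class-level docstring, kept as the priority source.
        extra (ParsedDocstring): Init docstring whose new entries are appended.
    '''
)
print(doc.summary)  # first line of the docstring
print([(p.name, p.type) for p in doc.params])  # entries parsed from the Args section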
Write docstrings for this repository | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import gc
import math
import os
import subprocess
import time
import warnings
from copy import copy, deepcopy
from datetime import datetime, timedelta
from functools import partial
from pathlib import Path
import numpy as np
import torch
from torch import distributed as dist
from torch import nn, optim
from ultralytics import __version__
from ultralytics.cfg import get_cfg, get_save_dir
from ultralytics.data.utils import check_cls_dataset, check_det_dataset
from ultralytics.nn.tasks import load_checkpoint
from ultralytics.optim import MuSGD
from ultralytics.utils import (
DEFAULT_CFG,
GIT,
LOCAL_RANK,
LOGGER,
RANK,
TQDM,
YAML,
callbacks,
clean_url,
colorstr,
emojis,
)
from ultralytics.utils.autobatch import check_train_batch_size
from ultralytics.utils.checks import check_amp, check_file, check_imgsz, check_model_file_from_stem, print_args
from ultralytics.utils.dist import ddp_cleanup, generate_ddp_command
from ultralytics.utils.files import get_latest_run
from ultralytics.utils.plotting import plot_results
from ultralytics.utils.torch_utils import (
TORCH_2_4,
EarlyStopping,
ModelEMA,
attempt_compile,
autocast,
convert_optimizer_state_dict_to_fp16,
init_seeds,
one_cycle,
select_device,
strip_optimizer,
torch_distributed_zero_first,
unset_deterministic,
unwrap_model,
)
class BaseTrainer:
def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks: dict | None = None):
self.hub_session = overrides.pop("session", None) # HUB
self.args = get_cfg(cfg, overrides)
self.check_resume(overrides)
self.device = select_device(self.args.device)
# Update "-1" devices so post-training val does not repeat search
self.args.device = os.getenv("CUDA_VISIBLE_DEVICES") if "cuda" in str(self.device) else str(self.device)
self.validator = None
self.metrics = None
self.plots = {}
init_seeds(self.args.seed + 1 + RANK, deterministic=self.args.deterministic)
# Dirs
self.save_dir = get_save_dir(self.args)
self.args.name = self.save_dir.name # update name for loggers
self.wdir = self.save_dir / "weights" # weights dir
if RANK in {-1, 0}:
self.wdir.mkdir(parents=True, exist_ok=True) # make dir
self.args.save_dir = str(self.save_dir)
# Save run args, serializing augmentations as reprs for resume compatibility
args_dict = vars(self.args).copy()
if args_dict.get("augmentations") is not None:
# Serialize Albumentations transforms as their repr strings for checkpoint compatibility
args_dict["augmentations"] = [repr(t) for t in args_dict["augmentations"]]
YAML.save(self.save_dir / "args.yaml", args_dict) # save run args
self.last, self.best = self.wdir / "last.pt", self.wdir / "best.pt" # checkpoint paths
self.save_period = self.args.save_period
self.batch_size = self.args.batch
self.epochs = self.args.epochs or 100 # in case users accidentally pass epochs=None with timed training
self.start_epoch = 0
if RANK == -1:
print_args(vars(self.args))
# Device
if self.device.type in {"cpu", "mps"}:
self.args.workers = 0 # faster CPU training as time dominated by inference, not dataloading
# Callbacks - initialize early so on_pretrain_routine_start can capture original args.data
self.callbacks = _callbacks or callbacks.get_default_callbacks()
if isinstance(self.args.device, str) and len(self.args.device): # i.e. device='0' or device='0,1,2,3'
world_size = len(self.args.device.split(","))
elif isinstance(self.args.device, (tuple, list)): # i.e. device=[0, 1, 2, 3] (multi-GPU from CLI is list)
world_size = len(self.args.device)
elif self.args.device in {"cpu", "mps"}: # i.e. device='cpu' or 'mps'
world_size = 0
elif torch.cuda.is_available(): # i.e. device=None or device='' or device=number
world_size = 1 # default to device 0
else: # i.e. device=None or device=''
world_size = 0
self.ddp = world_size > 1 and "LOCAL_RANK" not in os.environ
self.world_size = world_size
# Run on_pretrain_routine_start before get_dataset() to capture original args.data (e.g., ul:// URIs)
if RANK in {-1, 0} and not self.ddp:
callbacks.add_integration_callbacks(self)
self.run_callbacks("on_pretrain_routine_start")
# Model and Dataset
self.model = check_model_file_from_stem(self.args.model) # add suffix, i.e. yolo26n -> yolo26n.pt
with torch_distributed_zero_first(LOCAL_RANK): # avoid auto-downloading dataset multiple times
self.data = self.get_dataset()
self.ema = None
# Optimization utils init
self.lf = None
self.scheduler = None
# Epoch level metrics
self.best_fitness = None
self.fitness = None
self.loss = None
self.tloss = None
self.loss_names = ["Loss"]
self.csv = self.save_dir / "results.csv"
if self.csv.exists() and not self.args.resume:
self.csv.unlink()
self.plot_idx = [0, 1, 2]
self.nan_recovery_attempts = 0
def add_callback(self, event: str, callback):
self.callbacks[event].append(callback)
def set_callback(self, event: str, callback):
self.callbacks[event] = [callback]
def run_callbacks(self, event: str):
for callback in self.callbacks.get(event, []):
callback(self)
def train(self):
# Run subprocess if DDP training, else train normally
if self.ddp:
# Argument checks
if self.args.rect:
LOGGER.warning("'rect=True' is incompatible with Multi-GPU training, setting 'rect=False'")
self.args.rect = False
if self.args.batch < 1.0:
raise ValueError(
"AutoBatch with batch<1 not supported for Multi-GPU training, "
f"please specify a valid batch size multiple of GPU count {self.world_size}, i.e. batch={self.world_size * 8}."
)
# Command
cmd, file = generate_ddp_command(self)
try:
LOGGER.info(f"{colorstr('DDP:')} debug command {' '.join(cmd)}")
subprocess.run(cmd, check=True)
except Exception as e:
raise e
finally:
ddp_cleanup(self, str(file))
else:
self._do_train()
def _setup_scheduler(self):
if self.args.cos_lr:
self.lf = one_cycle(1, self.args.lrf, self.epochs) # cosine 1->hyp['lrf']
else:
self.lf = lambda x: max(1 - x / self.epochs, 0) * (1.0 - self.args.lrf) + self.args.lrf # linear
self.scheduler = optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda=self.lf)
def _setup_ddp(self):
torch.cuda.set_device(RANK)
self.device = torch.device("cuda", RANK)
os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1" # set to enforce timeout
dist.init_process_group(
backend="nccl" if dist.is_nccl_available() else "gloo",
timeout=timedelta(seconds=10800), # 3 hours
rank=RANK,
world_size=self.world_size,
)
def _build_train_pipeline(self):
batch_size = self.batch_size // max(self.world_size, 1)
self.train_loader = self.get_dataloader(
self.data["train"], batch_size=batch_size, rank=LOCAL_RANK, mode="train"
)
# Note: When training DOTA dataset, double batch size could get OOM on images with >2000 objects.
self.test_loader = self.get_dataloader(
self.data.get("val") or self.data.get("test"),
batch_size=batch_size if self.args.task == "obb" else batch_size * 2,
rank=LOCAL_RANK,
mode="val",
)
self.accumulate = max(round(self.args.nbs / self.batch_size), 1) # accumulate loss before optimizing
weight_decay = self.args.weight_decay * self.batch_size * self.accumulate / self.args.nbs # scale weight_decay
iterations = math.ceil(len(self.train_loader.dataset) / max(self.batch_size, self.args.nbs)) * self.epochs
self.optimizer = self.build_optimizer(
model=self.model,
name=self.args.optimizer,
lr=self.args.lr0,
momentum=self.args.momentum,
decay=weight_decay,
iterations=iterations,
)
self._setup_scheduler()
def _setup_train(self):
ckpt = self.setup_model()
self.model = self.model.to(self.device)
self.set_model_attributes()
# Compile model
self.model = attempt_compile(self.model, device=self.device, mode=self.args.compile)
# Freeze layers
freeze_list = (
self.args.freeze
if isinstance(self.args.freeze, list)
else range(self.args.freeze)
if isinstance(self.args.freeze, int)
else []
)
always_freeze_names = [".dfl"] # always freeze these layers
freeze_layer_names = [f"model.{x}." for x in freeze_list] + always_freeze_names
self.freeze_layer_names = freeze_layer_names
for k, v in self.model.named_parameters():
# v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0 (commented for erratic training results)
if any(x in k for x in freeze_layer_names):
LOGGER.info(f"Freezing layer '{k}'")
v.requires_grad = False
elif not v.requires_grad and v.dtype.is_floating_point: # only floating point Tensor can require gradients
LOGGER.warning(
f"setting 'requires_grad=True' for frozen layer '{k}'. "
"See ultralytics.engine.trainer for customization of frozen layers."
)
v.requires_grad = True
# Check AMP
self.amp = torch.tensor(self.args.amp).to(self.device) # True or False
if self.amp and RANK in {-1, 0}: # Single-GPU and DDP
callbacks_backup = callbacks.default_callbacks.copy() # backup callbacks as check_amp() resets them
self.amp = torch.tensor(check_amp(self.model), device=self.device)
callbacks.default_callbacks = callbacks_backup # restore callbacks
if RANK > -1 and self.world_size > 1: # DDP
dist.broadcast(self.amp.int(), src=0) # broadcast from rank 0 to all other ranks; gloo errors with boolean
self.amp = bool(self.amp) # as boolean
self.scaler = (
torch.amp.GradScaler("cuda", enabled=self.amp) if TORCH_2_4 else torch.cuda.amp.GradScaler(enabled=self.amp)
)
if self.world_size > 1:
self.model = nn.parallel.DistributedDataParallel(self.model, device_ids=[RANK], find_unused_parameters=True)
# Check imgsz
gs = max(int(self.model.stride.max() if hasattr(self.model, "stride") else 32), 32) # grid size (max stride)
self.args.imgsz = check_imgsz(self.args.imgsz, stride=gs, floor=gs, max_dim=1)
self.stride = gs # for multiscale training
# Batch size
if self.batch_size < 1 and RANK == -1: # single-GPU only, estimate best batch size
self.args.batch = self.batch_size = self.auto_batch()
self._build_train_pipeline()
self.validator = self.get_validator()
self.ema = ModelEMA(self.model)
if RANK in {-1, 0}:
metric_keys = self.validator.metrics.keys + self.label_loss_items(prefix="val")
self.metrics = dict(zip(metric_keys, [0] * len(metric_keys)))
if self.args.plots:
self.plot_training_labels()
self.stopper, self.stop = EarlyStopping(patience=self.args.patience), False
self.resume_training(ckpt)
self.scheduler.last_epoch = self.start_epoch - 1 # do not move
self.run_callbacks("on_pretrain_routine_end")
def _do_train(self):
if self.world_size > 1:
self._setup_ddp()
self._setup_train()
nb = len(self.train_loader) # number of batches
nw = max(round(self.args.warmup_epochs * nb), 100) if self.args.warmup_epochs > 0 else -1 # warmup iterations
last_opt_step = -1
self.epoch_time = None
self.epoch_time_start = time.time()
self.train_time_start = time.time()
self.run_callbacks("on_train_start")
LOGGER.info(
f"Image sizes {self.args.imgsz} train, {self.args.imgsz} val\n"
f"Using {self.train_loader.num_workers * (self.world_size or 1)} dataloader workers\n"
f"Logging results to {colorstr('bold', self.save_dir)}\n"
f"Starting training for " + (f"{self.args.time} hours..." if self.args.time else f"{self.epochs} epochs...")
)
if self.args.close_mosaic:
base_idx = (self.epochs - self.args.close_mosaic) * nb
self.plot_idx.extend([base_idx, base_idx + 1, base_idx + 2])
epoch = self.start_epoch
self.optimizer.zero_grad() # zero any resumed gradients to ensure stability on train start
self._oom_retries = 0 # OOM auto-reduce counter for first epoch
while True:
self.epoch = epoch
self.run_callbacks("on_train_epoch_start")
with warnings.catch_warnings():
warnings.simplefilter("ignore") # suppress 'Detected lr_scheduler.step() before optimizer.step()'
self.scheduler.step()
self._model_train()
if RANK != -1:
self.train_loader.sampler.set_epoch(epoch)
pbar = enumerate(self.train_loader)
# Update dataloader attributes (optional)
if epoch == (self.epochs - self.args.close_mosaic):
self._close_dataloader_mosaic()
self.train_loader.reset()
if RANK in {-1, 0}:
LOGGER.info(self.progress_string())
pbar = TQDM(enumerate(self.train_loader), total=nb)
self.tloss = None
for i, batch in pbar:
self.run_callbacks("on_train_batch_start")
# Warmup
ni = i + nb * epoch
if ni <= nw:
xi = [0, nw] # x interp
self.accumulate = max(1, int(np.interp(ni, xi, [1, self.args.nbs / self.batch_size]).round()))
for x in self.optimizer.param_groups:
# Bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
x["lr"] = np.interp(
ni,
xi,
[
self.args.warmup_bias_lr if x.get("param_group") == "bias" else 0.0,
x["initial_lr"] * self.lf(epoch),
],
)
if "momentum" in x:
x["momentum"] = np.interp(ni, xi, [self.args.warmup_momentum, self.args.momentum])
# Forward
try:
with autocast(self.amp):
batch = self.preprocess_batch(batch)
if self.args.compile:
# Decouple inference and loss calculations for improved compile performance
preds = self.model(batch["img"])
loss, self.loss_items = unwrap_model(self.model).loss(batch, preds)
else:
loss, self.loss_items = self.model(batch)
self.loss = loss.sum()
if RANK != -1:
self.loss *= self.world_size
self.tloss = (
self.loss_items if self.tloss is None else (self.tloss * i + self.loss_items) / (i + 1)
)
# Backward
self.scaler.scale(self.loss).backward()
except torch.cuda.OutOfMemoryError:
if epoch > self.start_epoch or self._oom_retries >= 3 or RANK != -1:
raise # only auto-reduce during first epoch on single GPU, max 3 retries
self._oom_retries += 1
old_batch = self.batch_size
self.args.batch = self.batch_size = max(self.batch_size // 2, 1)
LOGGER.warning(
f"CUDA out of memory with batch={old_batch}. "
f"Reducing to batch={self.batch_size} and retrying ({self._oom_retries}/3)."
)
self._clear_memory()
self._build_train_pipeline() # rebuild dataloaders, optimizer, scheduler
self.scheduler.last_epoch = self.start_epoch - 1
nb = len(self.train_loader)
nw = max(round(self.args.warmup_epochs * nb), 100) if self.args.warmup_epochs > 0 else -1
last_opt_step = -1
self.optimizer.zero_grad()
break # restart epoch loop with reduced batch size
if ni - last_opt_step >= self.accumulate:
self.optimizer_step()
last_opt_step = ni
# Timed stopping
if self.args.time:
self.stop = (time.time() - self.train_time_start) > (self.args.time * 3600)
if RANK != -1: # if DDP training
broadcast_list = [self.stop if RANK == 0 else None]
dist.broadcast_object_list(broadcast_list, 0) # broadcast 'stop' to all ranks
self.stop = broadcast_list[0]
if self.stop: # training time exceeded
break
# Log
if RANK in {-1, 0}:
loss_length = self.tloss.shape[0] if len(self.tloss.shape) else 1
pbar.set_description(
("%11s" * 2 + "%11.4g" * (2 + loss_length))
% (
f"{epoch + 1}/{self.epochs}",
f"{self._get_memory():.3g}G", # (GB) GPU memory util
*(self.tloss if loss_length > 1 else torch.unsqueeze(self.tloss, 0)), # losses
batch["cls"].shape[0], # batch size, i.e. 8
batch["img"].shape[-1], # imgsz, i.e 640
)
)
self.run_callbacks("on_batch_end")
if self.args.plots and ni in self.plot_idx:
self.plot_training_samples(batch, ni)
self.run_callbacks("on_train_batch_end")
if self.stop:
break # allow external stop (e.g. platform cancellation) between batches
else:
# for/else: this block runs only when the for loop completes without break (no OOM retry)
self._oom_retries = 0 # reset OOM counter after successful first epoch
if self._oom_retries and not self.stop:
continue # OOM recovery broke the for loop, restart with reduced batch size
if hasattr(unwrap_model(self.model).criterion, "update"):
unwrap_model(self.model).criterion.update()
self.lr = {f"lr/pg{ir}": x["lr"] for ir, x in enumerate(self.optimizer.param_groups)} # for loggers
self.run_callbacks("on_train_epoch_end")
if RANK in {-1, 0}:
self.ema.update_attr(self.model, include=["yaml", "nc", "args", "names", "stride", "class_weights"])
# Validation
final_epoch = epoch + 1 >= self.epochs
if self.args.val or final_epoch or self.stopper.possible_stop or self.stop:
self._clear_memory(threshold=0.5) # prevent VRAM spike
self.metrics, self.fitness = self.validate()
# NaN recovery
if self._handle_nan_recovery(epoch):
continue
self.nan_recovery_attempts = 0
if RANK in {-1, 0}:
self.save_metrics(metrics={**self.label_loss_items(self.tloss), **self.metrics, **self.lr})
self.stop |= self.stopper(epoch + 1, self.fitness) or final_epoch
if self.args.time:
self.stop |= (time.time() - self.train_time_start) > (self.args.time * 3600)
# Save model
if self.args.save or final_epoch:
self.save_model()
self.run_callbacks("on_model_save")
# Scheduler
t = time.time()
self.epoch_time = t - self.epoch_time_start
self.epoch_time_start = t
if self.args.time:
mean_epoch_time = (t - self.train_time_start) / (epoch - self.start_epoch + 1)
self.epochs = self.args.epochs = math.ceil(self.args.time * 3600 / mean_epoch_time)
self._setup_scheduler()
self.scheduler.last_epoch = self.epoch # do not move
self.stop |= epoch >= self.epochs # stop if exceeded epochs
self.run_callbacks("on_fit_epoch_end")
self._clear_memory(0.5) # clear if memory utilization > 50%
# Early Stopping
if RANK != -1: # if DDP training
broadcast_list = [self.stop if RANK == 0 else None]
dist.broadcast_object_list(broadcast_list, 0) # broadcast 'stop' to all ranks
self.stop = broadcast_list[0]
if self.stop:
break # must break all DDP ranks
epoch += 1
seconds = time.time() - self.train_time_start
LOGGER.info(f"\n{epoch - self.start_epoch + 1} epochs completed in {seconds / 3600:.3f} hours.")
# Do final val with best.pt
self.final_eval()
if RANK in {-1, 0}:
if self.args.plots:
self.plot_metrics()
self.run_callbacks("on_train_end")
self._clear_memory()
unset_deterministic()
self.run_callbacks("teardown")
def auto_batch(self, max_num_obj=0):
return check_train_batch_size(
model=self.model,
imgsz=self.args.imgsz,
amp=self.amp,
batch=self.batch_size,
max_num_obj=max_num_obj,
) # returns batch size
def _get_memory(self, fraction=False):
memory, total = 0, 0
if self.device.type == "mps":
memory = torch.mps.driver_allocated_memory()
if fraction:
return __import__("psutil").virtual_memory().percent / 100
elif self.device.type != "cpu":
memory = torch.cuda.memory_reserved()
if fraction:
total = torch.cuda.get_device_properties(self.device).total_memory
return ((memory / total) if total > 0 else 0) if fraction else (memory / 2**30)
def _clear_memory(self, threshold: float | None = None):
if threshold:
assert 0 <= threshold <= 1, "Threshold must be between 0 and 1."
if self._get_memory(fraction=True) <= threshold:
return
gc.collect()
if self.device.type == "mps":
torch.mps.empty_cache()
elif self.device.type == "cpu":
return
else:
torch.cuda.empty_cache()
def read_results_csv(self):
import polars as pl # scope for faster 'import ultralytics'
try:
return pl.read_csv(self.csv, infer_schema_length=None).to_dict(as_series=False)
except Exception:
return {}
def _model_train(self):
self.model.train()
# Freeze BN stat
for n, m in self.model.named_modules():
if any(filter(lambda f: f in n, self.freeze_layer_names)) and isinstance(m, nn.BatchNorm2d):
m.eval()
def save_model(self):
import io
# Serialize ckpt to a byte buffer once (faster than repeated torch.save() calls)
buffer = io.BytesIO()
torch.save(
{
"epoch": self.epoch,
"best_fitness": self.best_fitness,
"model": None, # resume and final checkpoints derive from EMA
"ema": deepcopy(unwrap_model(self.ema.ema)).half(),
"updates": self.ema.updates,
"optimizer": convert_optimizer_state_dict_to_fp16(deepcopy(self.optimizer.state_dict())),
"scaler": self.scaler.state_dict(),
"train_args": vars(self.args), # save as dict
"train_metrics": {**self.metrics, **{"fitness": self.fitness}},
"train_results": self.read_results_csv(),
"date": datetime.now().isoformat(),
"version": __version__,
"git": {
"root": str(GIT.root),
"branch": GIT.branch,
"commit": GIT.commit,
"origin": GIT.origin,
},
"license": "AGPL-3.0 (https://ultralytics.com/license)",
"docs": "https://docs.ultralytics.com",
},
buffer,
)
serialized_ckpt = buffer.getvalue() # get the serialized content to save
# Save checkpoints
self.wdir.mkdir(parents=True, exist_ok=True) # ensure weights directory exists
self.last.write_bytes(serialized_ckpt) # save last.pt
if self.best_fitness == self.fitness:
self.best.write_bytes(serialized_ckpt) # save best.pt
if (self.save_period > 0) and (self.epoch % self.save_period == 0):
(self.wdir / f"epoch{self.epoch}.pt").write_bytes(serialized_ckpt) # save epoch, i.e. 'epoch3.pt'
def get_dataset(self):
try:
# Convert ul:// platform URIs and NDJSON files to local dataset format first
data_str = str(self.args.data)
if data_str.endswith(".ndjson") or (data_str.startswith("ul://") and "/datasets/" in data_str):
import asyncio
from ultralytics.data.converter import convert_ndjson_to_yolo
from ultralytics.utils.checks import check_file
self.args.data = str(asyncio.run(convert_ndjson_to_yolo(check_file(self.args.data))))
# Task-specific dataset checking
if self.args.task == "classify":
data = check_cls_dataset(self.args.data)
elif str(self.args.data).rsplit(".", 1)[-1] in {"yaml", "yml"} or self.args.task in {
"detect",
"segment",
"pose",
"obb",
}:
data = check_det_dataset(self.args.data)
if "yaml_file" in data:
self.args.data = data["yaml_file"] # for validating 'yolo train data=url.zip' usage
except Exception as e:
raise RuntimeError(emojis(f"Dataset '{clean_url(self.args.data)}' error ❌ {e}")) from e
if self.args.single_cls:
LOGGER.info("Overriding class names with single class.")
data["names"] = {0: "item"}
data["nc"] = 1
return data
def setup_model(self):
if isinstance(self.model, torch.nn.Module): # if model is loaded beforehand. No setup needed
return
cfg, weights = self.model, None
ckpt = None
if str(self.model).endswith(".pt"):
weights, ckpt = load_checkpoint(self.model)
cfg = weights.yaml
elif isinstance(self.args.pretrained, (str, Path)):
weights, _ = load_checkpoint(self.args.pretrained)
self.model = self.get_model(cfg=cfg, weights=weights, verbose=RANK == -1) # calls Model(cfg, weights)
return ckpt
def optimizer_step(self):
self.scaler.unscale_(self.optimizer) # unscale gradients
torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=10.0)
self.scaler.step(self.optimizer)
self.scaler.update()
self.optimizer.zero_grad()
if self.ema:
self.ema.update(self.model)
def preprocess_batch(self, batch):
return batch
def validate(self):
if self.ema and self.world_size > 1:
# Sync EMA buffers from rank 0 to all ranks
for buffer in self.ema.ema.buffers():
dist.broadcast(buffer, src=0)
metrics = self.validator(self)
if metrics is None:
return None, None
fitness = metrics.pop("fitness", -self.loss.detach().cpu().numpy()) # use loss as fitness measure if not found
if not self.best_fitness or self.best_fitness < fitness:
self.best_fitness = fitness
return metrics, fitness
def get_model(self, cfg=None, weights=None, verbose=True):
raise NotImplementedError("This task trainer doesn't support loading cfg files")
def get_validator(self):
raise NotImplementedError("get_validator function not implemented in trainer")
def get_dataloader(self, dataset_path, batch_size=16, rank=0, mode="train"):
raise NotImplementedError("get_dataloader function not implemented in trainer")
def build_dataset(self, img_path, mode="train", batch=None):
raise NotImplementedError("build_dataset function not implemented in trainer")
def label_loss_items(self, loss_items=None, prefix="train"):
return {"loss": loss_items} if loss_items is not None else ["loss"]
def set_model_attributes(self):
self.model.names = self.data["names"]
def build_targets(self, preds, targets):
pass
def progress_string(self):
return ""
# TODO: may need to put these following functions into callback
def plot_training_samples(self, batch, ni):
pass
def plot_training_labels(self):
pass
def save_metrics(self, metrics):
keys, vals = list(metrics.keys()), list(metrics.values())
n = len(metrics) + 2 # number of cols
t = time.time() - self.train_time_start
self.csv.parent.mkdir(parents=True, exist_ok=True) # ensure parent directory exists
s = "" if self.csv.exists() else ("%s," * n % ("epoch", "time", *keys)).rstrip(",") + "\n"
with open(self.csv, "a", encoding="utf-8") as f:
f.write(s + ("%.6g," * n % (self.epoch + 1, t, *vals)).rstrip(",") + "\n")
def plot_metrics(self):
plot_results(file=self.csv, on_plot=self.on_plot) # save results.png
def on_plot(self, name, data=None):
path = Path(name)
self.plots[path] = {"data": data, "timestamp": time.time()}
def final_eval(self):
model = self.best if self.best.exists() else None
with torch_distributed_zero_first(LOCAL_RANK): # strip only on GPU 0; other GPUs should wait
if RANK in {-1, 0}:
ckpt = strip_optimizer(self.last) if self.last.exists() else {}
if model:
# update best.pt train_metrics from last.pt
strip_optimizer(self.best, updates={"train_results": ckpt.get("train_results")})
if model:
LOGGER.info(f"\nValidating {model}...")
self.validator.args.plots = self.args.plots
self.validator.args.compile = False # disable final val compile as too slow
self.metrics = self.validator(model=model)
self.metrics.pop("fitness", None)
self.run_callbacks("on_fit_epoch_end")
def check_resume(self, overrides):
resume = self.args.resume
if resume:
try:
exists = isinstance(resume, (str, Path)) and Path(resume).exists()
last = Path(check_file(resume) if exists else get_latest_run())
# Check that resume data YAML exists, otherwise strip to force re-download of dataset
ckpt_args = load_checkpoint(last)[0].args
if not isinstance(ckpt_args["data"], dict) and not Path(ckpt_args["data"]).exists():
ckpt_args["data"] = self.args.data
resume = True
self.args = get_cfg(ckpt_args)
self.args.model = self.args.resume = str(last) # reinstate model
for k in (
"imgsz",
"batch",
"device",
"close_mosaic",
"augmentations",
"save_period",
"workers",
"cache",
"patience",
"time",
"freeze",
"val",
"plots",
): # allow arg updates to reduce memory or update device on resume
if k in overrides:
setattr(self.args, k, overrides[k])
# Handle augmentations parameter for resume: check if user provided custom augmentations
if ckpt_args.get("augmentations") is not None:
# Augmentations were saved in checkpoint as reprs but can't be restored automatically
LOGGER.warning(
"Custom Albumentations transforms were used in the original training run but are not "
"being restored. To preserve custom augmentations when resuming, you need to pass the "
"'augmentations' parameter again to get expected results. Example: \n"
f"model.train(resume=True, augmentations={ckpt_args['augmentations']})"
)
except Exception as e:
raise FileNotFoundError(
"Resume checkpoint not found. Please pass a valid checkpoint to resume from, "
"i.e. 'yolo train resume model=path/to/last.pt'"
) from e
self.resume = resume
def _load_checkpoint_state(self, ckpt):
if ckpt.get("optimizer") is not None:
self.optimizer.load_state_dict(ckpt["optimizer"])
if ckpt.get("scaler") is not None:
self.scaler.load_state_dict(ckpt["scaler"])
if self.ema and ckpt.get("ema"):
self.ema = ModelEMA(self.model) # validation with EMA creates inference tensors that can't be updated
self.ema.ema.load_state_dict(ckpt["ema"].float().state_dict())
self.ema.updates = ckpt["updates"]
self.best_fitness = ckpt.get("best_fitness", 0.0)
def _handle_nan_recovery(self, epoch):
loss_nan = self.loss is not None and not self.loss.isfinite()
fitness_nan = self.fitness is not None and not np.isfinite(self.fitness)
fitness_collapse = self.best_fitness and self.best_fitness > 0 and self.fitness == 0
corrupted = RANK in {-1, 0} and loss_nan and (fitness_nan or fitness_collapse)
reason = "Loss NaN/Inf" if loss_nan else "Fitness NaN/Inf" if fitness_nan else "Fitness collapse"
if RANK != -1: # DDP: broadcast to all ranks
broadcast_list = [corrupted if RANK == 0 else None]
dist.broadcast_object_list(broadcast_list, 0)
corrupted = broadcast_list[0]
if not corrupted:
return False
if epoch == self.start_epoch or not self.last.exists():
LOGGER.warning(f"{reason} detected but can not recover from last.pt...")
return False # Cannot recover on first epoch, let training continue
self.nan_recovery_attempts += 1
if self.nan_recovery_attempts > 3:
raise RuntimeError(f"Training failed: NaN persisted for {self.nan_recovery_attempts} epochs")
LOGGER.warning(f"{reason} detected (attempt {self.nan_recovery_attempts}/3), recovering from last.pt...")
self._model_train() # set model to train mode before loading checkpoint to avoid inference tensor errors
_, ckpt = load_checkpoint(self.last)
ema_state = ckpt["ema"].float().state_dict()
if not all(torch.isfinite(v).all() for v in ema_state.values() if isinstance(v, torch.Tensor)):
raise RuntimeError(f"Checkpoint {self.last} is corrupted with NaN/Inf weights")
unwrap_model(self.model).load_state_dict(ema_state) # Load EMA weights into model
self._load_checkpoint_state(ckpt) # Load optimizer/scaler/EMA/best_fitness
del ckpt, ema_state
self.scheduler.last_epoch = epoch - 1
return True
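# DDP agreement sketch (assuming world_size=2): rank 0 evaluates `corrupted`, then
# dist.broadcast_object_list overwrites the placeholder None on rank 1, so both ranks
# either roll back to last.pt together or both continue training.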
def resume_training(self, ckpt):
if ckpt is None or not self.resume:
return
start_epoch = ckpt.get("epoch", -1) + 1
assert start_epoch > 0, (
f"{self.args.model} training to {self.epochs} epochs is finished, nothing to resume.\n"
f"Start a new training without resuming, i.e. 'yolo train model={self.args.model}'"
)
LOGGER.info(f"Resuming training {self.args.model} from epoch {start_epoch + 1} to {self.epochs} total epochs")
if self.epochs < start_epoch:
LOGGER.info(
f"{self.model} has been trained for {ckpt['epoch']} epochs. Fine-tuning for {self.epochs} more epochs."
)
self.epochs += ckpt["epoch"] # finetune additional epochs
self._load_checkpoint_state(ckpt)
self.start_epoch = start_epoch
if start_epoch > (self.epochs - self.args.close_mosaic):
self._close_dataloader_mosaic()
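# Worked example (assumed checkpoint): if last.pt stores epoch=99, then start_epoch=100;
# passing epochs=10 (< 100) is treated as fine-tuning, so self.epochs becomes 10 + 99 = 109
# and the training loop continues from epoch 100 toward 109.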
def _close_dataloader_mosaic(self):
if hasattr(self.train_loader.dataset, "mosaic"):
self.train_loader.dataset.mosaic = False
if hasattr(self.train_loader.dataset, "close_mosaic"):
LOGGER.info("Closing dataloader mosaic")
self.train_loader.dataset.close_mosaic(hyp=copy(self.args))
def build_optimizer(self, model, name="auto", lr=0.001, momentum=0.9, decay=1e-5, iterations=1e5):
g = [{}, {}, {}, {}] # optimizer parameter groups
bn = tuple(v for k, v in nn.__dict__.items() if "Norm" in k) # normalization layers, i.e. BatchNorm2d()
if name == "auto":
LOGGER.info(
f"{colorstr('optimizer:')} 'optimizer=auto' found, "
f"ignoring 'lr0={self.args.lr0}' and 'momentum={self.args.momentum}' and "
f"determining best 'optimizer', 'lr0' and 'momentum' automatically... "
)
nc = self.data.get("nc", 10) # number of classes
lr_fit = round(0.002 * 5 / (4 + nc), 6) # lr0 fit equation to 6 decimal places
name, lr, momentum = ("MuSGD", 0.01, 0.9) if iterations > 10000 else ("AdamW", lr_fit, 0.9)
self.args.warmup_bias_lr = 0.0 # no higher than 0.01 for Adam
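# Worked example (assumed nc=80, iterations <= 10000): lr_fit = round(0.002 * 5 / 84, 6) = 0.000119,
# so the auto branch selects ("AdamW", 0.000119, 0.9); with iterations > 10000 it picks
# ("MuSGD", 0.01, 0.9) regardless of nc.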
use_muon = name == "MuSGD"
for module_name, module in unwrap_model(model).named_modules():
for param_name, param in module.named_parameters(recurse=False):
fullname = f"{module_name}.{param_name}" if module_name else param_name
if param.ndim >= 2 and use_muon:
g[3][fullname] = param # muon params
elif "bias" in fullname: # bias (no decay)
g[2][fullname] = param
elif isinstance(module, bn) or "logit_scale" in fullname: # weight (no decay)
# ContrastiveHead and BNContrastiveHead included here with 'logit_scale'
g[1][fullname] = param
else: # weight (with decay)
g[0][fullname] = param
if not use_muon:
g = [x.values() for x in g[:3]] # convert to list of params
optimizers = {"Adam", "Adamax", "AdamW", "NAdam", "RAdam", "RMSProp", "SGD", "MuSGD", "auto"}
name = {x.lower(): x for x in optimizers}.get(name.lower())
if name in {"Adam", "Adamax", "AdamW", "NAdam", "RAdam"}:
optim_args = dict(lr=lr, betas=(momentum, 0.999), weight_decay=0.0)
elif name == "RMSProp":
optim_args = dict(lr=lr, momentum=momentum)
elif name == "SGD" or name == "MuSGD":
optim_args = dict(lr=lr, momentum=momentum, nesterov=True)
else:
raise NotImplementedError(
f"Optimizer '{name}' not found in list of available optimizers {optimizers}. "
"Request support for addition optimizers at https://github.com/ultralytics/ultralytics."
)
num_params = [len(g[0]), len(g[1]), len(g[2])] # number of param groups
g[2] = {"params": g[2], **optim_args, "param_group": "bias"}
g[0] = {"params": g[0], **optim_args, "weight_decay": decay, "param_group": "weight"}
g[1] = {"params": g[1], **optim_args, "weight_decay": 0.0, "param_group": "bn"}
muon, sgd = (0.2, 1.0)
if use_muon:
num_params[0] = len(g[3]) # update number of params
g[3] = {"params": g[3], **optim_args, "weight_decay": decay, "use_muon": True, "param_group": "muon"}
import re
# higher lr for certain parameters in MuSGD when fine-tuning
pattern = re.compile(r"(?=.*23)(?=.*cv3)|proto\.semseg")
g_ = [] # new param groups
for x in g:
p = x.pop("params")
p1 = [v for k, v in p.items() if pattern.search(k)]
p2 = [v for k, v in p.items() if not pattern.search(k)]
g_.extend([{"params": p1, **x, "lr": lr * 3}, {"params": p2, **x}])
g = g_
optimizer = getattr(optim, name, partial(MuSGD, muon=muon, sgd=sgd))(params=g)
LOGGER.info(
f"{colorstr('optimizer:')} {type(optimizer).__name__}(lr={lr}, momentum={momentum}) with parameter groups "
f"{num_params[1]} weight(decay=0.0), {num_params[0]} weight(decay={decay}), {num_params[2]} bias(decay=0.0)"
)
return optimizer | --- +++ @@ -1,4 +1,10 @@ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""
+Train a model on a dataset.
+
+Usage:
+ $ yolo mode=train model=yolo26n.pt data=coco8.yaml imgsz=640 epochs=100 batch=16
+"""
from __future__ import annotations
@@ -59,8 +65,63 @@
class BaseTrainer:
+ """A base class for creating trainers.
+
+ This class provides the foundation for training YOLO models, handling the training loop, validation, checkpointing,
+ and various training utilities. It supports both single-GPU and multi-GPU distributed training.
+
+ Attributes:
+ args (SimpleNamespace): Configuration for the trainer.
+ validator (BaseValidator): Validator instance.
+ model (nn.Module): Model instance.
+ callbacks (defaultdict): Dictionary of callbacks.
+ save_dir (Path): Directory to save results.
+ wdir (Path): Directory to save weights.
+ last (Path): Path to the last checkpoint.
+ best (Path): Path to the best checkpoint.
+ save_period (int): Save checkpoint every x epochs (disabled if < 1).
+ batch_size (int): Batch size for training.
+ epochs (int): Number of epochs to train for.
+ start_epoch (int): Starting epoch for training.
+ device (torch.device): Device to use for training.
+ amp (bool): Flag to enable AMP (Automatic Mixed Precision).
+ scaler (torch.amp.GradScaler): Gradient scaler for AMP.
+ data (dict): Dataset dictionary containing paths and metadata.
+ ema (ModelEMA): EMA (Exponential Moving Average) of the model.
+ resume (bool): Resume training from a checkpoint.
+ lf (callable): Learning rate scheduling function.
+ scheduler (torch.optim.lr_scheduler._LRScheduler): Learning rate scheduler.
+ best_fitness (float): The best fitness value achieved.
+ fitness (float): Current fitness value.
+ loss (torch.Tensor): Current loss value.
+ tloss (torch.Tensor): Running mean of loss items.
+ loss_names (list): List of loss names.
+ csv (Path): Path to results CSV file.
+ metrics (dict): Dictionary of metrics.
+ plots (dict): Dictionary of plots.
+
+ Methods:
+ train: Execute the training process.
+ validate: Run validation on the val set.
+ save_model: Save model training checkpoints.
+ get_dataset: Get train and validation datasets.
+ setup_model: Load, create, or download model.
+ build_optimizer: Construct an optimizer for the model.
+
+ Examples:
+ Initialize a trainer and start training
+ >>> trainer = BaseTrainer(cfg="config.yaml")
+ >>> trainer.train()
+ """
def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks: dict | None = None):
+ """Initialize the BaseTrainer class.
+
+ Args:
+ cfg (str | dict | SimpleNamespace, optional): Path to a configuration file or configuration object.
+ overrides (dict, optional): Configuration overrides.
+ _callbacks (dict, optional): Dictionary of callback functions.
+ """
self.hub_session = overrides.pop("session", None) # HUB
self.args = get_cfg(cfg, overrides)
self.check_resume(overrides)
@@ -143,16 +204,20 @@ self.nan_recovery_attempts = 0
def add_callback(self, event: str, callback):
+ """Append the given callback to the event's callback list."""
self.callbacks[event].append(callback)
def set_callback(self, event: str, callback):
+ """Override the existing callbacks with the given callback for the specified event."""
self.callbacks[event] = [callback]
def run_callbacks(self, event: str):
+ """Run all existing callbacks associated with a particular event."""
for callback in self.callbacks.get(event, []):
callback(self)
def train(self):
+ """Execute the training process, using DDP subprocess for multi-GPU or direct training for single-GPU."""
# Run subprocess if DDP training, else train normally
if self.ddp:
# Argument checks
@@ -179,6 +244,7 @@ self._do_train()
def _setup_scheduler(self):
+ """Initialize training learning rate scheduler."""
if self.args.cos_lr:
self.lf = one_cycle(1, self.args.lrf, self.epochs) # cosine 1->hyp['lrf']
else:
@@ -186,6 +252,7 @@ self.scheduler = optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda=self.lf)
def _setup_ddp(self):
+ """Initialize and set the DistributedDataParallel parameters for training."""
torch.cuda.set_device(RANK)
self.device = torch.device("cuda", RANK)
os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1" # set to enforce timeout
@@ -197,6 +264,7 @@ )
def _build_train_pipeline(self):
+ """Build dataloaders, optimizer, and scheduler for current batch size."""
batch_size = self.batch_size // max(self.world_size, 1)
self.train_loader = self.get_dataloader(
self.data["train"], batch_size=batch_size, rank=LOCAL_RANK, mode="train"
@@ -222,6 +290,7 @@ self._setup_scheduler()
def _setup_train(self):
+ """Configure model, optimizer, dataloaders, and training utilities before the training loop."""
ckpt = self.setup_model()
self.model = self.model.to(self.device)
self.set_model_attributes()
@@ -291,6 +360,7 @@ self.run_callbacks("on_pretrain_routine_end")
def _do_train(self):
+ """Perform the full training loop including setup, epoch iteration, validation, and final evaluation."""
if self.world_size > 1:
self._setup_ddp()
self._setup_train()
@@ -498,6 +568,7 @@ self.run_callbacks("teardown")
def auto_batch(self, max_num_obj=0):
+ """Calculate optimal batch size based on model and device memory constraints."""
return check_train_batch_size(
model=self.model,
imgsz=self.args.imgsz,
@@ -507,6 +578,7 @@ ) # returns batch size
def _get_memory(self, fraction=False):
+ """Get accelerator memory utilization in GB or as a fraction of total memory."""
memory, total = 0, 0
if self.device.type == "mps":
memory = torch.mps.driver_allocated_memory()
@@ -519,6 +591,7 @@ return ((memory / total) if total > 0 else 0) if fraction else (memory / 2**30)
def _clear_memory(self, threshold: float | None = None):
+ """Clear accelerator memory by calling garbage collector and emptying cache."""
if threshold:
assert 0 <= threshold <= 1, "Threshold must be between 0 and 1."
if self._get_memory(fraction=True) <= threshold:
@@ -532,6 +605,7 @@ torch.cuda.empty_cache()
def read_results_csv(self):
+ """Read results.csv into a dictionary using polars."""
import polars as pl # scope for faster 'import ultralytics'
try:
@@ -540,6 +614,7 @@ return {}
def _model_train(self):
+ """Set model in training mode."""
self.model.train()
# Freeze BN stat
for n, m in self.model.named_modules():
@@ -547,6 +622,7 @@ m.eval()
def save_model(self):
+ """Save model training checkpoints with additional metadata."""
import io
# Serialize ckpt to a byte buffer once (faster than repeated torch.save() calls)
@@ -587,6 +663,11 @@ (self.wdir / f"epoch{self.epoch}.pt").write_bytes(serialized_ckpt) # save epoch, i.e. 'epoch3.pt'
def get_dataset(self):
+ """Get train and validation datasets from data dictionary.
+
+ Returns:
+ (dict): A dictionary containing the training/validation/test dataset and category names.
+ """
try:
# Convert ul:// platform URIs and NDJSON files to local dataset format first
data_str = str(self.args.data)
@@ -619,6 +700,11 @@ return data
def setup_model(self):
+ """Load, create, or download model for any task.
+
+ Returns:
+ (dict | None): Checkpoint to resume training from, or None if no checkpoint is loaded.
+ """
if isinstance(self.model, torch.nn.Module): # if model is loaded beforehand. No setup needed
return
@@ -633,6 +719,7 @@ return ckpt
def optimizer_step(self):
+ """Perform a single step of the training optimizer with gradient clipping and EMA update."""
self.scaler.unscale_(self.optimizer) # unscale gradients
torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=10.0)
self.scaler.step(self.optimizer)
@@ -642,9 +729,17 @@ self.ema.update(self.model)
def preprocess_batch(self, batch):
+ """Allow custom preprocessing of model inputs and ground truths depending on task type."""
return batch
def validate(self):
+ """Run validation on val set using self.validator.
+
+ Returns:
+ (tuple): A tuple containing:
+ - metrics (dict | None): Dictionary of validation metrics, or None if validation was skipped.
+ - fitness (float | None): Fitness score for the validation, or None if validation was skipped.
+ """
if self.ema and self.world_size > 1:
# Sync EMA buffers from rank 0 to all ranks
for buffer in self.ema.ema.buffers():
@@ -658,37 +753,52 @@ return metrics, fitness
def get_model(self, cfg=None, weights=None, verbose=True):
+ """Get model and raise NotImplementedError for loading cfg files."""
raise NotImplementedError("This task trainer doesn't support loading cfg files")
def get_validator(self):
+ """Raise NotImplementedError (must be implemented by subclasses)."""
raise NotImplementedError("get_validator function not implemented in trainer")
def get_dataloader(self, dataset_path, batch_size=16, rank=0, mode="train"):
+ """Raise NotImplementedError (must return a `torch.utils.data.DataLoader` in subclasses)."""
raise NotImplementedError("get_dataloader function not implemented in trainer")
def build_dataset(self, img_path, mode="train", batch=None):
+ """Build dataset."""
raise NotImplementedError("build_dataset function not implemented in trainer")
def label_loss_items(self, loss_items=None, prefix="train"):
+ """Return a loss dict with labeled training loss items, or a list of loss names if loss_items is None.
+
+ Notes:
+ This is not needed for classification but necessary for segmentation & detection.
+ """
return {"loss": loss_items} if loss_items is not None else ["loss"]
def set_model_attributes(self):
+ """Set or update model parameters before training."""
self.model.names = self.data["names"]
def build_targets(self, preds, targets):
+ """Build target tensors for training YOLO model."""
pass
def progress_string(self):
+ """Return a string describing training progress."""
return ""
# TODO: may need to move the following functions into callbacks
def plot_training_samples(self, batch, ni):
+ """Plot training samples during YOLO training."""
pass
def plot_training_labels(self):
+ """Plot training labels for YOLO model."""
pass
def save_metrics(self, metrics):
+ """Save training metrics to a CSV file."""
keys, vals = list(metrics.keys()), list(metrics.values())
n = len(metrics) + 2 # number of cols
t = time.time() - self.train_time_start
@@ -698,13 +808,16 @@ f.write(s + ("%.6g," * n % (self.epoch + 1, t, *vals)).rstrip(",") + "\n")
def plot_metrics(self):
+ """Plot metrics from a CSV file."""
plot_results(file=self.csv, on_plot=self.on_plot) # save results.png
def on_plot(self, name, data=None):
+ """Register plots (e.g. to be consumed in callbacks)."""
path = Path(name)
self.plots[path] = {"data": data, "timestamp": time.time()}
def final_eval(self):
+ """Perform final evaluation and validation for the YOLO model."""
model = self.best if self.best.exists() else None
with torch_distributed_zero_first(LOCAL_RANK): # strip only on GPU 0; other GPUs should wait
if RANK in {-1, 0}:
@@ -721,6 +834,7 @@ self.run_callbacks("on_fit_epoch_end")
def check_resume(self, overrides):
+ """Check if resume checkpoint exists and update arguments accordingly."""
resume = self.args.resume
if resume:
try:
@@ -771,6 +885,7 @@ self.resume = resume
def _load_checkpoint_state(self, ckpt):
+ """Load optimizer, scaler, EMA, and best_fitness from checkpoint."""
if ckpt.get("optimizer") is not None:
self.optimizer.load_state_dict(ckpt["optimizer"])
if ckpt.get("scaler") is not None:
@@ -782,6 +897,7 @@ self.best_fitness = ckpt.get("best_fitness", 0.0)
def _handle_nan_recovery(self, epoch):
+ """Detect and recover from NaN/Inf loss and fitness collapse by loading last checkpoint."""
loss_nan = self.loss is not None and not self.loss.isfinite()
fitness_nan = self.fitness is not None and not np.isfinite(self.fitness)
fitness_collapse = self.best_fitness and self.best_fitness > 0 and self.fitness == 0
@@ -812,6 +928,7 @@ return True
def resume_training(self, ckpt):
+ """Resume YOLO training from a given checkpoint."""
if ckpt is None or not self.resume:
return
start_epoch = ckpt.get("epoch", -1) + 1
@@ -831,6 +948,7 @@ self._close_dataloader_mosaic()
def _close_dataloader_mosaic(self):
+ """Update dataloaders to stop using mosaic augmentation."""
if hasattr(self.train_loader.dataset, "mosaic"):
self.train_loader.dataset.mosaic = False
if hasattr(self.train_loader.dataset, "close_mosaic"):
@@ -838,6 +956,20 @@ self.train_loader.dataset.close_mosaic(hyp=copy(self.args))
def build_optimizer(self, model, name="auto", lr=0.001, momentum=0.9, decay=1e-5, iterations=1e5):
+ """Construct an optimizer for the given model.
+
+ Args:
+ model (torch.nn.Module): The model for which to build an optimizer.
+ name (str, optional): The name of the optimizer to use. If 'auto', the optimizer is selected based on the
+ number of iterations.
+ lr (float, optional): The learning rate for the optimizer.
+ momentum (float, optional): The momentum factor for the optimizer.
+ decay (float, optional): The weight decay for the optimizer.
+ iterations (float, optional): The number of iterations, which determines the optimizer if name is 'auto'.
+
+ Returns:
+ (torch.optim.Optimizer): The constructed optimizer.
+ """
g = [{}, {}, {}, {}] # optimizer parameter groups
bn = tuple(v for k, v in nn.__dict__.items() if "Norm" in k) # normalization layers, i.e. BatchNorm2d()
if name == "auto":
@@ -906,4 +1038,4 @@ f"{colorstr('optimizer:')} {type(optimizer).__name__}(lr={lr}, momentum={momentum}) with parameter groups "
f"{num_params[1]} weight(decay=0.0), {num_params[0]} weight(decay={decay}), {num_params[2]} bias(decay=0.0)"
)
- return optimizer
+ return optimizer
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/engine/trainer.py |
Write docstrings for utility functions | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# --------------------------------------------------------
# TinyViT Model Architecture
# Copyright (c) 2022 Microsoft
# Adapted from LeViT and Swin Transformer
# LeViT: (https://github.com/facebookresearch/levit)
# Swin: (https://github.com/microsoft/swin-transformer)
# Build the TinyViT Model
# --------------------------------------------------------
from __future__ import annotations
import itertools
import torch
import torch.nn as nn
import torch.nn.functional as F
from ultralytics.nn.modules import LayerNorm2d
from ultralytics.utils.instance import to_2tuple
class Conv2d_BN(torch.nn.Sequential):
def __init__(
self,
a: int,
b: int,
ks: int = 1,
stride: int = 1,
pad: int = 0,
dilation: int = 1,
groups: int = 1,
bn_weight_init: float = 1,
):
super().__init__()
self.add_module("c", torch.nn.Conv2d(a, b, ks, stride, pad, dilation, groups, bias=False))
bn = torch.nn.BatchNorm2d(b)
torch.nn.init.constant_(bn.weight, bn_weight_init)
torch.nn.init.constant_(bn.bias, 0)
self.add_module("bn", bn)
class PatchEmbed(nn.Module):
def __init__(self, in_chans: int, embed_dim: int, resolution: int, activation):
super().__init__()
img_size: tuple[int, int] = to_2tuple(resolution)
self.patches_resolution = (img_size[0] // 4, img_size[1] // 4)
self.num_patches = self.patches_resolution[0] * self.patches_resolution[1]
self.in_chans = in_chans
self.embed_dim = embed_dim
n = embed_dim
self.seq = nn.Sequential(
Conv2d_BN(in_chans, n // 2, 3, 2, 1),
activation(),
Conv2d_BN(n // 2, n, 3, 2, 1),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.seq(x)
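# Shape sketch (assumed 224x224 RGB input): the two stride-2 convolutions in `seq` downsample
# by 4x overall, so
# >>> PatchEmbed(3, 96, 224, nn.GELU)(torch.randn(1, 3, 224, 224)).shape
# torch.Size([1, 96, 56, 56])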
class MBConv(nn.Module):
def __init__(self, in_chans: int, out_chans: int, expand_ratio: float, activation, drop_path: float):
super().__init__()
self.in_chans = in_chans
self.hidden_chans = int(in_chans * expand_ratio)
self.out_chans = out_chans
self.conv1 = Conv2d_BN(in_chans, self.hidden_chans, ks=1)
self.act1 = activation()
self.conv2 = Conv2d_BN(self.hidden_chans, self.hidden_chans, ks=3, stride=1, pad=1, groups=self.hidden_chans)
self.act2 = activation()
self.conv3 = Conv2d_BN(self.hidden_chans, out_chans, ks=1, bn_weight_init=0.0)
self.act3 = activation()
# NOTE: `DropPath` is needed only for training.
# self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.drop_path = nn.Identity()
def forward(self, x: torch.Tensor) -> torch.Tensor:
shortcut = x
x = self.conv1(x)
x = self.act1(x)
x = self.conv2(x)
x = self.act2(x)
x = self.conv3(x)
x = self.drop_path(x)
x += shortcut
return self.act3(x)
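# Channel sketch (assumed dim=64, expand_ratio=4.0): 64 -> 256 (1x1 expand) -> 256 (3x3 depthwise)
# -> 64 (1x1 project), then the residual add; note that `x += shortcut` requires
# in_chans == out_chans, which ConvLayer guarantees by constructing MBConv(dim, dim, ...).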
class PatchMerging(nn.Module):
def __init__(self, input_resolution: tuple[int, int], dim: int, out_dim: int, activation):
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.out_dim = out_dim
self.act = activation()
self.conv1 = Conv2d_BN(dim, out_dim, 1, 1, 0)
stride_c = 1 if out_dim in {320, 448, 576} else 2
self.conv2 = Conv2d_BN(out_dim, out_dim, 3, stride_c, 1, groups=out_dim)
self.conv3 = Conv2d_BN(out_dim, out_dim, 1, 1, 0)
def forward(self, x: torch.Tensor) -> torch.Tensor:
if x.ndim == 3:
H, W = self.input_resolution
B = len(x)
# (B, C, H, W)
x = x.view(B, H, W, -1).permute(0, 3, 1, 2)
x = self.conv1(x)
x = self.act(x)
x = self.conv2(x)
x = self.act(x)
x = self.conv3(x)
return x.flatten(2).transpose(1, 2)
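# Shape sketch (assumed dim=64, out_dim=128, input_resolution=(56, 56)): a (B, 3136, 64) token
# input is reshaped to (B, 64, 56, 56); conv2 runs with stride 2 here (128 is not in
# {320, 448, 576}), giving (B, 128, 28, 28), which flattens back to (B, 784, 128).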
class ConvLayer(nn.Module):
def __init__(
self,
dim: int,
input_resolution: tuple[int, int],
depth: int,
activation,
drop_path: float | list[float] = 0.0,
downsample: nn.Module | None = None,
use_checkpoint: bool = False,
out_dim: int | None = None,
conv_expand_ratio: float = 4.0,
):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.depth = depth
self.use_checkpoint = use_checkpoint
# Build blocks
self.blocks = nn.ModuleList(
[
MBConv(
dim,
dim,
conv_expand_ratio,
activation,
drop_path[i] if isinstance(drop_path, list) else drop_path,
)
for i in range(depth)
]
)
# Patch merging layer
self.downsample = (
None
if downsample is None
else downsample(input_resolution, dim=dim, out_dim=out_dim, activation=activation)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
for blk in self.blocks:
x = torch.utils.checkpoint.checkpoint(blk, x) if self.use_checkpoint else blk(x)  # warn: checkpoint is slow import
return x if self.downsample is None else self.downsample(x)
class MLP(nn.Module):
def __init__(
self,
in_features: int,
hidden_features: int | None = None,
out_features: int | None = None,
activation=nn.GELU,
drop: float = 0.0,
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.norm = nn.LayerNorm(in_features)
self.fc1 = nn.Linear(in_features, hidden_features)
self.fc2 = nn.Linear(hidden_features, out_features)
self.act = activation()
self.drop = nn.Dropout(drop)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.norm(x)
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
return self.drop(x)
class Attention(torch.nn.Module):
def __init__(
self,
dim: int,
key_dim: int,
num_heads: int = 8,
attn_ratio: float = 4,
resolution: tuple[int, int] = (14, 14),
):
super().__init__()
assert isinstance(resolution, tuple) and len(resolution) == 2, "'resolution' argument not tuple of length 2"
self.num_heads = num_heads
self.scale = key_dim**-0.5
self.key_dim = key_dim
self.nh_kd = nh_kd = key_dim * num_heads
self.d = int(attn_ratio * key_dim)
self.dh = int(attn_ratio * key_dim) * num_heads
self.attn_ratio = attn_ratio
h = self.dh + nh_kd * 2
self.norm = nn.LayerNorm(dim)
self.qkv = nn.Linear(dim, h)
self.proj = nn.Linear(self.dh, dim)
points = list(itertools.product(range(resolution[0]), range(resolution[1])))
N = len(points)
attention_offsets = {}
idxs = []
for p1 in points:
for p2 in points:
offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1]))
if offset not in attention_offsets:
attention_offsets[offset] = len(attention_offsets)
idxs.append(attention_offsets[offset])
self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, len(attention_offsets)))
self.register_buffer("attention_bias_idxs", torch.LongTensor(idxs).view(N, N), persistent=False)
@torch.no_grad()
def train(self, mode: bool = True):
super().train(mode)
if mode and hasattr(self, "ab"):
del self.ab
else:
self.ab = self.attention_biases[:, self.attention_bias_idxs]
def forward(self, x: torch.Tensor) -> torch.Tensor:
B, N, _ = x.shape # B, N, C
# Normalization
x = self.norm(x)
qkv = self.qkv(x)
# (B, N, num_heads, d)
q, k, v = qkv.view(B, N, self.num_heads, -1).split([self.key_dim, self.key_dim, self.d], dim=3)
# (B, num_heads, N, d)
q = q.permute(0, 2, 1, 3)
k = k.permute(0, 2, 1, 3)
v = v.permute(0, 2, 1, 3)
if not self.training:  # cached `ab` only exists in eval mode (see `train()`); training indexes attention_biases directly
    self.ab = self.ab.to(self.attention_biases.device)
attn = (q @ k.transpose(-2, -1)) * self.scale + (
self.attention_biases[:, self.attention_bias_idxs] if self.training else self.ab
)
attn = attn.softmax(dim=-1)
x = (attn @ v).transpose(1, 2).reshape(B, N, self.dh)
return self.proj(x)
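# Bias-table sketch (assumed resolution=(2, 2)): the 4 grid points produce absolute offsets
# {(0, 0), (0, 1), (1, 0), (1, 1)}, so attention_biases holds 4 learnable values per head and
# attention_bias_idxs is a (4, 4) LongTensor mapping each query/key pair to one of them.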
class TinyViTBlock(nn.Module):
def __init__(
self,
dim: int,
input_resolution: tuple[int, int],
num_heads: int,
window_size: int = 7,
mlp_ratio: float = 4.0,
drop: float = 0.0,
drop_path: float = 0.0,
local_conv_size: int = 3,
activation=nn.GELU,
):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.num_heads = num_heads
assert window_size > 0, "window_size must be greater than 0"
self.window_size = window_size
self.mlp_ratio = mlp_ratio
# NOTE: `DropPath` is needed only for training.
# self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.drop_path = nn.Identity()
assert dim % num_heads == 0, "dim must be divisible by num_heads"
head_dim = dim // num_heads
window_resolution = (window_size, window_size)
self.attn = Attention(dim, head_dim, num_heads, attn_ratio=1, resolution=window_resolution)
mlp_hidden_dim = int(dim * mlp_ratio)
mlp_activation = activation
self.mlp = MLP(in_features=dim, hidden_features=mlp_hidden_dim, activation=mlp_activation, drop=drop)
pad = local_conv_size // 2
self.local_conv = Conv2d_BN(dim, dim, ks=local_conv_size, stride=1, pad=pad, groups=dim)
def forward(self, x: torch.Tensor) -> torch.Tensor:
h, w = self.input_resolution
b, hw, c = x.shape # batch, height*width, channels
assert hw == h * w, "input feature has wrong size"
res_x = x
if h == self.window_size and w == self.window_size:
x = self.attn(x)
else:
x = x.view(b, h, w, c)
pad_b = (self.window_size - h % self.window_size) % self.window_size
pad_r = (self.window_size - w % self.window_size) % self.window_size
padding = pad_b > 0 or pad_r > 0
if padding:
x = F.pad(x, (0, 0, 0, pad_r, 0, pad_b))
pH, pW = h + pad_b, w + pad_r
nH = pH // self.window_size
nW = pW // self.window_size
# Window partition
x = (
x.view(b, nH, self.window_size, nW, self.window_size, c)
.transpose(2, 3)
.reshape(b * nH * nW, self.window_size * self.window_size, c)
)
x = self.attn(x)
# Window reverse
x = x.view(b, nH, nW, self.window_size, self.window_size, c).transpose(2, 3).reshape(b, pH, pW, c)
if padding:
x = x[:, :h, :w].contiguous()
x = x.view(b, hw, c)
x = res_x + self.drop_path(x)
x = x.transpose(1, 2).reshape(b, c, h, w)
x = self.local_conv(x)
x = x.view(b, c, hw).transpose(1, 2)
return x + self.drop_path(self.mlp(x))
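# Padding sketch (assumed h=w=10, window_size=7): pad_b = pad_r = (7 - 10 % 7) % 7 = 4, so the
# map is padded to 14x14 and partitioned into nH * nW = 2 * 2 = 4 windows of 7 * 7 = 49 tokens;
# the padding is cropped again after the window reverse.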
def extra_repr(self) -> str:
return (
f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, "
f"window_size={self.window_size}, mlp_ratio={self.mlp_ratio}"
)
class BasicLayer(nn.Module):
def __init__(
self,
dim: int,
input_resolution: tuple[int, int],
depth: int,
num_heads: int,
window_size: int,
mlp_ratio: float = 4.0,
drop: float = 0.0,
drop_path: float | list[float] = 0.0,
downsample: nn.Module | None = None,
use_checkpoint: bool = False,
local_conv_size: int = 3,
activation=nn.GELU,
out_dim: int | None = None,
):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.depth = depth
self.use_checkpoint = use_checkpoint
# Build blocks
self.blocks = nn.ModuleList(
[
TinyViTBlock(
dim=dim,
input_resolution=input_resolution,
num_heads=num_heads,
window_size=window_size,
mlp_ratio=mlp_ratio,
drop=drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
local_conv_size=local_conv_size,
activation=activation,
)
for i in range(depth)
]
)
# Patch merging layer
self.downsample = (
None
if downsample is None
else downsample(input_resolution, dim=dim, out_dim=out_dim, activation=activation)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
for blk in self.blocks:
x = torch.utils.checkpoint.checkpoint(blk, x) if self.use_checkpoint else blk(x)  # warn: checkpoint is slow import
return x if self.downsample is None else self.downsample(x)
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
class TinyViT(nn.Module):
def __init__(
self,
img_size: int = 224,
in_chans: int = 3,
num_classes: int = 1000,
embed_dims: tuple[int, int, int, int] = (96, 192, 384, 768),
depths: tuple[int, int, int, int] = (2, 2, 6, 2),
num_heads: tuple[int, int, int, int] = (3, 6, 12, 24),
window_sizes: tuple[int, int, int, int] = (7, 7, 14, 7),
mlp_ratio: float = 4.0,
drop_rate: float = 0.0,
drop_path_rate: float = 0.1,
use_checkpoint: bool = False,
mbconv_expand_ratio: float = 4.0,
local_conv_size: int = 3,
layer_lr_decay: float = 1.0,
):
super().__init__()
self.img_size = img_size
self.num_classes = num_classes
self.depths = depths
self.num_layers = len(depths)
self.mlp_ratio = mlp_ratio
activation = nn.GELU
self.patch_embed = PatchEmbed(
in_chans=in_chans, embed_dim=embed_dims[0], resolution=img_size, activation=activation
)
patches_resolution = self.patch_embed.patches_resolution
self.patches_resolution = patches_resolution
# Stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
# Build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
kwargs = dict(
dim=embed_dims[i_layer],
input_resolution=(
patches_resolution[0] // (2 ** (i_layer - 1 if i_layer == 3 else i_layer)),
patches_resolution[1] // (2 ** (i_layer - 1 if i_layer == 3 else i_layer)),
),
# input_resolution=(patches_resolution[0] // (2 ** i_layer),
# patches_resolution[1] // (2 ** i_layer)),
depth=depths[i_layer],
drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])],
downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
use_checkpoint=use_checkpoint,
out_dim=embed_dims[min(i_layer + 1, len(embed_dims) - 1)],
activation=activation,
)
if i_layer == 0:
layer = ConvLayer(conv_expand_ratio=mbconv_expand_ratio, **kwargs)
else:
layer = BasicLayer(
num_heads=num_heads[i_layer],
window_size=window_sizes[i_layer],
mlp_ratio=self.mlp_ratio,
drop=drop_rate,
local_conv_size=local_conv_size,
**kwargs,
)
self.layers.append(layer)
# Classifier head
self.norm_head = nn.LayerNorm(embed_dims[-1])
self.head = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else torch.nn.Identity()
# Init weights
self.apply(self._init_weights)
self.set_layer_lr_decay(layer_lr_decay)
self.neck = nn.Sequential(
nn.Conv2d(
embed_dims[-1],
256,
kernel_size=1,
bias=False,
),
LayerNorm2d(256),
nn.Conv2d(
256,
256,
kernel_size=3,
padding=1,
bias=False,
),
LayerNorm2d(256),
)
def set_layer_lr_decay(self, layer_lr_decay: float):
decay_rate = layer_lr_decay
# Layers -> blocks (depth)
depth = sum(self.depths)
lr_scales = [decay_rate ** (depth - i - 1) for i in range(depth)]
def _set_lr_scale(m, scale):
for p in m.parameters():
p.lr_scale = scale
self.patch_embed.apply(lambda x: _set_lr_scale(x, lr_scales[0]))
i = 0
for layer in self.layers:
for block in layer.blocks:
block.apply(lambda x: _set_lr_scale(x, lr_scales[i]))
i += 1
if layer.downsample is not None:
layer.downsample.apply(lambda x: _set_lr_scale(x, lr_scales[i - 1]))
assert i == depth
for m in {self.norm_head, self.head}:
m.apply(lambda x: _set_lr_scale(x, lr_scales[-1]))
for k, p in self.named_parameters():
p.param_name = k
def _check_lr_scale(m):
for p in m.parameters():
assert hasattr(p, "lr_scale"), p.param_name
self.apply(_check_lr_scale)
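# Decay sketch (assumed depths=(2, 2, 6, 2), layer_lr_decay=0.8): depth=12 and
# lr_scales[i] = 0.8 ** (11 - i), so the patch embed gets 0.8 ** 11 ≈ 0.086 while the last
# block and the classifier head get 1.0, scaling earlier layers to progressively smaller LRs.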
@staticmethod
def _init_weights(m):
if isinstance(m, nn.Linear):
# NOTE: This initialization is needed only for training.
# trunc_normal_(m.weight, std=.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay_keywords(self):
return {"attention_biases"}
def forward_features(self, x: torch.Tensor) -> torch.Tensor:
x = self.patch_embed(x) # x input is (N, C, H, W)
x = self.layers[0](x)
start_i = 1
for i in range(start_i, len(self.layers)):
layer = self.layers[i]
x = layer(x)
batch, _, channel = x.shape
x = x.view(batch, self.patches_resolution[0] // 4, self.patches_resolution[1] // 4, channel)
x = x.permute(0, 3, 1, 2)
return self.neck(x)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.forward_features(x)
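# End-to-end shape sketch (assuming the TinyViT-5M style config used for MobileSAM:
# img_size=1024, embed_dims=(64, 128, 160, 320)): patch embedding yields a 256x256 grid,
# forward_features reshapes the final tokens to a 64x64 map, and the neck projects to 256
# channels, so a (1, 3, 1024, 1024) input returns a (1, 256, 64, 64) feature map.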
def set_imgsz(self, imgsz: list[int] = [1024, 1024]):
imgsz = [s // 4 for s in imgsz]
self.patches_resolution = imgsz
for i, layer in enumerate(self.layers):
input_resolution = (
imgsz[0] // (2 ** (i - 1 if i == 3 else i)),
imgsz[1] // (2 ** (i - 1 if i == 3 else i)),
)
layer.input_resolution = input_resolution
if layer.downsample is not None:
layer.downsample.input_resolution = input_resolution
if isinstance(layer, BasicLayer):
for b in layer.blocks:
b.input_resolution = input_resolution | --- +++ @@ -22,6 +22,23 @@
class Conv2d_BN(torch.nn.Sequential):
+ """A sequential container that performs 2D convolution followed by batch normalization.
+
+ This module combines a 2D convolution layer with batch normalization, providing a common building block for
+ convolutional neural networks. The batch normalization weights and biases are initialized to specific values for
+ optimal training performance.
+
+ Attributes:
+ c (torch.nn.Conv2d): 2D convolution layer.
+ bn (torch.nn.BatchNorm2d): Batch normalization layer.
+
+ Examples:
+ >>> conv_bn = Conv2d_BN(3, 64, ks=3, stride=1, pad=1)
+ >>> input_tensor = torch.randn(1, 3, 224, 224)
+ >>> output = conv_bn(input_tensor)
+ >>> print(output.shape)
+ torch.Size([1, 64, 224, 224])
+ """
def __init__(
self,
@@ -34,6 +51,18 @@ groups: int = 1,
bn_weight_init: float = 1,
):
+ """Initialize a sequential container with 2D convolution followed by batch normalization.
+
+ Args:
+ a (int): Number of input channels.
+ b (int): Number of output channels.
+ ks (int, optional): Kernel size for the convolution.
+ stride (int, optional): Stride for the convolution.
+ pad (int, optional): Padding for the convolution.
+ dilation (int, optional): Dilation factor for the convolution.
+ groups (int, optional): Number of groups for the convolution.
+ bn_weight_init (float, optional): Initial value for batch normalization weight.
+ """
super().__init__()
self.add_module("c", torch.nn.Conv2d(a, b, ks, stride, pad, dilation, groups, bias=False))
bn = torch.nn.BatchNorm2d(b)
@@ -43,8 +72,36 @@
class PatchEmbed(nn.Module):
+ """Embed images into patches and project them into a specified embedding dimension.
+
+ This module converts input images into patch embeddings using a sequence of convolutional layers, effectively
+ downsampling the spatial dimensions while increasing the channel dimension.
+
+ Attributes:
+ patches_resolution (tuple[int, int]): Resolution of the patches after embedding.
+ num_patches (int): Total number of patches.
+ in_chans (int): Number of input channels.
+ embed_dim (int): Dimension of the embedding.
+ seq (nn.Sequential): Sequence of convolutional and activation layers for patch embedding.
+
+ Examples:
+ >>> import torch
+ >>> patch_embed = PatchEmbed(in_chans=3, embed_dim=96, resolution=224, activation=nn.GELU)
+ >>> x = torch.randn(1, 3, 224, 224)
+ >>> output = patch_embed(x)
+ >>> print(output.shape)
+ torch.Size([1, 96, 56, 56])
+ """
def __init__(self, in_chans: int, embed_dim: int, resolution: int, activation):
+ """Initialize patch embedding with convolutional layers for image-to-patch conversion and projection.
+
+ Args:
+ in_chans (int): Number of input channels.
+ embed_dim (int): Dimension of the embedding.
+ resolution (int): Input image resolution.
+ activation (nn.Module): Activation function to use between convolutions.
+ """
super().__init__()
img_size: tuple[int, int] = to_2tuple(resolution)
self.patches_resolution = (img_size[0] // 4, img_size[1] // 4)
@@ -59,12 +116,47 @@ )
def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Process input tensor through patch embedding sequence, converting images to patch embeddings."""
return self.seq(x)
class MBConv(nn.Module):
+ """Mobile Inverted Bottleneck Conv (MBConv) layer, part of the EfficientNet architecture.
+
+ This module implements the mobile inverted bottleneck convolution with expansion, depthwise convolution, and
+ projection phases, along with residual connections for improved gradient flow.
+
+ Attributes:
+ in_chans (int): Number of input channels.
+ hidden_chans (int): Number of hidden channels after expansion.
+ out_chans (int): Number of output channels.
+ conv1 (Conv2d_BN): First convolutional layer for channel expansion.
+ act1 (nn.Module): First activation function.
+ conv2 (Conv2d_BN): Depthwise convolutional layer.
+ act2 (nn.Module): Second activation function.
+ conv3 (Conv2d_BN): Final convolutional layer for projection.
+ act3 (nn.Module): Third activation function.
+ drop_path (nn.Module): Drop path layer (Identity for inference).
+
+ Examples:
+ >>> in_chans, out_chans = 32, 64
+ >>> mbconv = MBConv(in_chans, out_chans, expand_ratio=4, activation=nn.ReLU, drop_path=0.1)
+ >>> x = torch.randn(1, in_chans, 56, 56)
+ >>> output = mbconv(x)
+ >>> print(output.shape)
+ torch.Size([1, 64, 56, 56])
+ """
def __init__(self, in_chans: int, out_chans: int, expand_ratio: float, activation, drop_path: float):
+ """Initialize the MBConv layer with specified input/output channels, expansion ratio, and activation.
+
+ Args:
+ in_chans (int): Number of input channels.
+ out_chans (int): Number of output channels.
+ expand_ratio (float): Channel expansion ratio for the hidden layer.
+ activation (nn.Module): Activation function to use.
+ drop_path (float): Drop path rate for stochastic depth.
+ """
super().__init__()
self.in_chans = in_chans
self.hidden_chans = int(in_chans * expand_ratio)
@@ -84,6 +176,7 @@ self.drop_path = nn.Identity()
def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Implement the forward pass of MBConv, applying convolutions and skip connection."""
shortcut = x
x = self.conv1(x)
x = self.act1(x)
@@ -96,8 +189,39 @@
class PatchMerging(nn.Module):
+ """Merge neighboring patches in the feature map and project to a new dimension.
+
+ This class implements a patch merging operation that combines spatial information and adjusts the feature dimension
+ using a series of convolutional layers with batch normalization. It effectively reduces spatial resolution while
+ potentially increasing channel dimensions.
+
+ Attributes:
+ input_resolution (tuple[int, int]): The input resolution (height, width) of the feature map.
+ dim (int): The input dimension of the feature map.
+ out_dim (int): The output dimension after merging and projection.
+ act (nn.Module): The activation function used between convolutions.
+ conv1 (Conv2d_BN): The first convolutional layer for dimension projection.
+ conv2 (Conv2d_BN): The second convolutional layer for spatial merging.
+ conv3 (Conv2d_BN): The third convolutional layer for final projection.
+
+ Examples:
+ >>> input_resolution = (56, 56)
+ >>> patch_merging = PatchMerging(input_resolution, dim=64, out_dim=128, activation=nn.ReLU)
+ >>> x = torch.randn(4, 64, 56, 56)
+ >>> output = patch_merging(x)
+ >>> print(output.shape)
+ torch.Size([4, 784, 128])
+ """
def __init__(self, input_resolution: tuple[int, int], dim: int, out_dim: int, activation):
+ """Initialize the PatchMerging module for merging and projecting neighboring patches in feature maps.
+
+ Args:
+ input_resolution (tuple[int, int]): The input resolution (height, width) of the feature map.
+ dim (int): The input dimension of the feature map.
+ out_dim (int): The output dimension after merging and projection.
+ activation (nn.Module): The activation function used between convolutions.
+ """
super().__init__()
self.input_resolution = input_resolution
@@ -110,6 +234,7 @@ self.conv3 = Conv2d_BN(out_dim, out_dim, 1, 1, 0)
def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Apply patch merging and dimension projection to the input feature map."""
if x.ndim == 3:
H, W = self.input_resolution
B = len(x)
@@ -126,6 +251,26 @@
class ConvLayer(nn.Module):
+ """Convolutional Layer featuring multiple MobileNetV3-style inverted bottleneck convolutions (MBConv).
+
+ This layer optionally applies downsample operations to the output and supports gradient checkpointing for memory
+ efficiency during training.
+
+ Attributes:
+ dim (int): Dimensionality of the input and output.
+ input_resolution (tuple[int, int]): Resolution of the input image.
+ depth (int): Number of MBConv layers in the block.
+ use_checkpoint (bool): Whether to use gradient checkpointing to save memory.
+ blocks (nn.ModuleList): List of MBConv layers.
+ downsample (nn.Module | None): Function for downsampling the output.
+
+ Examples:
+ >>> input_tensor = torch.randn(1, 64, 56, 56)
+ >>> conv_layer = ConvLayer(64, (56, 56), depth=3, activation=nn.ReLU)
+ >>> output = conv_layer(input_tensor)
+ >>> print(output.shape)
+ torch.Size([1, 64, 56, 56])
+ """
def __init__(
self,
@@ -139,6 +284,22 @@ out_dim: int | None = None,
conv_expand_ratio: float = 4.0,
):
+ """Initialize the ConvLayer with the given dimensions and settings.
+
+ This layer consists of multiple MobileNetV3-style inverted bottleneck convolutions (MBConv) and optionally
+ applies downsampling to the output.
+
+ Args:
+ dim (int): The dimensionality of the input and output.
+ input_resolution (tuple[int, int]): The resolution of the input image.
+ depth (int): The number of MBConv layers in the block.
+ activation (nn.Module): Activation function applied after each convolution.
+ drop_path (float | list[float], optional): Drop path rate. Single float or a list of floats for each MBConv.
+ downsample (nn.Module | None, optional): Function for downsampling the output. None to skip downsampling.
+ use_checkpoint (bool, optional): Whether to use gradient checkpointing to save memory.
+ out_dim (int | None, optional): Output dimensions. None means it will be the same as `dim`.
+ conv_expand_ratio (float, optional): Expansion ratio for the MBConv layers.
+ """
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
@@ -167,12 +328,34 @@ )
def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Process input through convolutional layers, applying MBConv blocks and optional downsampling."""
for blk in self.blocks:
x = torch.utils.checkpoint.checkpoint(blk, x) if self.use_checkpoint else blk(x) # warn: checkpoint is slow import
return x if self.downsample is None else self.downsample(x)
class MLP(nn.Module):
+ """Multi-layer Perceptron (MLP) module for transformer architectures.
+
+ This module applies layer normalization, two fully-connected layers with an activation function in between, and
+ dropout. It is commonly used in transformer-based architectures for processing token embeddings.
+
+ Attributes:
+ norm (nn.LayerNorm): Layer normalization applied to the input.
+ fc1 (nn.Linear): First fully-connected layer.
+ fc2 (nn.Linear): Second fully-connected layer.
+ act (nn.Module): Activation function applied after the first fully-connected layer.
+ drop (nn.Dropout): Dropout layer applied after the activation function.
+
+ Examples:
+ >>> import torch
+ >>> from torch import nn
+ >>> mlp = MLP(in_features=256, hidden_features=512, out_features=256, activation=nn.GELU, drop=0.1)
+ >>> x = torch.randn(32, 100, 256)
+ >>> output = mlp(x)
+ >>> print(output.shape)
+ torch.Size([32, 100, 256])
+ """
def __init__(
self,
@@ -182,6 +365,15 @@ activation=nn.GELU,
drop: float = 0.0,
):
+ """Initialize a multi-layer perceptron with configurable input, hidden, and output dimensions.
+
+ Args:
+ in_features (int): Number of input features.
+ hidden_features (int | None, optional): Number of hidden features.
+ out_features (int | None, optional): Number of output features.
+ activation (nn.Module): Activation function applied after the first fully-connected layer.
+ drop (float, optional): Dropout probability.
+ """
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
@@ -192,6 +384,7 @@ self.drop = nn.Dropout(drop)
def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Apply MLP operations: layer norm, FC layers, activation, and dropout to the input tensor."""
x = self.norm(x)
x = self.fc1(x)
x = self.act(x)
@@ -201,6 +394,34 @@
class Attention(torch.nn.Module):
+ """Multi-head attention module with spatial awareness and trainable attention biases.
+
+ This module implements a multi-head attention mechanism with support for spatial awareness, applying attention
+ biases based on spatial resolution. It includes trainable attention biases for each unique offset between spatial
+ positions in the resolution grid.
+
+ Attributes:
+ num_heads (int): Number of attention heads.
+ scale (float): Scaling factor for attention scores.
+ key_dim (int): Dimensionality of the keys and queries.
+ nh_kd (int): Product of num_heads and key_dim.
+ d (int): Dimensionality of the value vectors.
+ dh (int): Product of d and num_heads.
+ attn_ratio (float): Attention ratio affecting the dimensions of the value vectors.
+ norm (nn.LayerNorm): Layer normalization applied to input.
+ qkv (nn.Linear): Linear layer for computing query, key, and value projections.
+ proj (nn.Linear): Linear layer for final projection.
+ attention_biases (nn.Parameter): Learnable attention biases.
+ attention_bias_idxs (torch.Tensor): Indices for attention biases.
+ ab (torch.Tensor): Cached attention biases for inference, deleted during training.
+
+ Examples:
+ >>> attn = Attention(dim=256, key_dim=64, num_heads=8, resolution=(14, 14))
+ >>> x = torch.randn(1, 196, 256)
+ >>> output = attn(x)
+ >>> print(output.shape)
+ torch.Size([1, 196, 256])
+ """
def __init__(
self,
@@ -210,6 +431,19 @@ attn_ratio: float = 4,
resolution: tuple[int, int] = (14, 14),
):
+ """Initialize the Attention module for multi-head attention with spatial awareness.
+
+ This module implements a multi-head attention mechanism with support for spatial awareness, applying attention
+ biases based on spatial resolution. It includes trainable attention biases for each unique offset between
+ spatial positions in the resolution grid.
+
+ Args:
+ dim (int): The dimensionality of the input and output.
+ key_dim (int): The dimensionality of the keys and queries.
+ num_heads (int, optional): Number of attention heads.
+ attn_ratio (float, optional): Attention ratio, affecting the dimensions of the value vectors.
+ resolution (tuple[int, int], optional): Spatial resolution of the input feature map.
+ """
super().__init__()
assert isinstance(resolution, tuple) and len(resolution) == 2, "'resolution' argument not tuple of length 2"
@@ -241,6 +475,7 @@
@torch.no_grad()
def train(self, mode: bool = True):
+ """Set the module in training mode and handle the 'ab' attribute for cached attention biases."""
super().train(mode)
if mode and hasattr(self, "ab"):
del self.ab
@@ -248,6 +483,7 @@ self.ab = self.attention_biases[:, self.attention_bias_idxs]
def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Apply multi-head attention with spatial awareness and trainable attention biases."""
B, N, _ = x.shape # B, N, C
# Normalization
@@ -271,6 +507,30 @@
class TinyViTBlock(nn.Module):
+ """TinyViT Block that applies self-attention and a local convolution to the input.
+
+ This block is a key component of the TinyViT architecture, combining self-attention mechanisms with local
+ convolutions to process input features efficiently. It supports windowed attention for computational efficiency and
+ includes residual connections.
+
+ Attributes:
+ dim (int): The dimensionality of the input and output.
+ input_resolution (tuple[int, int]): Spatial resolution of the input feature map.
+ num_heads (int): Number of attention heads.
+ window_size (int): Size of the attention window.
+ mlp_ratio (float): Ratio of MLP hidden dimension to embedding dimension.
+ drop_path (nn.Module): Stochastic depth layer, identity function during inference.
+ attn (Attention): Self-attention module.
+ mlp (MLP): Multi-layer perceptron module.
+ local_conv (Conv2d_BN): Depth-wise local convolution layer.
+
+ Examples:
+ >>> input_tensor = torch.randn(1, 196, 192)
+ >>> block = TinyViTBlock(dim=192, input_resolution=(14, 14), num_heads=3)
+ >>> output = block(input_tensor)
+ >>> print(output.shape)
+ torch.Size([1, 196, 192])
+ """
def __init__(
self,
@@ -284,6 +544,22 @@ local_conv_size: int = 3,
activation=nn.GELU,
):
+ """Initialize a TinyViT block with self-attention and local convolution.
+
+ This block is a key component of the TinyViT architecture, combining self-attention mechanisms with local
+ convolutions to process input features efficiently.
+
+ Args:
+ dim (int): Dimensionality of the input and output features.
+ input_resolution (tuple[int, int]): Spatial resolution of the input feature map (height, width).
+ num_heads (int): Number of attention heads.
+ window_size (int, optional): Size of the attention window. Must be greater than 0.
+ mlp_ratio (float, optional): Ratio of MLP hidden dimension to embedding dimension.
+ drop (float, optional): Dropout rate.
+ drop_path (float, optional): Stochastic depth rate.
+ local_conv_size (int, optional): Kernel size of the local convolution.
+ activation (nn.Module): Activation function for MLP.
+ """
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
@@ -310,6 +586,7 @@ self.local_conv = Conv2d_BN(dim, dim, ks=local_conv_size, stride=1, pad=pad, groups=dim)
def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Apply self-attention, local convolution, and MLP operations to the input tensor."""
h, w = self.input_resolution
b, hw, c = x.shape # batch, height*width, channels
assert hw == h * w, "input feature has wrong size"
@@ -351,6 +628,19 @@ return x + self.drop_path(self.mlp(x))
def extra_repr(self) -> str:
+ """Return a string representation of the TinyViTBlock's parameters.
+
+ This method provides a formatted string containing key information about the TinyViTBlock, including its
+ dimension, input resolution, number of attention heads, window size, and MLP ratio.
+
+ Returns:
+ (str): A formatted string containing the block's parameters.
+
+ Examples:
+ >>> block = TinyViTBlock(dim=192, input_resolution=(14, 14), num_heads=3, window_size=7, mlp_ratio=4.0)
+ >>> print(block.extra_repr())
+ dim=192, input_resolution=(14, 14), num_heads=3, window_size=7, mlp_ratio=4.0
+ """
return (
f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, "
f"window_size={self.window_size}, mlp_ratio={self.mlp_ratio}"
@@ -358,6 +648,27 @@
class BasicLayer(nn.Module):
+ """A basic TinyViT layer for one stage in a TinyViT architecture.
+
+ This class represents a single layer in the TinyViT model, consisting of multiple TinyViT blocks and an optional
+ downsampling operation. It processes features at a specific resolution and dimensionality within the overall
+ architecture.
+
+ Attributes:
+ dim (int): The dimensionality of the input and output features.
+ input_resolution (tuple[int, int]): Spatial resolution of the input feature map.
+ depth (int): Number of TinyViT blocks in this layer.
+ use_checkpoint (bool): Whether to use gradient checkpointing to save memory.
+ blocks (nn.ModuleList): List of TinyViT blocks that make up this layer.
+ downsample (nn.Module | None): Downsample layer at the end of the layer, if specified.
+
+ Examples:
+ >>> input_tensor = torch.randn(1, 3136, 192)
+ >>> layer = BasicLayer(dim=192, input_resolution=(56, 56), depth=2, num_heads=3, window_size=7)
+ >>> output = layer(input_tensor)
+ >>> print(output.shape)
+        torch.Size([1, 3136, 192])
+ """
def __init__(
self,
@@ -375,6 +686,28 @@ activation=nn.GELU,
out_dim: int | None = None,
):
+ """Initialize a BasicLayer in the TinyViT architecture.
+
+ This layer consists of multiple TinyViT blocks and an optional downsampling operation. It is designed to process
+ feature maps at a specific resolution and dimensionality within the TinyViT model.
+
+ Args:
+ dim (int): Dimensionality of the input and output features.
+ input_resolution (tuple[int, int]): Spatial resolution of the input feature map (height, width).
+ depth (int): Number of TinyViT blocks in this layer.
+ num_heads (int): Number of attention heads in each TinyViT block.
+ window_size (int): Size of the local window for attention computation.
+ mlp_ratio (float, optional): Ratio of MLP hidden dimension to embedding dimension.
+ drop (float, optional): Dropout rate.
+ drop_path (float | list[float], optional): Stochastic depth rate. Can be a float or a list of floats for
+ each block.
+ downsample (nn.Module | None, optional): Downsampling layer at the end of the layer. None to skip
+ downsampling.
+ use_checkpoint (bool, optional): Whether to use gradient checkpointing to save memory.
+ local_conv_size (int, optional): Kernel size for the local convolution in each TinyViT block.
+ activation (nn.Module): Activation function used in the MLP.
+ out_dim (int | None, optional): Output dimension after downsampling. None means it will be the same as dim.
+ """
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
@@ -407,15 +740,43 @@ )
def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Process input through TinyViT blocks and optional downsampling."""
for blk in self.blocks:
x = torch.utils.checkpoint(blk, x) if self.use_checkpoint else blk(x) # warn: checkpoint is slow import
return x if self.downsample is None else self.downsample(x)
def extra_repr(self) -> str:
+ """Return a string with the layer's parameters for printing."""
return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
class TinyViT(nn.Module):
+ """TinyViT: A compact vision transformer architecture for efficient image classification and feature extraction.
+
+ This class implements the TinyViT model, which combines elements of vision transformers and convolutional neural
+ networks for improved efficiency and performance on vision tasks. It features hierarchical processing with patch
+ embedding, multiple stages of attention and convolution blocks, and a feature refinement neck.
+
+ Attributes:
+ img_size (int): Input image size.
+ num_classes (int): Number of classification classes.
+ depths (tuple[int, int, int, int]): Number of blocks in each stage.
+ num_layers (int): Total number of layers in the network.
+ mlp_ratio (float): Ratio of MLP hidden dimension to embedding dimension.
+ patch_embed (PatchEmbed): Module for patch embedding.
+ patches_resolution (tuple[int, int]): Resolution of embedded patches.
+ layers (nn.ModuleList): List of network layers.
+ norm_head (nn.LayerNorm): Layer normalization for the classifier head.
+ head (nn.Linear): Linear layer for final classification.
+ neck (nn.Sequential): Neck module for feature refinement.
+
+ Examples:
+ >>> model = TinyViT(img_size=224, num_classes=1000)
+ >>> x = torch.randn(1, 3, 224, 224)
+ >>> features = model.forward_features(x)
+ >>> print(features.shape)
+ torch.Size([1, 256, 56, 56])
+ """
def __init__(
self,
@@ -434,6 +795,27 @@ local_conv_size: int = 3,
layer_lr_decay: float = 1.0,
):
+ """Initialize the TinyViT model.
+
+ This constructor sets up the TinyViT architecture, including patch embedding, multiple layers of attention and
+ convolution blocks, and a classification head.
+
+ Args:
+ img_size (int, optional): Size of the input image.
+ in_chans (int, optional): Number of input channels.
+ num_classes (int, optional): Number of classes for classification.
+ embed_dims (tuple[int, int, int, int], optional): Embedding dimensions for each stage.
+ depths (tuple[int, int, int, int], optional): Number of blocks in each stage.
+ num_heads (tuple[int, int, int, int], optional): Number of attention heads in each stage.
+ window_sizes (tuple[int, int, int, int], optional): Window sizes for each stage.
+ mlp_ratio (float, optional): Ratio of MLP hidden dim to embedding dim.
+ drop_rate (float, optional): Dropout rate.
+ drop_path_rate (float, optional): Stochastic depth rate.
+ use_checkpoint (bool, optional): Whether to use checkpointing to save memory.
+ mbconv_expand_ratio (float, optional): Expansion ratio for MBConv layer.
+ local_conv_size (int, optional): Kernel size for local convolutions.
+ layer_lr_decay (float, optional): Layer-wise learning rate decay factor.
+ """
super().__init__()
self.img_size = img_size
self.num_classes = num_classes
@@ -510,6 +892,7 @@ )
def set_layer_lr_decay(self, layer_lr_decay: float):
+ """Set layer-wise learning rate decay for the TinyViT model based on depth."""
decay_rate = layer_lr_decay
# Layers -> blocks (depth)
@@ -517,6 +900,7 @@ lr_scales = [decay_rate ** (depth - i - 1) for i in range(depth)]
def _set_lr_scale(m, scale):
+ """Set the learning rate scale for each layer in the model based on the layer's depth."""
for p in m.parameters():
p.lr_scale = scale
@@ -536,6 +920,7 @@ p.param_name = k
def _check_lr_scale(m):
+ """Check if the learning rate scale attribute is present in module's parameters."""
for p in m.parameters():
assert hasattr(p, "lr_scale"), p.param_name
@@ -543,6 +928,7 @@
@staticmethod
def _init_weights(m):
+ """Initialize weights for linear and normalization layers in the TinyViT model."""
if isinstance(m, nn.Linear):
# NOTE: This initialization is needed only for training.
# trunc_normal_(m.weight, std=.02)
@@ -554,9 +940,11 @@
@torch.jit.ignore
def no_weight_decay_keywords(self):
+ """Return a set of keywords for parameters that should not use weight decay."""
return {"attention_biases"}
def forward_features(self, x: torch.Tensor) -> torch.Tensor:
+ """Process input through feature extraction layers, returning spatial features."""
x = self.patch_embed(x) # x input is (N, C, H, W)
x = self.layers[0](x)
@@ -571,9 +959,11 @@ return self.neck(x)
def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Perform the forward pass through the TinyViT model, extracting features from the input image."""
return self.forward_features(x)
def set_imgsz(self, imgsz: list[int] = [1024, 1024]):
+ """Set image size to make model compatible with different image sizes."""
imgsz = [s // 4 for s in imgsz]
self.patches_resolution = imgsz
for i, layer in enumerate(self.layers):
@@ -586,4 +976,4 @@ layer.downsample.input_resolution = input_resolution
if isinstance(layer, BasicLayer):
for b in layer.blocks:
- b.input_resolution = input_resolution+ b.input_resolution = input_resolution
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/sam/modules/tiny_encoder.py |
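A quick way to sanity-check a patched module like the one above is to re-scan it for functions or classes that still lack docstrings. Below is a minimal sketch using Python's ast module; the file path is a placeholder for wherever the patched tiny_encoder.py lives.
import ast
from pathlib import Path


def missing_docstrings(path: str) -> list[str]:
    """Return names of functions/classes in the file that still lack a docstring."""
    tree = ast.parse(Path(path).read_text(encoding="utf-8"))
    return [
        f"{node.name} (line {node.lineno})"
        for node in ast.walk(tree)
        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef))
        and ast.get_docstring(node) is None
    ]


print(missing_docstrings("tiny_encoder.py"))  # placeholder path to the patched file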
Add missing documentation to my Python functions | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
import torch.nn as nn
from ultralytics.nn.modules.transformer import MLP
from ultralytics.utils.patches import torch_load
from .modules.blocks import PositionEmbeddingSine, RoPEAttention
from .modules.encoders import MemoryEncoder
from .modules.memory_attention import MemoryAttention, MemoryAttentionLayer
from .modules.sam import SAM3Model
from .sam3.decoder import TransformerDecoder, TransformerDecoderLayer
from .sam3.encoder import TransformerEncoderFusion, TransformerEncoderLayer
from .sam3.geometry_encoders import SequenceGeometryEncoder
from .sam3.maskformer_segmentation import PixelDecoder, UniversalSegmentationHead
from .sam3.model_misc import DotProductScoring, TransformerWrapper
from .sam3.necks import Sam3DualViTDetNeck
from .sam3.sam3_image import SAM3SemanticModel
from .sam3.text_encoder_ve import VETextEncoder
from .sam3.vitdet import ViT
from .sam3.vl_combiner import SAM3VLBackbone
def _create_vision_backbone(compile_mode=None, enable_inst_interactivity=True) -> Sam3DualViTDetNeck:
# Position encoding
position_encoding = PositionEmbeddingSine(
num_pos_feats=256,
normalize=True,
scale=None,
temperature=10000,
)
# ViT backbone
vit_backbone = ViT(
img_size=1008,
pretrain_img_size=336,
patch_size=14,
embed_dim=1024,
depth=32,
num_heads=16,
mlp_ratio=4.625,
norm_layer="LayerNorm",
drop_path_rate=0.1,
qkv_bias=True,
use_abs_pos=True,
tile_abs_pos=True,
global_att_blocks=(7, 15, 23, 31),
rel_pos_blocks=(),
use_rope=True,
use_interp_rope=True,
window_size=24,
pretrain_use_cls_token=True,
retain_cls_token=False,
ln_pre=True,
ln_post=False,
return_interm_layers=False,
bias_patch_embed=False,
compile_mode=compile_mode,
)
return Sam3DualViTDetNeck(
position_encoding=position_encoding,
d_model=256,
scale_factors=[4.0, 2.0, 1.0, 0.5],
trunk=vit_backbone,
add_sam2_neck=enable_inst_interactivity,
)
def _create_sam3_transformer() -> TransformerWrapper:
encoder: TransformerEncoderFusion = TransformerEncoderFusion(
layer=TransformerEncoderLayer(
d_model=256,
dim_feedforward=2048,
dropout=0.1,
pos_enc_at_attn=True,
pos_enc_at_cross_attn_keys=False,
pos_enc_at_cross_attn_queries=False,
pre_norm=True,
self_attention=nn.MultiheadAttention(
num_heads=8,
dropout=0.1,
embed_dim=256,
batch_first=True,
),
cross_attention=nn.MultiheadAttention(
num_heads=8,
dropout=0.1,
embed_dim=256,
batch_first=True,
),
),
num_layers=6,
d_model=256,
num_feature_levels=1,
frozen=False,
use_act_checkpoint=True,
add_pooled_text_to_img_feat=False,
pool_text_with_mask=True,
)
decoder: TransformerDecoder = TransformerDecoder(
layer=TransformerDecoderLayer(
d_model=256,
dim_feedforward=2048,
dropout=0.1,
cross_attention=nn.MultiheadAttention(
num_heads=8,
dropout=0.1,
embed_dim=256,
),
n_heads=8,
use_text_cross_attention=True,
),
num_layers=6,
num_queries=200,
return_intermediate=True,
box_refine=True,
num_o2m_queries=0,
dac=True,
boxRPB="log",
d_model=256,
frozen=False,
interaction_layer=None,
dac_use_selfatt_ln=True,
use_act_checkpoint=True,
presence_token=True,
)
return TransformerWrapper(encoder=encoder, decoder=decoder, d_model=256)
def build_sam3_image_model(checkpoint_path: str, enable_segmentation: bool = True, compile: bool = False):
try:
import clip
except ImportError:
from ultralytics.utils.checks import check_requirements
check_requirements("git+https://github.com/ultralytics/CLIP.git")
import clip
# Create visual components
compile_mode = "default" if compile else None
vision_encoder = _create_vision_backbone(compile_mode=compile_mode, enable_inst_interactivity=True)
# Create text components
text_encoder = VETextEncoder(
tokenizer=clip.simple_tokenizer.SimpleTokenizer(),
d_model=256,
width=1024,
heads=16,
layers=24,
)
# Create visual-language backbone
backbone = SAM3VLBackbone(visual=vision_encoder, text=text_encoder, scalp=1)
# Create transformer components
transformer = _create_sam3_transformer()
# Create dot product scoring
dot_prod_scoring = DotProductScoring(
d_model=256,
d_proj=256,
prompt_mlp=MLP(
input_dim=256,
hidden_dim=2048,
output_dim=256,
num_layers=2,
residual=True,
out_norm=nn.LayerNorm(256),
),
)
# Create segmentation head if enabled
segmentation_head = (
UniversalSegmentationHead(
hidden_dim=256,
upsampling_stages=3,
aux_masks=False,
presence_head=False,
dot_product_scorer=None,
act_ckpt=True,
cross_attend_prompt=nn.MultiheadAttention(
num_heads=8,
dropout=0,
embed_dim=256,
),
pixel_decoder=PixelDecoder(
num_upsampling_stages=3,
interpolation_mode="nearest",
hidden_dim=256,
compile_mode=compile_mode,
),
)
if enable_segmentation
else None
)
# Create geometry encoder
input_geometry_encoder = SequenceGeometryEncoder(
pos_enc=PositionEmbeddingSine(
num_pos_feats=256,
normalize=True,
scale=None,
temperature=10000,
),
encode_boxes_as_points=False,
boxes_direct_project=True,
boxes_pool=True,
boxes_pos_enc=True,
d_model=256,
num_layers=3,
layer=TransformerEncoderLayer(
d_model=256,
dim_feedforward=2048,
dropout=0.1,
pos_enc_at_attn=False,
pre_norm=True,
pos_enc_at_cross_attn_queries=False,
pos_enc_at_cross_attn_keys=True,
),
use_act_ckpt=True,
add_cls=True,
add_post_encode_proj=True,
)
# Create the SAM3SemanticModel model
model = SAM3SemanticModel(
backbone=backbone,
transformer=transformer,
input_geometry_encoder=input_geometry_encoder,
segmentation_head=segmentation_head,
num_feature_levels=1,
o2m_mask_predict=True,
dot_prod_scoring=dot_prod_scoring,
use_instance_query=False,
multimask_output=True,
)
# Load checkpoint
model = _load_checkpoint(model, checkpoint_path)
model.eval()
return model
def build_interactive_sam3(checkpoint_path: str, compile=None, with_backbone=True) -> SAM3Model:
# Create model components
memory_encoder = MemoryEncoder(out_dim=64, interpol_size=[1152, 1152])
memory_attention = MemoryAttention(
batch_first=True,
d_model=256,
pos_enc_at_input=True,
layer=MemoryAttentionLayer(
dim_feedforward=2048,
dropout=0.1,
pos_enc_at_attn=False,
pos_enc_at_cross_attn_keys=True,
pos_enc_at_cross_attn_queries=False,
self_attn=RoPEAttention(
embedding_dim=256,
num_heads=1,
downsample_rate=1,
rope_theta=10000.0,
feat_sizes=[72, 72],
),
d_model=256,
cross_attn=RoPEAttention(
embedding_dim=256,
num_heads=1,
downsample_rate=1,
kv_in_dim=64,
rope_theta=10000.0,
feat_sizes=[72, 72],
rope_k_repeat=True,
),
),
num_layers=4,
)
backbone = (
SAM3VLBackbone(scalp=1, visual=_create_vision_backbone(compile_mode=compile), text=None)
if with_backbone
else None
)
model = SAM3Model(
image_size=1008,
image_encoder=backbone,
memory_attention=memory_attention,
memory_encoder=memory_encoder,
backbone_stride=14,
num_maskmem=7,
sigmoid_scale_for_mem_enc=20.0,
sigmoid_bias_for_mem_enc=-10.0,
use_mask_input_as_output_without_sam=True,
directly_add_no_mem_embed=True,
use_high_res_features_in_sam=True,
multimask_output_in_sam=True,
iou_prediction_use_sigmoid=True,
use_obj_ptrs_in_encoder=True,
add_tpos_enc_to_obj_ptrs=True,
only_obj_ptrs_in_the_past_for_eval=True,
pred_obj_scores=True,
pred_obj_scores_mlp=True,
fixed_no_obj_ptr=True,
multimask_output_for_tracking=True,
use_multimask_token_for_obj_ptr=True,
multimask_min_pt_num=0,
multimask_max_pt_num=1,
use_mlp_for_obj_ptr_proj=True,
compile_image_encoder=False,
no_obj_embed_spatial=True,
proj_tpos_enc_in_obj_ptrs=True,
use_signed_tpos_enc_to_obj_ptrs=True,
sam_mask_decoder_extra_args=dict(
dynamic_multimask_via_stability=True,
dynamic_multimask_stability_delta=0.05,
dynamic_multimask_stability_thresh=0.98,
),
)
# Load checkpoint if provided
model = _load_checkpoint(model, checkpoint_path, interactive=True)
# Setup device and mode
model.eval()
return model
def _load_checkpoint(model, checkpoint, interactive=False):
with open(checkpoint, "rb") as f:
ckpt = torch_load(f)
if "model" in ckpt and isinstance(ckpt["model"], dict):
ckpt = ckpt["model"]
sam3_image_ckpt = {k.replace("detector.", ""): v for k, v in ckpt.items() if "detector" in k}
if interactive:
sam3_image_ckpt.update(
{
k.replace("backbone.vision_backbone", "image_encoder.vision_backbone"): v
for k, v in sam3_image_ckpt.items()
if "backbone.vision_backbone" in k
}
)
sam3_image_ckpt.update(
{
k.replace("tracker.transformer.encoder", "memory_attention"): v
for k, v in ckpt.items()
if "tracker.transformer" in k
}
)
sam3_image_ckpt.update(
{
k.replace("tracker.maskmem_backbone", "memory_encoder"): v
for k, v in ckpt.items()
if "tracker.maskmem_backbone" in k
}
)
sam3_image_ckpt.update({k.replace("tracker.", ""): v for k, v in ckpt.items() if "tracker." in k})
model.load_state_dict(sam3_image_ckpt, strict=False)
return model | --- +++ @@ -24,6 +24,7 @@
def _create_vision_backbone(compile_mode=None, enable_inst_interactivity=True) -> Sam3DualViTDetNeck:
+ """Create SAM3 visual backbone with ViT and neck."""
# Position encoding
position_encoding = PositionEmbeddingSine(
num_pos_feats=256,
@@ -69,6 +70,7 @@
def _create_sam3_transformer() -> TransformerWrapper:
+ """Create SAM3 detector encoder and decoder."""
encoder: TransformerEncoderFusion = TransformerEncoderFusion(
layer=TransformerEncoderLayer(
d_model=256,
@@ -131,6 +133,16 @@
def build_sam3_image_model(checkpoint_path: str, enable_segmentation: bool = True, compile: bool = False):
+ """Build SAM3 image model.
+
+ Args:
+        checkpoint_path (str): Path to the SAM3 checkpoint to load.
+        enable_segmentation (bool): Whether to enable the segmentation head.
+        compile (bool): Whether to enable torch.compile for model components.
+
+    Returns:
+        (SAM3SemanticModel): The constructed SAM3 image model in eval mode.
+ """
try:
import clip
except ImportError:
@@ -244,6 +256,16 @@
def build_interactive_sam3(checkpoint_path: str, compile=None, with_backbone=True) -> SAM3Model:
+ """Build the SAM3 Tracker module for video tracking.
+
+ Args:
+ checkpoint_path (str): Path to model checkpoint.
+ compile (str | None): Compilation mode for the vision backbone.
+ with_backbone (bool): Whether to include the vision backbone in the model.
+
+ Returns:
+ (SAM3Model): A configured and initialized SAM3 model.
+ """
# Create model components
memory_encoder = MemoryEncoder(out_dim=64, interpol_size=[1152, 1152])
memory_attention = MemoryAttention(
@@ -327,6 +349,7 @@
def _load_checkpoint(model, checkpoint, interactive=False):
+ """Load SAM3 model checkpoint from file."""
with open(checkpoint, "rb") as f:
ckpt = torch_load(f)
if "model" in ckpt and isinstance(ckpt["model"], dict):
@@ -356,4 +379,4 @@ )
sam3_image_ckpt.update({k.replace("tracker.", ""): v for k, v in ckpt.items() if "tracker." in k})
model.load_state_dict(sam3_image_ckpt, strict=False)
- return model+ return model
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/sam/build_sam3.py |
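For orientation, a minimal sketch of calling the two builders documented above; the import path is inferred from the file location and the checkpoint filename is a placeholder, so this assumes a real SAM3 checkpoint is available locally.
from ultralytics.models.sam.build_sam3 import build_interactive_sam3, build_sam3_image_model

ckpt = "sam3.pt"  # placeholder checkpoint path, loaded via _load_checkpoint()
image_model = build_sam3_image_model(ckpt, enable_segmentation=True, compile=False)  # promptable image model
video_model = build_interactive_sam3(ckpt, compile=None, with_backbone=True)  # tracker with memory attention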
Add inline docstrings for readability | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import torch
from PIL import Image
from ultralytics.models.yolo.segment import SegmentationPredictor
from ultralytics.utils import DEFAULT_CFG
from ultralytics.utils.metrics import box_iou
from ultralytics.utils.ops import scale_masks
from ultralytics.utils.torch_utils import TORCH_1_10
from .utils import adjust_bboxes_to_image_border
class FastSAMPredictor(SegmentationPredictor):
def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks: dict | None = None):
super().__init__(cfg, overrides, _callbacks)
self.prompts = {}
def postprocess(self, preds, img, orig_imgs):
bboxes = self.prompts.pop("bboxes", None)
points = self.prompts.pop("points", None)
labels = self.prompts.pop("labels", None)
texts = self.prompts.pop("texts", None)
results = super().postprocess(preds, img, orig_imgs)
for result in results:
full_box = torch.tensor(
[0, 0, result.orig_shape[1], result.orig_shape[0]], device=result.boxes.data.device, dtype=torch.float32
)
boxes = adjust_bboxes_to_image_border(result.boxes.xyxy, result.orig_shape)
idx = torch.nonzero(box_iou(full_box[None], boxes) > 0.9).flatten()
if idx.numel() != 0:
result.boxes.xyxy[idx] = full_box
return self.prompt(results, bboxes=bboxes, points=points, labels=labels, texts=texts)
def prompt(self, results, bboxes=None, points=None, labels=None, texts=None):
if bboxes is None and points is None and texts is None:
return results
prompt_results = []
if not isinstance(results, list):
results = [results]
for result in results:
if len(result) == 0:
prompt_results.append(result)
continue
masks = result.masks.data
if masks.shape[1:] != result.orig_shape:
masks = (scale_masks(masks[None].float(), result.orig_shape)[0] > 0.5).byte()
# bboxes prompt
idx = torch.zeros(len(result), dtype=torch.bool, device=self.device)
if bboxes is not None:
bboxes = torch.as_tensor(bboxes, dtype=torch.int32, device=self.device)
bboxes = bboxes[None] if bboxes.ndim == 1 else bboxes
bbox_areas = (bboxes[:, 3] - bboxes[:, 1]) * (bboxes[:, 2] - bboxes[:, 0])
mask_areas = torch.stack([masks[:, b[1] : b[3], b[0] : b[2]].sum(dim=(1, 2)) for b in bboxes])
full_mask_areas = torch.sum(masks, dim=(1, 2))
union = bbox_areas[:, None] + full_mask_areas - mask_areas
idx[torch.argmax(mask_areas / union, dim=1)] = True
if points is not None:
points = torch.as_tensor(points, dtype=torch.int32, device=self.device)
points = points[None] if points.ndim == 1 else points
if labels is None:
labels = torch.ones(points.shape[0])
labels = torch.as_tensor(labels, dtype=torch.int32, device=self.device)
assert len(labels) == len(points), (
f"Expected `labels` to have the same length as `points`, but got {len(labels)} and {len(points)}."
)
point_idx = (
torch.ones(len(result), dtype=torch.bool, device=self.device)
if labels.sum() == 0 # all negative points
else torch.zeros(len(result), dtype=torch.bool, device=self.device)
)
for point, label in zip(points, labels):
point_idx[torch.nonzero(masks[:, point[1], point[0]], as_tuple=True)[0]] = bool(label)
idx |= point_idx
if texts is not None:
if isinstance(texts, str):
texts = [texts]
crop_ims, filter_idx = [], []
for i, b in enumerate(result.boxes.xyxy.tolist()):
x1, y1, x2, y2 = (int(x) for x in b)
if (masks[i].sum() if TORCH_1_10 else masks[i].sum(0).sum()) <= 100: # torch 1.9 bug workaround
filter_idx.append(i)
continue
crop = result.orig_img[y1:y2, x1:x2] * masks[i, y1:y2, x1:x2, None].cpu().numpy()
crop_ims.append(Image.fromarray(crop[:, :, ::-1]))
similarity = self._clip_inference(crop_ims, texts)
text_idx = torch.argmax(similarity, dim=-1) # (M, )
if len(filter_idx):
# Remap text_idx to its original index before filter
ori_idxs = [i for i in range(len(result)) if i not in filter_idx]
text_idx = torch.tensor(ori_idxs[int(text_idx)], device=self.device)
idx[text_idx] = True
prompt_results.append(result[idx])
return prompt_results
def _clip_inference(self, images, texts):
from ultralytics.nn.text_model import CLIP
if not hasattr(self, "clip"):
self.clip = CLIP("ViT-B/32", device=self.device)
images = torch.stack([self.clip.image_preprocess(image).to(self.device) for image in images])
image_features = self.clip.encode_image(images)
text_features = self.clip.encode_text(self.clip.tokenize(texts))
return text_features @ image_features.T # (M, N)
def set_prompts(self, prompts):
self.prompts = prompts | --- +++ @@ -15,12 +15,49 @@
class FastSAMPredictor(SegmentationPredictor):
+ """FastSAMPredictor is specialized for fast SAM (Segment Anything Model) segmentation prediction tasks.
+
+ This class extends the SegmentationPredictor, customizing the prediction pipeline specifically for fast SAM. It
+ adjusts post-processing steps to incorporate mask prediction and non-maximum suppression while optimizing for
+ single-class segmentation.
+
+ Attributes:
+ prompts (dict): Dictionary containing prompt information for segmentation (bboxes, points, labels, texts).
+ device (torch.device): Device on which model and tensors are processed.
+ clip (Any, optional): CLIP model used for text-based prompting, loaded on demand.
+
+ Methods:
+ postprocess: Apply postprocessing to FastSAM predictions and handle prompts.
+ prompt: Perform image segmentation inference based on various prompt types.
+ set_prompts: Set prompts to be used during inference.
+ """
def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks: dict | None = None):
+ """Initialize the FastSAMPredictor with configuration and callbacks.
+
+ This initializes a predictor specialized for Fast SAM (Segment Anything Model) segmentation tasks. The predictor
+ extends SegmentationPredictor with custom post-processing for mask prediction and non-maximum suppression
+ optimized for single-class segmentation.
+
+ Args:
+ cfg (dict): Configuration for the predictor.
+ overrides (dict, optional): Configuration overrides.
+ _callbacks (dict, optional): Dictionary of callback functions.
+ """
super().__init__(cfg, overrides, _callbacks)
self.prompts = {}
def postprocess(self, preds, img, orig_imgs):
+ """Apply postprocessing to FastSAM predictions and handle prompts.
+
+ Args:
+ preds (list[torch.Tensor]): Raw predictions from the model.
+ img (torch.Tensor): Input image tensor that was fed to the model.
+ orig_imgs (list[np.ndarray]): Original images before preprocessing.
+
+ Returns:
+ (list[Results]): Processed results with prompts applied.
+ """
bboxes = self.prompts.pop("bboxes", None)
points = self.prompts.pop("points", None)
labels = self.prompts.pop("labels", None)
@@ -38,6 +75,18 @@ return self.prompt(results, bboxes=bboxes, points=points, labels=labels, texts=texts)
def prompt(self, results, bboxes=None, points=None, labels=None, texts=None):
+ """Perform image segmentation inference based on cues like bounding boxes, points, and text prompts.
+
+ Args:
+ results (Results | list[Results]): Original inference results from FastSAM models without any prompts.
+ bboxes (np.ndarray | list, optional): Bounding boxes with shape (N, 4), in XYXY format.
+ points (np.ndarray | list, optional): Points indicating object locations with shape (N, 2), in pixels.
+ labels (np.ndarray | list, optional): Labels for point prompts, shape (N, ). 1 = foreground, 0 = background.
+ texts (str | list[str], optional): Textual prompts, a list containing string objects.
+
+ Returns:
+ (list[Results]): Output results filtered and determined by the provided prompts.
+ """
if bboxes is None and points is None and texts is None:
return results
prompt_results = []
@@ -102,6 +151,15 @@ return prompt_results
def _clip_inference(self, images, texts):
+ """Perform CLIP inference to calculate similarity between images and text prompts.
+
+ Args:
+ images (list[PIL.Image]): List of source images, each should be PIL.Image with RGB channel order.
+ texts (list[str]): List of prompt texts, each should be a string object.
+
+ Returns:
+ (torch.Tensor): Similarity matrix between given images and texts with shape (M, N).
+ """
from ultralytics.nn.text_model import CLIP
if not hasattr(self, "clip"):
@@ -112,4 +170,5 @@ return text_features @ image_features.T # (M, N)
def set_prompts(self, prompts):
- self.prompts = prompts+ """Set prompts to be used during inference."""
+ self.prompts = prompts
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/fastsam/predict.py |
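A minimal usage sketch for the prompt handling documented above, assuming the FastSAM model wrapper forwards the bboxes/points/labels/texts keyword arguments to this predictor; the weights file and image path are placeholders.
from ultralytics import FastSAM

model = FastSAM("FastSAM-s.pt")  # placeholder weights

# Prompts are stored via set_prompts() and applied in postprocess() -> prompt()
results = model("bus.jpg", bboxes=[100, 50, 400, 300])       # box prompt in xyxy pixels
results = model("bus.jpg", points=[[200, 200]], labels=[1])  # positive point prompt
results = model("bus.jpg", texts="a photo of a bus")         # text prompt scored with CLIP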
Create docstrings for reusable components | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from pathlib import Path
from ultralytics.data import YOLOConcatDataset, build_grounding, build_yolo_dataset
from ultralytics.data.utils import check_det_dataset
from ultralytics.models.yolo.world import WorldTrainer
from ultralytics.utils import DATASETS_DIR, DEFAULT_CFG, LOGGER
from ultralytics.utils.checks import check_file
from ultralytics.utils.torch_utils import unwrap_model
class WorldTrainerFromScratch(WorldTrainer):
def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks: dict | None = None):
if overrides is None:
overrides = {}
super().__init__(cfg, overrides, _callbacks)
def build_dataset(self, img_path, mode="train", batch=None):
gs = max(int(unwrap_model(self.model).stride.max() if self.model else 0), 32)
if mode != "train":
return build_yolo_dataset(self.args, img_path, batch, self.data, mode=mode, rect=False, stride=gs)
datasets = [
build_yolo_dataset(self.args, im_path, batch, self.training_data[im_path], stride=gs, multi_modal=True)
if isinstance(im_path, str)
else build_grounding(
# assign `nc` from validation set to max number of text samples for training consistency
self.args,
im_path["img_path"],
im_path["json_file"],
batch,
stride=gs,
max_samples=self.data["nc"],
)
for im_path in img_path
]
self.set_text_embeddings(datasets, batch) # cache text embeddings to accelerate training
return YOLOConcatDataset(datasets) if len(datasets) > 1 else datasets[0]
@staticmethod
def check_data_config(data: dict | str | Path) -> dict:
# If string, load from YAML file
if not isinstance(data, dict):
from ultralytics.utils import YAML
return YAML.load(check_file(data))
return data
def get_dataset(self):
final_data = {}
self.args.data = data_yaml = self.check_data_config(self.args.data)
assert data_yaml.get("train", False), "train dataset not found" # object365.yaml
assert data_yaml.get("val", False), "validation dataset not found" # lvis.yaml
data = {k: [check_det_dataset(d) for d in v.get("yolo_data", [])] for k, v in data_yaml.items()}
assert len(data["val"]) == 1, f"Only support validating on 1 dataset for now, but got {len(data['val'])}."
val_split = "minival" if "lvis" in data["val"][0]["val"] else "val"
for d in data["val"]:
if d.get("minival") is None: # for lvis dataset
continue
d["minival"] = str(d["path"] / d["minival"])
for s in {"train", "val"}:
final_data[s] = [d["train" if s == "train" else val_split] for d in data[s]]
# save grounding data if there's one
grounding_data = data_yaml[s].get("grounding_data")
if grounding_data is None:
continue
grounding_data = grounding_data if isinstance(grounding_data, list) else [grounding_data]
for g in grounding_data:
assert isinstance(g, dict), f"Grounding data should be provided in dict format, but got {type(g)}"
for k in {"img_path", "json_file"}:
path = Path(g[k])
if not path.exists() and not path.is_absolute():
g[k] = str((DATASETS_DIR / g[k]).resolve()) # path relative to DATASETS_DIR
final_data[s] += grounding_data
# assign the first val dataset as currently only one validation set is supported
data["val"] = data["val"][0]
final_data["val"] = final_data["val"][0]
# NOTE: to make training work properly, set `nc` and `names`
final_data["nc"] = data["val"]["nc"]
final_data["names"] = data["val"]["names"]
# NOTE: add path with lvis path
final_data["path"] = data["val"]["path"]
final_data["channels"] = data["val"]["channels"]
self.data = final_data
if self.args.single_cls: # consistent with base trainer
LOGGER.info("Overriding class names with single class.")
self.data["names"] = {0: "object"}
self.data["nc"] = 1
self.training_data = {}
for d in data["train"]:
if self.args.single_cls:
d["names"] = {0: "object"}
d["nc"] = 1
self.training_data[d["train"]] = d
return final_data
def plot_training_labels(self):
pass
def final_eval(self):
val = self.args.data["val"]["yolo_data"][0]
self.validator.args.data = val
self.validator.args.split = "minival" if isinstance(val, str) and "lvis" in val else "val"
return super().final_eval() | --- +++ @@ -13,13 +13,76 @@
class WorldTrainerFromScratch(WorldTrainer):
+ """A class extending the WorldTrainer for training a world model from scratch on open-set datasets.
+
+ This trainer specializes in handling mixed datasets including both object detection and grounding datasets,
+ supporting training YOLO-World models with combined vision-language capabilities.
+
+ Attributes:
+ cfg (dict): Configuration dictionary with default parameters for model training.
+ overrides (dict): Dictionary of parameter overrides to customize the configuration.
+ _callbacks (dict): Dictionary of callback functions to be executed during different stages of training.
+ data (dict): Final processed data configuration containing train/val paths and metadata.
+ training_data (dict): Dictionary mapping training dataset paths to their configurations.
+
+ Methods:
+ build_dataset: Build YOLO Dataset for training or validation with mixed dataset support.
+ get_dataset: Get train and validation paths from data dictionary.
+ plot_training_labels: Skip label plotting for YOLO-World training.
+ final_eval: Perform final evaluation and validation for the YOLO-World model.
+
+ Examples:
+ >>> from ultralytics.models.yolo.world.train_world import WorldTrainerFromScratch
+ >>> from ultralytics import YOLOWorld
+ >>> data = dict(
+ ... train=dict(
+ ... yolo_data=["Objects365.yaml"],
+ ... grounding_data=[
+ ... dict(
+ ... img_path="flickr30k/images",
+ ... json_file="flickr30k/final_flickr_separateGT_train.json",
+ ... ),
+ ... dict(
+ ... img_path="GQA/images",
+ ... json_file="GQA/final_mixed_train_no_coco.json",
+ ... ),
+ ... ],
+ ... ),
+ ... val=dict(yolo_data=["lvis.yaml"]),
+ ... )
+ >>> model = YOLOWorld("yolov8s-worldv2.yaml")
+ >>> model.train(data=data, trainer=WorldTrainerFromScratch)
+ """
def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks: dict | None = None):
+ """Initialize a WorldTrainerFromScratch object.
+
+ This initializes a trainer for YOLO-World models from scratch, supporting mixed datasets including both object
+ detection and grounding datasets for vision-language capabilities.
+
+ Args:
+ cfg (dict): Configuration dictionary with default parameters for model training.
+ overrides (dict, optional): Dictionary of parameter overrides to customize the configuration.
+ _callbacks (dict, optional): Dictionary of callback functions to run during different stages of training.
+ """
if overrides is None:
overrides = {}
super().__init__(cfg, overrides, _callbacks)
def build_dataset(self, img_path, mode="train", batch=None):
+ """Build YOLO Dataset for training or validation.
+
+ This method constructs appropriate datasets based on the mode and input paths, handling both standard YOLO
+ datasets and grounding datasets with different formats.
+
+ Args:
+ img_path (list[str] | str): Path to the folder containing images or list of paths.
+ mode (str): 'train' mode or 'val' mode, allowing customized augmentations for each mode.
+ batch (int, optional): Size of batches, used for rectangular training/validation.
+
+ Returns:
+ (YOLOConcatDataset | Dataset): The constructed dataset for training or validation.
+ """
gs = max(int(unwrap_model(self.model).stride.max() if self.model else 0), 32)
if mode != "train":
return build_yolo_dataset(self.args, img_path, batch, self.data, mode=mode, rect=False, stride=gs)
@@ -42,6 +105,14 @@
@staticmethod
def check_data_config(data: dict | str | Path) -> dict:
+ """Check and load the data configuration from a YAML file or dictionary.
+
+ Args:
+ data (dict | str | Path): Data configuration as a dictionary or path to a YAML file.
+
+ Returns:
+ (dict): Data configuration dictionary loaded from YAML file or passed directly.
+ """
# If string, load from YAML file
if not isinstance(data, dict):
from ultralytics.utils import YAML
@@ -50,6 +121,17 @@ return data
def get_dataset(self):
+ """Get train and validation paths from data dictionary.
+
+ Processes the data configuration to extract paths for training and validation datasets, handling both YOLO
+ detection datasets and grounding datasets.
+
+ Returns:
+ (dict): Final processed data configuration containing train/val paths and metadata.
+
+ Raises:
+ AssertionError: If train or validation datasets are not found, or if validation has multiple datasets.
+ """
final_data = {}
self.args.data = data_yaml = self.check_data_config(self.args.data)
assert data_yaml.get("train", False), "train dataset not found" # object365.yaml
@@ -98,10 +180,18 @@ return final_data
def plot_training_labels(self):
+ """Skip label plotting for YOLO-World training."""
pass
def final_eval(self):
+ """Perform final evaluation and validation for the YOLO-World model.
+
+ Configures the validator with appropriate dataset and split information before running evaluation.
+
+ Returns:
+ (dict): Dictionary containing evaluation metrics and results.
+ """
val = self.args.data["val"]["yolo_data"][0]
self.validator.args.data = val
self.validator.args.split = "minival" if isinstance(val, str) and "lvis" in val else "val"
- return super().final_eval()+ return super().final_eval()
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/yolo/world/train_world.py |
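To make the grounding-path handling in get_dataset() above concrete, here is a small sketch of the same resolution rule applied to one grounding entry; the paths are illustrative and only exist if the corresponding datasets have been downloaded.
from pathlib import Path

from ultralytics.utils import DATASETS_DIR

g = {"img_path": "flickr30k/images", "json_file": "flickr30k/final_flickr_separateGT_train.json"}
for k in ("img_path", "json_file"):
    p = Path(g[k])
    if not p.exists() and not p.is_absolute():
        g[k] = str((DATASETS_DIR / g[k]).resolve())  # relative entries resolve against DATASETS_DIR
print(g)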
Fully document this Python code with docstrings | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from copy import copy
from typing import Any
import torch
from ultralytics.data import ClassificationDataset, build_dataloader
from ultralytics.engine.trainer import BaseTrainer
from ultralytics.models import yolo
from ultralytics.nn.tasks import ClassificationModel
from ultralytics.utils import DEFAULT_CFG, LOGGER, RANK
from ultralytics.utils.plotting import plot_images
from ultralytics.utils.torch_utils import is_parallel, torch_distributed_zero_first
class ClassificationTrainer(BaseTrainer):
def __init__(self, cfg=DEFAULT_CFG, overrides: dict[str, Any] | None = None, _callbacks: dict | None = None):
if overrides is None:
overrides = {}
overrides["task"] = "classify"
if overrides.get("imgsz") is None:
overrides["imgsz"] = 224
super().__init__(cfg, overrides, _callbacks)
def set_model_attributes(self):
self.model.names = self.data["names"]
def get_model(self, cfg=None, weights=None, verbose: bool = True):
model = ClassificationModel(cfg, nc=self.data["nc"], ch=self.data["channels"], verbose=verbose and RANK == -1)
if weights:
model.load(weights)
for m in model.modules():
if not self.args.pretrained and hasattr(m, "reset_parameters"):
m.reset_parameters()
if isinstance(m, torch.nn.Dropout) and self.args.dropout:
m.p = self.args.dropout # set dropout
for p in model.parameters():
p.requires_grad = True # for training
return model
def setup_model(self):
import torchvision # scope for faster 'import ultralytics'
if str(self.model) in torchvision.models.__dict__:
self.model = torchvision.models.__dict__[self.model](
weights="IMAGENET1K_V1" if self.args.pretrained else None
)
ckpt = None
else:
ckpt = super().setup_model()
ClassificationModel.reshape_outputs(self.model, self.data["nc"])
return ckpt
def build_dataset(self, img_path: str, mode: str = "train", batch=None):
return ClassificationDataset(root=img_path, args=self.args, augment=mode == "train", prefix=mode)
def get_dataloader(self, dataset_path: str, batch_size: int = 16, rank: int = 0, mode: str = "train"):
with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP
dataset = self.build_dataset(dataset_path, mode)
# Filter out samples with class indices >= nc (prevents CUDA assertion errors)
nc = self.data.get("nc", 0)
dataset_nc = len(dataset.base.classes)
if nc and dataset_nc > nc:
extra_classes = dataset.base.classes[nc:]
original_count = len(dataset.samples)
dataset.samples = [s for s in dataset.samples if s[1] < nc]
skipped = original_count - len(dataset.samples)
LOGGER.warning(
f"{mode} split has {dataset_nc} classes but model expects {nc}. "
f"Skipping {skipped} samples from extra classes: {extra_classes}"
)
loader = build_dataloader(dataset, batch_size, self.args.workers, rank=rank, drop_last=self.args.compile)
# Attach inference transforms
if mode != "train":
if is_parallel(self.model):
self.model.module.transforms = loader.dataset.torch_transforms
else:
self.model.transforms = loader.dataset.torch_transforms
return loader
def preprocess_batch(self, batch: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
batch["img"] = batch["img"].to(self.device, non_blocking=self.device.type == "cuda")
batch["cls"] = batch["cls"].to(self.device, non_blocking=self.device.type == "cuda")
return batch
def progress_string(self) -> str:
return ("\n" + "%11s" * (4 + len(self.loss_names))) % (
"Epoch",
"GPU_mem",
*self.loss_names,
"Instances",
"Size",
)
def get_validator(self):
self.loss_names = ["loss"]
return yolo.classify.ClassificationValidator(
self.test_loader, self.save_dir, args=copy(self.args), _callbacks=self.callbacks
)
def label_loss_items(self, loss_items: torch.Tensor | None = None, prefix: str = "train"):
keys = [f"{prefix}/{x}" for x in self.loss_names]
if loss_items is None:
return keys
loss_items = [round(float(loss_items), 5)]
return dict(zip(keys, loss_items))
def plot_training_samples(self, batch: dict[str, torch.Tensor], ni: int):
batch["batch_idx"] = torch.arange(batch["img"].shape[0]) # add batch index for plotting
plot_images(
labels=batch,
fname=self.save_dir / f"train_batch{ni}.jpg",
on_plot=self.on_plot,
) | --- +++ @@ -17,8 +17,46 @@
class ClassificationTrainer(BaseTrainer):
+ """A trainer class extending BaseTrainer for training image classification models.
+
+ This trainer handles the training process for image classification tasks, supporting both YOLO classification models
+ and torchvision models with comprehensive dataset handling and validation.
+
+ Attributes:
+ model (ClassificationModel): The classification model to be trained.
+ data (dict[str, Any]): Dictionary containing dataset information including class names and number of classes.
+ loss_names (list[str]): Names of the loss functions used during training.
+ validator (ClassificationValidator): Validator instance for model evaluation.
+
+ Methods:
+ set_model_attributes: Set the model's class names from the loaded dataset.
+ get_model: Return a modified PyTorch model configured for training.
+ setup_model: Load, create or download model for classification.
+ build_dataset: Create a ClassificationDataset instance.
+ get_dataloader: Return PyTorch DataLoader with transforms for image preprocessing.
+ preprocess_batch: Preprocess a batch of images and classes.
+ progress_string: Return a formatted string showing training progress.
+ get_validator: Return an instance of ClassificationValidator.
+ label_loss_items: Return a loss dict with labeled training loss items.
+ final_eval: Evaluate trained model and save validation results.
+ plot_training_samples: Plot training samples with their annotations.
+
+ Examples:
+ Initialize and train a classification model
+ >>> from ultralytics.models.yolo.classify import ClassificationTrainer
+ >>> args = dict(model="yolo26n-cls.pt", data="imagenet10", epochs=3)
+ >>> trainer = ClassificationTrainer(overrides=args)
+ >>> trainer.train()
+ """
def __init__(self, cfg=DEFAULT_CFG, overrides: dict[str, Any] | None = None, _callbacks: dict | None = None):
+ """Initialize a ClassificationTrainer object.
+
+ Args:
+ cfg (dict[str, Any], optional): Default configuration dictionary containing training parameters.
+ overrides (dict[str, Any], optional): Dictionary of parameter overrides for the default configuration.
+ _callbacks (dict, optional): Dictionary of callback functions to be executed during training.
+ """
if overrides is None:
overrides = {}
overrides["task"] = "classify"
@@ -27,9 +65,20 @@ super().__init__(cfg, overrides, _callbacks)
def set_model_attributes(self):
+ """Set the YOLO model's class names from the loaded dataset."""
self.model.names = self.data["names"]
def get_model(self, cfg=None, weights=None, verbose: bool = True):
+ """Return a modified PyTorch model configured for training YOLO classification.
+
+ Args:
+ cfg (Any, optional): Model configuration.
+ weights (Any, optional): Pre-trained model weights.
+ verbose (bool, optional): Whether to display model information.
+
+ Returns:
+ (ClassificationModel): Configured PyTorch model for classification.
+ """
model = ClassificationModel(cfg, nc=self.data["nc"], ch=self.data["channels"], verbose=verbose and RANK == -1)
if weights:
model.load(weights)
@@ -44,6 +93,11 @@ return model
def setup_model(self):
+ """Load, create or download model for classification tasks.
+
+ Returns:
+ (Any): Model checkpoint if applicable, otherwise None.
+ """
import torchvision # scope for faster 'import ultralytics'
if str(self.model) in torchvision.models.__dict__:
@@ -57,9 +111,30 @@ return ckpt
def build_dataset(self, img_path: str, mode: str = "train", batch=None):
+ """Create a ClassificationDataset instance given an image path and mode.
+
+ Args:
+ img_path (str): Path to the dataset images.
+ mode (str, optional): Dataset mode ('train', 'val', or 'test').
+ batch (Any, optional): Batch information (unused in this implementation).
+
+ Returns:
+ (ClassificationDataset): Dataset for the specified mode.
+ """
return ClassificationDataset(root=img_path, args=self.args, augment=mode == "train", prefix=mode)
def get_dataloader(self, dataset_path: str, batch_size: int = 16, rank: int = 0, mode: str = "train"):
+ """Return PyTorch DataLoader with transforms to preprocess images.
+
+ Args:
+ dataset_path (str): Path to the dataset.
+ batch_size (int, optional): Number of images per batch.
+ rank (int, optional): Process rank for distributed training.
+ mode (str, optional): 'train', 'val', or 'test' mode.
+
+ Returns:
+ (torch.utils.data.DataLoader): DataLoader for the specified dataset and mode.
+ """
with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP
dataset = self.build_dataset(dataset_path, mode)
@@ -86,11 +161,13 @@ return loader
def preprocess_batch(self, batch: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
+ """Preprocess a batch of images and classes."""
batch["img"] = batch["img"].to(self.device, non_blocking=self.device.type == "cuda")
batch["cls"] = batch["cls"].to(self.device, non_blocking=self.device.type == "cuda")
return batch
def progress_string(self) -> str:
+ """Return a formatted string showing training progress."""
return ("\n" + "%11s" * (4 + len(self.loss_names))) % (
"Epoch",
"GPU_mem",
@@ -100,12 +177,22 @@ )
def get_validator(self):
+ """Return an instance of ClassificationValidator for validation."""
self.loss_names = ["loss"]
return yolo.classify.ClassificationValidator(
self.test_loader, self.save_dir, args=copy(self.args), _callbacks=self.callbacks
)
def label_loss_items(self, loss_items: torch.Tensor | None = None, prefix: str = "train"):
+ """Return a loss dict with labeled training loss items tensor.
+
+ Args:
+ loss_items (torch.Tensor, optional): Loss tensor items.
+ prefix (str, optional): Prefix to prepend to loss names.
+
+ Returns:
+ (dict | list): Dictionary of labeled loss items if loss_items is provided, otherwise list of keys.
+ """
keys = [f"{prefix}/{x}" for x in self.loss_names]
if loss_items is None:
return keys
@@ -113,9 +200,15 @@ return dict(zip(keys, loss_items))
def plot_training_samples(self, batch: dict[str, torch.Tensor], ni: int):
+ """Plot training samples with their annotations.
+
+ Args:
+ batch (dict[str, torch.Tensor]): Batch containing images and class labels.
+ ni (int): Batch index used for naming the output file.
+ """
batch["batch_idx"] = torch.arange(batch["img"].shape[0]) # add batch index for plotting
plot_images(
labels=batch,
fname=self.save_dir / f"train_batch{ni}.jpg",
on_plot=self.on_plot,
- )+ )
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/yolo/classify/train.py |
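Because setup_model() above also recognizes bare torchvision architecture names, a hedged sketch of both entry points follows; the dataset name and epoch count are illustrative, and the torchvision path assumes the name exists in torchvision.models.
from ultralytics.models.yolo.classify import ClassificationTrainer

# YOLO classification checkpoint
trainer = ClassificationTrainer(overrides={"model": "yolo26n-cls.pt", "data": "imagenet10", "epochs": 1})
trainer.train()

# Bare torchvision architecture name, resolved in setup_model() via torchvision.models
trainer = ClassificationTrainer(overrides={"model": "resnet18", "data": "imagenet10", "epochs": 1, "pretrained": True})
trainer.train()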
Add structured docstrings to improve clarity | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
from __future__ import annotations
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from ultralytics.nn.modules.transformer import MLP
class LinearPresenceHead(nn.Sequential):
def __init__(self, d_model):
# a hack to make `LinearPresenceHead` compatible with old checkpoints
super().__init__(nn.Identity(), nn.Identity(), nn.Linear(d_model, 1))
def forward(self, hs, prompt, prompt_mask):
return super().forward(hs)
class MaskPredictor(nn.Module):
def __init__(self, hidden_dim, mask_dim):
super().__init__()
self.mask_embed = MLP(hidden_dim, hidden_dim, mask_dim, 3)
def forward(self, obj_queries, pixel_embed):
if len(obj_queries.shape) == 3:
if pixel_embed.ndim == 3:
# batch size was omitted
mask_preds = torch.einsum("bqc,chw->bqhw", self.mask_embed(obj_queries), pixel_embed)
else:
mask_preds = torch.einsum("bqc,bchw->bqhw", self.mask_embed(obj_queries), pixel_embed)
else:
# Assumed to have aux masks
if pixel_embed.ndim == 3:
# batch size was omitted
mask_preds = torch.einsum("lbqc,chw->lbqhw", self.mask_embed(obj_queries), pixel_embed)
else:
mask_preds = torch.einsum("lbqc,bchw->lbqhw", self.mask_embed(obj_queries), pixel_embed)
return mask_preds
class SegmentationHead(nn.Module):
def __init__(
self,
hidden_dim,
upsampling_stages,
use_encoder_inputs=False,
aux_masks=False,
no_dec=False,
pixel_decoder=None,
act_ckpt=False,
shared_conv=False,
compile_mode_pixel_decoder=None,
):
super().__init__()
self.use_encoder_inputs = use_encoder_inputs
self.aux_masks = aux_masks
if pixel_decoder is not None:
self.pixel_decoder = pixel_decoder
else:
self.pixel_decoder = PixelDecoder(
hidden_dim,
upsampling_stages,
shared_conv=shared_conv,
compile_mode=compile_mode_pixel_decoder,
)
self.no_dec = no_dec
if no_dec:
self.mask_predictor = nn.Conv2d(hidden_dim, 1, kernel_size=3, stride=1, padding=1)
else:
self.mask_predictor = MaskPredictor(hidden_dim, mask_dim=hidden_dim)
self.act_ckpt = act_ckpt
# used to update the output dictionary
self.instance_keys = ["pred_masks"]
def _embed_pixels(self, backbone_feats: list[torch.Tensor], encoder_hidden_states) -> torch.Tensor:
if self.use_encoder_inputs:
backbone_visual_feats = [bb_feat.clone() for bb_feat in backbone_feats]
# Extract visual embeddings
encoder_hidden_states = encoder_hidden_states.permute(1, 2, 0)
spatial_dim = math.prod(backbone_feats[-1].shape[-2:])
encoder_visual_embed = encoder_hidden_states[..., :spatial_dim].reshape(-1, *backbone_feats[-1].shape[1:])
backbone_visual_feats[-1] = encoder_visual_embed
if self.act_ckpt:
pixel_embed = checkpoint.checkpoint(self.pixel_decoder, backbone_visual_feats, use_reentrant=False)
else:
pixel_embed = self.pixel_decoder(backbone_visual_feats)
else:
backbone_feats = [x for x in backbone_feats]
pixel_embed = self.pixel_decoder(backbone_feats)
if pixel_embed.shape[0] == 1:
# For batch_size=1 training, we can avoid the indexing to save memory
pixel_embed = pixel_embed.squeeze(0)
else:
pixel_embed = pixel_embed[[0], ...]
return pixel_embed
def forward(
self,
backbone_feats: list[torch.Tensor],
obj_queries: torch.Tensor,
encoder_hidden_states: torch.Tensor = None,
**kwargs,
) -> dict[str, torch.Tensor]:
if self.use_encoder_inputs:
assert encoder_hidden_states is not None
pixel_embed = self._embed_pixels(backbone_feats=backbone_feats, encoder_hidden_states=encoder_hidden_states)
if self.no_dec:
mask_pred = self.mask_predictor(pixel_embed)
elif self.aux_masks:
mask_pred = self.mask_predictor(obj_queries, pixel_embed)
else:
mask_pred = self.mask_predictor(obj_queries[-1], pixel_embed)
return {"pred_masks": mask_pred}
class PixelDecoder(nn.Module):
def __init__(
self,
hidden_dim,
num_upsampling_stages,
interpolation_mode="nearest",
shared_conv=False,
compile_mode=None,
):
super().__init__()
self.hidden_dim = hidden_dim
self.num_upsampling_stages = num_upsampling_stages
self.interpolation_mode = interpolation_mode
conv_layers = []
norms = []
num_convs = 1 if shared_conv else num_upsampling_stages
for _ in range(num_convs):
conv_layers.append(nn.Conv2d(self.hidden_dim, self.hidden_dim, 3, 1, 1))
norms.append(nn.GroupNorm(8, self.hidden_dim))
self.conv_layers = nn.ModuleList(conv_layers)
self.norms = nn.ModuleList(norms)
self.shared_conv = shared_conv
self.out_dim = self.conv_layers[-1].out_channels
if compile_mode is not None:
self.forward = torch.compile(self.forward, mode=compile_mode, dynamic=True, fullgraph=True)
# Needed to make checkpointing happy. But we don't know if the module is checkpointed, so we disable it by default.
torch._dynamo.config.optimize_ddp = False
def forward(self, backbone_feats: list[torch.Tensor]):
prev_fpn = backbone_feats[-1]
fpn_feats = backbone_feats[:-1]
for layer_idx, bb_feat in enumerate(fpn_feats[::-1]):
curr_fpn = bb_feat
prev_fpn = curr_fpn + F.interpolate(prev_fpn, size=curr_fpn.shape[-2:], mode=self.interpolation_mode)
if self.shared_conv:
# only one conv layer
layer_idx = 0
prev_fpn = self.conv_layers[layer_idx](prev_fpn)
prev_fpn = F.relu(self.norms[layer_idx](prev_fpn))
return prev_fpn
class UniversalSegmentationHead(SegmentationHead):
def __init__(
self,
hidden_dim,
upsampling_stages,
pixel_decoder,
aux_masks=False,
no_dec=False,
act_ckpt=False,
presence_head: bool = False,
dot_product_scorer=None,
cross_attend_prompt=None,
):
super().__init__(
hidden_dim=hidden_dim,
upsampling_stages=upsampling_stages,
use_encoder_inputs=True,
aux_masks=aux_masks,
no_dec=no_dec,
pixel_decoder=pixel_decoder,
act_ckpt=act_ckpt,
)
self.d_model = hidden_dim
if dot_product_scorer is not None:
assert presence_head, "Specifying a dot product scorer without a presence head is likely a mistake"
self.presence_head = None
if presence_head:
self.presence_head = (
dot_product_scorer if dot_product_scorer is not None else LinearPresenceHead(self.d_model)
)
self.cross_attend_prompt = cross_attend_prompt
if self.cross_attend_prompt is not None:
self.cross_attn_norm = nn.LayerNorm(self.d_model)
self.semantic_seg_head = nn.Conv2d(self.pixel_decoder.out_dim, 1, kernel_size=1)
self.instance_seg_head = nn.Conv2d(self.pixel_decoder.out_dim, self.d_model, kernel_size=1)
def forward(
self,
backbone_feats: list[torch.Tensor],
obj_queries: torch.Tensor,
encoder_hidden_states: torch.Tensor = None,
prompt: torch.Tensor = None,
prompt_mask: torch.Tensor = None,
**kwargs,
) -> dict[str, torch.Tensor]:
assert encoder_hidden_states is not None
bs = encoder_hidden_states.shape[1]
if self.cross_attend_prompt is not None:
tgt2 = self.cross_attn_norm(encoder_hidden_states)
tgt2 = self.cross_attend_prompt(
query=tgt2,
key=prompt.to(tgt2.dtype),
value=prompt.to(tgt2.dtype),
key_padding_mask=prompt_mask,
need_weights=False,
)[0]
encoder_hidden_states = tgt2 + encoder_hidden_states
presence_logit = None
if self.presence_head is not None:
pooled_enc = encoder_hidden_states.mean(0)
presence_logit = (
self.presence_head(
pooled_enc.view(1, bs, 1, self.d_model),
prompt=prompt,
prompt_mask=prompt_mask,
)
.squeeze(0)
.squeeze(1)
)
pixel_embed = self._embed_pixels(backbone_feats=backbone_feats, encoder_hidden_states=encoder_hidden_states)
instance_embeds = self.instance_seg_head(pixel_embed)
if self.no_dec:
mask_pred = self.mask_predictor(instance_embeds)
elif self.aux_masks:
mask_pred = self.mask_predictor(obj_queries, instance_embeds)
else:
mask_pred = self.mask_predictor(obj_queries[-1], instance_embeds)
return {
"pred_masks": mask_pred,
"semantic_seg": self.semantic_seg_head(pixel_embed),
"presence_logit": presence_logit,
} | --- +++ @@ -15,22 +15,28 @@
class LinearPresenceHead(nn.Sequential):
+ """Linear presence head for predicting the presence of classes in an image."""
def __init__(self, d_model):
+ """Initializes the LinearPresenceHead."""
# a hack to make `LinearPresenceHead` compatible with old checkpoints
super().__init__(nn.Identity(), nn.Identity(), nn.Linear(d_model, 1))
def forward(self, hs, prompt, prompt_mask):
+ """Forward pass of the presence head."""
return super().forward(hs)
class MaskPredictor(nn.Module):
+ """Predicts masks from object queries and pixel embeddings."""
def __init__(self, hidden_dim, mask_dim):
+ """Initializes the MaskPredictor."""
super().__init__()
self.mask_embed = MLP(hidden_dim, hidden_dim, mask_dim, 3)
def forward(self, obj_queries, pixel_embed):
+ """Predicts masks from object queries and pixel embeddings."""
if len(obj_queries.shape) == 3:
if pixel_embed.ndim == 3:
# batch size was omitted
@@ -49,6 +55,7 @@
class SegmentationHead(nn.Module):
+ """Segmentation head that predicts masks from backbone features and object queries."""
def __init__(
self,
@@ -62,6 +69,7 @@ shared_conv=False,
compile_mode_pixel_decoder=None,
):
+ """Initializes the SegmentationHead."""
super().__init__()
self.use_encoder_inputs = use_encoder_inputs
self.aux_masks = aux_masks
@@ -86,6 +94,7 @@ self.instance_keys = ["pred_masks"]
def _embed_pixels(self, backbone_feats: list[torch.Tensor], encoder_hidden_states) -> torch.Tensor:
+ """Embeds pixels using the pixel decoder."""
if self.use_encoder_inputs:
backbone_visual_feats = [bb_feat.clone() for bb_feat in backbone_feats]
# Extract visual embeddings
@@ -115,6 +124,7 @@ encoder_hidden_states: torch.Tensor = None,
**kwargs,
) -> dict[str, torch.Tensor]:
+ """Forward pass of the SegmentationHead."""
if self.use_encoder_inputs:
assert encoder_hidden_states is not None
@@ -131,6 +141,7 @@
class PixelDecoder(nn.Module):
+ """Pixel decoder module that upsamples backbone features."""
def __init__(
self,
@@ -140,6 +151,7 @@ shared_conv=False,
compile_mode=None,
):
+ """Initializes the PixelDecoder."""
super().__init__()
self.hidden_dim = hidden_dim
self.num_upsampling_stages = num_upsampling_stages
@@ -161,6 +173,7 @@ torch._dynamo.config.optimize_ddp = False
def forward(self, backbone_feats: list[torch.Tensor]):
+ """Forward pass of the PixelDecoder."""
prev_fpn = backbone_feats[-1]
fpn_feats = backbone_feats[:-1]
for layer_idx, bb_feat in enumerate(fpn_feats[::-1]):
@@ -176,6 +189,7 @@
class UniversalSegmentationHead(SegmentationHead):
+ """This module handles semantic+instance segmentation."""
def __init__(
self,
@@ -189,6 +203,7 @@ dot_product_scorer=None,
cross_attend_prompt=None,
):
+ """Initializes the UniversalSegmentationHead."""
super().__init__(
hidden_dim=hidden_dim,
upsampling_stages=upsampling_stages,
@@ -225,6 +240,7 @@ prompt_mask: torch.Tensor = None,
**kwargs,
) -> dict[str, torch.Tensor]:
+ """Forward pass of the UniversalSegmentationHead."""
assert encoder_hidden_states is not None
bs = encoder_hidden_states.shape[1]
@@ -267,4 +283,4 @@ "pred_masks": mask_pred,
"semantic_seg": self.semantic_seg_head(pixel_embed),
"presence_logit": presence_logit,
- }
+ }
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/sam/sam3/maskformer_segmentation.py |
Create docstrings for reusable components | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import random
import shutil
from pathlib import Path
from ultralytics.data.utils import IMG_FORMATS, img2label_paths
from ultralytics.utils import DATASETS_DIR, LOGGER, TQDM
def split_classify_dataset(source_dir: str | Path, train_ratio: float = 0.8) -> Path:
source_path = Path(source_dir)
split_path = Path(f"{source_path}_split")
train_path, val_path = split_path / "train", split_path / "val"
# Create directory structure
split_path.mkdir(exist_ok=True)
train_path.mkdir(exist_ok=True)
val_path.mkdir(exist_ok=True)
# Process class directories
class_dirs = [d for d in source_path.iterdir() if d.is_dir()]
total_images = sum(len(list(d.glob("*.*"))) for d in class_dirs)
stats = f"{len(class_dirs)} classes, {total_images} images"
LOGGER.info(f"Splitting {source_path} ({stats}) into {train_ratio:.0%} train, {1 - train_ratio:.0%} val...")
for class_dir in class_dirs:
# Create class directories
(train_path / class_dir.name).mkdir(exist_ok=True)
(val_path / class_dir.name).mkdir(exist_ok=True)
# Split and copy files
image_files = list(class_dir.glob("*.*"))
random.shuffle(image_files)
split_idx = int(len(image_files) * train_ratio)
for img in image_files[:split_idx]:
shutil.copy2(img, train_path / class_dir.name / img.name)
for img in image_files[split_idx:]:
shutil.copy2(img, val_path / class_dir.name / img.name)
LOGGER.info(f"Split complete in {split_path} ✅")
return split_path
def autosplit(
path: Path = DATASETS_DIR / "coco8/images",
weights: tuple[float, float, float] = (0.9, 0.1, 0.0),
annotated_only: bool = False,
) -> None:
path = Path(path) # images dir
files = sorted(x for x in path.rglob("*.*") if x.suffix[1:].lower() in IMG_FORMATS) # image files only
n = len(files) # number of files
random.seed(0) # for reproducibility
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
txt = ["autosplit_train.txt", "autosplit_val.txt", "autosplit_test.txt"] # 3 txt files
for x in txt:
if (path.parent / x).exists():
(path.parent / x).unlink() # remove existing
LOGGER.info(f"Autosplitting images from {path}" + ", using *.txt labeled images only" * annotated_only)
for i, img in TQDM(zip(indices, files), total=n):
if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label
with open(path.parent / txt[i], "a", encoding="utf-8") as f:
f.write(f"./{img.relative_to(path.parent).as_posix()}" + "\n") # add image to txt file
if __name__ == "__main__":
split_classify_dataset("caltech101") | --- +++ @@ -11,6 +11,55 @@
def split_classify_dataset(source_dir: str | Path, train_ratio: float = 0.8) -> Path:
+ """Split classification dataset into train and val directories in a new directory.
+
+ Creates a new directory '{source_dir}_split' with train/val subdirectories, preserving the original class structure
+ with an 80/20 split by default.
+
+ Directory structure:
+ Before:
+ caltech/
+ ├── class1/
+ │ ├── img1.jpg
+ │ ├── img2.jpg
+ │ └── ...
+ ├── class2/
+ │ ├── img1.jpg
+ │ └── ...
+ └── ...
+
+ After:
+ caltech_split/
+ ├── train/
+ │ ├── class1/
+ │ │ ├── img1.jpg
+ │ │ └── ...
+ │ ├── class2/
+ │ │ ├── img1.jpg
+ │ │ └── ...
+ │ └── ...
+ └── val/
+ ├── class1/
+ │ ├── img2.jpg
+ │ └── ...
+ ├── class2/
+ │ └── ...
+ └── ...
+
+ Args:
+ source_dir (str | Path): Path to classification dataset root directory.
+ train_ratio (float): Ratio for train split, between 0 and 1.
+
+ Returns:
+ (Path): Path to the created split directory.
+
+ Examples:
+ Split dataset with default 80/20 ratio
+ >>> split_classify_dataset("path/to/caltech")
+
+ Split with custom ratio
+ >>> split_classify_dataset("path/to/caltech", 0.75)
+ """
source_path = Path(source_dir)
split_path = Path(f"{source_path}_split")
train_path, val_path = split_path / "train", split_path / "val"
@@ -51,6 +100,22 @@ weights: tuple[float, float, float] = (0.9, 0.1, 0.0),
annotated_only: bool = False,
) -> None:
+ """Automatically split a dataset into train/val/test splits and save the resulting splits into autosplit_*.txt
+ files.
+
+ Args:
+ path (Path): Path to images directory.
+ weights (tuple[float, float, float]): Train, validation, and test split fractions.
+ annotated_only (bool): If True, only images with an associated txt file are used.
+
+ Examples:
+ Split images with default weights
+ >>> from ultralytics.data.split import autosplit
+ >>> autosplit()
+
+ Split with custom weights and annotated images only
+ >>> autosplit(path="path/to/images", weights=(0.8, 0.15, 0.05), annotated_only=True)
+ """
path = Path(path) # images dir
files = sorted(x for x in path.rglob("*.*") if x.suffix[1:].lower() in IMG_FORMATS) # image files only
n = len(files) # number of files
@@ -70,4 +135,4 @@
if __name__ == "__main__":
- split_classify_dataset("caltech101")
+ split_classify_dataset("caltech101")
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/data/split.py |
Write reusable docstrings | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
from __future__ import annotations
import math
from functools import partial
from typing import Callable
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from torch import Tensor
from ultralytics.models.sam.modules.blocks import PatchEmbed
from ultralytics.models.sam.modules.utils import (
apply_rotary_enc,
compute_axial_cis,
concat_rel_pos,
get_abs_pos,
window_partition,
window_unpartition,
)
from ultralytics.utils.checks import check_requirements
from .model_misc import LayerScale
class Attention(nn.Module):
def __init__(
self,
dim: int,
num_heads: int = 8,
qkv_bias: bool = True,
use_rel_pos: bool = False,
rel_pos_zero_init: bool = True,
input_size: tuple[int, int] | None = None,
cls_token: bool = False,
use_rope: bool = False,
rope_theta: float = 10000.0,
rope_pt_size: tuple[int, int] | None = None,
rope_interp: bool = False,
):
super().__init__()
self.num_heads = num_heads
self.head_dim = dim // num_heads
self.scale = self.head_dim**-0.5
self.cls_token = cls_token
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.proj = nn.Linear(dim, dim)
# rel_pos embeddings and rope
self.use_rel_pos = use_rel_pos
self.input_size = input_size
self.use_rope = use_rope
self.rope_theta = rope_theta
self.rope_pt_size = rope_pt_size
self.rope_interp = rope_interp
# init rel_pos embeddings and rope
self._setup_rel_pos(rel_pos_zero_init, input_size)
self._setup_rope_freqs(input_size)
def _setup_rel_pos(self, rel_pos_zero_init: bool = True, input_size: tuple[int, int] | None = None) -> None:
if not self.use_rel_pos:
self.rel_pos_h = None
self.rel_pos_w = None
return
assert input_size is not None
assert self.cls_token is False, "not supported"
# initialize relative positional embeddings
self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, self.head_dim))
self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, self.head_dim))
if not rel_pos_zero_init:
nn.init.trunc_normal_(self.rel_pos_h, std=0.02)
nn.init.trunc_normal_(self.rel_pos_w, std=0.02)
# Precompute the relative coords
H, W = input_size
q_coords = torch.arange(H)[:, None]
k_coords = torch.arange(W)[None, :]
relative_coords = (q_coords - k_coords) + (H - 1)
self.relative_coords = relative_coords.long()
def _setup_rope_freqs(self, input_size: tuple[int, int] | None = None) -> None:
if not self.use_rope:
self.freqs_cis = None
return
assert input_size is not None
# determine rope input size
if self.rope_pt_size is None:
self.rope_pt_size = input_size
# initialize 2d rope freqs
self.compute_cis = partial(
compute_axial_cis,
dim=self.head_dim,
theta=self.rope_theta,
)
# interpolate rope
scale_pos = 1.0
if self.rope_interp:
scale_pos = self.rope_pt_size[0] / input_size[0]
# get scaled freqs_cis
freqs_cis = self.compute_cis(
end_x=input_size[0],
end_y=input_size[1],
scale_pos=scale_pos,
)
if self.cls_token:
t = torch.zeros(
self.head_dim // 2,
dtype=torch.float32,
device=freqs_cis.device,
)
cls_freqs_cis = torch.polar(torch.ones_like(t), t)[None, :]
freqs_cis = torch.cat([cls_freqs_cis, freqs_cis], dim=0)
self.freqs_cis = freqs_cis
def _apply_rope(self, q, k) -> tuple[Tensor, Tensor]:
if not self.use_rope:
return q, k
assert self.freqs_cis is not None
return apply_rotary_enc(q, k, freqs_cis=self.freqs_cis.to(q.device))
def forward(self, x: Tensor) -> Tensor:
s = 1 if self.cls_token else 0 # used to exclude cls_token
if x.ndim == 4:
B, H, W, _ = x.shape
assert s == 0 # no cls_token
L = H * W
ndim = 4
else:
assert x.ndim == 3
B, L, _ = x.shape
ndim = 3
H = W = math.sqrt(L - s)
# qkv with shape (3, B, nHead, L, C)
qkv = self.qkv(x).reshape(B, L, 3, self.num_heads, -1)
# q, k, v with shape (B, nHead, L, C)
q, k, v = qkv.permute(2, 0, 3, 1, 4).unbind(0)
# handle rope and rel pos embeddings
q, k = self._apply_rope(q, k)
if self.use_rel_pos:
q, k = concat_rel_pos(
q.flatten(0, 1),
k.flatten(0, 1),
(H, W),
x.shape[1:3],
self.rel_pos_h,
self.rel_pos_w,
rescale=True,
relative_coords=self.relative_coords,
)
# sdpa expects [B, nheads, H*W, C] so we transpose back
q = q.reshape(B, self.num_heads, H * W, -1)
k = k.reshape(B, self.num_heads, H * W, -1)
x = F.scaled_dot_product_attention(q, k, v)
if ndim == 4:
x = x.view(B, self.num_heads, H, W, -1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1)
else:
x = x.view(B, self.num_heads, L, -1).permute(0, 2, 1, 3).reshape(B, L, -1)
x = self.proj(x)
return x
class Block(nn.Module):
def __init__(
self,
dim: int,
num_heads: int,
mlp_ratio: float = 4.0,
qkv_bias: bool = True,
drop_path: float = 0.0,
norm_layer: Callable[..., nn.Module] = nn.LayerNorm,
act_layer: Callable[..., nn.Module] = nn.GELU,
use_rel_pos: bool = False,
rel_pos_zero_init: bool = True,
window_size: int = 0,
input_size: tuple[int, int] | None = None,
use_rope: bool = False,
rope_pt_size: tuple[int, int] | None = None,
rope_interp: bool = False,
cls_token: bool = False,
dropout: float = 0.0,
init_values: float | None = None,
):
super().__init__()
check_requirements("timm")
from timm.layers import DropPath, Mlp
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
use_rel_pos=use_rel_pos,
rel_pos_zero_init=rel_pos_zero_init,
input_size=input_size if window_size == 0 else (window_size, window_size),
use_rope=use_rope,
rope_pt_size=rope_pt_size,
rope_interp=rope_interp,
cls_token=cls_token,
)
self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=(dropout, 0.0),
)
self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
self.dropout = nn.Dropout(dropout)
self.window_size = window_size
def forward(self, x: Tensor) -> Tensor:
shortcut = x
x = self.norm1(x)
# Window partition
if self.window_size > 0:
H, W = x.shape[1], x.shape[2]
x, pad_hw = window_partition(x, self.window_size)
x = self.ls1(self.attn(x))
# Reverse window partition
if self.window_size > 0:
x = window_unpartition(x, self.window_size, pad_hw, (H, W))
x = shortcut + self.dropout(self.drop_path(x))
x = x + self.dropout(self.drop_path(self.ls2(self.mlp(self.norm2(x)))))
return x
class ViT(nn.Module):
def __init__(
self,
img_size: int = 1024,
patch_size: int = 16,
in_chans: int = 3,
embed_dim: int = 768,
depth: int = 12,
num_heads: int = 12,
mlp_ratio: float = 4.0,
qkv_bias: bool = True,
drop_path_rate: float = 0.0,
norm_layer: Callable[..., nn.Module] | str = "LayerNorm",
act_layer: Callable[..., nn.Module] = nn.GELU,
use_abs_pos: bool = True,
tile_abs_pos: bool = True,
rel_pos_blocks: tuple[int, ...] | bool = (2, 5, 8, 11),
rel_pos_zero_init: bool = True,
window_size: int = 14,
global_att_blocks: tuple[int, ...] = (2, 5, 8, 11),
use_rope: bool = False,
rope_pt_size: int | None = None,
use_interp_rope: bool = False,
pretrain_img_size: int = 224,
pretrain_use_cls_token: bool = True,
retain_cls_token: bool = True,
dropout: float = 0.0,
return_interm_layers: bool = False,
init_values: float | None = None, # for layerscale
ln_pre: bool = False,
ln_post: bool = False,
bias_patch_embed: bool = True,
compile_mode: str | None = None,
use_act_checkpoint: bool = True,
):
super().__init__()
self.pretrain_use_cls_token = pretrain_use_cls_token
window_block_indexes = [i for i in range(depth) if i not in global_att_blocks]
self.full_attn_ids = list(global_att_blocks)
self.rel_pos_blocks = [False] * depth
if isinstance(rel_pos_blocks, bool) and rel_pos_blocks:
self.rel_pos_blocks = [True] * depth
else:
for i in rel_pos_blocks:
self.rel_pos_blocks[i] = True
self.retain_cls_token = retain_cls_token
if self.retain_cls_token:
assert pretrain_use_cls_token
assert len(window_block_indexes) == 0, "windowing not supported with cls token"
assert sum(self.rel_pos_blocks) == 0, "rel pos not supported with cls token"
scale = embed_dim**-0.5
self.class_embedding = nn.Parameter(scale * torch.randn(1, 1, embed_dim))
if isinstance(norm_layer, str):
norm_layer = partial(getattr(nn, norm_layer), eps=1e-5)
self.patch_embed = PatchEmbed(
kernel_size=(patch_size, patch_size),
stride=(patch_size, patch_size),
in_chans=in_chans,
embed_dim=embed_dim,
bias=bias_patch_embed,
)
# Handle absolute positional embedding
self.tile_abs_pos = tile_abs_pos
self.use_abs_pos = use_abs_pos
if self.tile_abs_pos:
assert self.use_abs_pos
if self.use_abs_pos:
# Initialize absolute positional embedding with pretrain image size.
num_patches = (pretrain_img_size // patch_size) * (pretrain_img_size // patch_size)
num_positions = (num_patches + 1) if pretrain_use_cls_token else num_patches
self.pos_embed = nn.Parameter(torch.zeros(1, num_positions, embed_dim))
else:
self.pos_embed = None
# stochastic depth decay rule
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
self.patch_size = patch_size
self.window_size = window_size
self.blocks = nn.ModuleList()
cur_stage = 1
for i in range(depth):
block = Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
drop_path=dpr[i],
norm_layer=norm_layer,
act_layer=act_layer,
use_rel_pos=self.rel_pos_blocks[i],
rel_pos_zero_init=rel_pos_zero_init,
window_size=window_size if i in window_block_indexes else 0,
input_size=(img_size // patch_size, img_size // patch_size),
use_rope=use_rope,
rope_pt_size=((window_size, window_size) if rope_pt_size is None else (rope_pt_size, rope_pt_size)),
rope_interp=use_interp_rope,
cls_token=self.retain_cls_token,
dropout=dropout,
init_values=init_values,
)
if i not in window_block_indexes:
cur_stage += 1
self.use_act_checkpoint = use_act_checkpoint
self.blocks.append(block)
self.return_interm_layers = return_interm_layers
self.channel_list = [embed_dim] * len(self.full_attn_ids) if return_interm_layers else [embed_dim]
if self.pos_embed is not None:
nn.init.trunc_normal_(self.pos_embed, std=0.02)
self.ln_pre = norm_layer(embed_dim) if ln_pre else nn.Identity()
self.ln_post = norm_layer(embed_dim) if ln_post else nn.Identity()
self.apply(self._init_weights)
if compile_mode is not None:
self.forward = torch.compile(self.forward, mode=compile_mode, fullgraph=True)
if self.use_act_checkpoint and self.training:
torch._dynamo.config.optimize_ddp = False
@staticmethod
def _init_weights(m: nn.Module) -> None:
if isinstance(m, nn.Linear):
nn.init.trunc_normal_(m.weight, std=0.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def forward(self, x: torch.Tensor) -> list[torch.Tensor]:
x = self.patch_embed(x)
h, w = x.shape[1], x.shape[2]
s = 0
if self.retain_cls_token:
# If cls_token is retained, we don't
# maintain spatial shape
x = torch.cat([self.class_embedding, x.flatten(1, 2)], dim=1)
s = 1
if self.pos_embed is not None:
x = x + get_abs_pos(
self.pos_embed,
self.pretrain_use_cls_token,
(h, w),
self.retain_cls_token,
tiling=self.tile_abs_pos,
)
x = self.ln_pre(x)
outputs = []
for i, blk in enumerate(self.blocks):
if self.use_act_checkpoint and self.training:
x = checkpoint.checkpoint(blk, x, use_reentrant=False)
else:
x = blk(x)
if (i == self.full_attn_ids[-1]) or (self.return_interm_layers and i in self.full_attn_ids):
if i == self.full_attn_ids[-1]:
x = self.ln_post(x)
feats = x[:, s:]
if feats.ndim == 4:
feats = feats.permute(0, 3, 1, 2)
else:
assert feats.ndim == 3
h = w = math.sqrt(feats.shape[1])
feats = feats.reshape(feats.shape[0], h, w, feats.shape[-1]).permute(0, 3, 1, 2)
outputs.append(feats)
return outputs
def set_imgsz(self, imgsz: list[int] = [1008, 1008]):
for block in self.blocks:
if block.window_size != 0:
continue
block.attn._setup_rel_pos(input_size=(imgsz[0] // self.patch_size, imgsz[1] // self.patch_size))
block.attn._setup_rope_freqs(input_size=(imgsz[0] // self.patch_size, imgsz[1] // self.patch_size)) | --- +++ @@ -2,6 +2,15 @@
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
+"""
+ViTDet backbone adapted from Detectron2.
+This module implements Vision Transformer (ViT) backbone for object detection.
+
+Rope embedding code adopted from:
+1. https://github.com/meta-llama/codellama/blob/main/llama/model.py
+2. https://github.com/naver-ai/rope-vit
+3. https://github.com/lucidrains/rotary-embedding-torch
+"""
from __future__ import annotations
@@ -30,6 +39,7 @@
class Attention(nn.Module):
+ """Multi-head Attention block with relative position embeddings and 2d-rope."""
def __init__(
self,
@@ -45,6 +55,22 @@ rope_pt_size: tuple[int, int] | None = None,
rope_interp: bool = False,
):
+ """
+ Args:
+ dim (int): Number of input channels.
+ num_heads (int): Number of attention heads.
+ qkv_bias (bool): If True, add a learnable bias to query, key, value.
+ use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
+ rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
+ input_size (tuple[int, int] or None): Input resolution for calculating the relative positional parameter
+ size or rope size.
+ cls_token (bool): Whether a cls_token is present.
+ use_rope (bool): Whether to use rope 2d (independent of use_rel_pos, as it can be used together).
+ rope_theta (float): Control frequencies of rope.
+ rope_pt_size (tuple[int, int] or None): Size of rope in previous stage of training, needed for interpolation
+ or tiling.
+ rope_interp (bool): Whether to interpolate (or extrapolate) rope to match input size.
+ """
super().__init__()
self.num_heads = num_heads
self.head_dim = dim // num_heads
@@ -68,6 +94,7 @@ self._setup_rope_freqs(input_size)
def _setup_rel_pos(self, rel_pos_zero_init: bool = True, input_size: tuple[int, int] | None = None) -> None:
+ """Setup relative positional embeddings."""
if not self.use_rel_pos:
self.rel_pos_h = None
self.rel_pos_w = None
@@ -91,6 +118,7 @@ self.relative_coords = relative_coords.long()
def _setup_rope_freqs(self, input_size: tuple[int, int] | None = None) -> None:
+ """Setup 2d-rope frequencies."""
if not self.use_rope:
self.freqs_cis = None
return
@@ -129,6 +157,7 @@ self.freqs_cis = freqs_cis
def _apply_rope(self, q, k) -> tuple[Tensor, Tensor]:
+ """Apply 2d-rope to q and k."""
if not self.use_rope:
return q, k
@@ -136,6 +165,7 @@ return apply_rotary_enc(q, k, freqs_cis=self.freqs_cis.to(q.device))
def forward(self, x: Tensor) -> Tensor:
+ """Forward pass of attention block."""
s = 1 if self.cls_token else 0 # used to exclude cls_token
if x.ndim == 4:
B, H, W, _ = x.shape
@@ -184,6 +214,7 @@
class Block(nn.Module):
+ """Transformer blocks with support of window attention."""
def __init__(
self,
@@ -205,6 +236,29 @@ dropout: float = 0.0,
init_values: float | None = None,
):
+ """
+ Args:
+ dim (int): Number of input channels.
+ num_heads (int): Number of attention heads in each ViT block.
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
+ qkv_bias (bool): If True, add a learnable bias to query, key, value.
+ drop_path (float): Stochastic depth rate.
+ norm_layer (Callable): Normalization layer constructor.
+ act_layer (Callable): Activation layer constructor.
+ use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
+ rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
+ window_size (int): Window size for window attention blocks. If it equals 0, then not use window attention.
+ input_size (tuple[int, int] | None): Input resolution for calculating the relative positional parameter
+ size.
+ use_rope (bool): Whether to use rope 2d (independent of use_rel_pos, as it can be used together).
+ rope_pt_size (tuple[int, int] | None): Size of rope in previous stage of training, needed for interpolation
+ or tiling.
+ rope_interp (bool): Whether to interpolate (or extrapolate) rope to match target input size, expected to
+ specify source size as rope_pt_size.
+ cls_token (bool): Whether a cls_token is present.
+ dropout (float): Dropout rate.
+ init_values (float | None): Layer scale init, None for no layer scale.
+ """
super().__init__()
check_requirements("timm")
@@ -238,6 +292,7 @@ self.window_size = window_size
def forward(self, x: Tensor) -> Tensor:
+ """Forward pass of the transformer block."""
shortcut = x
x = self.norm1(x)
# Window partition
@@ -257,6 +312,9 @@
class ViT(nn.Module):
+ """This module implements Vision Transformer (ViT) backbone in :paper:`vitdet`. "Exploring Plain Vision Transformer
+ Backbones for Object Detection", https://arxiv.org/abs/2203.16527.
+ """
def __init__(
self,
@@ -292,6 +350,42 @@ compile_mode: str | None = None,
use_act_checkpoint: bool = True,
):
+ """
+ Args:
+ img_size (int): Input image size. Only relevant for rel pos or rope.
+ patch_size (int): Patch size.
+ in_chans (int): Number of input image channels.
+ embed_dim (int): Patch embedding dimension.
+ depth (int): Depth of ViT.
+ num_heads (int): Number of attention heads in each ViT block.
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
+ qkv_bias (bool): If True, add a learnable bias to query, key, value.
+ drop_path_rate (float): Stochastic depth rate.
+ norm_layer (Callable or str): Normalization layer constructor or name.
+ act_layer (Callable): Activation layer constructor.
+ use_abs_pos (bool): If True, use absolute positional embeddings.
+ tile_abs_pos (bool): If True, tile absolute positional embeddings instead of interpolation.
+ rel_pos_blocks (tuple[int, ...] | bool): Blocks which have rel pos embeddings.
+ rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
+ window_size (int): Window size for window attention blocks.
+ global_att_blocks (tuple[int, ...]): Indexes for blocks using global attention (other blocks use window
+ attention).
+ use_rope (bool): Whether to use rope 2d (independent of rel_pos_blocks, as it can be used together).
+ rope_pt_size (int | None): Size of rope in previous stage of training, needed for interpolation or tiling.
+ use_interp_rope (bool): Whether to interpolate (or extrapolate) rope to match target input size, expected to
+ specify source size as rope_pt_size.
+ pretrain_img_size (int): Input image size for pretraining models.
+ pretrain_use_cls_token (bool): If True, pretraining models use class token.
+ retain_cls_token (bool): Whether cls_token should be retained.
+ dropout (float): Dropout rate. Applied in residual blocks of attn, mlp and inside the mlp.
+ return_interm_layers (bool): Whether to return intermediate layers (all global attention blocks).
+ init_values (float | None): Layer scale init, None for no layer scale.
+ ln_pre (bool): If True, apply layer norm before transformer blocks.
+ ln_post (bool): If True, apply layer norm after transformer blocks.
+ bias_patch_embed (bool): If True, use bias in conv for patch embed.
+ compile_mode (str | None): Mode to compile the forward, or None to disable.
+ use_act_checkpoint (bool): If True, use activation checkpointing.
+ """
super().__init__()
self.pretrain_use_cls_token = pretrain_use_cls_token
@@ -392,6 +486,7 @@
@staticmethod
def _init_weights(m: nn.Module) -> None:
+ """Initialize the weights."""
if isinstance(m, nn.Linear):
nn.init.trunc_normal_(m.weight, std=0.02)
if isinstance(m, nn.Linear) and m.bias is not None:
@@ -401,6 +496,7 @@ nn.init.constant_(m.weight, 1.0)
def forward(self, x: torch.Tensor) -> list[torch.Tensor]:
+ """Vit forward path and get feature maps."""
x = self.patch_embed(x)
h, w = x.shape[1], x.shape[2]
@@ -445,8 +541,9 @@ return outputs
def set_imgsz(self, imgsz: list[int] = [1008, 1008]):
+ """Setup rel pos embeddings and rope freqs for a new input image size."""
for block in self.blocks:
if block.window_size != 0:
continue
block.attn._setup_rel_pos(input_size=(imgsz[0] // self.patch_size, imgsz[1] // self.patch_size))
- block.attn._setup_rope_freqs(input_size=(imgsz[0] // self.patch_size, imgsz[1] // self.patch_size))
+ block.attn._setup_rope_freqs(input_size=(imgsz[0] // self.patch_size, imgsz[1] // self.patch_size))
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/sam/sam3/vitdet.py |
Generate documentation strings for clarity | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import math
import random
from copy import copy
from typing import Any
import numpy as np
import torch
import torch.nn as nn
from ultralytics.data import build_dataloader, build_yolo_dataset
from ultralytics.engine.trainer import BaseTrainer
from ultralytics.models import yolo
from ultralytics.nn.tasks import DetectionModel
from ultralytics.utils import DEFAULT_CFG, LOGGER, RANK
from ultralytics.utils.patches import override_configs
from ultralytics.utils.plotting import plot_images, plot_labels
from ultralytics.utils.torch_utils import torch_distributed_zero_first, unwrap_model
class DetectionTrainer(BaseTrainer):
def __init__(self, cfg=DEFAULT_CFG, overrides: dict[str, Any] | None = None, _callbacks: dict | None = None):
super().__init__(cfg, overrides, _callbacks)
def build_dataset(self, img_path: str, mode: str = "train", batch: int | None = None):
gs = max(int(unwrap_model(self.model).stride.max()), 32)
return build_yolo_dataset(self.args, img_path, batch, self.data, mode=mode, rect=mode == "val", stride=gs)
def get_dataloader(self, dataset_path: str, batch_size: int = 16, rank: int = 0, mode: str = "train"):
assert mode in {"train", "val"}, f"Mode must be 'train' or 'val', not {mode}."
with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP
dataset = self.build_dataset(dataset_path, mode, batch_size)
shuffle = mode == "train"
if getattr(dataset, "rect", False) and shuffle and not np.all(dataset.batch_shapes == dataset.batch_shapes[0]):
LOGGER.warning("'rect=True' is incompatible with DataLoader shuffle, setting shuffle=False")
shuffle = False
return build_dataloader(
dataset,
batch=batch_size,
workers=self.args.workers if mode == "train" else self.args.workers * 2,
shuffle=shuffle,
rank=rank,
drop_last=self.args.compile and mode == "train",
)
def preprocess_batch(self, batch: dict) -> dict:
for k, v in batch.items():
if isinstance(v, torch.Tensor):
batch[k] = v.to(self.device, non_blocking=self.device.type == "cuda")
batch["img"] = batch["img"].float() / 255
if self.args.multi_scale > 0.0:
imgs = batch["img"]
sz = (
random.randrange(
int(self.args.imgsz * (1.0 - self.args.multi_scale)),
int(self.args.imgsz * (1.0 + self.args.multi_scale) + self.stride),
)
// self.stride
* self.stride
) # size
sf = sz / max(imgs.shape[2:]) # scale factor
if sf != 1:
ns = [
math.ceil(x * sf / self.stride) * self.stride for x in imgs.shape[2:]
] # new shape (stretched to gs-multiple)
imgs = nn.functional.interpolate(imgs, size=ns, mode="bilinear", align_corners=False)
batch["img"] = imgs
return batch
def set_model_attributes(self):
# Nl = de_parallel(self.model).model[-1].nl # number of detection layers (to scale hyps)
# self.args.box *= 3 / nl # scale to layers
# self.args.cls *= self.data["nc"] / 80 * 3 / nl # scale to classes and layers
# self.args.cls *= (self.args.imgsz / 640) ** 2 * 3 / nl # scale to image size and layers
self.model.nc = self.data["nc"] # attach number of classes to model
self.model.names = self.data["names"] # attach class names to model
self.model.args = self.args # attach hyperparameters to model
if getattr(self.model, "end2end"):
self.model.set_head_attr(max_det=self.args.max_det)
# TODO: self.model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc
def get_model(self, cfg: str | None = None, weights: str | None = None, verbose: bool = True):
model = DetectionModel(cfg, nc=self.data["nc"], ch=self.data["channels"], verbose=verbose and RANK == -1)
if weights:
model.load(weights)
return model
def get_validator(self):
self.loss_names = "box_loss", "cls_loss", "dfl_loss"
return yolo.detect.DetectionValidator(
self.test_loader, save_dir=self.save_dir, args=copy(self.args), _callbacks=self.callbacks
)
def label_loss_items(self, loss_items: list[float] | None = None, prefix: str = "train"):
keys = [f"{prefix}/{x}" for x in self.loss_names]
if loss_items is not None:
loss_items = [round(float(x), 5) for x in loss_items] # convert tensors to 5 decimal place floats
return dict(zip(keys, loss_items))
else:
return keys
def progress_string(self):
return ("\n" + "%11s" * (4 + len(self.loss_names))) % (
"Epoch",
"GPU_mem",
*self.loss_names,
"Instances",
"Size",
)
def plot_training_samples(self, batch: dict[str, Any], ni: int) -> None:
plot_images(
labels=batch,
paths=batch["im_file"],
fname=self.save_dir / f"train_batch{ni}.jpg",
on_plot=self.on_plot,
)
def plot_training_labels(self):
boxes = np.concatenate([lb["bboxes"] for lb in self.train_loader.dataset.labels], 0)
cls = np.concatenate([lb["cls"] for lb in self.train_loader.dataset.labels], 0)
plot_labels(boxes, cls.squeeze(), names=self.data["names"], save_dir=self.save_dir, on_plot=self.on_plot)
def auto_batch(self):
with override_configs(self.args, overrides={"cache": False}) as self.args:
train_dataset = self.build_dataset(self.data["train"], mode="train", batch=16)
max_num_obj = max(len(label["cls"]) for label in train_dataset.labels) * 4 # 4 for mosaic augmentation
del train_dataset # free memory
return super().auto_batch(max_num_obj) | --- +++ @@ -22,15 +22,72 @@
class DetectionTrainer(BaseTrainer):
+ """A class extending the BaseTrainer class for training based on a detection model.
+
+ This trainer specializes in object detection tasks, handling the specific requirements for training YOLO models for
+ object detection including dataset building, data loading, preprocessing, and model configuration.
+
+ Attributes:
+ model (DetectionModel): The YOLO detection model being trained.
+ data (dict): Dictionary containing dataset information including class names and number of classes.
+ loss_names (tuple): Names of the loss components used in training (box_loss, cls_loss, dfl_loss).
+
+ Methods:
+ build_dataset: Build YOLO dataset for training or validation.
+ get_dataloader: Construct and return dataloader for the specified mode.
+ preprocess_batch: Preprocess a batch of images by scaling and converting to float.
+ set_model_attributes: Set model attributes based on dataset information.
+ get_model: Return a YOLO detection model.
+ get_validator: Return a validator for model evaluation.
+ label_loss_items: Return a loss dictionary with labeled training loss items.
+ progress_string: Return a formatted string of training progress.
+ plot_training_samples: Plot training samples with their annotations.
+ plot_training_labels: Create a labeled training plot of the YOLO model.
+ auto_batch: Calculate optimal batch size based on model memory requirements.
+
+ Examples:
+ >>> from ultralytics.models.yolo.detect import DetectionTrainer
+ >>> args = dict(model="yolo26n.pt", data="coco8.yaml", epochs=3)
+ >>> trainer = DetectionTrainer(overrides=args)
+ >>> trainer.train()
+ """
def __init__(self, cfg=DEFAULT_CFG, overrides: dict[str, Any] | None = None, _callbacks: dict | None = None):
+ """Initialize a DetectionTrainer object for training YOLO object detection models.
+
+ Args:
+ cfg (dict, optional): Default configuration dictionary containing training parameters.
+ overrides (dict, optional): Dictionary of parameter overrides for the default configuration.
+ _callbacks (dict, optional): Dictionary of callback functions to be executed during training.
+ """
super().__init__(cfg, overrides, _callbacks)
def build_dataset(self, img_path: str, mode: str = "train", batch: int | None = None):
+ """Build YOLO Dataset for training or validation.
+
+ Args:
+ img_path (str): Path to the folder containing images.
+ mode (str): 'train' mode or 'val' mode, users are able to customize different augmentations for each mode.
+ batch (int, optional): Size of batches, this is for 'rect' mode.
+
+ Returns:
+ (Dataset): YOLO dataset object configured for the specified mode.
+ """
gs = max(int(unwrap_model(self.model).stride.max()), 32)
return build_yolo_dataset(self.args, img_path, batch, self.data, mode=mode, rect=mode == "val", stride=gs)
def get_dataloader(self, dataset_path: str, batch_size: int = 16, rank: int = 0, mode: str = "train"):
+ """Construct and return dataloader for the specified mode.
+
+ Args:
+ dataset_path (str): Path to the dataset.
+ batch_size (int): Number of images per batch.
+ rank (int): Process rank for distributed training.
+ mode (str): 'train' for training dataloader, 'val' for validation dataloader.
+
+ Returns:
+ (DataLoader): PyTorch dataloader object.
+ """
assert mode in {"train", "val"}, f"Mode must be 'train' or 'val', not {mode}."
with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP
dataset = self.build_dataset(dataset_path, mode, batch_size)
@@ -48,6 +105,14 @@ )
def preprocess_batch(self, batch: dict) -> dict:
+ """Preprocess a batch of images by scaling and converting to float.
+
+ Args:
+ batch (dict): Dictionary containing batch data with 'img' tensor.
+
+ Returns:
+ (dict): Preprocessed batch with normalized images.
+ """
for k, v in batch.items():
if isinstance(v, torch.Tensor):
batch[k] = v.to(self.device, non_blocking=self.device.type == "cuda")
@@ -72,6 +137,7 @@ return batch
def set_model_attributes(self):
+ """Set model attributes based on dataset information."""
# Nl = de_parallel(self.model).model[-1].nl # number of detection layers (to scale hyps)
# self.args.box *= 3 / nl # scale to layers
# self.args.cls *= self.data["nc"] / 80 * 3 / nl # scale to classes and layers
@@ -84,18 +150,38 @@ # TODO: self.model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc
def get_model(self, cfg: str | None = None, weights: str | None = None, verbose: bool = True):
+ """Return a YOLO detection model.
+
+ Args:
+ cfg (str, optional): Path to model configuration file.
+ weights (str, optional): Path to model weights.
+ verbose (bool): Whether to display model information.
+
+ Returns:
+ (DetectionModel): YOLO detection model.
+ """
model = DetectionModel(cfg, nc=self.data["nc"], ch=self.data["channels"], verbose=verbose and RANK == -1)
if weights:
model.load(weights)
return model
def get_validator(self):
+ """Return a DetectionValidator for YOLO model validation."""
self.loss_names = "box_loss", "cls_loss", "dfl_loss"
return yolo.detect.DetectionValidator(
self.test_loader, save_dir=self.save_dir, args=copy(self.args), _callbacks=self.callbacks
)
def label_loss_items(self, loss_items: list[float] | None = None, prefix: str = "train"):
+ """Return a loss dict with labeled training loss items tensor.
+
+ Args:
+ loss_items (list[float], optional): List of loss values.
+ prefix (str): Prefix for keys in the returned dictionary.
+
+ Returns:
+ (dict | list): Dictionary of labeled loss items if loss_items is provided, otherwise list of keys.
+ """
keys = [f"{prefix}/{x}" for x in self.loss_names]
if loss_items is not None:
loss_items = [round(float(x), 5) for x in loss_items] # convert tensors to 5 decimal place floats
@@ -104,6 +190,7 @@ return keys
def progress_string(self):
+ """Return a formatted string of training progress with epoch, GPU memory, loss, instances and size."""
return ("\n" + "%11s" * (4 + len(self.loss_names))) % (
"Epoch",
"GPU_mem",
@@ -113,6 +200,12 @@ )
def plot_training_samples(self, batch: dict[str, Any], ni: int) -> None:
+ """Plot training samples with their annotations.
+
+ Args:
+ batch (dict[str, Any]): Dictionary containing batch data.
+ ni (int): Batch index used for naming the output file.
+ """
plot_images(
labels=batch,
paths=batch["im_file"],
@@ -121,13 +214,19 @@ )
def plot_training_labels(self):
+ """Create a labeled training plot of the YOLO model."""
boxes = np.concatenate([lb["bboxes"] for lb in self.train_loader.dataset.labels], 0)
cls = np.concatenate([lb["cls"] for lb in self.train_loader.dataset.labels], 0)
plot_labels(boxes, cls.squeeze(), names=self.data["names"], save_dir=self.save_dir, on_plot=self.on_plot)
def auto_batch(self):
+ """Get optimal batch size by calculating memory occupation of model.
+
+ Returns:
+ (int): Optimal batch size.
+ """
with override_configs(self.args, overrides={"cache": False}) as self.args:
train_dataset = self.build_dataset(self.data["train"], mode="train", batch=16)
max_num_obj = max(len(label["cls"]) for label in train_dataset.labels) * 4 # 4 for mosaic augmentation
del train_dataset # free memory
- return super().auto_batch(max_num_obj)
+ return super().auto_batch(max_num_obj)
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/yolo/detect/train.py |
Add documentation for all methods | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from copy import copy
from ultralytics.models.yolo.detect import DetectionTrainer
from ultralytics.nn.tasks import RTDETRDetectionModel
from ultralytics.utils import RANK, colorstr
from .val import RTDETRDataset, RTDETRValidator
class RTDETRTrainer(DetectionTrainer):
def get_model(self, cfg: dict | None = None, weights: str | None = None, verbose: bool = True):
model = RTDETRDetectionModel(cfg, nc=self.data["nc"], ch=self.data["channels"], verbose=verbose and RANK == -1)
if weights:
model.load(weights)
return model
def build_dataset(self, img_path: str, mode: str = "val", batch: int | None = None):
return RTDETRDataset(
img_path=img_path,
imgsz=self.args.imgsz,
batch_size=batch,
augment=mode == "train",
hyp=self.args,
rect=False,
cache=self.args.cache or None,
single_cls=self.args.single_cls or False,
prefix=colorstr(f"{mode}: "),
classes=self.args.classes,
data=self.data,
fraction=self.args.fraction if mode == "train" else 1.0,
)
def get_validator(self):
self.loss_names = "giou_loss", "cls_loss", "l1_loss"
return RTDETRValidator(self.test_loader, save_dir=self.save_dir, args=copy(self.args)) | --- +++ @@ -12,14 +12,62 @@
class RTDETRTrainer(DetectionTrainer):
+ """Trainer class for the RT-DETR model developed by Baidu for real-time object detection.
+
+ This class extends the DetectionTrainer class for YOLO to adapt to the specific features and architecture of
+ RT-DETR. The model leverages Vision Transformers and has capabilities like IoU-aware query selection and adaptable
+ inference speed.
+
+ Attributes:
+ loss_names (tuple): Names of the loss components used for training.
+ data (dict): Dataset configuration containing class count and other parameters.
+ args (dict): Training arguments and hyperparameters.
+ save_dir (Path): Directory to save training results.
+ test_loader (DataLoader): DataLoader for validation/testing data.
+
+ Methods:
+ get_model: Initialize and return an RT-DETR model for object detection tasks.
+ build_dataset: Build and return an RT-DETR dataset for training or validation.
+ get_validator: Return a DetectionValidator suitable for RT-DETR model validation.
+
+ Examples:
+ >>> from ultralytics.models.rtdetr.train import RTDETRTrainer
+ >>> args = dict(model="rtdetr-l.yaml", data="coco8.yaml", imgsz=640, epochs=3)
+ >>> trainer = RTDETRTrainer(overrides=args)
+ >>> trainer.train()
+
+ Notes:
+ - F.grid_sample used in RT-DETR does not support the `deterministic=True` argument.
+ - AMP training can lead to NaN outputs and may produce errors during bipartite graph matching.
+ """
def get_model(self, cfg: dict | None = None, weights: str | None = None, verbose: bool = True):
+ """Initialize and return an RT-DETR model for object detection tasks.
+
+ Args:
+ cfg (dict, optional): Model configuration.
+ weights (str, optional): Path to pre-trained model weights.
+ verbose (bool): Verbose logging if True.
+
+ Returns:
+ (RTDETRDetectionModel): Initialized model.
+ """
model = RTDETRDetectionModel(cfg, nc=self.data["nc"], ch=self.data["channels"], verbose=verbose and RANK == -1)
if weights:
model.load(weights)
return model
def build_dataset(self, img_path: str, mode: str = "val", batch: int | None = None):
+ """Build and return an RT-DETR dataset for training or validation.
+
+ Args:
+ img_path (str): Path to the folder containing images.
+ mode (str): Dataset mode, either 'train' or 'val'.
+ batch (int, optional): Batch size for rectangle training.
+
+ Returns:
+ (RTDETRDataset): Dataset object for the specific mode.
+ """
return RTDETRDataset(
img_path=img_path,
imgsz=self.args.imgsz,
@@ -36,5 +84,6 @@ )
def get_validator(self):
+ """Return an RTDETRValidator suitable for RT-DETR model validation."""
self.loss_names = "giou_loss", "cls_loss", "l1_loss"
- return RTDETRValidator(self.test_loader, save_dir=self.save_dir, args=copy(self.args))
+ return RTDETRValidator(self.test_loader, save_dir=self.save_dir, args=copy(self.args))
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/rtdetr/train.py |
Write beginner-friendly docstrings | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from ultralytics.engine.results import Results
from ultralytics.models.yolo.detect.predict import DetectionPredictor
from ultralytics.utils import DEFAULT_CFG, ops
class SegmentationPredictor(DetectionPredictor):
def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks: dict | None = None):
super().__init__(cfg, overrides, _callbacks)
self.args.task = "segment"
def postprocess(self, preds, img, orig_imgs):
# Extract protos - tuple if PyTorch model or array if exported
protos = preds[0][1] if isinstance(preds[0], tuple) else preds[1]
return super().postprocess(preds[0], img, orig_imgs, protos=protos)
def construct_results(self, preds, img, orig_imgs, protos):
return [
self.construct_result(pred, img, orig_img, img_path, proto)
for pred, orig_img, img_path, proto in zip(preds, orig_imgs, self.batch[0], protos)
]
def construct_result(self, pred, img, orig_img, img_path, proto):
if pred.shape[0] == 0: # save empty boxes
masks = None
elif self.args.retina_masks:
pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape)
masks = ops.process_mask_native(proto, pred[:, 6:], pred[:, :4], orig_img.shape[:2]) # NHW
else:
masks = ops.process_mask(proto, pred[:, 6:], pred[:, :4], img.shape[2:], upsample=True) # NHW
pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape)
if masks is not None:
keep = masks.amax((-2, -1)) > 0 # only keep predictions with masks
if not all(keep): # most predictions have masks
pred, masks = pred[keep], masks[keep] # indexing is slow
return Results(orig_img, path=img_path, names=self.model.names, boxes=pred[:, :6], masks=masks) | --- +++ @@ -8,23 +8,94 @@
class SegmentationPredictor(DetectionPredictor):
+ """A class extending the DetectionPredictor class for prediction based on a segmentation model.
+
+ This class specializes in processing segmentation model outputs, handling both bounding boxes and masks in the
+ prediction results.
+
+ Attributes:
+ args (dict): Configuration arguments for the predictor.
+ model (torch.nn.Module): The loaded YOLO segmentation model.
+ batch (list): Current batch of images being processed.
+
+ Methods:
+ postprocess: Apply non-max suppression and process segmentation detections.
+ construct_results: Construct a list of result objects from predictions.
+ construct_result: Construct a single result object from a prediction.
+
+ Examples:
+ >>> from ultralytics.utils import ASSETS
+ >>> from ultralytics.models.yolo.segment import SegmentationPredictor
+ >>> args = dict(model="yolo26n-seg.pt", source=ASSETS)
+ >>> predictor = SegmentationPredictor(overrides=args)
+ >>> predictor.predict_cli()
+ """
def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks: dict | None = None):
+ """Initialize the SegmentationPredictor with configuration, overrides, and callbacks.
+
+ This class specializes in processing segmentation model outputs, handling both bounding boxes and masks in the
+ prediction results.
+
+ Args:
+ cfg (dict): Configuration for the predictor.
+ overrides (dict, optional): Configuration overrides that take precedence over cfg.
+ _callbacks (dict, optional): Dictionary of callback functions to be invoked during prediction.
+ """
super().__init__(cfg, overrides, _callbacks)
self.args.task = "segment"
def postprocess(self, preds, img, orig_imgs):
+ """Apply non-max suppression and process segmentation detections for each image in the input batch.
+
+ Args:
+ preds (tuple): Model predictions, containing bounding boxes, scores, classes, and mask coefficients.
+ img (torch.Tensor): Input image tensor in model format, with shape (B, C, H, W).
+ orig_imgs (list | torch.Tensor | np.ndarray): Original image or batch of images.
+
+ Returns:
+ (list): List of Results objects containing the segmentation predictions for each image in the batch. Each
+ Results object includes both bounding boxes and segmentation masks.
+
+ Examples:
+ >>> predictor = SegmentationPredictor(overrides=dict(model="yolo26n-seg.pt"))
+ >>> results = predictor.postprocess(preds, img, orig_img)
+ """
# Extract protos - tuple if PyTorch model or array if exported
protos = preds[0][1] if isinstance(preds[0], tuple) else preds[1]
return super().postprocess(preds[0], img, orig_imgs, protos=protos)
def construct_results(self, preds, img, orig_imgs, protos):
+ """Construct a list of result objects from the predictions.
+
+ Args:
+ preds (list[torch.Tensor]): List of predicted bounding boxes, scores, and masks.
+ img (torch.Tensor): The image after preprocessing.
+ orig_imgs (list[np.ndarray]): List of original images before preprocessing.
+ protos (torch.Tensor): Prototype masks tensor with shape (B, C, H, W).
+
+ Returns:
+ (list[Results]): List of result objects containing the original images, image paths, class names, bounding
+ boxes, and masks.
+ """
return [
self.construct_result(pred, img, orig_img, img_path, proto)
for pred, orig_img, img_path, proto in zip(preds, orig_imgs, self.batch[0], protos)
]
def construct_result(self, pred, img, orig_img, img_path, proto):
+ """Construct a single result object from the prediction.
+
+ Args:
+ pred (torch.Tensor): The predicted bounding boxes, scores, and masks.
+ img (torch.Tensor): The image after preprocessing.
+ orig_img (np.ndarray): The original image before preprocessing.
+ img_path (str): The path to the original image.
+ proto (torch.Tensor): The prototype masks.
+
+ Returns:
+ (Results): Result object containing the original image, image path, class names, bounding boxes, and masks.
+ """
if pred.shape[0] == 0: # save empty boxes
masks = None
elif self.args.retina_masks:
@@ -37,4 +108,4 @@ keep = masks.amax((-2, -1)) > 0 # only keep predictions with masks
if not all(keep): # most predictions have masks
pred, masks = pred[keep], masks[keep] # indexing is slow
- return Results(orig_img, path=img_path, names=self.model.names, boxes=pred[:, :6], masks=masks)
+ return Results(orig_img, path=img_path, names=self.model.names, boxes=pred[:, :6], masks=masks)
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/yolo/segment/predict.py |
Document all public functions with docstrings | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from ultralytics.engine.predictor import BasePredictor
from ultralytics.engine.results import Results
from ultralytics.utils import nms, ops
class DetectionPredictor(BasePredictor):
def postprocess(self, preds, img, orig_imgs, **kwargs):
save_feats = getattr(self, "_feats", None) is not None
preds = nms.non_max_suppression(
preds,
self.args.conf,
self.args.iou,
self.args.classes,
self.args.agnostic_nms,
max_det=self.args.max_det,
nc=0 if self.args.task == "detect" else len(self.model.names),
end2end=getattr(self.model, "end2end", False),
rotated=self.args.task == "obb",
return_idxs=save_feats,
)
if not isinstance(orig_imgs, list): # input images are a torch.Tensor, not a list
orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)[..., ::-1]
if save_feats:
obj_feats = self.get_obj_feats(self._feats, preds[1])
preds = preds[0]
results = self.construct_results(preds, img, orig_imgs, **kwargs)
if save_feats:
for r, f in zip(results, obj_feats):
r.feats = f # add object features to results
return results
@staticmethod
def get_obj_feats(feat_maps, idxs):
import torch
s = min(x.shape[1] for x in feat_maps) # find shortest vector length
obj_feats = torch.cat(
[x.permute(0, 2, 3, 1).reshape(x.shape[0], -1, s, x.shape[1] // s).mean(dim=-1) for x in feat_maps], dim=1
) # mean reduce all vectors to same length
return [feats[idx] if idx.shape[0] else [] for feats, idx in zip(obj_feats, idxs)] # for each img in batch
def construct_results(self, preds, img, orig_imgs):
return [
self.construct_result(pred, img, orig_img, img_path)
for pred, orig_img, img_path in zip(preds, orig_imgs, self.batch[0])
]
def construct_result(self, pred, img, orig_img, img_path):
pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape)
return Results(orig_img, path=img_path, names=self.model.names, boxes=pred[:, :6]) | --- +++ @@ -6,8 +6,50 @@
class DetectionPredictor(BasePredictor):
+ """A class extending the BasePredictor class for prediction based on a detection model.
+
+ This predictor specializes in object detection tasks, processing model outputs into meaningful detection results
+ with bounding boxes and class predictions.
+
+ Attributes:
+ args (namespace): Configuration arguments for the predictor.
+ model (nn.Module): The detection model used for inference.
+ batch (list): Batch of images and metadata for processing.
+
+ Methods:
+ postprocess: Process raw model predictions into detection results.
+ construct_results: Build Results objects from processed predictions.
+ construct_result: Create a single Result object from a prediction.
+ get_obj_feats: Extract object features from the feature maps.
+
+ Examples:
+ >>> from ultralytics.utils import ASSETS
+ >>> from ultralytics.models.yolo.detect import DetectionPredictor
+ >>> args = dict(model="yolo26n.pt", source=ASSETS)
+ >>> predictor = DetectionPredictor(overrides=args)
+ >>> predictor.predict_cli()
+ """
def postprocess(self, preds, img, orig_imgs, **kwargs):
+ """Post-process predictions and return a list of Results objects.
+
+ This method applies non-maximum suppression to raw model predictions and prepares them for visualization and
+ further analysis.
+
+ Args:
+ preds (torch.Tensor): Raw predictions from the model.
+ img (torch.Tensor): Processed input image tensor in model input format.
+ orig_imgs (torch.Tensor | list): Original input images before preprocessing.
+ **kwargs (Any): Additional keyword arguments.
+
+ Returns:
+ (list): List of Results objects containing the post-processed predictions.
+
+ Examples:
+ >>> predictor = DetectionPredictor(overrides=dict(model="yolo26n.pt"))
+ >>> results = predictor.predict("path/to/image.jpg")
+ >>> processed_results = predictor.postprocess(preds, img, orig_imgs)
+ """
save_feats = getattr(self, "_feats", None) is not None
preds = nms.non_max_suppression(
preds,
@@ -39,6 +81,7 @@
@staticmethod
def get_obj_feats(feat_maps, idxs):
+ """Extract object features from the feature maps."""
import torch
s = min(x.shape[1] for x in feat_maps) # find shortest vector length
@@ -48,11 +91,32 @@ return [feats[idx] if idx.shape[0] else [] for feats, idx in zip(obj_feats, idxs)] # for each img in batch
def construct_results(self, preds, img, orig_imgs):
+ """Construct a list of Results objects from model predictions.
+
+ Args:
+ preds (list[torch.Tensor]): List of predicted bounding boxes and scores for each image.
+ img (torch.Tensor): Batch of preprocessed images used for inference.
+ orig_imgs (list[np.ndarray]): List of original images before preprocessing.
+
+ Returns:
+ (list[Results]): List of Results objects containing detection information for each image.
+ """
return [
self.construct_result(pred, img, orig_img, img_path)
for pred, orig_img, img_path in zip(preds, orig_imgs, self.batch[0])
]
def construct_result(self, pred, img, orig_img, img_path):
+ """Construct a single Results object from one image prediction.
+
+ Args:
+ pred (torch.Tensor): Predicted boxes and scores with shape (N, 6) where N is the number of detections.
+ img (torch.Tensor): Preprocessed image tensor used for inference.
+ orig_img (np.ndarray): Original image before preprocessing.
+ img_path (str): Path to the original image file.
+
+ Returns:
+ (Results): Results object containing the original image, image path, class names, and scaled bounding boxes.
+ """
pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape)
- return Results(orig_img, path=img_path, names=self.model.names, boxes=pred[:, :6])
+ return Results(orig_img, path=img_path, names=self.model.names, boxes=pred[:, :6])
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/yolo/detect/predict.py |
Add well-formatted docstrings | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from copy import copy
from pathlib import Path
from ultralytics.models import yolo
from ultralytics.nn.tasks import SegmentationModel
from ultralytics.utils import DEFAULT_CFG, RANK
class SegmentationTrainer(yolo.detect.DetectionTrainer):
def __init__(self, cfg=DEFAULT_CFG, overrides: dict | None = None, _callbacks: dict | None = None):
if overrides is None:
overrides = {}
overrides["task"] = "segment"
super().__init__(cfg, overrides, _callbacks)
def get_model(self, cfg: dict | str | None = None, weights: str | Path | None = None, verbose: bool = True):
model = SegmentationModel(cfg, nc=self.data["nc"], ch=self.data["channels"], verbose=verbose and RANK == -1)
if weights:
model.load(weights)
return model
def get_validator(self):
self.loss_names = "box_loss", "seg_loss", "cls_loss", "dfl_loss", "sem_loss"
return yolo.segment.SegmentationValidator(
self.test_loader, save_dir=self.save_dir, args=copy(self.args), _callbacks=self.callbacks
) | --- +++ @@ -11,14 +11,50 @@
class SegmentationTrainer(yolo.detect.DetectionTrainer):
+ """A class extending the DetectionTrainer class for training based on a segmentation model.
+
+ This trainer specializes in handling segmentation tasks, extending the detection trainer with segmentation-specific
+ functionality including model initialization, validation, and visualization.
+
+ Attributes:
+ loss_names (tuple[str]): Names of the loss components used during training.
+
+ Examples:
+ >>> from ultralytics.models.yolo.segment import SegmentationTrainer
+ >>> args = dict(model="yolo26n-seg.pt", data="coco8-seg.yaml", epochs=3)
+ >>> trainer = SegmentationTrainer(overrides=args)
+ >>> trainer.train()
+ """
def __init__(self, cfg=DEFAULT_CFG, overrides: dict | None = None, _callbacks: dict | None = None):
+ """Initialize a SegmentationTrainer object.
+
+ Args:
+ cfg (dict): Configuration dictionary with default training settings.
+ overrides (dict, optional): Dictionary of parameter overrides for the default configuration.
+ _callbacks (dict, optional): Dictionary of callback functions to be executed during training.
+ """
if overrides is None:
overrides = {}
overrides["task"] = "segment"
super().__init__(cfg, overrides, _callbacks)
def get_model(self, cfg: dict | str | None = None, weights: str | Path | None = None, verbose: bool = True):
+ """Initialize and return a SegmentationModel with specified configuration and weights.
+
+ Args:
+ cfg (dict | str, optional): Model configuration. Can be a dictionary, a path to a YAML file, or None.
+ weights (str | Path, optional): Path to pretrained weights file.
+ verbose (bool): Whether to display model information during initialization.
+
+ Returns:
+ (SegmentationModel): Initialized segmentation model with loaded weights if specified.
+
+ Examples:
+ >>> trainer = SegmentationTrainer()
+ >>> model = trainer.get_model(cfg="yolo26n-seg.yaml")
+ >>> model = trainer.get_model(weights="yolo26n-seg.pt", verbose=False)
+ """
model = SegmentationModel(cfg, nc=self.data["nc"], ch=self.data["channels"], verbose=verbose and RANK == -1)
if weights:
model.load(weights)
@@ -26,7 +62,8 @@ return model
def get_validator(self):
+ """Return an instance of SegmentationValidator for validation of YOLO model."""
self.loss_names = "box_loss", "seg_loss", "cls_loss", "dfl_loss", "sem_loss"
return yolo.segment.SegmentationValidator(
self.test_loader, save_dir=self.save_dir, args=copy(self.args), _callbacks=self.callbacks
- )
+ )
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/yolo/segment/train.py |
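A short usage sketch mirroring the Examples block added in the response above; it assumes an installed ultralytics package and that the yolo26n-seg.pt weights and coco8-seg.yaml dataset referenced there resolve on this machine (Ultralytics normally downloads both on first use).

from ultralytics.models.yolo.segment import SegmentationTrainer

args = dict(model="yolo26n-seg.pt", data="coco8-seg.yaml", epochs=3)
trainer = SegmentationTrainer(overrides=args)  # task is forced to "segment" in __init__
trainer.train()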
Document helper functions with docstrings | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from ultralytics.data.utils import HUBDatasetStats
from ultralytics.hub.auth import Auth
from ultralytics.hub.session import HUBTrainingSession
from ultralytics.hub.utils import HUB_API_ROOT, HUB_WEB_ROOT, PREFIX
from ultralytics.utils import LOGGER, SETTINGS, checks
__all__ = (
"HUB_WEB_ROOT",
"PREFIX",
"HUBTrainingSession",
"check_dataset",
"export_fmts_hub",
"export_model",
"get_export",
"login",
"logout",
"reset_model",
)
def login(api_key: str | None = None, save: bool = True) -> bool:
checks.check_requirements("hub-sdk>=0.0.12")
from hub_sdk import HUBClient
api_key_url = f"{HUB_WEB_ROOT}/settings?tab=api+keys" # set the redirect URL
saved_key = SETTINGS.get("api_key")
active_key = api_key or saved_key
credentials = {"api_key": active_key} if active_key and active_key != "" else None # set credentials
client = HUBClient(credentials) # initialize HUBClient
if client.authenticated:
# Successfully authenticated with HUB
if save and client.api_key != saved_key:
SETTINGS.update({"api_key": client.api_key}) # update settings with valid API key
# Set message based on whether key was provided or retrieved from settings
log_message = (
"New authentication successful ✅" if client.api_key == api_key or not credentials else "Authenticated ✅"
)
LOGGER.info(f"{PREFIX}{log_message}")
return True
else:
# Failed to authenticate with HUB
LOGGER.info(f"{PREFIX}Get API key from {api_key_url} and then run 'yolo login API_KEY'")
return False
def logout():
SETTINGS["api_key"] = ""
LOGGER.info(f"{PREFIX}logged out ✅. To log in again, use 'yolo login'.")
def reset_model(model_id: str = ""):
import requests # scoped as slow import
r = requests.post(f"{HUB_API_ROOT}/model-reset", json={"modelId": model_id}, headers={"x-api-key": Auth().api_key})
if r.status_code == 200:
LOGGER.info(f"{PREFIX}Model reset successfully")
return
LOGGER.warning(f"{PREFIX}Model reset failure {r.status_code} {r.reason}")
def export_fmts_hub():
from ultralytics.engine.exporter import export_formats
return [*list(export_formats()["Argument"][1:]), "ultralytics_tflite", "ultralytics_coreml"]
def export_model(model_id: str = "", format: str = "torchscript"):
import requests # scoped as slow import
assert format in export_fmts_hub(), f"Unsupported export format '{format}', valid formats are {export_fmts_hub()}"
r = requests.post(
f"{HUB_API_ROOT}/v1/models/{model_id}/export", json={"format": format}, headers={"x-api-key": Auth().api_key}
)
assert r.status_code == 200, f"{PREFIX}{format} export failure {r.status_code} {r.reason}"
LOGGER.info(f"{PREFIX}{format} export started ✅")
def get_export(model_id: str = "", format: str = "torchscript"):
import requests # scoped as slow import
assert format in export_fmts_hub(), f"Unsupported export format '{format}', valid formats are {export_fmts_hub()}"
r = requests.post(
f"{HUB_API_ROOT}/get-export",
json={"apiKey": Auth().api_key, "modelId": model_id, "format": format},
headers={"x-api-key": Auth().api_key},
)
assert r.status_code == 200, f"{PREFIX}{format} get_export failure {r.status_code} {r.reason}"
return r.json()
def check_dataset(path: str, task: str) -> None:
HUBDatasetStats(path=path, task=task).get_json()
LOGGER.info(f"Checks completed correctly ✅. Upload this dataset to {HUB_WEB_ROOT}/datasets/.") | --- +++ @@ -23,6 +23,19 @@
def login(api_key: str | None = None, save: bool = True) -> bool:
+ """Log in to the Ultralytics HUB API using the provided API key.
+
+ The session is not stored; a new session is created when needed using the saved SETTINGS or the HUB_API_KEY
+ environment variable if successfully authenticated.
+
+ Args:
+ api_key (str, optional): API key to use for authentication. If not provided, it will be retrieved from SETTINGS
+ or HUB_API_KEY environment variable.
+ save (bool, optional): Whether to save the API key to SETTINGS if authentication is successful.
+
+ Returns:
+ (bool): True if authentication is successful, False otherwise.
+ """
checks.check_requirements("hub-sdk>=0.0.12")
from hub_sdk import HUBClient
@@ -53,11 +66,13 @@
def logout():
+ """Log out of Ultralytics HUB by removing the API key from the settings file."""
SETTINGS["api_key"] = ""
LOGGER.info(f"{PREFIX}logged out ✅. To log in again, use 'yolo login'.")
def reset_model(model_id: str = ""):
+ """Reset a trained model to an untrained state."""
import requests # scoped as slow import
r = requests.post(f"{HUB_API_ROOT}/model-reset", json={"modelId": model_id}, headers={"x-api-key": Auth().api_key})
@@ -68,12 +83,27 @@
def export_fmts_hub():
+ """Return a list of HUB-supported export formats."""
from ultralytics.engine.exporter import export_formats
return [*list(export_formats()["Argument"][1:]), "ultralytics_tflite", "ultralytics_coreml"]
def export_model(model_id: str = "", format: str = "torchscript"):
+ """Export a model to a specified format for deployment via the Ultralytics HUB API.
+
+ Args:
+ model_id (str): The ID of the model to export. An empty string will use the default model.
+ format (str): The format to export the model to. Must be one of the supported formats returned by
+ export_fmts_hub().
+
+ Raises:
+ AssertionError: If the specified format is not supported or if the export request fails.
+
+ Examples:
+ >>> from ultralytics import hub
+ >>> hub.export_model(model_id="your_model_id", format="torchscript")
+ """
import requests # scoped as slow import
assert format in export_fmts_hub(), f"Unsupported export format '{format}', valid formats are {export_fmts_hub()}"
@@ -85,6 +115,22 @@
def get_export(model_id: str = "", format: str = "torchscript"):
+ """Retrieve an exported model in the specified format from Ultralytics HUB using the model ID.
+
+ Args:
+ model_id (str): The ID of the model to retrieve from Ultralytics HUB.
+ format (str): The export format to retrieve. Must be one of the supported formats returned by export_fmts_hub().
+
+ Returns:
+ (dict): JSON response containing the exported model information.
+
+ Raises:
+ AssertionError: If the specified format is not supported or if the API request fails.
+
+ Examples:
+ >>> from ultralytics import hub
+ >>> result = hub.get_export(model_id="your_model_id", format="torchscript")
+ """
import requests # scoped as slow import
assert format in export_fmts_hub(), f"Unsupported export format '{format}', valid formats are {export_fmts_hub()}"
@@ -98,5 +144,23 @@
def check_dataset(path: str, task: str) -> None:
+ """Check HUB dataset Zip file for errors before upload.
+
+ Args:
+ path (str): Path to data.zip (with data.yaml inside data.zip).
+ task (str): Dataset task. Options are 'detect', 'segment', 'pose', 'classify', 'obb'.
+
+ Examples:
+ >>> from ultralytics.hub import check_dataset
+ >>> check_dataset("path/to/coco8.zip", task="detect") # detect dataset
+ >>> check_dataset("path/to/coco8-seg.zip", task="segment") # segment dataset
+ >>> check_dataset("path/to/coco8-pose.zip", task="pose") # pose dataset
+ >>> check_dataset("path/to/dota8.zip", task="obb") # OBB dataset
+ >>> check_dataset("path/to/imagenet10.zip", task="classify") # classification dataset
+
+ Notes:
+ Download *.zip files from https://github.com/ultralytics/hub/tree/main/example_datasets
+ i.e. https://github.com/ultralytics/hub/raw/main/example_datasets/coco8.zip for coco8.zip.
+ """
HUBDatasetStats(path=path, task=task).get_json()
- LOGGER.info(f"Checks completed correctly ✅. Upload this dataset to {HUB_WEB_ROOT}/datasets/.")
+ LOGGER.info(f"Checks completed correctly ✅. Upload this dataset to {HUB_WEB_ROOT}/datasets/.")
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/hub/__init__.py |
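The HUB helpers documented above compose into a short workflow. The sketch below uses a placeholder API key and model id, and each call contacts the Ultralytics HUB API, so it only completes with a valid account.

from ultralytics import hub

if hub.login("YOUR_API_KEY"):                                               # placeholder key; may also come from SETTINGS
    print(hub.export_fmts_hub())                                            # HUB-supported export formats
    hub.export_model(model_id="your_model_id", format="torchscript")        # start a server-side export
    info = hub.get_export(model_id="your_model_id", format="torchscript")   # fetch the finished export info
    hub.logout()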
Add inline docstrings for readability | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
import torch
import torch.nn as nn
import torchvision
from ultralytics.nn.modules.utils import _get_clones
from ultralytics.utils.ops import xywh2xyxy
def is_right_padded(mask: torch.Tensor):
return (mask.long() == torch.sort(mask.long(), dim=-1)[0]).all()
def concat_padded_sequences(seq1, mask1, seq2, mask2, return_index: bool = False):
seq1_length, batch_size, hidden_size = seq1.shape
seq2_length, batch_size, hidden_size = seq2.shape
assert batch_size == seq1.size(1) == seq2.size(1) == mask1.size(0) == mask2.size(0)
assert hidden_size == seq1.size(2) == seq2.size(2)
assert seq1_length == mask1.size(1)
assert seq2_length == mask2.size(1)
torch._assert(is_right_padded(mask1), "Mask is not right padded")
torch._assert(is_right_padded(mask2), "Mask is not right padded")
actual_seq1_lengths = (~mask1).sum(dim=-1)
actual_seq2_lengths = (~mask2).sum(dim=-1)
final_lengths = actual_seq1_lengths + actual_seq2_lengths
max_length = seq1_length + seq2_length
concatenated_mask = (
torch.arange(max_length, device=seq2.device)[None].repeat(batch_size, 1) >= final_lengths[:, None]
)
# (max_len, batch_size, hidden_size)
concatenated_sequence = torch.zeros((max_length, batch_size, hidden_size), device=seq2.device, dtype=seq2.dtype)
concatenated_sequence[:seq1_length, :, :] = seq1
# At this point, the element of seq1 are in the right place
# We just need to shift the elements of seq2
index = torch.arange(seq2_length, device=seq2.device)[:, None].repeat(1, batch_size)
index = index + actual_seq1_lengths[None]
concatenated_sequence = concatenated_sequence.scatter(0, index[:, :, None].expand(-1, -1, hidden_size), seq2)
if return_index:
return concatenated_sequence, concatenated_mask, index
return concatenated_sequence, concatenated_mask
class Prompt:
def __init__(self, box_embeddings=None, box_mask=None, box_labels=None):
# Check for null prompt
if box_embeddings is None:
self.box_embeddings = None
self.box_labels = None
self.box_mask = None
return
# Get sequence length, batch size, and device
box_seq_len = box_embeddings.shape[0]
bs = box_embeddings.shape[1]
device = box_embeddings.device
# Initialize labels and attention mask if not provided
if box_labels is None:
box_labels = torch.ones(box_seq_len, bs, device=device, dtype=torch.long)
if box_mask is None:
box_mask = torch.zeros(bs, box_seq_len, device=device, dtype=torch.bool)
# Dimension checks
assert list(box_embeddings.shape[:2]) == [box_seq_len, bs], (
f"Wrong dimension for box embeddings. Expected [{box_seq_len}, {bs}, *] got {box_embeddings.shape}"
)
assert box_embeddings.shape[-1] == 4, (
f"Expected box embeddings to have 4 coordinates, got {box_embeddings.shape[-1]}"
)
assert list(box_mask.shape) == [bs, box_seq_len], (
f"Wrong dimension for box mask. Expected [{bs}, {box_seq_len}] got {box_mask.shape}"
)
assert list(box_labels.shape) == [box_seq_len, bs], (
f"Wrong dimension for box labels. Expected [{box_seq_len}, {bs}] got {box_labels.shape}"
)
# Device checks
assert box_embeddings.device == device, (
f"Expected box embeddings to be on device {device}, got {box_embeddings.device}"
)
assert box_mask.device == device, f"Expected box mask to be on device {device}, got {box_mask.device}"
assert box_labels.device == device, f"Expected box labels to be on device {device}, got {box_labels.device}"
self.box_embeddings = box_embeddings
self.box_mask = box_mask
self.box_labels = box_labels
def append_boxes(self, boxes, labels=None, mask=None):
if self.box_embeddings is None:
# First boxes - initialize
self.box_embeddings = boxes
bs = boxes.shape[1]
box_seq_len = boxes.shape[0]
if labels is None:
labels = torch.ones(box_seq_len, bs, device=boxes.device, dtype=torch.long)
if mask is None:
mask = torch.zeros(bs, box_seq_len, device=boxes.device, dtype=torch.bool)
self.box_labels = labels
self.box_mask = mask
return
# Append to existing boxes
bs = self.box_embeddings.shape[1]
assert boxes.shape[1] == bs, f"Batch size mismatch: expected {bs}, got {boxes.shape[1]}"
if labels is None:
labels = torch.ones(boxes.shape[0], bs, device=boxes.device, dtype=torch.long)
if mask is None:
mask = torch.zeros(bs, boxes.shape[0], dtype=torch.bool, device=boxes.device)
assert list(boxes.shape[:2]) == list(labels.shape[:2]), (
f"Shape mismatch between boxes {boxes.shape} and labels {labels.shape}"
)
# Concatenate using the helper function
self.box_labels, _ = concat_padded_sequences(
self.box_labels.unsqueeze(-1), self.box_mask, labels.unsqueeze(-1), mask
)
self.box_labels = self.box_labels.squeeze(-1)
self.box_embeddings, self.box_mask = concat_padded_sequences(self.box_embeddings, self.box_mask, boxes, mask)
class SequenceGeometryEncoder(nn.Module):
def __init__(
self,
encode_boxes_as_points: bool,
boxes_direct_project: bool,
boxes_pool: bool,
boxes_pos_enc: bool,
d_model: int,
pos_enc,
num_layers: int,
layer: nn.Module,
roi_size: int = 7,
add_cls: bool = True,
add_post_encode_proj: bool = True,
use_act_ckpt: bool = False,
):
super().__init__()
self.d_model = d_model
self.pos_enc = pos_enc
self.encode_boxes_as_points = encode_boxes_as_points
self.roi_size = roi_size
# Label embeddings: 2 labels if encoding as boxes (pos/neg)
# 6 labels if encoding as points (regular pos/neg, top-left pos/neg, bottom-right pos/neg)
num_labels = 6 if self.encode_boxes_as_points else 2
self.label_embed = torch.nn.Embedding(num_labels, self.d_model)
# CLS token for pooling
self.cls_embed = None
if add_cls:
self.cls_embed = torch.nn.Embedding(1, self.d_model)
# Point encoding (used when encode_boxes_as_points is True)
if encode_boxes_as_points:
self.points_direct_project = nn.Linear(2, self.d_model)
self.points_pool_project = None
self.points_pos_enc_project = None
else:
# Box encoding modules
assert boxes_direct_project or boxes_pos_enc or boxes_pool, "Error: need at least one way to encode boxes"
self.points_direct_project = None
self.points_pool_project = None
self.points_pos_enc_project = None
self.boxes_direct_project = None
self.boxes_pool_project = None
self.boxes_pos_enc_project = None
if boxes_direct_project:
self.boxes_direct_project = nn.Linear(4, self.d_model)
if boxes_pool:
self.boxes_pool_project = nn.Conv2d(self.d_model, self.d_model, self.roi_size)
if boxes_pos_enc:
self.boxes_pos_enc_project = nn.Linear(self.d_model + 2, self.d_model)
self.final_proj = None
if add_post_encode_proj:
self.final_proj = nn.Linear(self.d_model, self.d_model)
self.norm = nn.LayerNorm(self.d_model)
self.img_pre_norm = nn.Identity()
if self.points_pool_project is not None or self.boxes_pool_project is not None:
self.img_pre_norm = nn.LayerNorm(self.d_model)
self.encode = None
if num_layers > 0:
assert add_cls, "It's currently highly recommended to add a CLS when using a transformer"
self.encode = _get_clones(layer, num_layers)
self.encode_norm = nn.LayerNorm(self.d_model)
self.use_act_ckpt = use_act_ckpt
def _encode_points(self, points, points_mask, points_labels, img_feats):
# Direct projection of coordinates
points_embed = self.points_direct_project(points.to(img_feats.dtype))
# Add label embeddings
type_embed = self.label_embed(points_labels.long())
return type_embed + points_embed, points_mask
def _encode_boxes(self, boxes, boxes_mask, boxes_labels, img_feats: torch.Tensor):
boxes_embed = None
n_boxes, bs = boxes.shape[:2]
if self.boxes_direct_project is not None:
proj = self.boxes_direct_project(boxes.to(img_feats.dtype))
boxes_embed = proj
if self.boxes_pool_project is not None:
H, W = img_feats.shape[-2:]
# Convert boxes to xyxy format and denormalize
boxes_xyxy = xywh2xyxy(boxes.to(img_feats.dtype))
scale = torch.tensor([W, H, W, H], dtype=boxes_xyxy.dtype)
scale = scale.to(device=boxes_xyxy.device, non_blocking=True)
scale = scale.view(1, 1, 4)
boxes_xyxy = boxes_xyxy * scale
# RoI align
sampled = torchvision.ops.roi_align(img_feats, boxes_xyxy.transpose(0, 1).unbind(0), self.roi_size)
assert list(sampled.shape) == [
bs * n_boxes,
self.d_model,
self.roi_size,
self.roi_size,
]
proj = self.boxes_pool_project(sampled)
proj = proj.view(bs, n_boxes, self.d_model).transpose(0, 1)
if boxes_embed is None:
boxes_embed = proj
else:
boxes_embed = boxes_embed + proj
if self.boxes_pos_enc_project is not None:
cx, cy, w, h = boxes.unbind(-1)
enc = self.pos_enc.encode_boxes(cx.flatten(), cy.flatten(), w.flatten(), h.flatten())
enc = enc.view(boxes.shape[0], boxes.shape[1], enc.shape[-1])
proj = self.boxes_pos_enc_project(enc.to(img_feats.dtype))
if boxes_embed is None:
boxes_embed = proj
else:
boxes_embed = boxes_embed + proj
# Add label embeddings
type_embed = self.label_embed(boxes_labels.long())
return type_embed + boxes_embed, boxes_mask
def forward(self, geo_prompt: Prompt, img_feats, img_sizes, img_pos_embeds=None):
boxes = geo_prompt.box_embeddings
boxes_mask = geo_prompt.box_mask
boxes_labels = geo_prompt.box_labels
seq_first_img_feats = img_feats[-1] # [H*W, B, C]
seq_first_img_pos_embeds = (
img_pos_embeds[-1] if img_pos_embeds is not None else torch.zeros_like(seq_first_img_feats)
)
# Prepare image features for pooling if needed
if self.points_pool_project or self.boxes_pool_project:
assert len(img_feats) == len(img_sizes)
cur_img_feat = img_feats[-1]
cur_img_feat = self.img_pre_norm(cur_img_feat)
H, W = img_sizes[-1]
assert cur_img_feat.shape[0] == H * W
N, C = cur_img_feat.shape[-2:]
# Reshape to NxCxHxW
cur_img_feat = cur_img_feat.permute(1, 2, 0)
cur_img_feat = cur_img_feat.view(N, C, H, W)
img_feats = cur_img_feat
if self.encode_boxes_as_points:
# Convert boxes to corner points
assert boxes is not None and boxes.shape[-1] == 4
boxes_xyxy = xywh2xyxy(boxes)
top_left, bottom_right = boxes_xyxy.split(split_size=2, dim=-1)
# Adjust labels for corner points (offset by 2 and 4)
labels_tl = boxes_labels + 2
labels_br = boxes_labels + 4
# Concatenate top-left and bottom-right points
points = torch.cat([top_left, bottom_right], dim=0)
points_labels = torch.cat([labels_tl, labels_br], dim=0)
points_mask = torch.cat([boxes_mask, boxes_mask], dim=1)
final_embeds, final_mask = self._encode_points(
points=points,
points_mask=points_mask,
points_labels=points_labels,
img_feats=img_feats,
)
else:
# Encode boxes directly
final_embeds, final_mask = self._encode_boxes(
boxes=boxes,
boxes_mask=boxes_mask,
boxes_labels=boxes_labels,
img_feats=img_feats,
)
bs = final_embeds.shape[1]
assert final_mask.shape[0] == bs
# Add CLS token if configured
if self.cls_embed is not None:
cls = self.cls_embed.weight.view(1, 1, self.d_model).repeat(1, bs, 1)
cls_mask = torch.zeros(bs, 1, dtype=final_mask.dtype, device=final_mask.device)
final_embeds, final_mask = concat_padded_sequences(final_embeds, final_mask, cls, cls_mask)
# Final projection
if self.final_proj is not None:
final_embeds = self.norm(self.final_proj(final_embeds))
# Transformer encoding layers
if self.encode is not None:
for lay in self.encode:
final_embeds = lay(
tgt=final_embeds,
memory=seq_first_img_feats,
tgt_key_padding_mask=final_mask,
pos=seq_first_img_pos_embeds,
)
final_embeds = self.encode_norm(final_embeds)
return final_embeds, final_mask | --- +++ @@ -11,10 +11,29 @@
def is_right_padded(mask: torch.Tensor):
+ """Given a padding mask (following pytorch convention, 1s for padded values), returns whether the padding is on the
+ right or not.
+ """
return (mask.long() == torch.sort(mask.long(), dim=-1)[0]).all()
def concat_padded_sequences(seq1, mask1, seq2, mask2, return_index: bool = False):
+ """
+ Concatenates two right-padded sequences, such that the resulting sequence
+ is contiguous and also right-padded.
+
+ Following pytorch's convention, tensors are sequence first, and the mask are
+ batch first, with 1s for padded values.
+
+ :param seq1: A tensor of shape (seq1_length, batch_size, hidden_size).
+ :param mask1: A tensor of shape (batch_size, seq1_length).
+ :param seq2: A tensor of shape (seq2_length, batch_size, hidden_size).
+ :param mask2: A tensor of shape (batch_size, seq2_length).
+ :param return_index: If True, also returns the index of the ids of the element of seq2
+ in the concatenated sequence. This can be used to retrieve the elements of seq2
+ :return: A tuple (concatenated_sequence, concatenated_mask) if return_index is False,
+ otherwise (concatenated_sequence, concatenated_mask, index).
+ """
seq1_length, batch_size, hidden_size = seq1.shape
seq2_length, batch_size, hidden_size = seq2.shape
@@ -54,8 +73,21 @@
class Prompt:
+ """Utility class to manipulate geometric prompts.
+
+    We expect the sequences in pytorch convention, that is sequence first, batch second. The dimensions are expected as
+    follows:
+        box_embeddings shape: N_boxes x B x C_box
+        box_mask shape: B x N_boxes. Can be None if nothing is masked out
+        point_embeddings shape: N_points x B x C_point
+        point_mask shape: B x N_points. Can be None if nothing is masked out
+        mask_embeddings shape: N_masks x B x 1 x H_mask x W_mask
+        mask_mask shape: B x N_masks. Can be None if nothing is masked out
+
+    We also store positive/negative labels. These tensors are also stored batch-first. If they are None, we'll assume
+    positive labels everywhere:
+        box_labels: long tensor of shape N_boxes x B
+        point_labels: long tensor of shape N_points x B
+        mask_labels: long tensor of shape N_masks x B
+ """
def __init__(self, box_embeddings=None, box_mask=None, box_labels=None):
+ """Initialize the Prompt object."""
# Check for null prompt
if box_embeddings is None:
@@ -101,6 +133,13 @@ self.box_labels = box_labels
def append_boxes(self, boxes, labels=None, mask=None):
+ """Append box prompts to existing prompts.
+
+ Args:
+ boxes (torch.Tensor): Tensor of shape (N_new_boxes, B, 4) with normalized box coordinates.
+ labels (torch.Tensor | None): Optional tensor of shape (N_new_boxes, B) with positive/negative labels.
+ mask (torch.Tensor | None): Optional tensor of shape (B, N_new_boxes) for attention mask.
+ """
if self.box_embeddings is None:
# First boxes - initialize
self.box_embeddings = boxes
@@ -138,6 +177,19 @@
class SequenceGeometryEncoder(nn.Module):
+ """Encoder for geometric box prompts. Assumes boxes are passed in the "normalized CxCyWH" format.
+
+ Boxes can be encoded with any of the three possibilities:
+ - direct projection: linear projection from coordinate space to d_model
+ - pooling: RoI align features from the backbone
+ - pos encoder: position encoding of the box center
+
+ These three options are mutually compatible and will be summed if multiple are selected.
+
+ As an alternative, boxes can be encoded as two corner points (top-left and bottom-right).
+
+ The encoded sequence can be further processed with a transformer.
+ """
def __init__(
self,
@@ -154,6 +206,7 @@ add_post_encode_proj: bool = True,
use_act_ckpt: bool = False,
):
+ """Initialize the SequenceGeometryEncoder."""
super().__init__()
self.d_model = d_model
@@ -212,6 +265,7 @@ self.use_act_ckpt = use_act_ckpt
def _encode_points(self, points, points_mask, points_labels, img_feats):
+ """Encode points (used when boxes are converted to corner points)."""
# Direct projection of coordinates
points_embed = self.points_direct_project(points.to(img_feats.dtype))
@@ -220,6 +274,7 @@ return type_embed + points_embed, points_mask
def _encode_boxes(self, boxes, boxes_mask, boxes_labels, img_feats: torch.Tensor):
+ """Encode boxes using configured encoding methods."""
boxes_embed = None
n_boxes, bs = boxes.shape[:2]
@@ -269,6 +324,17 @@ return type_embed + boxes_embed, boxes_mask
def forward(self, geo_prompt: Prompt, img_feats, img_sizes, img_pos_embeds=None):
+ """Encode geometric box prompts.
+
+ Args:
+ geo_prompt (Prompt): Prompt object containing box embeddings, masks, and labels.
+ img_feats (list[torch.Tensor]): List of image features from backbone.
+ img_sizes (list[tuple[int, int]]): List of (H, W) tuples for each feature level.
+ img_pos_embeds (list[torch.Tensor] | None): Optional position embeddings for image features.
+
+ Returns:
+ Tuple of (encoded_embeddings, attention_mask)
+ """
boxes = geo_prompt.box_embeddings
boxes_mask = geo_prompt.box_mask
boxes_labels = geo_prompt.box_labels
@@ -346,4 +412,4 @@ )
final_embeds = self.encode_norm(final_embeds)
- return final_embeds, final_mask
+ return final_embeds, final_mask
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/sam/sam3/geometry_encoders.py |
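concat_padded_sequences is self-contained, so its documented contract can be checked with toy tensors. A sketch follows; the import path is inferred from this row's file URL and may differ between releases.

import torch
from ultralytics.models.sam.sam3.geometry_encoders import concat_padded_sequences

B, C = 2, 4
seq1 = torch.randn(3, B, C)                    # (seq1_len, batch, hidden), sequence first
mask1 = torch.tensor([[False, False, True],    # True marks right-padded slots
                      [False, True, True]])
seq2 = torch.randn(2, B, C)
mask2 = torch.tensor([[False, True],
                      [False, False]])

out, out_mask = concat_padded_sequences(seq1, mask1, seq2, mask2)
print(out.shape, out_mask.shape)               # torch.Size([5, 2, 4]) torch.Size([2, 5])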
Improve my code by adding docstrings | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from copy import copy
from pathlib import Path
from typing import Any
from ultralytics.models import yolo
from ultralytics.nn.tasks import PoseModel
from ultralytics.utils import DEFAULT_CFG
from ultralytics.utils.torch_utils import unwrap_model
class PoseTrainer(yolo.detect.DetectionTrainer):
def __init__(self, cfg=DEFAULT_CFG, overrides: dict[str, Any] | None = None, _callbacks: dict | None = None):
if overrides is None:
overrides = {}
overrides["task"] = "pose"
super().__init__(cfg, overrides, _callbacks)
def get_model(
self,
cfg: str | Path | dict[str, Any] | None = None,
weights: str | Path | None = None,
verbose: bool = True,
) -> PoseModel:
model = PoseModel(
cfg, nc=self.data["nc"], ch=self.data["channels"], data_kpt_shape=self.data["kpt_shape"], verbose=verbose
)
if weights:
model.load(weights)
return model
def set_model_attributes(self):
super().set_model_attributes()
self.model.kpt_shape = self.data["kpt_shape"]
kpt_names = self.data.get("kpt_names")
if not kpt_names:
names = list(map(str, range(self.model.kpt_shape[0])))
kpt_names = {i: names for i in range(self.model.nc)}
self.model.kpt_names = kpt_names
def get_validator(self):
self.loss_names = "box_loss", "pose_loss", "kobj_loss", "cls_loss", "dfl_loss"
if getattr(unwrap_model(self.model).model[-1], "flow_model", None) is not None:
self.loss_names += ("rle_loss",)
return yolo.pose.PoseValidator(
self.test_loader, save_dir=self.save_dir, args=copy(self.args), _callbacks=self.callbacks
)
def get_dataset(self) -> dict[str, Any]:
data = super().get_dataset()
if "kpt_shape" not in data:
raise KeyError(f"No `kpt_shape` in the {self.args.data}. See https://docs.ultralytics.com/datasets/pose/")
return data | --- +++ @@ -13,8 +13,43 @@
class PoseTrainer(yolo.detect.DetectionTrainer):
+ """A class extending the DetectionTrainer class for training YOLO pose estimation models.
+
+ This trainer specializes in handling pose estimation tasks, managing model training, validation, and visualization
+ of pose keypoints alongside bounding boxes.
+
+ Attributes:
+ args (dict): Configuration arguments for training.
+ model (PoseModel): The pose estimation model being trained.
+ data (dict): Dataset configuration including keypoint shape information.
+ loss_names (tuple): Names of the loss components used in training.
+
+ Methods:
+ get_model: Retrieve a pose estimation model with specified configuration.
+ set_model_attributes: Set keypoints shape attribute on the model.
+ get_validator: Create a validator instance for model evaluation.
+ plot_training_samples: Visualize training samples with keypoints.
+ get_dataset: Retrieve the dataset and ensure it contains required kpt_shape key.
+
+ Examples:
+ >>> from ultralytics.models.yolo.pose import PoseTrainer
+ >>> args = dict(model="yolo26n-pose.pt", data="coco8-pose.yaml", epochs=3)
+ >>> trainer = PoseTrainer(overrides=args)
+ >>> trainer.train()
+ """
def __init__(self, cfg=DEFAULT_CFG, overrides: dict[str, Any] | None = None, _callbacks: dict | None = None):
+ """Initialize a PoseTrainer object for training YOLO pose estimation models.
+
+ Args:
+ cfg (dict, optional): Default configuration dictionary containing training parameters.
+ overrides (dict, optional): Dictionary of parameter overrides for the default configuration.
+ _callbacks (dict, optional): Dictionary of callback functions to be executed during training.
+
+ Notes:
+ This trainer will automatically set the task to 'pose' regardless of what is provided in overrides.
+ A warning is issued when using Apple MPS device due to known bugs with pose models.
+ """
if overrides is None:
overrides = {}
overrides["task"] = "pose"
@@ -26,6 +61,16 @@ weights: str | Path | None = None,
verbose: bool = True,
) -> PoseModel:
+ """Get pose estimation model with specified configuration and weights.
+
+ Args:
+ cfg (str | Path | dict, optional): Model configuration file path or dictionary.
+ weights (str | Path, optional): Path to the model weights file.
+ verbose (bool): Whether to display model information.
+
+ Returns:
+ (PoseModel): Initialized pose estimation model.
+ """
model = PoseModel(
cfg, nc=self.data["nc"], ch=self.data["channels"], data_kpt_shape=self.data["kpt_shape"], verbose=verbose
)
@@ -35,6 +80,7 @@ return model
def set_model_attributes(self):
+ """Set keypoints shape attribute of PoseModel."""
super().set_model_attributes()
self.model.kpt_shape = self.data["kpt_shape"]
kpt_names = self.data.get("kpt_names")
@@ -44,6 +90,7 @@ self.model.kpt_names = kpt_names
def get_validator(self):
+ """Return an instance of the PoseValidator class for validation."""
self.loss_names = "box_loss", "pose_loss", "kobj_loss", "cls_loss", "dfl_loss"
if getattr(unwrap_model(self.model).model[-1], "flow_model", None) is not None:
self.loss_names += ("rle_loss",)
@@ -52,7 +99,15 @@ )
def get_dataset(self) -> dict[str, Any]:
+ """Retrieve the dataset and ensure it contains the required `kpt_shape` key.
+
+ Returns:
+ (dict): A dictionary containing the training/validation/test dataset and category names.
+
+ Raises:
+ KeyError: If the `kpt_shape` key is not present in the dataset.
+ """
data = super().get_dataset()
if "kpt_shape" not in data:
raise KeyError(f"No `kpt_shape` in the {self.args.data}. See https://docs.ultralytics.com/datasets/pose/")
- return data
+ return data
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/yolo/pose/train.py |
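A usage sketch mirroring the Examples block added in the response above; it assumes an installed ultralytics package and that yolo26n-pose.pt and coco8-pose.yaml resolve on this machine.

from ultralytics.models.yolo.pose import PoseTrainer

args = dict(model="yolo26n-pose.pt", data="coco8-pose.yaml", epochs=3)
trainer = PoseTrainer(overrides=args)  # task is forced to "pose" in __init__
trainer.train()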
Generate consistent docstrings | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from typing import Any
import torch
import torch.nn as nn
import torch.nn.functional as F
from scipy.optimize import linear_sum_assignment
from ultralytics.utils.metrics import bbox_iou
from ultralytics.utils.ops import xywh2xyxy, xyxy2xywh
class HungarianMatcher(nn.Module):
def __init__(
self,
cost_gain: dict[str, float] | None = None,
use_fl: bool = True,
with_mask: bool = False,
num_sample_points: int = 12544,
alpha: float = 0.25,
gamma: float = 2.0,
):
super().__init__()
if cost_gain is None:
cost_gain = {"class": 1, "bbox": 5, "giou": 2, "mask": 1, "dice": 1}
self.cost_gain = cost_gain
self.use_fl = use_fl
self.with_mask = with_mask
self.num_sample_points = num_sample_points
self.alpha = alpha
self.gamma = gamma
def forward(
self,
pred_bboxes: torch.Tensor,
pred_scores: torch.Tensor,
gt_bboxes: torch.Tensor,
gt_cls: torch.Tensor,
gt_groups: list[int],
masks: torch.Tensor | None = None,
gt_mask: list[torch.Tensor] | None = None,
) -> list[tuple[torch.Tensor, torch.Tensor]]:
bs, nq, nc = pred_scores.shape
if sum(gt_groups) == 0:
return [(torch.tensor([], dtype=torch.long), torch.tensor([], dtype=torch.long)) for _ in range(bs)]
# Flatten to compute cost matrices in batch format
pred_scores = pred_scores.detach().view(-1, nc)
pred_scores = F.sigmoid(pred_scores) if self.use_fl else F.softmax(pred_scores, dim=-1)
pred_bboxes = pred_bboxes.detach().view(-1, 4)
# Compute classification cost
pred_scores = pred_scores[:, gt_cls]
if self.use_fl:
neg_cost_class = (1 - self.alpha) * (pred_scores**self.gamma) * (-(1 - pred_scores + 1e-8).log())
pos_cost_class = self.alpha * ((1 - pred_scores) ** self.gamma) * (-(pred_scores + 1e-8).log())
cost_class = pos_cost_class - neg_cost_class
else:
cost_class = -pred_scores
# Compute L1 cost between boxes
cost_bbox = (pred_bboxes.unsqueeze(1) - gt_bboxes.unsqueeze(0)).abs().sum(-1) # (bs*num_queries, num_gt)
# Compute GIoU cost between boxes, (bs*num_queries, num_gt)
cost_giou = 1.0 - bbox_iou(pred_bboxes.unsqueeze(1), gt_bboxes.unsqueeze(0), xywh=True, GIoU=True).squeeze(-1)
# Combine costs into final cost matrix
C = (
self.cost_gain["class"] * cost_class
+ self.cost_gain["bbox"] * cost_bbox
+ self.cost_gain["giou"] * cost_giou
)
# Add mask costs if available
if self.with_mask:
C += self._cost_mask(bs, gt_groups, masks, gt_mask)
# Set invalid values (NaNs and infinities) to 0
C[C.isnan() | C.isinf()] = 0.0
C = C.view(bs, nq, -1).cpu()
indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(gt_groups, -1))]
gt_groups = torch.as_tensor([0, *gt_groups[:-1]]).cumsum_(0) # (idx for queries, idx for gt)
return [
(torch.tensor(i, dtype=torch.long), torch.tensor(j, dtype=torch.long) + gt_groups[k])
for k, (i, j) in enumerate(indices)
]
# This function is for future RT-DETR Segment models
# def _cost_mask(self, bs, num_gts, masks=None, gt_mask=None):
# assert masks is not None and gt_mask is not None, 'Make sure the input has `mask` and `gt_mask`'
# # all masks share the same set of points for efficient matching
# sample_points = torch.rand([bs, 1, self.num_sample_points, 2])
# sample_points = 2.0 * sample_points - 1.0
#
# out_mask = F.grid_sample(masks.detach(), sample_points, align_corners=False).squeeze(-2)
# out_mask = out_mask.flatten(0, 1)
#
# tgt_mask = torch.cat(gt_mask).unsqueeze(1)
# sample_points = torch.cat([a.repeat(b, 1, 1, 1) for a, b in zip(sample_points, num_gts) if b > 0])
# tgt_mask = F.grid_sample(tgt_mask, sample_points, align_corners=False).squeeze([1, 2])
#
# with torch.amp.autocast("cuda", enabled=False):
# # binary cross entropy cost
# pos_cost_mask = F.binary_cross_entropy_with_logits(out_mask, torch.ones_like(out_mask), reduction='none')
# neg_cost_mask = F.binary_cross_entropy_with_logits(out_mask, torch.zeros_like(out_mask), reduction='none')
# cost_mask = torch.matmul(pos_cost_mask, tgt_mask.T) + torch.matmul(neg_cost_mask, 1 - tgt_mask.T)
# cost_mask /= self.num_sample_points
#
# # dice cost
# out_mask = F.sigmoid(out_mask)
# numerator = 2 * torch.matmul(out_mask, tgt_mask.T)
# denominator = out_mask.sum(-1, keepdim=True) + tgt_mask.sum(-1).unsqueeze(0)
# cost_dice = 1 - (numerator + 1) / (denominator + 1)
#
# C = self.cost_gain['mask'] * cost_mask + self.cost_gain['dice'] * cost_dice
# return C
def get_cdn_group(
batch: dict[str, Any],
num_classes: int,
num_queries: int,
class_embed: torch.Tensor,
num_dn: int = 100,
cls_noise_ratio: float = 0.5,
box_noise_scale: float = 1.0,
training: bool = False,
) -> tuple[torch.Tensor | None, torch.Tensor | None, torch.Tensor | None, dict[str, Any] | None]:
if (not training) or num_dn <= 0 or batch is None:
return None, None, None, None
gt_groups = batch["gt_groups"]
total_num = sum(gt_groups)
max_nums = max(gt_groups)
if max_nums == 0:
return None, None, None, None
num_group = num_dn // max_nums
num_group = 1 if num_group == 0 else num_group
# Pad gt to max_num of a batch
bs = len(gt_groups)
gt_cls = batch["cls"] # (bs*num, )
gt_bbox = batch["bboxes"] # bs*num, 4
b_idx = batch["batch_idx"]
# Each group has positive and negative queries
dn_cls = gt_cls.repeat(2 * num_group) # (2*num_group*bs*num, )
dn_bbox = gt_bbox.repeat(2 * num_group, 1) # 2*num_group*bs*num, 4
dn_b_idx = b_idx.repeat(2 * num_group).view(-1) # (2*num_group*bs*num, )
# Positive and negative mask
# (bs*num*num_group, ), the second total_num*num_group part as negative samples
neg_idx = torch.arange(total_num * num_group, dtype=torch.long, device=gt_bbox.device) + num_group * total_num
if cls_noise_ratio > 0:
# Apply class label noise to half of the samples
mask = torch.rand(dn_cls.shape) < (cls_noise_ratio * 0.5)
idx = torch.nonzero(mask).squeeze(-1)
# Randomly assign new class labels
new_label = torch.randint_like(idx, 0, num_classes, dtype=dn_cls.dtype, device=dn_cls.device)
dn_cls[idx] = new_label
if box_noise_scale > 0:
known_bbox = xywh2xyxy(dn_bbox)
diff = (dn_bbox[..., 2:] * 0.5).repeat(1, 2) * box_noise_scale # 2*num_group*bs*num, 4
rand_sign = torch.randint_like(dn_bbox, 0, 2) * 2.0 - 1.0
rand_part = torch.rand_like(dn_bbox)
rand_part[neg_idx] += 1.0
rand_part *= rand_sign
known_bbox += rand_part * diff
known_bbox.clip_(min=0.0, max=1.0)
dn_bbox = xyxy2xywh(known_bbox)
dn_bbox = torch.logit(dn_bbox, eps=1e-6) # inverse sigmoid
num_dn = int(max_nums * 2 * num_group) # total denoising queries
dn_cls_embed = class_embed[dn_cls] # bs*num * 2 * num_group, 256
padding_cls = torch.zeros(bs, num_dn, dn_cls_embed.shape[-1], device=gt_cls.device)
padding_bbox = torch.zeros(bs, num_dn, 4, device=gt_bbox.device)
map_indices = torch.cat([torch.tensor(range(num), dtype=torch.long) for num in gt_groups])
pos_idx = torch.stack([map_indices + max_nums * i for i in range(num_group)], dim=0)
map_indices = torch.cat([map_indices + max_nums * i for i in range(2 * num_group)])
padding_cls[(dn_b_idx, map_indices)] = dn_cls_embed
padding_bbox[(dn_b_idx, map_indices)] = dn_bbox
tgt_size = num_dn + num_queries
attn_mask = torch.zeros([tgt_size, tgt_size], dtype=torch.bool)
# Match query cannot see the reconstruct
attn_mask[num_dn:, :num_dn] = True
# Reconstruct cannot see each other
for i in range(num_group):
if i == 0:
attn_mask[max_nums * 2 * i : max_nums * 2 * (i + 1), max_nums * 2 * (i + 1) : num_dn] = True
if i == num_group - 1:
attn_mask[max_nums * 2 * i : max_nums * 2 * (i + 1), : max_nums * i * 2] = True
else:
attn_mask[max_nums * 2 * i : max_nums * 2 * (i + 1), max_nums * 2 * (i + 1) : num_dn] = True
attn_mask[max_nums * 2 * i : max_nums * 2 * (i + 1), : max_nums * 2 * i] = True
dn_meta = {
"dn_pos_idx": [p.reshape(-1) for p in pos_idx.cpu().split(list(gt_groups), dim=1)],
"dn_num_group": num_group,
"dn_num_split": [num_dn, num_queries],
}
return (
padding_cls.to(class_embed.device),
padding_bbox.to(class_embed.device),
attn_mask.to(class_embed.device),
dn_meta,
) | --- +++ @@ -14,6 +14,37 @@
class HungarianMatcher(nn.Module):
+ """A module implementing the HungarianMatcher for optimal assignment between predictions and ground truth.
+
+ HungarianMatcher performs optimal bipartite assignment over predicted and ground truth bounding boxes using a cost
+ function that considers classification scores, bounding box coordinates, and optionally mask predictions. This is
+ used in end-to-end object detection models like DETR.
+
+ Attributes:
+ cost_gain (dict[str, float]): Dictionary of cost coefficients for 'class', 'bbox', 'giou', 'mask', and 'dice'
+ components.
+ use_fl (bool): Whether to use Focal Loss for classification cost calculation.
+ with_mask (bool): Whether the model makes mask predictions.
+ num_sample_points (int): Number of sample points used in mask cost calculation.
+ alpha (float): Alpha factor in Focal Loss calculation.
+ gamma (float): Gamma factor in Focal Loss calculation.
+
+ Methods:
+ forward: Compute optimal assignment between predictions and ground truths for a batch.
+ _cost_mask: Compute mask cost and dice cost if masks are predicted.
+
+ Examples:
+ Initialize a HungarianMatcher with custom cost gains
+ >>> matcher = HungarianMatcher(cost_gain={"class": 2, "bbox": 5, "giou": 2})
+
+ Perform matching between predictions and ground truth
+ >>> pred_boxes = torch.rand(2, 100, 4) # batch_size=2, num_queries=100
+ >>> pred_scores = torch.rand(2, 100, 80) # 80 classes
+ >>> gt_boxes = torch.rand(10, 4) # 10 ground truth boxes
+ >>> gt_classes = torch.randint(0, 80, (10,))
+ >>> gt_groups = [5, 5] # 5 GT boxes per image
+ >>> indices = matcher(pred_boxes, pred_scores, gt_boxes, gt_classes, gt_groups)
+ """
def __init__(
self,
@@ -24,6 +55,17 @@ alpha: float = 0.25,
gamma: float = 2.0,
):
+ """Initialize HungarianMatcher for optimal assignment of predicted and ground truth bounding boxes.
+
+ Args:
+ cost_gain (dict[str, float], optional): Dictionary of cost coefficients for different matching cost
+ components. Should contain keys 'class', 'bbox', 'giou', 'mask', and 'dice'.
+ use_fl (bool): Whether to use Focal Loss for classification cost calculation.
+ with_mask (bool): Whether the model makes mask predictions.
+ num_sample_points (int): Number of sample points used in mask cost calculation.
+ alpha (float): Alpha factor in Focal Loss calculation.
+ gamma (float): Gamma factor in Focal Loss calculation.
+ """
super().__init__()
if cost_gain is None:
cost_gain = {"class": 1, "bbox": 5, "giou": 2, "mask": 1, "dice": 1}
@@ -44,6 +86,27 @@ masks: torch.Tensor | None = None,
gt_mask: list[torch.Tensor] | None = None,
) -> list[tuple[torch.Tensor, torch.Tensor]]:
+ """Compute optimal assignment between predictions and ground truth using Hungarian algorithm.
+
+ This method calculates matching costs based on classification scores, bounding box coordinates, and optionally
+ mask predictions, then finds the optimal bipartite assignment between predictions and ground truth.
+
+ Args:
+ pred_bboxes (torch.Tensor): Predicted bounding boxes with shape (batch_size, num_queries, 4).
+ pred_scores (torch.Tensor): Predicted classification scores with shape (batch_size, num_queries,
+ num_classes).
+ gt_bboxes (torch.Tensor): Ground truth bounding boxes with shape (num_gts, 4).
+ gt_cls (torch.Tensor): Ground truth class labels with shape (num_gts,).
+ gt_groups (list[int]): Number of ground truth boxes for each image in the batch.
+ masks (torch.Tensor, optional): Predicted masks with shape (batch_size, num_queries, height, width).
+ gt_mask (list[torch.Tensor], optional): Ground truth masks, each with shape (num_masks, Height, Width).
+
+ Returns:
+ (list[tuple[torch.Tensor, torch.Tensor]]): A list of size batch_size, each element is a tuple (index_i,
+ index_j), where index_i is the tensor of indices of the selected predictions (in order) and index_j is
+ the tensor of indices of the corresponding selected ground truth targets (in order).
+ For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes).
+ """
bs, nq, nc = pred_scores.shape
if sum(gt_groups) == 0:
@@ -132,6 +195,40 @@ box_noise_scale: float = 1.0,
training: bool = False,
) -> tuple[torch.Tensor | None, torch.Tensor | None, torch.Tensor | None, dict[str, Any] | None]:
+ """Generate contrastive denoising training group with positive and negative samples from ground truths.
+
+ This function creates denoising queries for contrastive denoising training by adding noise to ground truth bounding
+ boxes and class labels. It generates both positive and negative samples to improve model robustness.
+
+ Args:
+ batch (dict[str, Any]): Batch dictionary containing 'cls' (torch.Tensor with shape (num_gts,)), 'bboxes'
+ (torch.Tensor with shape (num_gts, 4)), 'batch_idx' (torch.Tensor), and 'gt_groups' (list[int]) indicating
+ number of ground truths per image.
+ num_classes (int): Total number of object classes.
+ num_queries (int): Number of object queries.
+ class_embed (torch.Tensor): Class embedding weights to map labels to embedding space.
+ num_dn (int): Number of denoising queries to generate.
+ cls_noise_ratio (float): Noise ratio for class labels.
+ box_noise_scale (float): Noise scale for bounding box coordinates.
+ training (bool): Whether model is in training mode.
+
+ Returns:
+ padding_cls (torch.Tensor | None): Modified class embeddings for denoising with shape (bs, num_dn, embed_dim).
+ padding_bbox (torch.Tensor | None): Modified bounding boxes for denoising with shape (bs, num_dn, 4).
+ attn_mask (torch.Tensor | None): Attention mask for denoising with shape (tgt_size, tgt_size).
+ dn_meta (dict[str, Any] | None): Meta information dictionary containing denoising parameters.
+
+ Examples:
+ Generate denoising group for training
+ >>> batch = {
+ ... "cls": torch.tensor([0, 1, 2]),
+ ... "bboxes": torch.rand(3, 4),
+ ... "batch_idx": torch.tensor([0, 0, 1]),
+ ... "gt_groups": [2, 1],
+ ... }
+ >>> class_embed = torch.rand(80, 256) # 80 classes, 256 embedding dim
+ >>> cdn_outputs = get_cdn_group(batch, 80, 100, class_embed, training=True)
+ """
if (not training) or num_dn <= 0 or batch is None:
return None, None, None, None
gt_groups = batch["gt_groups"]
@@ -215,4 +312,4 @@ padding_bbox.to(class_embed.device),
attn_mask.to(class_embed.device),
dn_meta,
- )
+ )
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/utils/ops.py |
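A matching sketch assembled from the Examples in the docstrings above; every tensor is a random placeholder and the import path is the one implied by this row's file URL.

import torch
from ultralytics.models.utils.ops import HungarianMatcher

matcher = HungarianMatcher(cost_gain={"class": 2, "bbox": 5, "giou": 2})
pred_boxes = torch.rand(2, 100, 4)             # (batch, queries, xywh) in normalized coordinates
pred_scores = torch.rand(2, 100, 80)           # (batch, queries, classes) raw logits
gt_boxes = torch.rand(10, 4)
gt_classes = torch.randint(0, 80, (10,))
gt_groups = [5, 5]                             # ground-truth boxes per image

indices = matcher(pred_boxes, pred_scores, gt_boxes, gt_classes, gt_groups)
print([(i.tolist(), j.tolist()) for i, j in indices])  # matched (prediction, target) index pairs per image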
Help me document legacy Python code | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from pathlib import Path
from typing import Any
import torch
from ultralytics.data.build import load_inference_source
from ultralytics.engine.model import Model
from ultralytics.models import yolo
from ultralytics.nn.tasks import (
ClassificationModel,
DetectionModel,
OBBModel,
PoseModel,
SegmentationModel,
WorldModel,
YOLOEModel,
YOLOESegModel,
)
from ultralytics.utils import ROOT, YAML
class YOLO(Model):
def __init__(self, model: str | Path = "yolo26n.pt", task: str | None = None, verbose: bool = False):
path = Path(model if isinstance(model, (str, Path)) else "")
if "-world" in path.stem and path.suffix in {".pt", ".yaml", ".yml"}: # if YOLOWorld PyTorch model
new_instance = YOLOWorld(path, verbose=verbose)
self.__class__ = type(new_instance)
self.__dict__ = new_instance.__dict__
elif "yoloe" in path.stem and path.suffix in {".pt", ".yaml", ".yml"}: # if YOLOE PyTorch model
new_instance = YOLOE(path, task=task, verbose=verbose)
self.__class__ = type(new_instance)
self.__dict__ = new_instance.__dict__
else:
# Continue with default YOLO initialization
super().__init__(model=model, task=task, verbose=verbose)
if hasattr(self.model, "model") and "RTDETR" in self.model.model[-1]._get_name(): # if RTDETR head
from ultralytics import RTDETR
new_instance = RTDETR(self)
self.__class__ = type(new_instance)
self.__dict__ = new_instance.__dict__
@property
def task_map(self) -> dict[str, dict[str, Any]]:
return {
"classify": {
"model": ClassificationModel,
"trainer": yolo.classify.ClassificationTrainer,
"validator": yolo.classify.ClassificationValidator,
"predictor": yolo.classify.ClassificationPredictor,
},
"detect": {
"model": DetectionModel,
"trainer": yolo.detect.DetectionTrainer,
"validator": yolo.detect.DetectionValidator,
"predictor": yolo.detect.DetectionPredictor,
},
"segment": {
"model": SegmentationModel,
"trainer": yolo.segment.SegmentationTrainer,
"validator": yolo.segment.SegmentationValidator,
"predictor": yolo.segment.SegmentationPredictor,
},
"pose": {
"model": PoseModel,
"trainer": yolo.pose.PoseTrainer,
"validator": yolo.pose.PoseValidator,
"predictor": yolo.pose.PosePredictor,
},
"obb": {
"model": OBBModel,
"trainer": yolo.obb.OBBTrainer,
"validator": yolo.obb.OBBValidator,
"predictor": yolo.obb.OBBPredictor,
},
}
class YOLOWorld(Model):
def __init__(self, model: str | Path = "yolov8s-world.pt", verbose: bool = False) -> None:
super().__init__(model=model, task="detect", verbose=verbose)
# Assign default COCO class names when there are no custom names
if not hasattr(self.model, "names"):
self.model.names = YAML.load(ROOT / "cfg/datasets/coco8.yaml").get("names")
@property
def task_map(self) -> dict[str, dict[str, Any]]:
return {
"detect": {
"model": WorldModel,
"validator": yolo.detect.DetectionValidator,
"predictor": yolo.detect.DetectionPredictor,
"trainer": yolo.world.WorldTrainer,
}
}
def set_classes(self, classes: list[str]) -> None:
self.model.set_classes(classes)
# Remove background if it's given
background = " "
if background in classes:
classes.remove(background)
self.model.names = classes
# Reset method class names
if self.predictor:
self.predictor.model.names = classes
class YOLOE(Model):
def __init__(self, model: str | Path = "yoloe-11s-seg.pt", task: str | None = None, verbose: bool = False) -> None:
super().__init__(model=model, task=task, verbose=verbose)
@property
def task_map(self) -> dict[str, dict[str, Any]]:
return {
"detect": {
"model": YOLOEModel,
"validator": yolo.yoloe.YOLOEDetectValidator,
"predictor": yolo.detect.DetectionPredictor,
"trainer": yolo.yoloe.YOLOETrainer,
},
"segment": {
"model": YOLOESegModel,
"validator": yolo.yoloe.YOLOESegValidator,
"predictor": yolo.segment.SegmentationPredictor,
"trainer": yolo.yoloe.YOLOESegTrainer,
},
}
def get_text_pe(self, texts):
assert isinstance(self.model, YOLOEModel)
return self.model.get_text_pe(texts)
def get_visual_pe(self, img, visual):
assert isinstance(self.model, YOLOEModel)
return self.model.get_visual_pe(img, visual)
def set_vocab(self, vocab: list[str], names: list[str]) -> None:
assert isinstance(self.model, YOLOEModel)
self.model.set_vocab(vocab, names=names)
def get_vocab(self, names):
assert isinstance(self.model, YOLOEModel)
return self.model.get_vocab(names)
def set_classes(self, classes: list[str], embeddings: torch.Tensor | None = None) -> None:
# Verify no background class is present
assert " " not in classes
assert isinstance(self.model, YOLOEModel)
if sorted(list(self.model.names.values())) != sorted(classes):
if embeddings is None:
embeddings = self.get_text_pe(classes) # generate text embeddings if not provided
self.model.set_classes(classes, embeddings)
# Reset method class names
if self.predictor:
self.predictor.model.names = self.model.names
def val(
self,
validator=None,
load_vp: bool = False,
refer_data: str | None = None,
**kwargs,
):
custom = {"rect": not load_vp} # method defaults
args = {**self.overrides, **custom, **kwargs, "mode": "val"} # highest priority args on the right
validator = (validator or self._smart_load("validator"))(args=args, _callbacks=self.callbacks)
validator(model=self.model, load_vp=load_vp, refer_data=refer_data)
self.metrics = validator.metrics
return validator.metrics
def predict(
self,
source=None,
stream: bool = False,
visual_prompts: dict[str, list] = {},
refer_image=None,
predictor=yolo.yoloe.YOLOEVPDetectPredictor,
**kwargs,
):
if len(visual_prompts):
assert "bboxes" in visual_prompts and "cls" in visual_prompts, (
f"Expected 'bboxes' and 'cls' in visual prompts, but got {visual_prompts.keys()}"
)
assert len(visual_prompts["bboxes"]) == len(visual_prompts["cls"]), (
f"Expected equal number of bounding boxes and classes, but got {len(visual_prompts['bboxes'])} and "
f"{len(visual_prompts['cls'])} respectively"
)
if type(self.predictor) is not predictor:
self.predictor = predictor(
overrides={
"task": self.model.task,
"mode": "predict",
"save": False,
"verbose": refer_image is None,
"batch": 1,
"device": kwargs.get("device", None),
"half": kwargs.get("half", False),
"imgsz": kwargs.get("imgsz", self.overrides.get("imgsz", 640)),
},
_callbacks=self.callbacks,
)
num_cls = (
max(len(set(c)) for c in visual_prompts["cls"])
if isinstance(source, list) and refer_image is None # means multiple images
else len(set(visual_prompts["cls"]))
)
self.model.model[-1].nc = num_cls
self.model.names = [f"object{i}" for i in range(num_cls)]
self.predictor.set_prompts(visual_prompts.copy())
self.predictor.setup_model(model=self.model)
if refer_image is None and source is not None:
dataset = load_inference_source(source)
if dataset.mode in {"video", "stream"}:
# NOTE: set the first frame as refer image for videos/streams inference
refer_image = next(iter(dataset))[1][0]
if refer_image is not None:
vpe = self.predictor.get_vpe(refer_image)
self.model.set_classes(self.model.names, vpe)
self.task = "segment" if isinstance(self.predictor, yolo.segment.SegmentationPredictor) else "detect"
self.predictor = None # reset predictor
elif isinstance(self.predictor, yolo.yoloe.YOLOEVPDetectPredictor):
self.predictor = None # reset predictor if no visual prompts
self.overrides["agnostic_nms"] = True # use agnostic nms for YOLOE default
return super().predict(source, stream, **kwargs) | --- +++ @@ -24,8 +24,44 @@
class YOLO(Model):
+ """YOLO (You Only Look Once) object detection model.
+
+ This class provides a unified interface for YOLO models, automatically switching to specialized model types
+ (YOLOWorld or YOLOE) based on the model filename. It supports various computer vision tasks including object
+ detection, segmentation, classification, pose estimation, and oriented bounding box detection.
+
+ Attributes:
+ model: The loaded YOLO model instance.
+ task: The task type (detect, segment, classify, pose, obb).
+ overrides: Configuration overrides for the model.
+
+ Methods:
+ __init__: Initialize a YOLO model with automatic type detection.
+ task_map: Map tasks to their corresponding model, trainer, validator, and predictor classes.
+
+ Examples:
+ Load a pretrained YOLO26n detection model
+ >>> model = YOLO("yolo26n.pt")
+
+ Load a pretrained YOLO26n segmentation model
+ >>> model = YOLO("yolo26n-seg.pt")
+
+ Initialize from a YAML configuration
+ >>> model = YOLO("yolo26n.yaml")
+ """
def __init__(self, model: str | Path = "yolo26n.pt", task: str | None = None, verbose: bool = False):
+ """Initialize a YOLO model.
+
+ This constructor initializes a YOLO model, automatically switching to specialized model types (YOLOWorld or
+ YOLOE) based on the model filename.
+
+ Args:
+ model (str | Path): Model name or path to model file, i.e. 'yolo26n.pt', 'yolo26n.yaml'.
+ task (str, optional): YOLO task specification, i.e. 'detect', 'segment', 'classify', 'pose', 'obb'. Defaults
+ to auto-detection based on model.
+ verbose (bool): Display model info on load.
+ """
path = Path(model if isinstance(model, (str, Path)) else "")
if "-world" in path.stem and path.suffix in {".pt", ".yaml", ".yml"}: # if YOLOWorld PyTorch model
new_instance = YOLOWorld(path, verbose=verbose)
@@ -47,6 +83,7 @@
@property
def task_map(self) -> dict[str, dict[str, Any]]:
+ """Map head to model, trainer, validator, and predictor classes."""
return {
"classify": {
"model": ClassificationModel,
@@ -82,8 +119,40 @@
class YOLOWorld(Model):
+ """YOLO-World object detection model.
+
+ YOLO-World is an open-vocabulary object detection model that can detect objects based on text descriptions without
+ requiring training on specific classes. It extends the YOLO architecture to support real-time open-vocabulary
+ detection.
+
+ Attributes:
+ model: The loaded YOLO-World model instance.
+ task: Always set to 'detect' for object detection.
+ overrides: Configuration overrides for the model.
+
+ Methods:
+ __init__: Initialize YOLOv8-World model with a pre-trained model file.
+ task_map: Map tasks to their corresponding model, trainer, validator, and predictor classes.
+ set_classes: Set the model's class names for detection.
+
+ Examples:
+ Load a YOLOv8-World model
+ >>> model = YOLOWorld("yolov8s-world.pt")
+
+ Set custom classes for detection
+ >>> model.set_classes(["person", "car", "bicycle"])
+ """
def __init__(self, model: str | Path = "yolov8s-world.pt", verbose: bool = False) -> None:
+ """Initialize YOLOv8-World model with a pre-trained model file.
+
+ Loads a YOLOv8-World model for object detection. If no custom class names are provided, it assigns default COCO
+ class names.
+
+ Args:
+ model (str | Path): Path to the pre-trained model file. Supports *.pt and *.yaml formats.
+ verbose (bool): If True, prints additional information during initialization.
+ """
super().__init__(model=model, task="detect", verbose=verbose)
# Assign default COCO class names when there are no custom names
@@ -92,6 +161,7 @@
@property
def task_map(self) -> dict[str, dict[str, Any]]:
+ """Map head to model, trainer, validator, and predictor classes."""
return {
"detect": {
"model": WorldModel,
@@ -102,6 +172,11 @@ }
def set_classes(self, classes: list[str]) -> None:
+ """Set the model's class names for detection.
+
+ Args:
+ classes (list[str]): A list of categories i.e. ["person"].
+ """
self.model.set_classes(classes)
# Remove background if it's given
background = " "
@@ -115,12 +190,52 @@
class YOLOE(Model):
+ """YOLOE object detection and segmentation model.
+
+ YOLOE is an enhanced YOLO model that supports both object detection and instance segmentation tasks with improved
+ performance and additional features like visual and text positional embeddings.
+
+ Attributes:
+ model: The loaded YOLOE model instance.
+ task: The task type (detect or segment).
+ overrides: Configuration overrides for the model.
+
+ Methods:
+ __init__: Initialize YOLOE model with a pre-trained model file.
+ task_map: Map tasks to their corresponding model, trainer, validator, and predictor classes.
+ get_text_pe: Get text positional embeddings for the given texts.
+ get_visual_pe: Get visual positional embeddings for the given image and visual features.
+ set_vocab: Set vocabulary and class names for the YOLOE model.
+ get_vocab: Get vocabulary for the given class names.
+ set_classes: Set the model's class names and embeddings for detection.
+ val: Validate the model using text or visual prompts.
+ predict: Run prediction on images, videos, directories, streams, etc.
+
+ Examples:
+ Load a YOLOE segmentation model
+ >>> model = YOLOE("yoloe-11s-seg.pt")
+
+ Set vocabulary and class names
+ >>> model.set_vocab(["person", "car", "dog"], ["person", "car", "dog"])
+
+ Predict with visual prompts
+ >>> prompts = {"bboxes": [[10, 20, 100, 200]], "cls": ["person"]}
+ >>> results = model.predict("image.jpg", visual_prompts=prompts)
+ """
def __init__(self, model: str | Path = "yoloe-11s-seg.pt", task: str | None = None, verbose: bool = False) -> None:
+ """Initialize YOLOE model with a pre-trained model file.
+
+ Args:
+ model (str | Path): Path to the pre-trained model file. Supports *.pt and *.yaml formats.
+ task (str, optional): Task type for the model. Auto-detected if None.
+ verbose (bool): If True, prints additional information during initialization.
+ """
super().__init__(model=model, task=task, verbose=verbose)
@property
def task_map(self) -> dict[str, dict[str, Any]]:
+ """Map head to model, trainer, validator, and predictor classes."""
return {
"detect": {
"model": YOLOEModel,
@@ -137,22 +252,64 @@ }
def get_text_pe(self, texts):
+ """Get text positional embeddings for the given texts."""
assert isinstance(self.model, YOLOEModel)
return self.model.get_text_pe(texts)
def get_visual_pe(self, img, visual):
+ """Get visual positional embeddings for the given image and visual features.
+
+ This method extracts positional embeddings from visual features based on the input image. It requires that the
+ model is an instance of YOLOEModel.
+
+ Args:
+ img (torch.Tensor): Input image tensor.
+ visual (torch.Tensor): Visual features extracted from the image.
+
+ Returns:
+ (torch.Tensor): Visual positional embeddings.
+
+ Examples:
+ >>> model = YOLOE("yoloe-11s-seg.pt")
+ >>> img = torch.rand(1, 3, 640, 640)
+ >>> visual_features = torch.rand(1, 1, 80, 80)
+ >>> pe = model.get_visual_pe(img, visual_features)
+ """
assert isinstance(self.model, YOLOEModel)
return self.model.get_visual_pe(img, visual)
def set_vocab(self, vocab: list[str], names: list[str]) -> None:
+ """Set vocabulary and class names for the YOLOE model.
+
+ This method configures the vocabulary and class names used by the model for text processing and classification
+ tasks. The model must be an instance of YOLOEModel.
+
+ Args:
+ vocab (list[str]): Vocabulary list containing tokens or words used by the model for text processing.
+ names (list[str]): List of class names that the model can detect or classify.
+
+ Raises:
+ AssertionError: If the model is not an instance of YOLOEModel.
+
+ Examples:
+ >>> model = YOLOE("yoloe-11s-seg.pt")
+ >>> model.set_vocab(["person", "car", "dog"], ["person", "car", "dog"])
+ """
assert isinstance(self.model, YOLOEModel)
self.model.set_vocab(vocab, names=names)
def get_vocab(self, names):
+ """Get vocabulary for the given class names."""
assert isinstance(self.model, YOLOEModel)
return self.model.get_vocab(names)
def set_classes(self, classes: list[str], embeddings: torch.Tensor | None = None) -> None:
+ """Set the model's class names and embeddings for detection.
+
+ Args:
+ classes (list[str]): A list of categories, e.g. ["person"].
+ embeddings (torch.Tensor, optional): Embeddings corresponding to the classes.
+ """
# Verify no background class is present
assert " " not in classes
assert isinstance(self.model, YOLOEModel)
@@ -172,6 +329,17 @@ refer_data: str | None = None,
**kwargs,
):
+ """Validate the model using text or visual prompts.
+
+ Args:
+ validator (callable, optional): A callable validator function. If None, a default validator is loaded.
+ load_vp (bool): Whether to load visual prompts. If False, text prompts are used.
+ refer_data (str, optional): Path to the reference data for visual prompts.
+ **kwargs (Any): Additional keyword arguments to override default settings.
+
+ Returns:
+ (dict): Validation statistics containing metrics computed during validation.
+ """
custom = {"rect": not load_vp} # method defaults
args = {**self.overrides, **custom, **kwargs, "mode": "val"} # highest priority args on the right
@@ -189,6 +357,30 @@ predictor=yolo.yoloe.YOLOEVPDetectPredictor,
**kwargs,
):
+ """Run prediction on images, videos, directories, streams, etc.
+
+ Args:
+ source (str | int | PIL.Image | np.ndarray, optional): Source for prediction. Accepts image paths, directory
+ paths, URL/YouTube streams, PIL images, numpy arrays, or webcam indices.
+ stream (bool): Whether to stream the prediction results. If True, results are yielded as a generator as they
+ are computed.
+ visual_prompts (dict[str, list]): Dictionary containing visual prompts for the model. Must include 'bboxes'
+ and 'cls' keys when non-empty.
+ refer_image (str | PIL.Image | np.ndarray, optional): Reference image for visual prompts.
+ predictor (callable): Custom predictor class for visual prompt predictions. Defaults to
+ YOLOEVPDetectPredictor.
+ **kwargs (Any): Additional keyword arguments passed to the predictor.
+
+ Returns:
+ (list | generator): List of Results objects or generator of Results objects if stream=True.
+
+ Examples:
+ >>> model = YOLOE("yoloe-11s-seg.pt")
+ >>> results = model.predict("path/to/image.jpg")
+ >>> # With visual prompts
+ >>> prompts = {"bboxes": [[10, 20, 100, 200]], "cls": ["person"]}
+ >>> results = model.predict("path/to/image.jpg", visual_prompts=prompts)
+ """
if len(visual_prompts):
assert "bboxes" in visual_prompts and "cls" in visual_prompts, (
f"Expected 'bboxes' and 'cls' in visual prompts, but got {visual_prompts.keys()}"
@@ -236,4 +428,4 @@ self.predictor = None # reset predictor if no visual prompts
self.overrides["agnostic_nms"] = True # use agnostic nms for YOLOE default
- return super().predict(source, stream, **kwargs)
+ return super().predict(source, stream, **kwargs)
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/yolo/model.py |
Document this module using docstrings | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import cv2
import torch
from PIL import Image
from ultralytics.data.augment import classify_transforms
from ultralytics.engine.predictor import BasePredictor
from ultralytics.engine.results import Results
from ultralytics.utils import DEFAULT_CFG, ops
class ClassificationPredictor(BasePredictor):
def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks: dict | None = None):
super().__init__(cfg, overrides, _callbacks)
self.args.task = "classify"
def setup_source(self, source):
super().setup_source(source)
updated = (
self.model.model.transforms.transforms[0].size != max(self.imgsz)
if hasattr(self.model.model, "transforms") and hasattr(self.model.model.transforms.transforms[0], "size")
else False
)
self.transforms = (
classify_transforms(self.imgsz) if updated or self.model.format != "pt" else self.model.model.transforms
)
def preprocess(self, img):
if not isinstance(img, torch.Tensor):
img = torch.stack(
[self.transforms(Image.fromarray(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))) for im in img], dim=0
)
img = (img if isinstance(img, torch.Tensor) else torch.from_numpy(img)).to(self.model.device)
return img.half() if self.model.fp16 else img.float() # Convert uint8 to fp16/32
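# Shape sketch (illustrative, not part of the source): a list of two HxWx3 uint8 BGR frames
# becomes a (2, 3, H', W') float16/float32 tensor on the model device, where H' and W' are
# determined by the classification transforms.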
def postprocess(self, preds, img, orig_imgs):
if not isinstance(orig_imgs, list): # Input images are a torch.Tensor, not a list
orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)[..., ::-1]
preds = preds[0] if isinstance(preds, (list, tuple)) else preds
return [
Results(orig_img, path=img_path, names=self.model.names, probs=pred)
for pred, orig_img, img_path in zip(preds, orig_imgs, self.batch[0])
] | --- +++ @@ -13,12 +13,45 @@
class ClassificationPredictor(BasePredictor):
+ """A class extending the BasePredictor class for prediction based on a classification model.
+
+ This predictor handles the specific requirements of classification models, including preprocessing images and
+ postprocessing predictions to generate classification results.
+
+ Attributes:
+ args (dict): Configuration arguments for the predictor.
+
+ Methods:
+ preprocess: Convert input images to model-compatible format.
+ postprocess: Process model predictions into Results objects.
+
+ Examples:
+ >>> from ultralytics.utils import ASSETS
+ >>> from ultralytics.models.yolo.classify import ClassificationPredictor
+ >>> args = dict(model="yolo26n-cls.pt", source=ASSETS)
+ >>> predictor = ClassificationPredictor(overrides=args)
+ >>> predictor.predict_cli()
+
+ Notes:
+ - Torchvision classification models can also be passed to the 'model' argument, i.e. model='resnet18'.
+ """
def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks: dict | None = None):
+ """Initialize the ClassificationPredictor with the specified configuration and set task to 'classify'.
+
+ This constructor initializes a ClassificationPredictor instance, which extends BasePredictor for classification
+ tasks. It ensures the task is set to 'classify' regardless of input configuration.
+
+ Args:
+ cfg (dict): Default configuration dictionary containing prediction settings.
+ overrides (dict, optional): Configuration overrides that take precedence over cfg.
+ _callbacks (dict, optional): Dictionary of callback functions to be executed during prediction.
+ """
super().__init__(cfg, overrides, _callbacks)
self.args.task = "classify"
def setup_source(self, source):
+ """Set up source and inference mode and classify transforms."""
super().setup_source(source)
updated = (
self.model.model.transforms.transforms[0].size != max(self.imgsz)
@@ -30,6 +63,7 @@ )
def preprocess(self, img):
+ """Convert input images to model-compatible tensor format with appropriate normalization."""
if not isinstance(img, torch.Tensor):
img = torch.stack(
[self.transforms(Image.fromarray(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))) for im in img], dim=0
@@ -38,6 +72,16 @@ return img.half() if self.model.fp16 else img.float() # Convert uint8 to fp16/32
def postprocess(self, preds, img, orig_imgs):
+ """Process predictions to return Results objects with classification probabilities.
+
+ Args:
+ preds (torch.Tensor): Raw predictions from the model.
+ img (torch.Tensor): Input images after preprocessing.
+ orig_imgs (list[np.ndarray] | torch.Tensor): Original images before preprocessing.
+
+ Returns:
+ (list[Results]): List of Results objects containing classification results for each image.
+ """
if not isinstance(orig_imgs, list): # Input images are a torch.Tensor, not a list
orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)[..., ::-1]
@@ -45,4 +89,4 @@ return [
Results(orig_img, path=img_path, names=self.model.names, probs=pred)
for pred, orig_img, img_path in zip(preds, orig_imgs, self.batch[0])
- ]
+ ]
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/yolo/classify/predict.py |
Fill in missing docstrings in my code | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import itertools
from glob import glob
from math import ceil
from pathlib import Path
from typing import Any
import cv2
import numpy as np
from PIL import Image
from ultralytics.data.utils import exif_size, img2label_paths
from ultralytics.utils import TQDM
from ultralytics.utils.checks import check_requirements
def bbox_iof(polygon1: np.ndarray, bbox2: np.ndarray, eps: float = 1e-6) -> np.ndarray:
check_requirements("shapely>=2.0.0")
from shapely.geometry import Polygon
polygon1 = polygon1.reshape(-1, 4, 2)
lt_point = np.min(polygon1, axis=-2) # left-top
rb_point = np.max(polygon1, axis=-2) # right-bottom
bbox1 = np.concatenate([lt_point, rb_point], axis=-1)
lt = np.maximum(bbox1[:, None, :2], bbox2[..., :2])
rb = np.minimum(bbox1[:, None, 2:], bbox2[..., 2:])
wh = np.clip(rb - lt, 0, np.inf)
h_overlaps = wh[..., 0] * wh[..., 1]
left, top, right, bottom = (bbox2[..., i] for i in range(4))
polygon2 = np.stack([left, top, right, top, right, bottom, left, bottom], axis=-1).reshape(-1, 4, 2)
sg_polys1 = [Polygon(p) for p in polygon1]
sg_polys2 = [Polygon(p) for p in polygon2]
overlaps = np.zeros(h_overlaps.shape)
for p in zip(*np.nonzero(h_overlaps)):
overlaps[p] = sg_polys1[p[0]].intersection(sg_polys2[p[-1]]).area
unions = np.array([p.area for p in sg_polys1], dtype=np.float32)
unions = unions[..., None]
unions = np.clip(unions, eps, np.inf)
outputs = overlaps / unions
if outputs.ndim == 1:
outputs = outputs[..., None]
return outputs
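# Minimal usage sketch (assumes shapely is installed; values are illustrative):
# >>> poly = np.array([[0, 0, 10, 0, 10, 10, 0, 10]], dtype=np.float32) # one 10x10 square
# >>> box = np.array([[0, 0, 20, 20]], dtype=np.float32) # a box fully containing the square
# >>> bbox_iof(poly, box) # ~array([[1.0]]) since the whole polygon area lies inside the box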
def load_yolo_dota(data_root: str, split: str = "train") -> list[dict[str, Any]]:
assert split in {"train", "val"}, f"Split must be 'train' or 'val', not {split}."
im_dir = Path(data_root) / "images" / split
assert im_dir.exists(), f"Can't find {im_dir}, please check your data root."
im_files = glob(str(Path(data_root) / "images" / split / "*"))
lb_files = img2label_paths(im_files)
annos = []
for im_file, lb_file in zip(im_files, lb_files):
w, h = exif_size(Image.open(im_file))
with open(lb_file, encoding="utf-8") as f:
lb = [x.split() for x in f.read().strip().splitlines() if len(x)]
lb = np.array(lb, dtype=np.float32)
annos.append(dict(ori_size=(h, w), label=lb, filepath=im_file))
return annos
def get_windows(
im_size: tuple[int, int],
crop_sizes: tuple[int, ...] = (1024,),
gaps: tuple[int, ...] = (200,),
im_rate_thr: float = 0.6,
eps: float = 0.01,
) -> np.ndarray:
h, w = im_size
windows = []
for crop_size, gap in zip(crop_sizes, gaps):
assert crop_size > gap, f"invalid crop_size gap pair [{crop_size} {gap}]"
step = crop_size - gap
xn = 1 if w <= crop_size else ceil((w - crop_size) / step + 1)
xs = [step * i for i in range(xn)]
if len(xs) > 1 and xs[-1] + crop_size > w:
xs[-1] = w - crop_size
yn = 1 if h <= crop_size else ceil((h - crop_size) / step + 1)
ys = [step * i for i in range(yn)]
if len(ys) > 1 and ys[-1] + crop_size > h:
ys[-1] = h - crop_size
start = np.array(list(itertools.product(xs, ys)), dtype=np.int64)
stop = start + crop_size
windows.append(np.concatenate([start, stop], axis=1))
windows = np.concatenate(windows, axis=0)
im_in_wins = windows.copy()
im_in_wins[:, 0::2] = np.clip(im_in_wins[:, 0::2], 0, w)
im_in_wins[:, 1::2] = np.clip(im_in_wins[:, 1::2], 0, h)
im_areas = (im_in_wins[:, 2] - im_in_wins[:, 0]) * (im_in_wins[:, 3] - im_in_wins[:, 1])
win_areas = (windows[:, 2] - windows[:, 0]) * (windows[:, 3] - windows[:, 1])
im_rates = im_areas / win_areas
if not (im_rates > im_rate_thr).any():
max_rate = im_rates.max()
im_rates[abs(im_rates - max_rate) < eps] = 1
return windows[im_rates > im_rate_thr]
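# Worked example (illustrative): im_size=(1024, 2048) with crop_sizes=(1024,) and gaps=(200,)
# gives step=824, x starts [0, 824, 1024] (the last start is shifted left to fit) and y starts [0],
# i.e. three 1024x1024 windows; all lie fully inside the image, so none are filtered by im_rate_thr.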
def get_window_obj(anno: dict[str, Any], windows: np.ndarray, iof_thr: float = 0.7) -> list[np.ndarray]:
h, w = anno["ori_size"]
label = anno["label"]
if len(label):
label[:, 1::2] *= w
label[:, 2::2] *= h
iofs = bbox_iof(label[:, 1:], windows)
# Unnormalized and misaligned coordinates
return [(label[iofs[:, i] >= iof_thr]) for i in range(len(windows))] # window_anns
else:
return [np.zeros((0, 9), dtype=np.float32) for _ in range(len(windows))] # window_anns
def crop_and_save(
anno: dict[str, Any],
windows: np.ndarray,
window_objs: list[np.ndarray],
im_dir: str,
lb_dir: str,
allow_background_images: bool = True,
) -> None:
im = cv2.imread(anno["filepath"])
name = Path(anno["filepath"]).stem
for i, window in enumerate(windows):
x_start, y_start, x_stop, y_stop = window.tolist()
new_name = f"{name}__{x_stop - x_start}__{x_start}___{y_start}"
patch_im = im[y_start:y_stop, x_start:x_stop]
ph, pw = patch_im.shape[:2]
label = window_objs[i]
if len(label) or allow_background_images:
cv2.imwrite(str(Path(im_dir) / f"{new_name}.jpg"), patch_im)
if len(label):
label[:, 1::2] -= x_start
label[:, 2::2] -= y_start
label[:, 1::2] /= pw
label[:, 2::2] /= ph
with open(Path(lb_dir) / f"{new_name}.txt", "w", encoding="utf-8") as f:
for lb in label:
formatted_coords = [f"{coord:.6g}" for coord in lb[1:]]
f.write(f"{int(lb[0])} {' '.join(formatted_coords)}\n")
def split_images_and_labels(
data_root: str,
save_dir: str,
split: str = "train",
crop_sizes: tuple[int, ...] = (1024,),
gaps: tuple[int, ...] = (200,),
) -> None:
im_dir = Path(save_dir) / "images" / split
im_dir.mkdir(parents=True, exist_ok=True)
lb_dir = Path(save_dir) / "labels" / split
lb_dir.mkdir(parents=True, exist_ok=True)
annos = load_yolo_dota(data_root, split=split)
for anno in TQDM(annos, total=len(annos), desc=split):
windows = get_windows(anno["ori_size"], crop_sizes, gaps)
window_objs = get_window_obj(anno, windows)
crop_and_save(anno, windows, window_objs, str(im_dir), str(lb_dir))
def split_trainval(
data_root: str, save_dir: str, crop_size: int = 1024, gap: int = 200, rates: tuple[float, ...] = (1.0,)
) -> None:
crop_sizes, gaps = [], []
for r in rates:
crop_sizes.append(int(crop_size / r))
gaps.append(int(gap / r))
for split in {"train", "val"}:
split_images_and_labels(data_root, save_dir, split, crop_sizes, gaps)
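# e.g. rates=(0.5, 1.0) yields crop_sizes=[2048, 1024] and gaps=[400, 200], so each split is
# windowed at two scales before cropping.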
def split_test(
data_root: str, save_dir: str, crop_size: int = 1024, gap: int = 200, rates: tuple[float, ...] = (1.0,)
) -> None:
crop_sizes, gaps = [], []
for r in rates:
crop_sizes.append(int(crop_size / r))
gaps.append(int(gap / r))
save_dir = Path(save_dir) / "images" / "test"
save_dir.mkdir(parents=True, exist_ok=True)
im_dir = Path(data_root) / "images" / "test"
assert im_dir.exists(), f"Can't find {im_dir}, please check your data root."
im_files = glob(str(im_dir / "*"))
for im_file in TQDM(im_files, total=len(im_files), desc="test"):
w, h = exif_size(Image.open(im_file))
windows = get_windows((h, w), crop_sizes=crop_sizes, gaps=gaps)
im = cv2.imread(im_file)
name = Path(im_file).stem
for window in windows:
x_start, y_start, x_stop, y_stop = window.tolist()
new_name = f"{name}__{x_stop - x_start}__{x_start}___{y_start}"
patch_im = im[y_start:y_stop, x_start:x_stop]
cv2.imwrite(str(save_dir / f"{new_name}.jpg"), patch_im)
if __name__ == "__main__":
split_trainval(data_root="DOTAv2", save_dir="DOTAv2-split")
split_test(data_root="DOTAv2", save_dir="DOTAv2-split") | --- +++ @@ -18,6 +18,20 @@
def bbox_iof(polygon1: np.ndarray, bbox2: np.ndarray, eps: float = 1e-6) -> np.ndarray:
+ """Calculate Intersection over Foreground (IoF) between polygons and bounding boxes.
+
+ Args:
+ polygon1 (np.ndarray): Polygon coordinates with shape (N, 8).
+ bbox2 (np.ndarray): Bounding boxes with shape (M, 4).
+ eps (float, optional): Small value to prevent division by zero.
+
+ Returns:
+ (np.ndarray): IoF scores with shape (N, M).
+
+ Notes:
+ Polygon format: [x1, y1, x2, y2, x3, y3, x4, y4].
+ Bounding box format: [x_min, y_min, x_max, y_max].
+ """
check_requirements("shapely>=2.0.0")
from shapely.geometry import Polygon
@@ -50,6 +64,25 @@
def load_yolo_dota(data_root: str, split: str = "train") -> list[dict[str, Any]]:
+ """Load DOTA dataset annotations and image information.
+
+ Args:
+ data_root (str): Data root directory.
+ split (str, optional): The split data set, could be 'train' or 'val'.
+
+ Returns:
+ (list[dict[str, Any]]): List of annotation dictionaries containing image information.
+
+ Notes:
+ The directory structure assumed for the DOTA dataset:
+ - data_root
+ - images
+ - train
+ - val
+ - labels
+ - train
+ - val
+ """
assert split in {"train", "val"}, f"Split must be 'train' or 'val', not {split}."
im_dir = Path(data_root) / "images" / split
assert im_dir.exists(), f"Can't find {im_dir}, please check your data root."
@@ -72,6 +105,18 @@ im_rate_thr: float = 0.6,
eps: float = 0.01,
) -> np.ndarray:
+ """Get the coordinates of sliding windows for image cropping.
+
+ Args:
+ im_size (tuple[int, int]): Original image size, (H, W).
+ crop_sizes (tuple[int, ...], optional): Crop size of windows.
+ gaps (tuple[int, ...], optional): Gap between crops.
+ im_rate_thr (float, optional): Threshold for the ratio of image area within a window to the total window area.
+ eps (float, optional): Epsilon value for math operations.
+
+ Returns:
+ (np.ndarray): Array of window coordinates of shape (N, 4) where each row is [x_start, y_start, x_stop, y_stop].
+ """
h, w = im_size
windows = []
for crop_size, gap in zip(crop_sizes, gaps):
@@ -106,6 +151,7 @@
def get_window_obj(anno: dict[str, Any], windows: np.ndarray, iof_thr: float = 0.7) -> list[np.ndarray]:
+ """Get objects for each window based on IoF threshold."""
h, w = anno["ori_size"]
label = anno["label"]
if len(label):
@@ -126,6 +172,26 @@ lb_dir: str,
allow_background_images: bool = True,
) -> None:
+ """Crop images and save new labels for each window.
+
+ Args:
+ anno (dict[str, Any]): Annotation dict, including 'filepath', 'label', 'ori_size' as its keys.
+ windows (np.ndarray): Array of windows coordinates with shape (N, 4).
+ window_objs (list[np.ndarray]): A list of labels inside each window.
+ im_dir (str): The output directory path of images.
+ lb_dir (str): The output directory path of labels.
+ allow_background_images (bool, optional): Whether to include background images without labels.
+
+ Notes:
+ The directory structure assumed for the DOTA dataset:
+ - data_root
+ - images
+ - train
+ - val
+ - labels
+ - train
+ - val
+ """
im = cv2.imread(anno["filepath"])
name = Path(anno["filepath"]).stem
for i, window in enumerate(windows):
@@ -156,6 +222,29 @@ crop_sizes: tuple[int, ...] = (1024,),
gaps: tuple[int, ...] = (200,),
) -> None:
+ """Split both images and labels for a given dataset split.
+
+ Args:
+ data_root (str): Root directory of the dataset.
+ save_dir (str): Directory to save the split dataset.
+ split (str, optional): The split data set, could be 'train' or 'val'.
+ crop_sizes (tuple[int, ...], optional): Tuple of crop sizes.
+ gaps (tuple[int, ...], optional): Tuple of gaps between crops.
+
+ Notes:
+ The directory structure assumed for the DOTA dataset:
+ - data_root
+ - images
+ - split
+ - labels
+ - split
+ and the output directory structure is:
+ - save_dir
+ - images
+ - split
+ - labels
+ - split
+ """
im_dir = Path(save_dir) / "images" / split
im_dir.mkdir(parents=True, exist_ok=True)
lb_dir = Path(save_dir) / "labels" / split
@@ -171,6 +260,33 @@ def split_trainval(
data_root: str, save_dir: str, crop_size: int = 1024, gap: int = 200, rates: tuple[float, ...] = (1.0,)
) -> None:
+ """Split train and val sets of DOTA dataset with multiple scaling rates.
+
+ Args:
+ data_root (str): Root directory of the dataset.
+ save_dir (str): Directory to save the split dataset.
+ crop_size (int, optional): Base crop size.
+ gap (int, optional): Base gap between crops.
+ rates (tuple[float, ...], optional): Scaling rates for crop_size and gap.
+
+ Notes:
+ The directory structure assumed for the DOTA dataset:
+ - data_root
+ - images
+ - train
+ - val
+ - labels
+ - train
+ - val
+ and the output directory structure is:
+ - save_dir
+ - images
+ - train
+ - val
+ - labels
+ - train
+ - val
+ """
crop_sizes, gaps = [], []
for r in rates:
crop_sizes.append(int(crop_size / r))
@@ -182,6 +298,25 @@ def split_test(
data_root: str, save_dir: str, crop_size: int = 1024, gap: int = 200, rates: tuple[float, ...] = (1.0,)
) -> None:
+ """Split test set of DOTA dataset, labels are not included within this set.
+
+ Args:
+ data_root (str): Root directory of the dataset.
+ save_dir (str): Directory to save the split dataset.
+ crop_size (int, optional): Base crop size.
+ gap (int, optional): Base gap between crops.
+ rates (tuple[float, ...], optional): Scaling rates for crop_size and gap.
+
+ Notes:
+ The directory structure assumed for the DOTA dataset:
+ - data_root
+ - images
+ - test
+ and the output directory structure is:
+ - save_dir
+ - images
+ - test
+ """
crop_sizes, gaps = [], []
for r in rates:
crop_sizes.append(int(crop_size / r))
@@ -206,4 +341,4 @@
if __name__ == "__main__":
split_trainval(data_root="DOTAv2", save_dir="DOTAv2-split")
- split_test(data_root="DOTAv2", save_dir="DOTAv2-split")
+ split_test(data_root="DOTAv2", save_dir="DOTAv2-split")
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/data/split_dota.py |
Generate helpful docstrings for debugging | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from ultralytics.models.yolo.detect.predict import DetectionPredictor
from ultralytics.utils import DEFAULT_CFG, ops
class PosePredictor(DetectionPredictor):
def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks: dict | None = None):
super().__init__(cfg, overrides, _callbacks)
self.args.task = "pose"
def construct_result(self, pred, img, orig_img, img_path):
result = super().construct_result(pred, img, orig_img, img_path)
# Extract keypoints from prediction and reshape according to model's keypoint shape
pred_kpts = pred[:, 6:].view(pred.shape[0], *self.model.kpt_shape)
# Scale keypoints coordinates to match the original image dimensions
pred_kpts = ops.scale_coords(img.shape[2:], pred_kpts, orig_img.shape)
result.update(keypoints=pred_kpts)
return result | --- +++ @@ -7,16 +7,61 @@
class PosePredictor(DetectionPredictor):
+ """A class extending the DetectionPredictor class for prediction based on a pose model.
+
+ This class specializes in pose estimation, handling keypoints detection alongside standard object detection
+ capabilities inherited from DetectionPredictor.
+
+ Attributes:
+ args (namespace): Configuration arguments for the predictor.
+ model (torch.nn.Module): The loaded YOLO pose model with keypoint detection capabilities.
+
+ Methods:
+ construct_result: Construct the result object from the prediction, including keypoints.
+
+ Examples:
+ >>> from ultralytics.utils import ASSETS
+ >>> from ultralytics.models.yolo.pose import PosePredictor
+ >>> args = dict(model="yolo26n-pose.pt", source=ASSETS)
+ >>> predictor = PosePredictor(overrides=args)
+ >>> predictor.predict_cli()
+ """
def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks: dict | None = None):
+ """Initialize PosePredictor for pose estimation tasks.
+
+ Sets up a PosePredictor instance and configures it for pose estimation tasks based on the provided
+ configuration and overrides.
+
+ Args:
+ cfg (Any): Configuration for the predictor.
+ overrides (dict, optional): Configuration overrides that take precedence over cfg.
+ _callbacks (dict, optional): Dictionary of callback functions to be invoked during prediction.
+ """
super().__init__(cfg, overrides, _callbacks)
self.args.task = "pose"
def construct_result(self, pred, img, orig_img, img_path):
+ """Construct the result object from the prediction, including keypoints.
+
+ Extends the parent class implementation by extracting keypoint data from predictions and adding them to the
+ result object.
+
+ Args:
+ pred (torch.Tensor): The predicted bounding boxes, scores, and keypoints with shape (N, 6+K*D) where N is
+ the number of detections, K is the number of keypoints, and D is the keypoint dimension.
+ img (torch.Tensor): The processed input image tensor with shape (B, C, H, W).
+ orig_img (np.ndarray): The original unprocessed image as a numpy array.
+ img_path (str): The path to the original image file.
+
+ Returns:
+ (Results): The result object containing the original image, image path, class names, bounding boxes, and
+ keypoints.
+ """
result = super().construct_result(pred, img, orig_img, img_path)
# Extract keypoints from prediction and reshape according to model's keypoint shape
pred_kpts = pred[:, 6:].view(pred.shape[0], *self.model.kpt_shape)
# Scale keypoints coordinates to match the original image dimensions
pred_kpts = ops.scale_coords(img.shape[2:], pred_kpts, orig_img.shape)
result.update(keypoints=pred_kpts)
- return result
+ return result
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/yolo/pose/predict.py |
Add docstrings to improve collaboration | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from pathlib import Path
from typing import Any
import torch
from ultralytics.data import YOLODataset
from ultralytics.data.augment import Compose, Format, v8_transforms
from ultralytics.models.yolo.detect import DetectionValidator
from ultralytics.utils import colorstr, ops
__all__ = ("RTDETRValidator",) # tuple or list
class RTDETRDataset(YOLODataset):
def __init__(self, *args, data=None, **kwargs):
super().__init__(*args, data=data, **kwargs)
def load_image(self, i, rect_mode=False):
return super().load_image(i=i, rect_mode=rect_mode)
def build_transforms(self, hyp=None):
if self.augment:
hyp.mosaic = hyp.mosaic if self.augment and not self.rect else 0.0
hyp.mixup = hyp.mixup if self.augment and not self.rect else 0.0
hyp.cutmix = hyp.cutmix if self.augment and not self.rect else 0.0
transforms = v8_transforms(self, self.imgsz, hyp, stretch=True)
else:
# transforms = Compose([LetterBox(new_shape=(self.imgsz, self.imgsz), auto=False, scale_fill=True)])
transforms = Compose([])
transforms.append(
Format(
bbox_format="xywh",
normalize=True,
return_mask=self.use_segments,
return_keypoint=self.use_keypoints,
batch_idx=True,
mask_ratio=hyp.mask_ratio,
mask_overlap=hyp.overlap_mask,
)
)
return transforms
class RTDETRValidator(DetectionValidator):
def build_dataset(self, img_path, mode="val", batch=None):
return RTDETRDataset(
img_path=img_path,
imgsz=self.args.imgsz,
batch_size=batch,
augment=False, # no augmentation
hyp=self.args,
rect=False, # no rect
cache=self.args.cache or None,
prefix=colorstr(f"{mode}: "),
data=self.data,
)
def scale_preds(self, predn: dict[str, torch.Tensor], pbatch: dict[str, Any]) -> dict[str, torch.Tensor]:
return predn
def postprocess(
self, preds: torch.Tensor | list[torch.Tensor] | tuple[torch.Tensor]
) -> list[dict[str, torch.Tensor]]:
if not isinstance(preds, (list, tuple)): # list for PyTorch inference but list[0] Tensor for export inference
preds = [preds, None]
bs, _, nd = preds[0].shape
bboxes, scores = preds[0].split((4, nd - 4), dim=-1)
bboxes *= self.args.imgsz
outputs = [torch.zeros((0, 6), device=bboxes.device)] * bs
for i, bbox in enumerate(bboxes): # (300, 4)
bbox = ops.xywh2xyxy(bbox)
score, cls = scores[i].max(-1) # (300, )
pred = torch.cat([bbox, score[..., None], cls[..., None]], dim=-1) # filter
# Sort by confidence to correctly get internal metrics
pred = pred[score.argsort(descending=True)]
outputs[i] = pred[score > self.args.conf]
return [{"bboxes": x[:, :4], "conf": x[:, 4], "cls": x[:, 5]} for x in outputs]
def pred_to_json(self, predn: dict[str, torch.Tensor], pbatch: dict[str, Any]) -> None:
path = Path(pbatch["im_file"])
stem = path.stem
image_id = int(stem) if stem.isnumeric() else stem
box = predn["bboxes"].clone()
box[..., [0, 2]] *= pbatch["ori_shape"][1] / self.args.imgsz # native-space pred
box[..., [1, 3]] *= pbatch["ori_shape"][0] / self.args.imgsz # native-space pred
box = ops.xyxy2xywh(box) # xywh
box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
for b, s, c in zip(box.tolist(), predn["conf"].tolist(), predn["cls"].tolist()):
self.jdict.append(
{
"image_id": image_id,
"file_name": path.name,
"category_id": self.class_map[int(c)],
"bbox": [round(x, 3) for x in b],
"score": round(s, 5),
}
) | --- +++ @@ -16,14 +16,69 @@
class RTDETRDataset(YOLODataset):
+ """Real-Time DEtection and TRacking (RT-DETR) dataset class extending the base YOLODataset class.
+
+ This specialized dataset class is designed for use with the RT-DETR object detection model and is optimized for
+ real-time detection and tracking tasks.
+
+ Attributes:
+ augment (bool): Whether to apply data augmentation.
+ rect (bool): Whether to use rectangular training.
+ use_segments (bool): Whether to use segmentation masks.
+ use_keypoints (bool): Whether to use keypoint annotations.
+ imgsz (int): Target image size for training.
+
+ Methods:
+ load_image: Load one image from dataset index.
+ build_transforms: Build transformation pipeline for the dataset.
+
+ Examples:
+ Initialize an RT-DETR dataset
+ >>> dataset = RTDETRDataset(img_path="path/to/images", imgsz=640)
+ >>> image, hw0, hw = dataset.load_image(0)
+ """
def __init__(self, *args, data=None, **kwargs):
+ """Initialize the RTDETRDataset class by inheriting from the YOLODataset class.
+
+ This constructor sets up a dataset specifically optimized for the RT-DETR (Real-Time DEtection and TRacking)
+ model, building upon the base YOLODataset functionality.
+
+ Args:
+ *args (Any): Variable length argument list passed to the parent YOLODataset class.
+ data (dict | None): Dictionary containing dataset information. If None, default values will be used.
+ **kwargs (Any): Additional keyword arguments passed to the parent YOLODataset class.
+ """
super().__init__(*args, data=data, **kwargs)
def load_image(self, i, rect_mode=False):
+ """Load one image from dataset index 'i'.
+
+ Args:
+ i (int): Index of the image to load.
+ rect_mode (bool, optional): Whether to use rectangular mode for batch inference.
+
+ Returns:
+ im (np.ndarray): Loaded image as a NumPy array.
+ hw_original (tuple[int, int]): Original image dimensions in (height, width) format.
+ hw_resized (tuple[int, int]): Resized image dimensions in (height, width) format.
+
+ Examples:
+ Load an image from the dataset
+ >>> dataset = RTDETRDataset(img_path="path/to/images")
+ >>> image, hw0, hw = dataset.load_image(0)
+ """
return super().load_image(i=i, rect_mode=rect_mode)
def build_transforms(self, hyp=None):
+ """Build transformation pipeline for the dataset.
+
+ Args:
+ hyp (dict, optional): Hyperparameters for transformations.
+
+ Returns:
+ (Compose): Composition of transformation functions.
+ """
if self.augment:
hyp.mosaic = hyp.mosaic if self.augment and not self.rect else 0.0
hyp.mixup = hyp.mixup if self.augment and not self.rect else 0.0
@@ -47,8 +102,43 @@
class RTDETRValidator(DetectionValidator):
+ """RTDETRValidator extends the DetectionValidator class to provide validation capabilities specifically tailored for
+ the RT-DETR (Real-Time DETR) object detection model.
+
+ The class allows building of an RTDETR-specific dataset for validation, applies confidence thresholding for
+ post-processing, and updates evaluation metrics accordingly.
+
+ Attributes:
+ args (Namespace): Configuration arguments for validation.
+ data (dict): Dataset configuration dictionary.
+
+ Methods:
+ build_dataset: Build an RTDETR Dataset for validation.
+ postprocess: Apply confidence thresholding to prediction outputs.
+
+ Examples:
+ Initialize and run RT-DETR validation
+ >>> from ultralytics.models.rtdetr import RTDETRValidator
+ >>> args = dict(model="rtdetr-l.pt", data="coco8.yaml")
+ >>> validator = RTDETRValidator(args=args)
+ >>> validator()
+
+ Notes:
+ For further details on the attributes and methods, refer to the parent DetectionValidator class.
+ """
def build_dataset(self, img_path, mode="val", batch=None):
+ """Build an RTDETR Dataset.
+
+ Args:
+ img_path (str): Path to the folder containing images.
+ mode (str, optional): `train` mode or `val` mode, users are able to customize different augmentations for
+ each mode.
+ batch (int, optional): Size of batches, this is for `rect`.
+
+ Returns:
+ (RTDETRDataset): Dataset configured for RT-DETR validation.
+ """
return RTDETRDataset(
img_path=img_path,
imgsz=self.args.imgsz,
@@ -62,11 +152,25 @@ )
def scale_preds(self, predn: dict[str, torch.Tensor], pbatch: dict[str, Any]) -> dict[str, torch.Tensor]:
+ """Return predictions unchanged as RT-DETR handles scaling in postprocessing."""
return predn
def postprocess(
self, preds: torch.Tensor | list[torch.Tensor] | tuple[torch.Tensor]
) -> list[dict[str, torch.Tensor]]:
+ """Apply confidence thresholding to prediction outputs.
+
+ Args:
+ preds (torch.Tensor | list | tuple): Raw predictions from the model. If tensor, should have shape
+ (batch_size, num_predictions, num_classes + 4) where last dimension contains bbox coords and
+ class scores.
+
+ Returns:
+ (list[dict[str, torch.Tensor]]): List of dictionaries for each image, each containing:
+ - 'bboxes': Tensor of shape (N, 4) with bounding box coordinates
+ - 'conf': Tensor of shape (N,) with confidence scores
+ - 'cls': Tensor of shape (N,) with class indices
+ """
if not isinstance(preds, (list, tuple)): # list for PyTorch inference but list[0] Tensor for export inference
preds = [preds, None]
@@ -85,6 +189,13 @@ return [{"bboxes": x[:, :4], "conf": x[:, 4], "cls": x[:, 5]} for x in outputs]
def pred_to_json(self, predn: dict[str, torch.Tensor], pbatch: dict[str, Any]) -> None:
+ """Serialize YOLO predictions to COCO json format.
+
+ Args:
+ predn (dict[str, torch.Tensor]): Predictions dictionary containing 'bboxes', 'conf', and 'cls' keys with
+ bounding box coordinates, confidence scores, and class predictions.
+ pbatch (dict[str, Any]): Batch dictionary containing 'imgsz', 'ori_shape', 'ratio_pad', and 'im_file'.
+ """
path = Path(pbatch["im_file"])
stem = path.stem
image_id = int(stem) if stem.isnumeric() else stem
@@ -102,4 +213,4 @@ "bbox": [round(x, 3) for x in b],
"score": round(s, 5),
}
- )
+ )
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/rtdetr/val.py |
Add concise docstrings to each method | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import gc
import random
import shutil
import subprocess
import time
from datetime import datetime
import numpy as np
import torch
from ultralytics.cfg import CFG_INT_KEYS, get_cfg, get_save_dir
from ultralytics.utils import DEFAULT_CFG, LOGGER, YAML, callbacks, colorstr, remove_colorstr
from ultralytics.utils.checks import check_requirements
from ultralytics.utils.patches import torch_load
from ultralytics.utils.plotting import plot_tune_results
class Tuner:
def __init__(self, args=DEFAULT_CFG, _callbacks: dict | None = None):
self.space = args.pop("space", None) or { # key: (min, max, gain(optional))
# 'optimizer': tune.choice(['SGD', 'Adam', 'AdamW', 'NAdam', 'RAdam', 'RMSProp']),
"lr0": (1e-5, 1e-2), # initial learning rate (i.e. SGD=1E-2, Adam=1E-3)
"lrf": (0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf)
"momentum": (0.7, 0.98, 0.3), # SGD momentum/Adam beta1
"weight_decay": (0.0, 0.001), # optimizer weight decay 5e-4
"warmup_epochs": (0.0, 5.0), # warmup epochs (fractions ok)
"warmup_momentum": (0.0, 0.95), # warmup initial momentum
"box": (1.0, 20.0), # box loss gain
"cls": (0.1, 4.0), # cls loss gain (scale with pixels)
"dfl": (0.4, 12.0), # dfl loss gain
"hsv_h": (0.0, 0.1), # image HSV-Hue augmentation (fraction)
"hsv_s": (0.0, 0.9), # image HSV-Saturation augmentation (fraction)
"hsv_v": (0.0, 0.9), # image HSV-Value augmentation (fraction)
"degrees": (0.0, 45.0), # image rotation (+/- deg)
"translate": (0.0, 0.9), # image translation (+/- fraction)
"scale": (0.0, 0.95), # image scale (+/- gain)
"shear": (0.0, 10.0), # image shear (+/- deg)
"perspective": (0.0, 0.001), # image perspective (+/- fraction), range 0-0.001
"flipud": (0.0, 1.0), # image flip up-down (probability)
"fliplr": (0.0, 1.0), # image flip left-right (probability)
"bgr": (0.0, 1.0), # image channel bgr (probability)
"mosaic": (0.0, 1.0), # image mosaic (probability)
"mixup": (0.0, 1.0), # image mixup (probability)
"cutmix": (0.0, 1.0), # image cutmix (probability)
"copy_paste": (0.0, 1.0), # segment copy-paste (probability)
"close_mosaic": (0.0, 10.0), # close dataloader mosaic (epochs)
}
mongodb_uri = args.pop("mongodb_uri", None)
mongodb_db = args.pop("mongodb_db", "ultralytics")
mongodb_collection = args.pop("mongodb_collection", "tuner_results")
self.args = get_cfg(overrides=args)
self.args.exist_ok = self.args.resume # resume w/ same tune_dir
self.tune_dir = get_save_dir(self.args, name=self.args.name or "tune")
self.args.name, self.args.exist_ok, self.args.resume = (None, False, False) # reset to not affect training
self.tune_csv = self.tune_dir / "tune_results.csv"
self.callbacks = _callbacks or callbacks.get_default_callbacks()
self.prefix = colorstr("Tuner: ")
callbacks.add_integration_callbacks(self)
# MongoDB Atlas support (optional)
self.mongodb = None
if mongodb_uri:
self._init_mongodb(mongodb_uri, mongodb_db, mongodb_collection)
LOGGER.info(
f"{self.prefix}Initialized Tuner instance with 'tune_dir={self.tune_dir}'\n"
f"{self.prefix}💡 Learn about tuning at https://docs.ultralytics.com/guides/hyperparameter-tuning"
)
def _connect(self, uri: str = "mongodb+srv://username:password@cluster.mongodb.net/", max_retries: int = 3):
check_requirements("pymongo")
from pymongo import MongoClient
from pymongo.errors import ConnectionFailure, ServerSelectionTimeoutError
for attempt in range(max_retries):
try:
client = MongoClient(
uri,
serverSelectionTimeoutMS=30000,
connectTimeoutMS=20000,
socketTimeoutMS=40000,
retryWrites=True,
retryReads=True,
maxPoolSize=30,
minPoolSize=3,
maxIdleTimeMS=60000,
)
client.admin.command("ping") # Test connection
LOGGER.info(f"{self.prefix}Connected to MongoDB Atlas (attempt {attempt + 1})")
return client
except (ConnectionFailure, ServerSelectionTimeoutError):
if attempt == max_retries - 1:
raise
wait_time = 2**attempt
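# exponential backoff: with max_retries=3 this waits 1s after the first failure and 2s after
# the second; a third failure re-raises above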
LOGGER.warning(
f"{self.prefix}MongoDB connection failed (attempt {attempt + 1}), retrying in {wait_time}s..."
)
time.sleep(wait_time)
def _init_mongodb(self, mongodb_uri="", mongodb_db="", mongodb_collection=""):
self.mongodb = self._connect(mongodb_uri)
self.collection = self.mongodb[mongodb_db][mongodb_collection]
self.collection.create_index([("fitness", -1)], background=True)
LOGGER.info(f"{self.prefix}Using MongoDB Atlas for distributed tuning")
def _get_mongodb_results(self, n: int = 5) -> list:
try:
return list(self.collection.find().sort("fitness", -1).limit(n))
except Exception:
return []
def _save_to_mongodb(self, fitness: float, hyperparameters: dict[str, float], metrics: dict, iteration: int):
try:
self.collection.insert_one(
{
"fitness": fitness,
"hyperparameters": {k: (v.item() if hasattr(v, "item") else v) for k, v in hyperparameters.items()},
"metrics": metrics,
"timestamp": datetime.now(),
"iteration": iteration,
}
)
except Exception as e:
LOGGER.warning(f"{self.prefix}MongoDB save failed: {e}")
def _sync_mongodb_to_csv(self):
try:
# Get all results from MongoDB
all_results = list(self.collection.find().sort("iteration", 1))
if not all_results:
return
# Write to CSV
headers = ",".join(["fitness", *list(self.space.keys())]) + "\n"
with open(self.tune_csv, "w", encoding="utf-8") as f:
f.write(headers)
for result in all_results:
fitness = result["fitness"] or 0.0
hyp_values = [result["hyperparameters"].get(k, self.args.get(k)) for k in self.space.keys()]
log_row = [round(fitness, 5), *hyp_values]
f.write(",".join(map(str, log_row)) + "\n")
except Exception as e:
LOGGER.warning(f"{self.prefix}MongoDB to CSV sync failed: {e}")
@staticmethod
def _crossover(x: np.ndarray, alpha: float = 0.2, k: int = 9) -> np.ndarray:
k = min(k, len(x))
# fitness weights (shifted to >0); fallback to uniform if degenerate
weights = x[:, 0] - x[:, 0].min() + 1e-6
if not np.isfinite(weights).all() or weights.sum() == 0:
weights = np.ones_like(weights)
idxs = random.choices(range(len(x)), weights=weights, k=k)
parents_mat = np.stack([x[i][1:] for i in idxs], 0) # (k, ng) strip fitness
lo, hi = parents_mat.min(0), parents_mat.max(0)
span = hi - lo
# use a small random value where span is zero so mutation can still occur
span = np.where(span == 0, np.random.uniform(0.01, 0.1, span.shape), span)
return np.random.uniform(lo - alpha * span, hi + alpha * span)
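# Worked example (illustrative): with alpha=0.2 and parent genes spanning [0.01, 0.02],
# span=0.01 and offspring genes are drawn uniformly from [0.008, 0.022].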
def _mutate(
self,
n: int = 9,
mutation: float = 0.5,
sigma: float = 0.2,
) -> dict[str, float]:
x = None
# Try MongoDB first if available
if self.mongodb:
if results := self._get_mongodb_results(n):
# MongoDB already sorted by fitness DESC, so results[0] is best
x = np.array(
[
[r["fitness"]] + [r["hyperparameters"].get(k, self.args.get(k)) for k in self.space.keys()]
for r in results
]
)
elif self.collection.name in self.collection.database.list_collection_names(): # Tuner started elsewhere
x = np.array([[0.0] + [getattr(self.args, k) for k in self.space.keys()]])
# Fall back to CSV if MongoDB unavailable or empty
if x is None and self.tune_csv.exists():
csv_data = np.loadtxt(self.tune_csv, ndmin=2, delimiter=",", skiprows=1)
if len(csv_data) > 0:
fitness = csv_data[:, 0] # first column
order = np.argsort(-fitness)
x = csv_data[order][:n] # top-n sorted by fitness DESC
# Mutate if we have data, otherwise use defaults
if x is not None:
np.random.seed(int(time.time()))
ng = len(self.space)
# Crossover
genes = self._crossover(x)
# Mutation
gains = np.array([v[2] if len(v) == 3 else 1.0 for v in self.space.values()]) # gains 0-1
factors = np.ones(ng)
while np.all(factors == 1): # mutate until a change occurs (prevent duplicates)
mask = np.random.random(ng) < mutation
step = np.random.randn(ng) * (sigma * gains)
factors = np.where(mask, np.exp(step), 1.0).clip(0.25, 4.0)
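# multiplicative log-normal mutation: each selected gene is scaled by exp(N(0, sigma*gain)),
# clipped so no single step changes a gene by more than 4x in either direction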
hyp = {k: float(genes[i] * factors[i]) for i, k in enumerate(self.space.keys())}
else:
hyp = {k: getattr(self.args, k) for k in self.space.keys()}
# Constrain to limits
for k, bounds in self.space.items():
hyp[k] = round(min(max(hyp[k], bounds[0]), bounds[1]), 5)
# Update types
if "close_mosaic" in hyp:
hyp["close_mosaic"] = round(hyp["close_mosaic"])
if "epochs" in hyp:
hyp["epochs"] = round(hyp["epochs"])
return hyp
def __call__(self, iterations: int = 10, cleanup: bool = True):
t0 = time.time()
best_save_dir, best_metrics = None, None
(self.tune_dir / "weights").mkdir(parents=True, exist_ok=True)
# Sync MongoDB to CSV at startup for proper resume logic
if self.mongodb:
self._sync_mongodb_to_csv()
start = 0
if self.tune_csv.exists():
x = np.loadtxt(self.tune_csv, ndmin=2, delimiter=",", skiprows=1)
start = x.shape[0]
LOGGER.info(f"{self.prefix}Resuming tuning run {self.tune_dir} from iteration {start + 1}...")
for i in range(start, iterations):
# Linearly decay sigma from 0.2 → 0.1 over first 300 iterations
frac = min(i / 300.0, 1.0)
sigma_i = 0.2 - 0.1 * frac
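# e.g. at i=150: frac=0.5 and sigma_i=0.15; for i >= 300, sigma_i stays at 0.1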
# Mutate hyperparameters
mutated_hyp = self._mutate(sigma=sigma_i)
LOGGER.info(f"{self.prefix}Starting iteration {i + 1}/{iterations} with hyperparameters: {mutated_hyp}")
metrics = {}
train_args = {**vars(self.args), **mutated_hyp}
save_dir = get_save_dir(get_cfg(train_args))
train_args["save_dir"] = str(save_dir) # pass save_dir to subprocess to ensure same path is used
weights_dir = save_dir / "weights"
try:
# Train YOLO model with mutated hyperparameters (run in subprocess to avoid dataloader hang)
launch = [__import__("sys").executable, "-m", "ultralytics.cfg.__init__"] # workaround yolo not found
cmd = [*launch, "train", *(f"{k}={v}" for k, v in train_args.items())]
return_code = subprocess.run(cmd, check=True).returncode
ckpt_file = weights_dir / ("best.pt" if (weights_dir / "best.pt").exists() else "last.pt")
metrics = torch_load(ckpt_file)["train_metrics"]
assert return_code == 0, "training failed"
# Cleanup
time.sleep(1)
gc.collect()
torch.cuda.empty_cache()
except Exception as e:
LOGGER.error(f"training failure for hyperparameter tuning iteration {i + 1}\n{e}")
# Save results - MongoDB takes precedence
fitness = metrics.get("fitness") or 0.0
if self.mongodb:
self._save_to_mongodb(fitness, mutated_hyp, metrics, i + 1)
self._sync_mongodb_to_csv()
total_mongo_iterations = self.collection.count_documents({})
if total_mongo_iterations >= iterations:
LOGGER.info(
f"{self.prefix}Target iterations ({iterations}) reached in MongoDB ({total_mongo_iterations}). Stopping."
)
break
else:
# Save to CSV only if no MongoDB
log_row = [round(fitness, 5)] + [mutated_hyp[k] for k in self.space.keys()]
headers = "" if self.tune_csv.exists() else (",".join(["fitness", *list(self.space.keys())]) + "\n")
with open(self.tune_csv, "a", encoding="utf-8") as f:
f.write(headers + ",".join(map(str, log_row)) + "\n")
# Get best results
x = np.loadtxt(self.tune_csv, ndmin=2, delimiter=",", skiprows=1)
fitness = x[:, 0] # first column
best_idx = fitness.argmax()
best_is_current = best_idx == i
if best_is_current:
best_save_dir = str(save_dir)
best_metrics = {k: round(v, 5) for k, v in metrics.items()}
for ckpt in weights_dir.glob("*.pt"):
shutil.copy2(ckpt, self.tune_dir / "weights")
elif cleanup:
shutil.rmtree(save_dir, ignore_errors=True) # remove the non-best iteration dir to reduce storage space
# Plot tune results
plot_tune_results(str(self.tune_csv))
# Save and print tune results
header = (
f"{self.prefix}{i + 1}/{iterations} iterations complete ✅ ({time.time() - t0:.2f}s)\n"
f"{self.prefix}Results saved to {colorstr('bold', self.tune_dir)}\n"
f"{self.prefix}Best fitness={fitness[best_idx]} observed at iteration {best_idx + 1}\n"
f"{self.prefix}Best fitness metrics are {best_metrics}\n"
f"{self.prefix}Best fitness model is {best_save_dir}"
)
LOGGER.info("\n" + header)
data = {k: int(v) if k in CFG_INT_KEYS else float(v) for k, v in zip(self.space.keys(), x[best_idx, 1:])}
YAML.save(
self.tune_dir / "best_hyperparameters.yaml",
data=data,
header=remove_colorstr(header.replace(self.prefix, "# ")) + "\n",
)
YAML.print(self.tune_dir / "best_hyperparameters.yaml") | --- +++ @@ -1,4 +1,18 @@ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""
+This module provides functionality for hyperparameter tuning of Ultralytics YOLO models for object detection, instance
+segmentation, image classification, pose estimation, and multi-object tracking.
+
+Hyperparameter tuning is the process of systematically searching for the optimal set of hyperparameters
+that yield the best model performance. This is particularly crucial in deep learning models like YOLO,
+where small changes in hyperparameters can lead to significant differences in model accuracy and efficiency.
+
+Examples:
+ Tune hyperparameters for YOLO26n on COCO8 at imgsz=640 and epochs=10 for 300 tuning iterations.
+ >>> from ultralytics import YOLO
+ >>> model = YOLO("yolo26n.pt")
+ >>> model.tune(data="coco8.yaml", epochs=10, iterations=300, optimizer="AdamW", plots=False, save=False, val=False)
+"""
from __future__ import annotations
@@ -20,8 +34,60 @@
class Tuner:
+ """A class for hyperparameter tuning of YOLO models.
+
+ The class evolves YOLO model hyperparameters over a given number of iterations by mutating them according to the
+ search space and retraining the model to evaluate their performance. Supports both local CSV storage and distributed
+ MongoDB Atlas coordination for multi-machine hyperparameter optimization.
+
+ Attributes:
+ space (dict[str, tuple]): Hyperparameter search space containing bounds and scaling factors for mutation.
+ tune_dir (Path): Directory where evolution logs and results will be saved.
+ tune_csv (Path): Path to the CSV file where evolution logs are saved.
+ args (SimpleNamespace): Configuration arguments for the tuning process.
+ callbacks (dict): Callback functions to be executed during tuning.
+ prefix (str): Prefix string for logging messages.
+ mongodb (MongoClient): Optional MongoDB client for distributed tuning.
+ collection (Collection): MongoDB collection for storing tuning results.
+
+ Methods:
+ _mutate: Mutate hyperparameters based on bounds and scaling factors.
+ __call__: Execute the hyperparameter evolution across multiple iterations.
+
+ Examples:
+ Tune hyperparameters for YOLO26n on COCO8 at imgsz=640 and epochs=10 for 300 tuning iterations.
+ >>> from ultralytics import YOLO
+ >>> model = YOLO("yolo26n.pt")
+ >>> model.tune(
+ ... data="coco8.yaml",
+ ... epochs=10,
+ ... iterations=300,
+ ... plots=False,
+ ... save=False,
+ ... val=False
+ ... )
+
+ Tune with distributed MongoDB Atlas coordination across multiple machines:
+ >>> model.tune(
+ ... data="coco8.yaml",
+ ... epochs=10,
+ ... iterations=300,
+ ... mongodb_uri="mongodb+srv://user:pass@cluster.mongodb.net/",
+ ... mongodb_db="ultralytics",
+ ... mongodb_collection="tune_results"
+ ... )
+
+ Tune with custom search space:
+ >>> model.tune(space={"lr0": (1e-5, 1e-1), "momentum": (0.6, 0.98)})
+ """
def __init__(self, args=DEFAULT_CFG, _callbacks: dict | None = None):
+ """Initialize the Tuner with configurations.
+
+ Args:
+ args (dict): Configuration for hyperparameter evolution.
+ _callbacks (dict | None, optional): Callback functions to be executed during tuning.
+ """
self.space = args.pop("space", None) or { # key: (min, max, gain(optional))
# 'optimizer': tune.choice(['SGD', 'Adam', 'AdamW', 'NAdam', 'RAdam', 'RMSProp']),
"lr0": (1e-5, 1e-2), # initial learning rate (i.e. SGD=1E-2, Adam=1E-3)
@@ -74,6 +140,15 @@ )
def _connect(self, uri: str = "mongodb+srv://username:password@cluster.mongodb.net/", max_retries: int = 3):
+ """Create MongoDB client with exponential backoff retry on connection failures.
+
+ Args:
+ uri (str): MongoDB connection string with credentials and cluster information.
+ max_retries (int): Maximum number of connection attempts before giving up.
+
+ Returns:
+ (MongoClient): Connected MongoDB client instance.
+ """
check_requirements("pymongo")
from pymongo import MongoClient
@@ -105,18 +180,49 @@ time.sleep(wait_time)
def _init_mongodb(self, mongodb_uri="", mongodb_db="", mongodb_collection=""):
+ """Initialize MongoDB connection for distributed tuning.
+
+ Connects to MongoDB Atlas for distributed hyperparameter optimization across multiple machines. Each worker
+ saves results to a shared collection and reads the latest best hyperparameters from all workers for evolution.
+
+ Args:
+ mongodb_uri (str): MongoDB connection string, e.g. 'mongodb+srv://username:password@cluster.mongodb.net/'.
+ mongodb_db (str, optional): Database name.
+ mongodb_collection (str, optional): Collection name.
+
+ Notes:
+ - Creates a fitness index for fast queries of top results
+ - Falls back to CSV-only mode if connection fails
+ - Uses connection pooling and retry logic for production reliability
+ """
self.mongodb = self._connect(mongodb_uri)
self.collection = self.mongodb[mongodb_db][mongodb_collection]
self.collection.create_index([("fitness", -1)], background=True)
LOGGER.info(f"{self.prefix}Using MongoDB Atlas for distributed tuning")
def _get_mongodb_results(self, n: int = 5) -> list:
+ """Get top N results from MongoDB sorted by fitness.
+
+ Args:
+ n (int): Number of top results to retrieve.
+
+ Returns:
+ (list[dict]): List of result documents with fitness scores and hyperparameters.
+ """
try:
return list(self.collection.find().sort("fitness", -1).limit(n))
except Exception:
return []
def _save_to_mongodb(self, fitness: float, hyperparameters: dict[str, float], metrics: dict, iteration: int):
+ """Save results to MongoDB with proper type conversion.
+
+ Args:
+ fitness (float): Fitness score achieved with these hyperparameters.
+ hyperparameters (dict[str, float]): Dictionary of hyperparameter values.
+ metrics (dict): Complete training metrics dictionary (mAP, precision, recall, losses, etc.).
+ iteration (int): Current iteration number.
+ """
try:
self.collection.insert_one(
{
@@ -131,6 +237,11 @@ LOGGER.warning(f"{self.prefix}MongoDB save failed: {e}")
def _sync_mongodb_to_csv(self):
+ """Sync MongoDB results to CSV for plotting compatibility.
+
+ Downloads all results from MongoDB and writes them to the local CSV file in chronological order. This enables
+ the existing plotting functions to work seamlessly with distributed MongoDB data.
+ """
try:
# Get all results from MongoDB
all_results = list(self.collection.find().sort("iteration", 1))
@@ -152,6 +263,7 @@
@staticmethod
def _crossover(x: np.ndarray, alpha: float = 0.2, k: int = 9) -> np.ndarray:
+ """BLX-α crossover from up to top-k parents (x[:,0]=fitness, rest=genes)."""
k = min(k, len(x))
# fitness weights (shifted to >0); fallback to uniform if degenerate
weights = x[:, 0] - x[:, 0].min() + 1e-6
@@ -171,6 +283,16 @@ mutation: float = 0.5,
sigma: float = 0.2,
) -> dict[str, float]:
+ """Mutate hyperparameters based on bounds and scaling factors specified in `self.space`.
+
+ Args:
+ n (int): Number of top parents to consider.
+ mutation (float): Probability of a parameter mutation in any given iteration.
+ sigma (float): Standard deviation for Gaussian random number generator.
+
+ Returns:
+ (dict[str, float]): A dictionary containing mutated hyperparameters.
+ """
x = None
# Try MongoDB first if available
@@ -226,6 +348,19 @@ return hyp
def __call__(self, iterations: int = 10, cleanup: bool = True):
+ """Execute the hyperparameter evolution process when the Tuner instance is called.
+
+ This method iterates through the specified number of iterations, performing the following steps:
+ 1. Sync MongoDB results to CSV (if using distributed mode)
+ 2. Mutate hyperparameters using the best previous results or defaults
+ 3. Train a YOLO model with the mutated hyperparameters
+ 4. Log fitness scores and hyperparameters to MongoDB and/or CSV
+ 5. Track the best performing configuration across all iterations
+
+ Args:
+ iterations (int): The number of generations to run the evolution for.
+ cleanup (bool): Whether to delete iteration weights to reduce storage space during tuning.
+ """
t0 = time.time()
best_save_dir, best_metrics = None, None
(self.tune_dir / "weights").mkdir(parents=True, exist_ok=True)
@@ -319,4 +454,4 @@ data=data,
header=remove_colorstr(header.replace(self.prefix, "# ")) + "\n",
)
- YAML.print(self.tune_dir / "best_hyperparameters.yaml")+ YAML.print(self.tune_dir / "best_hyperparameters.yaml")
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/engine/tuner.py |
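The tuner docstrings in the row above describe a MongoDB-backed pattern: a descending fitness index, one insert per iteration, and a top-N query each worker reads before mutating. Below is a minimal standalone sketch of that access pattern with pymongo; it assumes pymongo is installed and a MongoDB deployment is reachable, and the connection string, database, and collection names are illustrative placeholders, not the values the Tuner actually uses.

```python
# Minimal sketch of the MongoDB access pattern described in the docstrings above.
# Assumes a reachable MongoDB deployment; URI/database/collection names are illustrative.
from pymongo import MongoClient


def top_results(collection, n: int = 5) -> list[dict]:
    """Return the top-n documents sorted by fitness (descending), or [] on failure."""
    try:
        return list(collection.find().sort("fitness", -1).limit(n))
    except Exception:
        return []


client = MongoClient("mongodb://localhost:27017/", serverSelectionTimeoutMS=5000)
collection = client["tuning"]["tune_results"]

# Descending index on fitness keeps the top-n query fast as results accumulate
collection.create_index([("fitness", -1)], background=True)

# Each worker saves one document per iteration using plain Python types
collection.insert_one(
    {
        "fitness": 0.482,
        "hyperparameters": {"lr0": 0.01, "momentum": 0.937},
        "iteration": 3,
    }
)

print(top_results(collection, n=5))
```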
Write clean docstrings for readability | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
from __future__ import annotations
from collections import OrderedDict
from typing import Callable
import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint
from .model_misc import LayerScale
class ResidualAttentionBlock(nn.Module):
def __init__(
self,
d_model: int,
n_head: int,
mlp_ratio: float = 4.0,
ls_init_value: float | None = None,
act_layer: Callable[[], nn.Module] = nn.GELU,
norm_layer: Callable[[int], nn.Module] = nn.LayerNorm,
):
super().__init__()
# Attention
self.attn = nn.MultiheadAttention(d_model, n_head, batch_first=True)
# LayerNorm, LayerScale
self.ln_1 = norm_layer(d_model)
self.ln_2 = norm_layer(d_model)
self.ls_1 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()
self.ls_2 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()
# MLP
mlp_width = int(d_model * mlp_ratio)
self.mlp = nn.Sequential(
OrderedDict(
[
("c_fc", nn.Linear(d_model, mlp_width)),
("gelu", act_layer()),
("c_proj", nn.Linear(mlp_width, d_model)),
]
)
)
def attention(
self, q_x: torch.Tensor, k_x: torch.Tensor = None, v_x: torch.Tensor = None, attn_mask: torch.Tensor = None
) -> torch.Tensor:
k_x = k_x if k_x is not None else q_x
v_x = v_x if v_x is not None else q_x
if attn_mask is not None:
# Leave boolean masks as is
if not attn_mask.dtype == torch.bool:
attn_mask = attn_mask.to(q_x.dtype)
return self.attn(q_x, k_x, v_x, need_weights=False, attn_mask=attn_mask)[0]
def forward(
self, q_x: torch.Tensor, k_x: torch.Tensor = None, v_x: torch.Tensor = None, attn_mask: torch.Tensor = None
) -> torch.Tensor:
k_x = self.ln_1_kv(k_x) if hasattr(self, "ln_1_kv") and k_x is not None else None
v_x = self.ln_1_kv(v_x) if hasattr(self, "ln_1_kv") and v_x is not None else None
x = q_x + self.ls_1(self.attention(q_x=self.ln_1(q_x), k_x=k_x, v_x=v_x, attn_mask=attn_mask))
x = x + self.ls_2(self.mlp(self.ln_2(x)))
return x
class Transformer(nn.Module):
def __init__(
self,
width: int,
layers: int,
heads: int,
mlp_ratio: float = 4.0,
ls_init_value: float | None = None,
act_layer: Callable[[], nn.Module] = nn.GELU,
norm_layer: Callable[[int], nn.Module] = nn.LayerNorm,
compile_mode: str | None = None,
use_act_checkpoint: bool = False,
):
super().__init__()
self.width = width
self.layers = layers
self.grad_checkpointing = use_act_checkpoint
self.resblocks = nn.ModuleList(
[
ResidualAttentionBlock(
width,
heads,
mlp_ratio,
ls_init_value=ls_init_value,
act_layer=act_layer,
norm_layer=norm_layer,
)
for _ in range(layers)
]
)
if compile_mode is not None:
self.forward = torch.compile(self.forward, mode=compile_mode, fullgraph=True)
if self.grad_checkpointing:
torch._dynamo.config.optimize_ddp = False
def forward(self, x: torch.Tensor, attn_mask: torch.Tensor = None) -> torch.Tensor:
for _, r in enumerate(self.resblocks):
if self.grad_checkpointing and not torch.jit.is_scripting() and self.training:
x = checkpoint(r, x, None, None, attn_mask, use_reentrant=False)
else:
x = r(x, attn_mask=attn_mask)
return x
def text_global_pool(
x: torch.Tensor, text: torch.Tensor = None, pool_type: str = "argmax"
) -> tuple[torch.Tensor, torch.Tensor]:
if pool_type == "first":
pooled, tokens = x[:, 0], x[:, 1:]
elif pool_type == "last":
pooled, tokens = x[:, -1], x[:, :-1]
elif pool_type == "argmax":
# take features from the eot embedding (eot_token is the highest number in each sequence)
assert text is not None
pooled, tokens = x[torch.arange(x.shape[0]), text.argmax(dim=-1)], x
else:
pooled = tokens = x
return pooled, tokens
class TextTransformer(nn.Module):
def __init__(
self,
context_length: int = 77,
vocab_size: int = 49408,
width: int = 512,
heads: int = 8,
layers: int = 12,
mlp_ratio: float = 4.0,
ls_init_value: float | None = None,
output_dim: int = 512,
no_causal_mask: bool = False,
pool_type: str = "none", # no pooling
proj_bias: bool = False,
act_layer: Callable = nn.GELU,
norm_layer: Callable = nn.LayerNorm,
output_tokens: bool = False,
use_ln_post: bool = True,
compile_mode: str | None = None,
use_act_checkpoint: bool = False,
):
super().__init__()
assert pool_type in ("first", "last", "argmax", "none")
self.output_tokens = output_tokens
self.num_pos = self.context_length = context_length
self.vocab_size = vocab_size
self.width = width
self.output_dim = output_dim
self.heads = heads
self.pool_type = pool_type
self.token_embedding = nn.Embedding(self.vocab_size, width)
self.positional_embedding = nn.Parameter(torch.empty(self.num_pos, width))
self.transformer = Transformer(
width=width,
layers=layers,
heads=heads,
mlp_ratio=mlp_ratio,
ls_init_value=ls_init_value,
act_layer=act_layer,
norm_layer=norm_layer,
compile_mode=compile_mode,
use_act_checkpoint=use_act_checkpoint,
)
self.ln_final = norm_layer(width) if use_ln_post else nn.Identity()
if no_causal_mask:
self.attn_mask = None
else:
self.register_buffer("attn_mask", self.build_causal_mask(), persistent=False)
if proj_bias:
self.text_projection = nn.Linear(width, output_dim)
else:
self.text_projection = nn.Parameter(torch.empty(width, output_dim))
def build_causal_mask(self) -> torch.Tensor:
# lazily create causal attention mask, with full attention between the tokens
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(self.num_pos, self.num_pos)
mask.fill_(float("-inf"))
mask.triu_(1) # zero out the lower diagonal
return mask
def forward(self, text: torch.Tensor) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
seq_len = text.shape[1]
x = self.token_embedding(text) # [batch_size, n_ctx, d_model]
attn_mask = self.attn_mask
if attn_mask is not None:
attn_mask = attn_mask[:seq_len, :seq_len]
x = x + self.positional_embedding[:seq_len]
x = self.transformer(x, attn_mask=attn_mask)
x = self.ln_final(x)
pooled, tokens = text_global_pool(x, text, pool_type=self.pool_type)
if self.text_projection is not None:
if isinstance(self.text_projection, nn.Linear):
pooled = self.text_projection(pooled)
else:
pooled = pooled @ self.text_projection
if self.output_tokens:
return pooled, tokens
return pooled
class VETextEncoder(nn.Module):
def __init__(
self,
d_model: int,
tokenizer: Callable,
width: int = 1024,
heads: int = 16,
layers: int = 24,
context_length: int = 32,
vocab_size: int = 49408,
use_ln_post: bool = True,
compile_mode: str | None = None,
use_act_checkpoint: bool = True,
):
super().__init__()
self.context_length = context_length
self.use_ln_post = use_ln_post
self.tokenizer = tokenizer
self.encoder = TextTransformer(
context_length=self.context_length,
vocab_size=vocab_size,
width=width,
heads=heads,
layers=layers,
# we want the tokens, not just the pooled output
output_tokens=True,
use_ln_post=use_ln_post,
compile_mode=compile_mode,
use_act_checkpoint=use_act_checkpoint,
)
self.resizer = nn.Linear(self.encoder.width, d_model)
def forward(
self, text: list[str] | tuple[torch.Tensor, torch.Tensor, dict], input_boxes: list | None = None
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
if isinstance(text[0], str):
# no use case for this
assert input_boxes is None or len(input_boxes) == 0, "not supported"
# Encode the text
tokenized = self.tokenizer(text, context_length=self.context_length).to(
self.resizer.weight.device
) # [b, seq_len]
text_attention_mask = (tokenized != 0).bool()
# manually embed the tokens
inputs_embeds = self.encoder.token_embedding(tokenized) # [b, seq_len, d=1024]
_, text_memory = self.encoder(tokenized) # [b, seq_len, d=1024]
assert text_memory.shape[1] == inputs_embeds.shape[1]
            # Invert attention mask because it's the opposite in pytorch transformer
text_attention_mask = text_attention_mask.ne(1)
# Transpose memory because pytorch's attention expects sequence first
text_memory = text_memory.transpose(0, 1)
# Resize the encoder hidden states to be of the same d_model as the decoder
text_memory_resized = self.resizer(text_memory)
else:
# The text is already encoded, use as is.
text_attention_mask, text_memory_resized, tokenized = text
inputs_embeds = tokenized["inputs_embeds"]
assert input_boxes is None or len(input_boxes) == 0, "Can't replace boxes in text if it's already encoded"
# Note that the input_embeds are returned in pytorch's convention (sequence first)
return (
text_attention_mask,
text_memory_resized,
inputs_embeds.transpose(0, 1),
) | --- +++ @@ -15,6 +15,7 @@
class ResidualAttentionBlock(nn.Module):
+ """Transformer block with multi-head attention, layer normalization, and MLP feed-forward network."""
def __init__(
self,
@@ -25,6 +26,7 @@ act_layer: Callable[[], nn.Module] = nn.GELU,
norm_layer: Callable[[int], nn.Module] = nn.LayerNorm,
):
+ """Initialize residual attention block with configurable dimensions and normalization."""
super().__init__()
# Attention
self.attn = nn.MultiheadAttention(d_model, n_head, batch_first=True)
@@ -51,6 +53,7 @@ def attention(
self, q_x: torch.Tensor, k_x: torch.Tensor = None, v_x: torch.Tensor = None, attn_mask: torch.Tensor = None
) -> torch.Tensor:
+ """Compute multi-head attention with optional cross-attention support and masking."""
k_x = k_x if k_x is not None else q_x
v_x = v_x if v_x is not None else q_x
if attn_mask is not None:
@@ -63,6 +66,7 @@ def forward(
self, q_x: torch.Tensor, k_x: torch.Tensor = None, v_x: torch.Tensor = None, attn_mask: torch.Tensor = None
) -> torch.Tensor:
+ """Apply residual attention with layer normalization and MLP, supporting optional cross-attention."""
k_x = self.ln_1_kv(k_x) if hasattr(self, "ln_1_kv") and k_x is not None else None
v_x = self.ln_1_kv(v_x) if hasattr(self, "ln_1_kv") and v_x is not None else None
x = q_x + self.ls_1(self.attention(q_x=self.ln_1(q_x), k_x=k_x, v_x=v_x, attn_mask=attn_mask))
@@ -71,6 +75,7 @@
class Transformer(nn.Module):
+ """Stack of residual attention blocks forming a transformer encoder with optional gradient checkpointing."""
def __init__(
self,
@@ -84,6 +89,7 @@ compile_mode: str | None = None,
use_act_checkpoint: bool = False,
):
+ """Initialize transformer with configurable depth, width, and optional compilation/checkpointing."""
super().__init__()
self.width = width
self.layers = layers
@@ -108,6 +114,7 @@ torch._dynamo.config.optimize_ddp = False
def forward(self, x: torch.Tensor, attn_mask: torch.Tensor = None) -> torch.Tensor:
+ """Process input through all transformer blocks with optional gradient checkpointing during training."""
for _, r in enumerate(self.resblocks):
if self.grad_checkpointing and not torch.jit.is_scripting() and self.training:
x = checkpoint(r, x, None, None, attn_mask, use_reentrant=False)
@@ -119,6 +126,9 @@ def text_global_pool(
x: torch.Tensor, text: torch.Tensor = None, pool_type: str = "argmax"
) -> tuple[torch.Tensor, torch.Tensor]:
+ """Extract pooled representation and tokens from text embeddings using specified pooling strategy
+ (first/last/argmax/none).
+ """
if pool_type == "first":
pooled, tokens = x[:, 0], x[:, 1:]
elif pool_type == "last":
@@ -133,6 +143,7 @@
class TextTransformer(nn.Module):
+ """Text transformer encoder with causal masking and flexible pooling strategies."""
def __init__(
self,
@@ -154,6 +165,7 @@ compile_mode: str | None = None,
use_act_checkpoint: bool = False,
):
+ """Initialize text transformer with embedding layers, transformer blocks, and pooling options."""
super().__init__()
assert pool_type in ("first", "last", "argmax", "none")
self.output_tokens = output_tokens
@@ -188,6 +200,7 @@ self.text_projection = nn.Parameter(torch.empty(width, output_dim))
def build_causal_mask(self) -> torch.Tensor:
+ """Create a causal attention mask to prevent attention to future tokens."""
# lazily create causal attention mask, with full attention between the tokens
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(self.num_pos, self.num_pos)
@@ -196,6 +209,7 @@ return mask
def forward(self, text: torch.Tensor) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
+ """Forward pass through the text transformer, returning pooled output and optionally token embeddings."""
seq_len = text.shape[1]
x = self.token_embedding(text) # [batch_size, n_ctx, d_model]
@@ -219,6 +233,7 @@
class VETextEncoder(nn.Module):
+ """Text encoder for Vision Encoder (VE) models, combining a text transformer and a linear resizer."""
def __init__(
self,
@@ -233,6 +248,7 @@ compile_mode: str | None = None,
use_act_checkpoint: bool = True,
):
+ """Initialize VE text encoder with a text transformer and a linear resizer to match decoder dimensions."""
super().__init__()
self.context_length = context_length
self.use_ln_post = use_ln_post
@@ -255,6 +271,7 @@ def forward(
self, text: list[str] | tuple[torch.Tensor, torch.Tensor, dict], input_boxes: list | None = None
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ """Encode text input, either raw strings or pre-encoded tensors, and resize to match decoder dimensions."""
if isinstance(text[0], str):
# no use case for this
assert input_boxes is None or len(input_boxes) == 0, "not supported"
@@ -287,4 +304,4 @@ text_attention_mask,
text_memory_resized,
inputs_embeds.transpose(0, 1),
- )+ )
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/sam/sam3/text_encoder_ve.py |
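The `build_causal_mask` code in the row above fills an additive mask with `-inf` above the diagonal and slices it to the running sequence length in `forward`. The short self-contained sketch below shows why that construction blocks attention to future tokens; the context length and score values are made up for illustration.

```python
# Standalone illustration of the additive causal mask built by build_causal_mask above.
import torch

num_pos = 8  # context length (illustrative)
mask = torch.empty(num_pos, num_pos)
mask.fill_(float("-inf"))
mask.triu_(1)  # -inf strictly above the diagonal, zeros on and below it

# forward() slices the mask to the actual sequence length before attention
seq_len = 5
attn_mask = mask[:seq_len, :seq_len]

# Additive masking: -inf entries become zero weights after softmax
scores = torch.randn(seq_len, seq_len)
weights = torch.softmax(scores + attn_mask, dim=-1)
print(weights)  # each row only attends to the current and earlier positions
```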
Generate docstrings for exported functions | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from typing import Any
import torch
import torch.nn as nn
import torch.nn.functional as F
from ultralytics.utils.loss import FocalLoss, VarifocalLoss
from ultralytics.utils.metrics import bbox_iou
from .ops import HungarianMatcher
class DETRLoss(nn.Module):
def __init__(
self,
nc: int = 80,
loss_gain: dict[str, float] | None = None,
aux_loss: bool = True,
use_fl: bool = True,
use_vfl: bool = False,
use_uni_match: bool = False,
uni_match_ind: int = 0,
gamma: float = 1.5,
alpha: float = 0.25,
):
super().__init__()
if loss_gain is None:
loss_gain = {"class": 1, "bbox": 5, "giou": 2, "no_object": 0.1, "mask": 1, "dice": 1}
self.nc = nc
self.matcher = HungarianMatcher(cost_gain={"class": 2, "bbox": 5, "giou": 2})
self.loss_gain = loss_gain
self.aux_loss = aux_loss
self.fl = FocalLoss(gamma, alpha) if use_fl else None
self.vfl = VarifocalLoss(gamma, alpha) if use_vfl else None
self.use_uni_match = use_uni_match
self.uni_match_ind = uni_match_ind
self.device = None
def _get_loss_class(
self, pred_scores: torch.Tensor, targets: torch.Tensor, gt_scores: torch.Tensor, num_gts: int, postfix: str = ""
) -> dict[str, torch.Tensor]:
# Logits: [b, query, num_classes], gt_class: list[[n, 1]]
name_class = f"loss_class{postfix}"
bs, nq = pred_scores.shape[:2]
# one_hot = F.one_hot(targets, self.nc + 1)[..., :-1] # (bs, num_queries, num_classes)
one_hot = torch.zeros((bs, nq, self.nc + 1), dtype=torch.int64, device=targets.device)
one_hot.scatter_(2, targets.unsqueeze(-1), 1)
one_hot = one_hot[..., :-1]
gt_scores = gt_scores.view(bs, nq, 1) * one_hot
if self.fl:
if num_gts and self.vfl:
loss_cls = self.vfl(pred_scores, gt_scores, one_hot)
else:
loss_cls = self.fl(pred_scores, one_hot.float())
loss_cls /= max(num_gts, 1) / nq
else:
loss_cls = nn.BCEWithLogitsLoss(reduction="none")(pred_scores, gt_scores).mean(1).sum() # YOLO CLS loss
return {name_class: loss_cls.squeeze() * self.loss_gain["class"]}
def _get_loss_bbox(
self, pred_bboxes: torch.Tensor, gt_bboxes: torch.Tensor, postfix: str = ""
) -> dict[str, torch.Tensor]:
# Boxes: [b, query, 4], gt_bbox: list[[n, 4]]
name_bbox = f"loss_bbox{postfix}"
name_giou = f"loss_giou{postfix}"
loss = {}
if len(gt_bboxes) == 0:
loss[name_bbox] = torch.tensor(0.0, device=self.device)
loss[name_giou] = torch.tensor(0.0, device=self.device)
return loss
loss[name_bbox] = self.loss_gain["bbox"] * F.l1_loss(pred_bboxes, gt_bboxes, reduction="sum") / len(gt_bboxes)
loss[name_giou] = 1.0 - bbox_iou(pred_bboxes, gt_bboxes, xywh=True, GIoU=True)
loss[name_giou] = loss[name_giou].sum() / len(gt_bboxes)
loss[name_giou] = self.loss_gain["giou"] * loss[name_giou]
return {k: v.squeeze() for k, v in loss.items()}
# This function is for future RT-DETR Segment models
# def _get_loss_mask(self, masks, gt_mask, match_indices, postfix=''):
# # masks: [b, query, h, w], gt_mask: list[[n, H, W]]
# name_mask = f'loss_mask{postfix}'
# name_dice = f'loss_dice{postfix}'
#
# loss = {}
# if sum(len(a) for a in gt_mask) == 0:
# loss[name_mask] = torch.tensor(0., device=self.device)
# loss[name_dice] = torch.tensor(0., device=self.device)
# return loss
#
# num_gts = len(gt_mask)
# src_masks, target_masks = self._get_assigned_bboxes(masks, gt_mask, match_indices)
# src_masks = F.interpolate(src_masks.unsqueeze(0), size=target_masks.shape[-2:], mode='bilinear')[0]
# # TODO: torch does not have `sigmoid_focal_loss`, but it's not urgent since we don't use mask branch for now.
# loss[name_mask] = self.loss_gain['mask'] * F.sigmoid_focal_loss(src_masks, target_masks,
# torch.tensor([num_gts], dtype=torch.float32))
# loss[name_dice] = self.loss_gain['dice'] * self._dice_loss(src_masks, target_masks, num_gts)
# return loss
# This function is for future RT-DETR Segment models
# @staticmethod
# def _dice_loss(inputs, targets, num_gts):
# inputs = F.sigmoid(inputs).flatten(1)
# targets = targets.flatten(1)
# numerator = 2 * (inputs * targets).sum(1)
# denominator = inputs.sum(-1) + targets.sum(-1)
# loss = 1 - (numerator + 1) / (denominator + 1)
# return loss.sum() / num_gts
def _get_loss_aux(
self,
pred_bboxes: torch.Tensor,
pred_scores: torch.Tensor,
gt_bboxes: torch.Tensor,
gt_cls: torch.Tensor,
gt_groups: list[int],
match_indices: list[tuple] | None = None,
postfix: str = "",
masks: torch.Tensor | None = None,
gt_mask: torch.Tensor | None = None,
) -> dict[str, torch.Tensor]:
# NOTE: loss class, bbox, giou, mask, dice
loss = torch.zeros(5 if masks is not None else 3, device=pred_bboxes.device)
if match_indices is None and self.use_uni_match:
match_indices = self.matcher(
pred_bboxes[self.uni_match_ind],
pred_scores[self.uni_match_ind],
gt_bboxes,
gt_cls,
gt_groups,
masks=masks[self.uni_match_ind] if masks is not None else None,
gt_mask=gt_mask,
)
for i, (aux_bboxes, aux_scores) in enumerate(zip(pred_bboxes, pred_scores)):
aux_masks = masks[i] if masks is not None else None
loss_ = self._get_loss(
aux_bboxes,
aux_scores,
gt_bboxes,
gt_cls,
gt_groups,
masks=aux_masks,
gt_mask=gt_mask,
postfix=postfix,
match_indices=match_indices,
)
loss[0] += loss_[f"loss_class{postfix}"]
loss[1] += loss_[f"loss_bbox{postfix}"]
loss[2] += loss_[f"loss_giou{postfix}"]
# if masks is not None and gt_mask is not None:
# loss_ = self._get_loss_mask(aux_masks, gt_mask, match_indices, postfix)
# loss[3] += loss_[f'loss_mask{postfix}']
# loss[4] += loss_[f'loss_dice{postfix}']
loss = {
f"loss_class_aux{postfix}": loss[0],
f"loss_bbox_aux{postfix}": loss[1],
f"loss_giou_aux{postfix}": loss[2],
}
# if masks is not None and gt_mask is not None:
# loss[f'loss_mask_aux{postfix}'] = loss[3]
# loss[f'loss_dice_aux{postfix}'] = loss[4]
return loss
@staticmethod
def _get_index(match_indices: list[tuple]) -> tuple[tuple[torch.Tensor, torch.Tensor], torch.Tensor]:
batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(match_indices)])
src_idx = torch.cat([src for (src, _) in match_indices])
dst_idx = torch.cat([dst for (_, dst) in match_indices])
return (batch_idx, src_idx), dst_idx
def _get_assigned_bboxes(
self, pred_bboxes: torch.Tensor, gt_bboxes: torch.Tensor, match_indices: list[tuple]
) -> tuple[torch.Tensor, torch.Tensor]:
pred_assigned = torch.cat(
[
t[i] if len(i) > 0 else torch.zeros(0, t.shape[-1], device=self.device)
for t, (i, _) in zip(pred_bboxes, match_indices)
]
)
gt_assigned = torch.cat(
[
t[j] if len(j) > 0 else torch.zeros(0, t.shape[-1], device=self.device)
for t, (_, j) in zip(gt_bboxes, match_indices)
]
)
return pred_assigned, gt_assigned
def _get_loss(
self,
pred_bboxes: torch.Tensor,
pred_scores: torch.Tensor,
gt_bboxes: torch.Tensor,
gt_cls: torch.Tensor,
gt_groups: list[int],
masks: torch.Tensor | None = None,
gt_mask: torch.Tensor | None = None,
postfix: str = "",
match_indices: list[tuple] | None = None,
) -> dict[str, torch.Tensor]:
if match_indices is None:
match_indices = self.matcher(
pred_bboxes, pred_scores, gt_bboxes, gt_cls, gt_groups, masks=masks, gt_mask=gt_mask
)
idx, gt_idx = self._get_index(match_indices)
pred_bboxes, gt_bboxes = pred_bboxes[idx], gt_bboxes[gt_idx]
bs, nq = pred_scores.shape[:2]
targets = torch.full((bs, nq), self.nc, device=pred_scores.device, dtype=gt_cls.dtype)
targets[idx] = gt_cls[gt_idx]
gt_scores = torch.zeros([bs, nq], device=pred_scores.device)
if len(gt_bboxes):
gt_scores[idx] = bbox_iou(pred_bboxes.detach(), gt_bboxes, xywh=True).squeeze(-1)
return {
**self._get_loss_class(pred_scores, targets, gt_scores, len(gt_bboxes), postfix),
**self._get_loss_bbox(pred_bboxes, gt_bboxes, postfix),
# **(self._get_loss_mask(masks, gt_mask, match_indices, postfix) if masks is not None and gt_mask is not None else {})
}
def forward(
self,
pred_bboxes: torch.Tensor,
pred_scores: torch.Tensor,
batch: dict[str, Any],
postfix: str = "",
**kwargs: Any,
) -> dict[str, torch.Tensor]:
self.device = pred_bboxes.device
match_indices = kwargs.get("match_indices", None)
gt_cls, gt_bboxes, gt_groups = batch["cls"], batch["bboxes"], batch["gt_groups"]
total_loss = self._get_loss(
pred_bboxes[-1], pred_scores[-1], gt_bboxes, gt_cls, gt_groups, postfix=postfix, match_indices=match_indices
)
if self.aux_loss:
total_loss.update(
self._get_loss_aux(
pred_bboxes[:-1], pred_scores[:-1], gt_bboxes, gt_cls, gt_groups, match_indices, postfix
)
)
return total_loss
class RTDETRDetectionLoss(DETRLoss):
def forward(
self,
preds: tuple[torch.Tensor, torch.Tensor],
batch: dict[str, Any],
dn_bboxes: torch.Tensor | None = None,
dn_scores: torch.Tensor | None = None,
dn_meta: dict[str, Any] | None = None,
) -> dict[str, torch.Tensor]:
pred_bboxes, pred_scores = preds
total_loss = super().forward(pred_bboxes, pred_scores, batch)
# Check for denoising metadata to compute denoising training loss
if dn_meta is not None:
dn_pos_idx, dn_num_group = dn_meta["dn_pos_idx"], dn_meta["dn_num_group"]
assert len(batch["gt_groups"]) == len(dn_pos_idx)
# Get the match indices for denoising
match_indices = self.get_dn_match_indices(dn_pos_idx, dn_num_group, batch["gt_groups"])
# Compute the denoising training loss
dn_loss = super().forward(dn_bboxes, dn_scores, batch, postfix="_dn", match_indices=match_indices)
total_loss.update(dn_loss)
else:
# If no denoising metadata is provided, set denoising loss to zero
total_loss.update({f"{k}_dn": torch.tensor(0.0, device=self.device) for k in total_loss.keys()})
return total_loss
@staticmethod
def get_dn_match_indices(
dn_pos_idx: list[torch.Tensor], dn_num_group: int, gt_groups: list[int]
) -> list[tuple[torch.Tensor, torch.Tensor]]:
dn_match_indices = []
idx_groups = torch.as_tensor([0, *gt_groups[:-1]]).cumsum_(0)
for i, num_gt in enumerate(gt_groups):
if num_gt > 0:
gt_idx = torch.arange(end=num_gt, dtype=torch.long) + idx_groups[i]
gt_idx = gt_idx.repeat(dn_num_group)
assert len(dn_pos_idx[i]) == len(gt_idx), (
f"Expected the same length, but got {len(dn_pos_idx[i])} and {len(gt_idx)} respectively."
)
dn_match_indices.append((dn_pos_idx[i], gt_idx))
else:
dn_match_indices.append((torch.zeros([0], dtype=torch.long), torch.zeros([0], dtype=torch.long)))
return dn_match_indices | --- +++ @@ -15,6 +15,24 @@
class DETRLoss(nn.Module):
+ """DETR (DEtection TRansformer) Loss class for calculating various loss components.
+
+ This class computes classification loss, bounding box loss, GIoU loss, and optionally auxiliary losses for the DETR
+ object detection model.
+
+ Attributes:
+ nc (int): Number of classes.
+ loss_gain (dict[str, float]): Coefficients for different loss components.
+ aux_loss (bool): Whether to compute auxiliary losses.
+ use_fl (bool): Whether to use FocalLoss.
+ use_vfl (bool): Whether to use VarifocalLoss.
+ use_uni_match (bool): Whether to use a fixed layer for auxiliary branch label assignment.
+ uni_match_ind (int): Index of fixed layer to use if use_uni_match is True.
+ matcher (HungarianMatcher): Object to compute matching cost and indices.
+ fl (FocalLoss | None): Focal Loss object if use_fl is True, otherwise None.
+ vfl (VarifocalLoss | None): Varifocal Loss object if use_vfl is True, otherwise None.
+ device (torch.device): Device on which tensors are stored.
+ """
def __init__(
self,
@@ -28,6 +46,22 @@ gamma: float = 1.5,
alpha: float = 0.25,
):
+ """Initialize DETR loss function with customizable components and gains.
+
+ Uses default loss_gain if not provided. Initializes HungarianMatcher with preset cost gains. Supports auxiliary
+ losses and various loss types.
+
+ Args:
+ nc (int): Number of classes.
+ loss_gain (dict[str, float], optional): Coefficients for different loss components.
+ aux_loss (bool): Whether to use auxiliary losses from each decoder layer.
+ use_fl (bool): Whether to use FocalLoss.
+ use_vfl (bool): Whether to use VarifocalLoss.
+ use_uni_match (bool): Whether to use fixed layer for auxiliary branch label assignment.
+ uni_match_ind (int): Index of fixed layer for uni_match.
+ gamma (float): The focusing parameter that controls how much the loss focuses on hard-to-classify examples.
+ alpha (float): The balancing factor used to address class imbalance.
+ """
super().__init__()
if loss_gain is None:
@@ -46,6 +80,24 @@ def _get_loss_class(
self, pred_scores: torch.Tensor, targets: torch.Tensor, gt_scores: torch.Tensor, num_gts: int, postfix: str = ""
) -> dict[str, torch.Tensor]:
+ """Compute classification loss based on predictions, target values, and ground truth scores.
+
+ Args:
+ pred_scores (torch.Tensor): Predicted class scores with shape (B, N, C).
+ targets (torch.Tensor): Target class indices with shape (B, N).
+ gt_scores (torch.Tensor): Ground truth confidence scores with shape (B, N).
+ num_gts (int): Number of ground truth objects.
+ postfix (str, optional): String to append to the loss name for identification in multi-loss scenarios.
+
+ Returns:
+ (dict[str, torch.Tensor]): Dictionary containing classification loss value.
+
+ Notes:
+ The function supports different classification loss types:
+ - Varifocal Loss (if self.vfl is not None and num_gts > 0)
+ - Focal Loss (if self.fl is not None)
+ - BCE Loss (default fallback)
+ """
# Logits: [b, query, num_classes], gt_class: list[[n, 1]]
name_class = f"loss_class{postfix}"
bs, nq = pred_scores.shape[:2]
@@ -69,6 +121,21 @@ def _get_loss_bbox(
self, pred_bboxes: torch.Tensor, gt_bboxes: torch.Tensor, postfix: str = ""
) -> dict[str, torch.Tensor]:
+ """Compute bounding box and GIoU losses for predicted and ground truth bounding boxes.
+
+ Args:
+ pred_bboxes (torch.Tensor): Predicted bounding boxes with shape (N, 4).
+ gt_bboxes (torch.Tensor): Ground truth bounding boxes with shape (N, 4).
+ postfix (str, optional): String to append to the loss names for identification in multi-loss scenarios.
+
+ Returns:
+ (dict[str, torch.Tensor]): Dictionary containing:
+ - loss_bbox{postfix}: L1 loss between predicted and ground truth boxes, scaled by the bbox loss gain.
+ - loss_giou{postfix}: GIoU loss between predicted and ground truth boxes, scaled by the giou loss gain.
+
+ Notes:
+ If no ground truth boxes are provided (empty list), zero-valued tensors are returned for both losses.
+ """
# Boxes: [b, query, 4], gt_bbox: list[[n, 4]]
name_bbox = f"loss_bbox{postfix}"
name_giou = f"loss_giou{postfix}"
@@ -128,6 +195,22 @@ masks: torch.Tensor | None = None,
gt_mask: torch.Tensor | None = None,
) -> dict[str, torch.Tensor]:
+ """Get auxiliary losses for intermediate decoder layers.
+
+ Args:
+ pred_bboxes (torch.Tensor): Predicted bounding boxes from auxiliary layers.
+ pred_scores (torch.Tensor): Predicted scores from auxiliary layers.
+ gt_bboxes (torch.Tensor): Ground truth bounding boxes.
+ gt_cls (torch.Tensor): Ground truth classes.
+ gt_groups (list[int]): Number of ground truths per image.
+ match_indices (list[tuple], optional): Pre-computed matching indices.
+ postfix (str, optional): String to append to loss names.
+ masks (torch.Tensor, optional): Predicted masks if using segmentation.
+ gt_mask (torch.Tensor, optional): Ground truth masks if using segmentation.
+
+ Returns:
+ (dict[str, torch.Tensor]): Dictionary of auxiliary losses.
+ """
# NOTE: loss class, bbox, giou, mask, dice
loss = torch.zeros(5 if masks is not None else 3, device=pred_bboxes.device)
if match_indices is None and self.use_uni_match:
@@ -173,6 +256,15 @@
@staticmethod
def _get_index(match_indices: list[tuple]) -> tuple[tuple[torch.Tensor, torch.Tensor], torch.Tensor]:
+ """Extract batch indices, source indices, and destination indices from match indices.
+
+ Args:
+ match_indices (list[tuple]): List of tuples containing matched indices.
+
+ Returns:
+ batch_idx (tuple[torch.Tensor, torch.Tensor]): Tuple containing (batch_idx, src_idx).
+ dst_idx (torch.Tensor): Destination indices.
+ """
batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(match_indices)])
src_idx = torch.cat([src for (src, _) in match_indices])
dst_idx = torch.cat([dst for (_, dst) in match_indices])
@@ -181,6 +273,17 @@ def _get_assigned_bboxes(
self, pred_bboxes: torch.Tensor, gt_bboxes: torch.Tensor, match_indices: list[tuple]
) -> tuple[torch.Tensor, torch.Tensor]:
+ """Assign predicted bounding boxes to ground truth bounding boxes based on match indices.
+
+ Args:
+ pred_bboxes (torch.Tensor): Predicted bounding boxes.
+ gt_bboxes (torch.Tensor): Ground truth bounding boxes.
+ match_indices (list[tuple]): List of tuples containing matched indices.
+
+ Returns:
+ pred_assigned (torch.Tensor): Assigned predicted bounding boxes.
+ gt_assigned (torch.Tensor): Assigned ground truth bounding boxes.
+ """
pred_assigned = torch.cat(
[
t[i] if len(i) > 0 else torch.zeros(0, t.shape[-1], device=self.device)
@@ -207,6 +310,22 @@ postfix: str = "",
match_indices: list[tuple] | None = None,
) -> dict[str, torch.Tensor]:
+ """Calculate losses for a single prediction layer.
+
+ Args:
+ pred_bboxes (torch.Tensor): Predicted bounding boxes.
+ pred_scores (torch.Tensor): Predicted class scores.
+ gt_bboxes (torch.Tensor): Ground truth bounding boxes.
+ gt_cls (torch.Tensor): Ground truth classes.
+ gt_groups (list[int]): Number of ground truths per image.
+ masks (torch.Tensor, optional): Predicted masks if using segmentation.
+ gt_mask (torch.Tensor, optional): Ground truth masks if using segmentation.
+ postfix (str, optional): String to append to loss names.
+ match_indices (list[tuple], optional): Pre-computed matching indices.
+
+ Returns:
+ (dict[str, torch.Tensor]): Dictionary of losses.
+ """
if match_indices is None:
match_indices = self.matcher(
pred_bboxes, pred_scores, gt_bboxes, gt_cls, gt_groups, masks=masks, gt_mask=gt_mask
@@ -237,6 +356,22 @@ postfix: str = "",
**kwargs: Any,
) -> dict[str, torch.Tensor]:
+ """Calculate loss for predicted bounding boxes and scores.
+
+ Args:
+ pred_bboxes (torch.Tensor): Predicted bounding boxes, shape (L, B, N, 4).
+ pred_scores (torch.Tensor): Predicted class scores, shape (L, B, N, C).
+ batch (dict[str, Any]): Batch information containing cls, bboxes, and gt_groups.
+ postfix (str, optional): Postfix for loss names.
+ **kwargs (Any): Additional arguments, may include 'match_indices'.
+
+ Returns:
+ (dict[str, torch.Tensor]): Computed losses, including main and auxiliary (if enabled).
+
+ Notes:
+ Uses last elements of pred_bboxes and pred_scores for main loss, and the rest for auxiliary losses if
+ self.aux_loss is True.
+ """
self.device = pred_bboxes.device
match_indices = kwargs.get("match_indices", None)
gt_cls, gt_bboxes, gt_groups = batch["cls"], batch["bboxes"], batch["gt_groups"]
@@ -256,6 +391,11 @@
class RTDETRDetectionLoss(DETRLoss):
+ """Real-Time DEtection TRansformer (RT-DETR) Detection Loss class that extends the DETRLoss.
+
+ This class computes the detection loss for the RT-DETR model, which includes the standard detection loss as well as
+ an additional denoising training loss when provided with denoising metadata.
+ """
def forward(
self,
@@ -265,6 +405,18 @@ dn_scores: torch.Tensor | None = None,
dn_meta: dict[str, Any] | None = None,
) -> dict[str, torch.Tensor]:
+ """Forward pass to compute detection loss with optional denoising loss.
+
+ Args:
+ preds (tuple[torch.Tensor, torch.Tensor]): Tuple containing predicted bounding boxes and scores.
+ batch (dict[str, Any]): Batch data containing ground truth information.
+ dn_bboxes (torch.Tensor, optional): Denoising bounding boxes.
+ dn_scores (torch.Tensor, optional): Denoising scores.
+ dn_meta (dict[str, Any], optional): Metadata for denoising.
+
+ Returns:
+ (dict[str, torch.Tensor]): Dictionary containing total loss and denoising loss if applicable.
+ """
pred_bboxes, pred_scores = preds
total_loss = super().forward(pred_bboxes, pred_scores, batch)
@@ -289,6 +441,16 @@ def get_dn_match_indices(
dn_pos_idx: list[torch.Tensor], dn_num_group: int, gt_groups: list[int]
) -> list[tuple[torch.Tensor, torch.Tensor]]:
+ """Get match indices for denoising.
+
+ Args:
+ dn_pos_idx (list[torch.Tensor]): List of tensors containing positive indices for denoising.
+ dn_num_group (int): Number of denoising groups.
+ gt_groups (list[int]): List of integers representing number of ground truths per image.
+
+ Returns:
+ (list[tuple[torch.Tensor, torch.Tensor]]): List of tuples containing matched indices for denoising.
+ """
dn_match_indices = []
idx_groups = torch.as_tensor([0, *gt_groups[:-1]]).cumsum_(0)
for i, num_gt in enumerate(gt_groups):
@@ -301,4 +463,4 @@ dn_match_indices.append((dn_pos_idx[i], gt_idx))
else:
dn_match_indices.append((torch.zeros([0], dtype=torch.long), torch.zeros([0], dtype=torch.long)))
- return dn_match_indices+ return dn_match_indices
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/utils/loss.py |
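The `_get_index` docstring in the row above covers how per-image Hungarian match indices are flattened into batch/source/destination tensors before gathering matched predictions and targets. The toy sketch below reproduces that flattening, assuming the destination indices point into the ground-truth boxes concatenated across the batch; all shapes and index values are illustrative.

```python
# Toy sketch of the index flattening done by DETRLoss._get_index above.
import torch

# One (src, dst) pair per image: query indices matched to concatenated ground-truth indices
match_indices = [
    (torch.tensor([2, 5]), torch.tensor([0, 1])),  # image 0: queries 2,5 -> gts 0,1
    (torch.tensor([0]), torch.tensor([2])),        # image 1: query 0 -> gt 2
]

batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(match_indices)])
src_idx = torch.cat([src for (src, _) in match_indices])
dst_idx = torch.cat([dst for (_, dst) in match_indices])

pred_bboxes = torch.rand(2, 6, 4)  # (batch, queries, 4)
gt_bboxes = torch.rand(3, 4)       # ground-truth boxes concatenated across the batch

matched_preds = pred_bboxes[(batch_idx, src_idx)]  # matched predictions, shape (3, 4)
matched_gts = gt_bboxes[dst_idx]                   # paired ground-truth boxes, shape (3, 4)
print(matched_preds.shape, matched_gts.shape)
```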
Add minimal docstrings for each function | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from pathlib import Path
from typing import Any
import torch
import torch.distributed as dist
from ultralytics.data import ClassificationDataset, build_dataloader
from ultralytics.engine.validator import BaseValidator
from ultralytics.utils import LOGGER, RANK
from ultralytics.utils.metrics import ClassifyMetrics, ConfusionMatrix
from ultralytics.utils.plotting import plot_images
class ClassificationValidator(BaseValidator):
def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks: dict | None = None) -> None:
super().__init__(dataloader, save_dir, args, _callbacks)
self.targets = None
self.pred = None
self.args.task = "classify"
self.metrics = ClassifyMetrics()
def get_desc(self) -> str:
return ("%22s" + "%11s" * 2) % ("classes", "top1_acc", "top5_acc")
def init_metrics(self, model: torch.nn.Module) -> None:
self.names = model.names
self.nc = len(model.names)
self.pred = []
self.targets = []
self.confusion_matrix = ConfusionMatrix(names=model.names)
def preprocess(self, batch: dict[str, Any]) -> dict[str, Any]:
batch["img"] = batch["img"].to(self.device, non_blocking=self.device.type == "cuda")
batch["img"] = batch["img"].half() if self.args.half else batch["img"].float()
batch["cls"] = batch["cls"].to(self.device, non_blocking=self.device.type == "cuda")
return batch
def update_metrics(self, preds: torch.Tensor, batch: dict[str, Any]) -> None:
n5 = min(len(self.names), 5)
self.pred.append(preds.argsort(1, descending=True)[:, :n5].type(torch.int32).cpu())
self.targets.append(batch["cls"].type(torch.int32).cpu())
def finalize_metrics(self) -> None:
self.confusion_matrix.process_cls_preds(self.pred, self.targets)
if self.args.plots:
for normalize in True, False:
self.confusion_matrix.plot(save_dir=self.save_dir, normalize=normalize, on_plot=self.on_plot)
self.metrics.speed = self.speed
self.metrics.save_dir = self.save_dir
self.metrics.confusion_matrix = self.confusion_matrix
def postprocess(self, preds: torch.Tensor | list[torch.Tensor] | tuple[torch.Tensor]) -> torch.Tensor:
return preds[0] if isinstance(preds, (list, tuple)) else preds
def get_stats(self) -> dict[str, float]:
self.metrics.process(self.targets, self.pred)
return self.metrics.results_dict
def gather_stats(self) -> None:
if RANK == 0:
gathered_preds = [None] * dist.get_world_size()
gathered_targets = [None] * dist.get_world_size()
dist.gather_object(self.pred, gathered_preds, dst=0)
dist.gather_object(self.targets, gathered_targets, dst=0)
self.pred = [pred for rank in gathered_preds for pred in rank]
self.targets = [targets for rank in gathered_targets for targets in rank]
elif RANK > 0:
dist.gather_object(self.pred, None, dst=0)
dist.gather_object(self.targets, None, dst=0)
def build_dataset(self, img_path: str) -> ClassificationDataset:
return ClassificationDataset(root=img_path, args=self.args, augment=False, prefix=self.args.split)
def get_dataloader(self, dataset_path: Path | str, batch_size: int) -> torch.utils.data.DataLoader:
dataset = self.build_dataset(dataset_path)
return build_dataloader(dataset, batch_size, self.args.workers, rank=-1)
def print_results(self) -> None:
pf = "%22s" + "%11.3g" * len(self.metrics.keys) # print format
LOGGER.info(pf % ("all", self.metrics.top1, self.metrics.top5))
def plot_val_samples(self, batch: dict[str, Any], ni: int) -> None:
batch["batch_idx"] = torch.arange(batch["img"].shape[0]) # add batch index for plotting
plot_images(
labels=batch,
fname=self.save_dir / f"val_batch{ni}_labels.jpg",
names=self.names,
on_plot=self.on_plot,
)
def plot_predictions(self, batch: dict[str, Any], preds: torch.Tensor, ni: int) -> None:
batched_preds = dict(
img=batch["img"],
batch_idx=torch.arange(batch["img"].shape[0]),
cls=torch.argmax(preds, dim=1),
conf=torch.amax(preds, dim=1),
)
plot_images(
batched_preds,
fname=self.save_dir / f"val_batch{ni}_pred.jpg",
names=self.names,
on_plot=self.on_plot,
) # pred | --- +++ @@ -16,8 +16,52 @@
class ClassificationValidator(BaseValidator):
+ """A class extending the BaseValidator class for validation based on a classification model.
+
+ This validator handles the validation process for classification models, including metrics calculation, confusion
+ matrix generation, and visualization of results.
+
+ Attributes:
+ targets (list[torch.Tensor]): Ground truth class labels.
+ pred (list[torch.Tensor]): Model predictions.
+ metrics (ClassifyMetrics): Object to calculate and store classification metrics.
+ names (dict): Mapping of class indices to class names.
+ nc (int): Number of classes.
+ confusion_matrix (ConfusionMatrix): Matrix to evaluate model performance across classes.
+
+ Methods:
+ get_desc: Return a formatted string summarizing classification metrics.
+ init_metrics: Initialize confusion matrix, class names, and tracking containers.
+ preprocess: Preprocess input batch by moving data to device.
+ update_metrics: Update running metrics with model predictions and batch targets.
+ finalize_metrics: Finalize metrics including confusion matrix and processing speed.
+ postprocess: Extract the primary prediction from model output.
+ get_stats: Calculate and return a dictionary of metrics.
+ build_dataset: Create a ClassificationDataset instance for validation.
+ get_dataloader: Build and return a data loader for classification validation.
+ print_results: Print evaluation metrics for the classification model.
+ plot_val_samples: Plot validation image samples with their ground truth labels.
+ plot_predictions: Plot images with their predicted class labels.
+
+ Examples:
+ >>> from ultralytics.models.yolo.classify import ClassificationValidator
+ >>> args = dict(model="yolo26n-cls.pt", data="imagenet10")
+ >>> validator = ClassificationValidator(args=args)
+ >>> validator()
+
+ Notes:
+ Torchvision classification models can also be passed to the 'model' argument, i.e. model='resnet18'.
+ """
def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks: dict | None = None) -> None:
+ """Initialize ClassificationValidator with dataloader, save directory, and other parameters.
+
+ Args:
+ dataloader (torch.utils.data.DataLoader, optional): DataLoader to use for validation.
+ save_dir (str | Path, optional): Directory to save results.
+ args (dict, optional): Arguments containing model and validation configuration.
+ _callbacks (dict, optional): Dictionary of callback functions to be called during validation.
+ """
super().__init__(dataloader, save_dir, args, _callbacks)
self.targets = None
self.pred = None
@@ -25,9 +69,11 @@ self.metrics = ClassifyMetrics()
def get_desc(self) -> str:
+ """Return a formatted string summarizing classification metrics."""
return ("%22s" + "%11s" * 2) % ("classes", "top1_acc", "top5_acc")
def init_metrics(self, model: torch.nn.Module) -> None:
+ """Initialize confusion matrix, class names, and tracking containers for predictions and targets."""
self.names = model.names
self.nc = len(model.names)
self.pred = []
@@ -35,17 +81,41 @@ self.confusion_matrix = ConfusionMatrix(names=model.names)
def preprocess(self, batch: dict[str, Any]) -> dict[str, Any]:
+ """Preprocess input batch by moving data to device and converting to appropriate dtype."""
batch["img"] = batch["img"].to(self.device, non_blocking=self.device.type == "cuda")
batch["img"] = batch["img"].half() if self.args.half else batch["img"].float()
batch["cls"] = batch["cls"].to(self.device, non_blocking=self.device.type == "cuda")
return batch
def update_metrics(self, preds: torch.Tensor, batch: dict[str, Any]) -> None:
+ """Update running metrics with model predictions and batch targets.
+
+ Args:
+ preds (torch.Tensor): Model predictions, typically logits or probabilities for each class.
+ batch (dict): Batch data containing images and class labels.
+
+ Notes:
+ This method appends the top-N predictions (sorted by confidence in descending order) to the
+ prediction list for later evaluation. N is limited to the minimum of 5 and the number of classes.
+ """
n5 = min(len(self.names), 5)
self.pred.append(preds.argsort(1, descending=True)[:, :n5].type(torch.int32).cpu())
self.targets.append(batch["cls"].type(torch.int32).cpu())
def finalize_metrics(self) -> None:
+ """Finalize metrics including confusion matrix and processing speed.
+
+ Examples:
+ >>> validator = ClassificationValidator()
+ >>> validator.pred = [torch.tensor([[0, 1, 2]])] # Top-3 predictions for one sample
+ >>> validator.targets = [torch.tensor([0])] # Ground truth class
+ >>> validator.finalize_metrics()
+ >>> print(validator.metrics.confusion_matrix) # Access the confusion matrix
+
+ Notes:
+ This method processes the accumulated predictions and targets to generate the confusion matrix,
+ optionally plots it, and updates the metrics object with speed information.
+ """
self.confusion_matrix.process_cls_preds(self.pred, self.targets)
if self.args.plots:
for normalize in True, False:
@@ -55,13 +125,16 @@ self.metrics.confusion_matrix = self.confusion_matrix
def postprocess(self, preds: torch.Tensor | list[torch.Tensor] | tuple[torch.Tensor]) -> torch.Tensor:
+ """Extract the primary prediction from model output if it's in a list or tuple format."""
return preds[0] if isinstance(preds, (list, tuple)) else preds
def get_stats(self) -> dict[str, float]:
+ """Calculate and return a dictionary of metrics by processing targets and predictions."""
self.metrics.process(self.targets, self.pred)
return self.metrics.results_dict
def gather_stats(self) -> None:
+ """Gather stats from all GPUs."""
if RANK == 0:
gathered_preds = [None] * dist.get_world_size()
gathered_targets = [None] * dist.get_world_size()
@@ -74,17 +147,39 @@ dist.gather_object(self.targets, None, dst=0)
def build_dataset(self, img_path: str) -> ClassificationDataset:
+ """Create a ClassificationDataset instance for validation."""
return ClassificationDataset(root=img_path, args=self.args, augment=False, prefix=self.args.split)
def get_dataloader(self, dataset_path: Path | str, batch_size: int) -> torch.utils.data.DataLoader:
+ """Build and return a data loader for classification validation.
+
+ Args:
+ dataset_path (str | Path): Path to the dataset directory.
+ batch_size (int): Number of samples per batch.
+
+ Returns:
+ (torch.utils.data.DataLoader): DataLoader object for the classification validation dataset.
+ """
dataset = self.build_dataset(dataset_path)
return build_dataloader(dataset, batch_size, self.args.workers, rank=-1)
def print_results(self) -> None:
+ """Print evaluation metrics for the classification model."""
pf = "%22s" + "%11.3g" * len(self.metrics.keys) # print format
LOGGER.info(pf % ("all", self.metrics.top1, self.metrics.top5))
def plot_val_samples(self, batch: dict[str, Any], ni: int) -> None:
+ """Plot validation image samples with their ground truth labels.
+
+ Args:
+ batch (dict[str, Any]): Dictionary containing batch data with 'img' (images) and 'cls' (class labels).
+ ni (int): Batch index used for naming the output file.
+
+ Examples:
+ >>> validator = ClassificationValidator()
+ >>> batch = {"img": torch.rand(16, 3, 224, 224), "cls": torch.randint(0, 10, (16,))}
+ >>> validator.plot_val_samples(batch, 0)
+ """
batch["batch_idx"] = torch.arange(batch["img"].shape[0]) # add batch index for plotting
plot_images(
labels=batch,
@@ -94,6 +189,19 @@ )
def plot_predictions(self, batch: dict[str, Any], preds: torch.Tensor, ni: int) -> None:
+ """Plot images with their predicted class labels and save the visualization.
+
+ Args:
+ batch (dict[str, Any]): Batch data containing images and other information.
+ preds (torch.Tensor): Model predictions with shape (batch_size, num_classes).
+ ni (int): Batch index used for naming the output file.
+
+ Examples:
+ >>> validator = ClassificationValidator()
+ >>> batch = {"img": torch.rand(16, 3, 224, 224)}
+ >>> preds = torch.rand(16, 10) # 16 images, 10 classes
+ >>> validator.plot_predictions(batch, preds, 0)
+ """
batched_preds = dict(
img=batch["img"],
batch_idx=torch.arange(batch["img"].shape[0]),
@@ -105,4 +213,4 @@ fname=self.save_dir / f"val_batch{ni}_pred.jpg",
names=self.names,
on_plot=self.on_plot,
- ) # pred+ ) # pred
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/yolo/classify/val.py |
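The validator in the row above accumulates top-5 predicted class indices and integer targets, then hands them to `ClassifyMetrics`. Below is a hedged sketch of how top-1/top-5 accuracy can be computed from those accumulated lists; it is not the `ClassifyMetrics` implementation itself, and the tensors are toy values.

```python
# Sketch of top-1/top-5 accuracy from the lists accumulated in update_metrics above.
import torch

# Each entry mirrors update_metrics(): top-5 class indices per image, plus the true class
pred = [torch.tensor([[3, 1, 7, 0, 2], [4, 4, 4, 4, 4]])]
targets = [torch.tensor([3, 2])]

pred_all = torch.cat(pred)        # (N, 5)
targets_all = torch.cat(targets)  # (N,)

correct = pred_all == targets_all.unsqueeze(1)   # (N, 5) boolean matches
top1 = correct[:, 0].float().mean().item()       # first column holds the best prediction
top5 = correct.any(dim=1).float().mean().item()  # a hit anywhere in the top 5 counts
print(f"top1={top1:.3f} top5={top5:.3f}")
```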
Add docstrings for production code | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from pathlib import Path
from typing import Any
import torch
from ultralytics.engine.model import Model
from ultralytics.utils import DEFAULT_CFG_DICT
from ultralytics.utils.downloads import attempt_download_asset
from ultralytics.utils.patches import torch_load
from ultralytics.utils.torch_utils import model_info
from .predict import NASPredictor
from .val import NASValidator
class NAS(Model):
def __init__(self, model: str = "yolo_nas_s.pt") -> None:
assert Path(model).suffix not in {".yaml", ".yml"}, "YOLO-NAS models only support pre-trained models."
super().__init__(model, task="detect")
def _load(self, weights: str, task=None) -> None:
import super_gradients
suffix = Path(weights).suffix
if suffix == ".pt":
self.model = torch_load(attempt_download_asset(weights))
elif suffix == "":
self.model = super_gradients.training.models.get(weights, pretrained_weights="coco")
# Override the forward method to ignore additional arguments
def new_forward(x, *args, **kwargs):
return self.model._original_forward(x)
self.model._original_forward = self.model.forward
self.model.forward = new_forward
# Standardize model attributes for compatibility
self.model.fuse = lambda verbose=True: self.model
self.model.stride = torch.tensor([32])
self.model.names = dict(enumerate(self.model._class_names))
self.model.is_fused = lambda: False # for info()
self.model.yaml = {} # for info()
self.model.pt_path = str(weights) # for export()
self.model.task = "detect" # for export()
self.model.args = {**DEFAULT_CFG_DICT, **self.overrides} # for export()
self.model.eval()
def info(self, detailed: bool = False, verbose: bool = True) -> dict[str, Any]:
return model_info(self.model, detailed=detailed, verbose=verbose, imgsz=640)
@property
def task_map(self) -> dict[str, dict[str, Any]]:
return {"detect": {"predictor": NASPredictor, "validator": NASValidator}} | --- +++ @@ -18,12 +18,41 @@
class NAS(Model):
+ """YOLO-NAS model for object detection.
+
+ This class provides an interface for the YOLO-NAS models and extends the `Model` class from Ultralytics engine. It
+ is designed to facilitate the task of object detection using pre-trained or custom-trained YOLO-NAS models.
+
+ Attributes:
+ model (torch.nn.Module): The loaded YOLO-NAS model.
+ task (str): The task type for the model, defaults to 'detect'.
+ predictor (NASPredictor): The predictor instance for making predictions.
+ validator (NASValidator): The validator instance for model validation.
+
+ Methods:
+ info: Log model information and return model details.
+
+ Examples:
+ >>> from ultralytics import NAS
+ >>> model = NAS("yolo_nas_s")
+ >>> results = model.predict("ultralytics/assets/bus.jpg")
+
+ Notes:
+ YOLO-NAS models only support pre-trained models. Do not provide YAML configuration files.
+ """
def __init__(self, model: str = "yolo_nas_s.pt") -> None:
+ """Initialize the NAS model with the provided or default model."""
assert Path(model).suffix not in {".yaml", ".yml"}, "YOLO-NAS models only support pre-trained models."
super().__init__(model, task="detect")
def _load(self, weights: str, task=None) -> None:
+ """Load an existing NAS model weights or create a new NAS model with pretrained weights.
+
+ Args:
+ weights (str): Path to the model weights file or model name.
+ task (str, optional): Task type for the model.
+ """
import super_gradients
suffix = Path(weights).suffix
@@ -34,6 +63,7 @@
# Override the forward method to ignore additional arguments
def new_forward(x, *args, **kwargs):
+ """Ignore additional __call__ arguments."""
return self.model._original_forward(x)
self.model._original_forward = self.model.forward
@@ -51,8 +81,18 @@ self.model.eval()
def info(self, detailed: bool = False, verbose: bool = True) -> dict[str, Any]:
+ """Log model information.
+
+ Args:
+ detailed (bool): Show detailed information about model.
+ verbose (bool): Controls verbosity.
+
+ Returns:
+ (tuple): Model information as a tuple of (layers, parameters, gradients, GFLOPs).
+ """
return model_info(self.model, detailed=detailed, verbose=verbose, imgsz=640)
@property
def task_map(self) -> dict[str, dict[str, Any]]:
- return {"detect": {"predictor": NASPredictor, "validator": NASValidator}}+ """Return a dictionary mapping tasks to respective predictor and validator classes."""
+ return {"detect": {"predictor": NASPredictor, "validator": NASValidator}}
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/nas/model.py |
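`NAS._load` in the row above wraps a third-party model by overriding `forward` to drop extra arguments and attaching the attributes the rest of the pipeline expects. The sketch below isolates that wrapping pattern on a stand-in module; `ThirdPartyDetector` and its output are placeholders, not a real super-gradients model.

```python
# Minimal sketch of the wrapping pattern used in NAS._load above.
import torch
import torch.nn as nn


class ThirdPartyDetector(nn.Module):
    """Stand-in for an external model whose forward only accepts a single tensor."""

    def forward(self, x):
        return x.mean(dim=(2, 3))  # placeholder output


model = ThirdPartyDetector()

# Keep the original forward, then accept (and drop) any extra positional/keyword arguments
model._original_forward = model.forward
model.forward = lambda x, *args, **kwargs: model._original_forward(x)

# Standardize attributes so downstream code (info/export/predict) finds what it expects
model.fuse = lambda verbose=True: model
model.stride = torch.tensor([32])
model.names = {0: "person", 1: "bicycle"}
model.eval()

print(model(torch.rand(1, 3, 64, 64), augment=False).shape)  # extra kwarg is ignored
```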
Turn comments into proper docstrings | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
import numpy as np
import torch
from ultralytics.data.augment import LoadVisualPrompt
from ultralytics.models.yolo.detect import DetectionPredictor
from ultralytics.models.yolo.segment import SegmentationPredictor
class YOLOEVPDetectPredictor(DetectionPredictor):
def setup_model(self, model, verbose: bool = True):
super().setup_model(model, verbose=verbose)
self.done_warmup = True
def set_prompts(self, prompts):
self.prompts = prompts
def pre_transform(self, im):
img = super().pre_transform(im)
bboxes = self.prompts.pop("bboxes", None)
masks = self.prompts.pop("masks", None)
category = self.prompts["cls"]
if len(img) == 1:
visuals = self._process_single_image(img[0].shape[:2], im[0].shape[:2], category, bboxes, masks)
prompts = visuals.unsqueeze(0).to(self.device) # (1, N, H, W)
else:
# NOTE: only supports bboxes as prompts for now
assert bboxes is not None, f"Expected bboxes, but got {bboxes}!"
# NOTE: needs list[np.ndarray]
assert isinstance(bboxes, list) and all(isinstance(b, np.ndarray) for b in bboxes), (
f"Expected list[np.ndarray], but got {bboxes}!"
)
assert isinstance(category, list) and all(isinstance(b, np.ndarray) for b in category), (
f"Expected list[np.ndarray], but got {category}!"
)
assert len(im) == len(category) == len(bboxes), (
f"Expected same length for all inputs, but got {len(im)}vs{len(category)}vs{len(bboxes)}!"
)
visuals = [
self._process_single_image(img[i].shape[:2], im[i].shape[:2], category[i], bboxes[i])
for i in range(len(img))
]
prompts = torch.nn.utils.rnn.pad_sequence(visuals, batch_first=True).to(self.device) # (B, N, H, W)
self.prompts = prompts.half() if self.model.fp16 else prompts.float()
return img
def _process_single_image(self, dst_shape, src_shape, category, bboxes=None, masks=None):
if bboxes is not None and len(bboxes):
bboxes = np.array(bboxes, dtype=np.float32)
if bboxes.ndim == 1:
bboxes = bboxes[None, :]
# Calculate scaling factor and adjust bounding boxes
gain = min(dst_shape[0] / src_shape[0], dst_shape[1] / src_shape[1]) # gain = old / new
bboxes *= gain
bboxes[..., 0::2] += round((dst_shape[1] - src_shape[1] * gain) / 2 - 0.1)
bboxes[..., 1::2] += round((dst_shape[0] - src_shape[0] * gain) / 2 - 0.1)
elif masks is not None:
# Resize and process masks
resized_masks = super().pre_transform(masks)
masks = np.stack(resized_masks) # (N, H, W)
masks[masks == 114] = 0 # Reset padding values to 0
else:
raise ValueError("Please provide valid bboxes or masks")
# Generate visuals using the visual prompt loader
return LoadVisualPrompt().get_visuals(category, dst_shape, bboxes, masks)
def inference(self, im, *args, **kwargs):
return super().inference(im, vpe=self.prompts, *args, **kwargs)
def get_vpe(self, source):
self.setup_source(source)
assert len(self.dataset) == 1, "get_vpe only supports one image!"
for _, im0s, _ in self.dataset:
im = self.preprocess(im0s)
return self.model(im, vpe=self.prompts, return_vpe=True)
class YOLOEVPSegPredictor(YOLOEVPDetectPredictor, SegmentationPredictor):
pass | --- +++ @@ -9,15 +9,58 @@
class YOLOEVPDetectPredictor(DetectionPredictor):
+ """A class extending DetectionPredictor for YOLO-EVP (Enhanced Visual Prompting) predictions.
+
+ This class provides common functionality for YOLO models that use visual prompting, including model setup, prompt
+ handling, and preprocessing transformations.
+
+ Attributes:
+ model (torch.nn.Module): The YOLO model for inference.
+ device (torch.device): Device to run the model on (CPU or CUDA).
+ prompts (dict | torch.Tensor): Visual prompts containing class indices and bounding boxes or masks.
+
+ Methods:
+ setup_model: Initialize the YOLO model and set it to evaluation mode.
+ set_prompts: Set the visual prompts for the model.
+ pre_transform: Preprocess images and prompts before inference.
+ inference: Run inference with visual prompts.
+ get_vpe: Process source to get visual prompt embeddings.
+ """
def setup_model(self, model, verbose: bool = True):
+ """Set up the model for prediction.
+
+ Args:
+ model (torch.nn.Module): Model to load or use.
+ verbose (bool, optional): If True, provides detailed logging.
+ """
super().setup_model(model, verbose=verbose)
self.done_warmup = True
def set_prompts(self, prompts):
+ """Set the visual prompts for the model.
+
+ Args:
+ prompts (dict): Dictionary containing class indices and bounding boxes or masks. Must include a 'cls' key
+ with class indices.
+ """
self.prompts = prompts
def pre_transform(self, im):
+ """Preprocess images and prompts before inference.
+
+ This method applies letterboxing to the input image and transforms the visual prompts (bounding boxes or masks)
+ accordingly.
+
+ Args:
+ im (list): List of input images.
+
+ Returns:
+ (list): Preprocessed images ready for model inference.
+
+ Raises:
+ ValueError: If neither valid bounding boxes nor masks are provided in the prompts.
+ """
img = super().pre_transform(im)
bboxes = self.prompts.pop("bboxes", None)
masks = self.prompts.pop("masks", None)
@@ -47,6 +90,21 @@
return img
def _process_single_image(self, dst_shape, src_shape, category, bboxes=None, masks=None):
+ """Process a single image by resizing bounding boxes or masks and generating visuals.
+
+ Args:
+ dst_shape (tuple): The target shape (height, width) of the image.
+ src_shape (tuple): The original shape (height, width) of the image.
+ category (list | np.ndarray): The category indices for visual prompts.
+ bboxes (list | np.ndarray, optional): A list of bounding boxes in the format [x1, y1, x2, y2].
+ masks (np.ndarray, optional): An array of masks corresponding to the image.
+
+ Returns:
+ (torch.Tensor): The processed visuals for the image.
+
+ Raises:
+ ValueError: If neither `bboxes` nor `masks` are provided.
+ """
if bboxes is not None and len(bboxes):
bboxes = np.array(bboxes, dtype=np.float32)
if bboxes.ndim == 1:
@@ -68,9 +126,29 @@
return LoadVisualPrompt().get_visuals(category, dst_shape, bboxes, masks)
def inference(self, im, *args, **kwargs):
+ """Run inference with visual prompts.
+
+ Args:
+ im (torch.Tensor): Input image tensor.
+ *args (Any): Variable length argument list.
+ **kwargs (Any): Arbitrary keyword arguments.
+
+ Returns:
+ (torch.Tensor): Model prediction results.
+ """
return super().inference(im, vpe=self.prompts, *args, **kwargs)
def get_vpe(self, source):
+ """Process the source to get the visual prompt embeddings (VPE).
+
+ Args:
+ source (str | Path | int | PIL.Image | np.ndarray | torch.Tensor | list | tuple): The source of the image to
+ make predictions on. Accepts various types including file paths, URLs, PIL images, numpy arrays, and
+ torch tensors.
+
+ Returns:
+ (torch.Tensor): The visual prompt embeddings (VPE) from the model.
+ """
self.setup_source(source)
assert len(self.dataset) == 1, "get_vpe only supports one image!"
for _, im0s, _ in self.dataset:
@@ -79,5 +157,6 @@
class YOLOEVPSegPredictor(YOLOEVPDetectPredictor, SegmentationPredictor):
+ """Predictor for YOLO-EVP segmentation tasks combining detection and segmentation capabilities."""
- pass
+ pass
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/yolo/yoloe/predict.py |
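The predictor documented above consumes visual prompts (class indices plus reference boxes or masks) rather than text. As a rough usage sketch, assuming the visual_prompts argument exposed by the high-level YOLOE model class and an indicative checkpoint name (both are assumptions, not taken from this row), inference with a single reference box might look like this:

import numpy as np
from ultralytics import YOLOE
from ultralytics.models.yolo.yoloe import YOLOEVPSegPredictor

model = YOLOE("yoloe-11s-seg.pt")  # indicative checkpoint name; swap in any YOLOE weights
visual_prompts = dict(
    bboxes=np.array([[220.0, 400.0, 345.0, 860.0]], dtype=np.float32),  # one reference box (x1, y1, x2, y2)
    cls=np.array([0]),  # class index assigned to that box
)
results = model.predict(
    "path/to/image.jpg",
    visual_prompts=visual_prompts,
    predictor=YOLOEVPSegPredictor,  # routes the prompts through the pre_transform/inference path shown above
)

The predict call feeds those boxes through set_prompts and pre_transform above, which rescale them to the letterboxed image before they are turned into prompt tensors.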
Add detailed docstrings explaining each function | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
import torch
import torch.nn as nn
class AGLU(nn.Module):
def __init__(self, device=None, dtype=None) -> None:
super().__init__()
self.act = nn.Softplus(beta=-1.0)
self.lambd = nn.Parameter(nn.init.uniform_(torch.empty(1, device=device, dtype=dtype))) # lambda parameter
self.kappa = nn.Parameter(nn.init.uniform_(torch.empty(1, device=device, dtype=dtype))) # kappa parameter
def forward(self, x: torch.Tensor) -> torch.Tensor:
lam = torch.clamp(self.lambd, min=0.0001) # Clamp lambda to avoid division by zero
return torch.exp((1 / lam) * self.act((self.kappa * x) - torch.log(lam))) | --- +++ @@ -1,17 +1,54 @@ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""Activation modules."""
import torch
import torch.nn as nn
class AGLU(nn.Module):
+ """Unified activation function module from AGLU.
+
+ This class implements a parameterized activation function with learnable parameters lambda and kappa, based on the
+ AGLU (Adaptive Gated Linear Unit) approach.
+
+ Attributes:
+ act (nn.Softplus): Softplus activation function with negative beta.
+ lambd (nn.Parameter): Learnable lambda parameter initialized with uniform distribution.
+ kappa (nn.Parameter): Learnable kappa parameter initialized with uniform distribution.
+
+ Methods:
+ forward: Compute the forward pass of the Unified activation function.
+
+ Examples:
+ >>> import torch
+ >>> m = AGLU()
+ >>> input = torch.randn(2)
+ >>> output = m(input)
+ >>> print(output.shape)
+ torch.Size([2])
+
+ References:
+ https://github.com/kostas1515/AGLU
+ """
def __init__(self, device=None, dtype=None) -> None:
+ """Initialize the Unified activation function with learnable parameters."""
super().__init__()
self.act = nn.Softplus(beta=-1.0)
self.lambd = nn.Parameter(nn.init.uniform_(torch.empty(1, device=device, dtype=dtype))) # lambda parameter
self.kappa = nn.Parameter(nn.init.uniform_(torch.empty(1, device=device, dtype=dtype))) # kappa parameter
def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Apply the Adaptive Gated Linear Unit (AGLU) activation function.
+
+ This forward method implements the AGLU activation function with learnable parameters lambda and kappa. The
+ function applies a transformation that adaptively combines linear and non-linear components.
+
+ Args:
+ x (torch.Tensor): Input tensor to apply the activation function to.
+
+ Returns:
+ (torch.Tensor): Output tensor after applying the AGLU activation function, with the same shape as the input.
+ """
lam = torch.clamp(self.lambd, min=0.0001) # Clamp lambda to avoid division by zero
- return torch.exp((1 / lam) * self.act((self.kappa * x) - torch.log(lam)))
+ return torch.exp((1 / lam) * self.act((self.kappa * x) - torch.log(lam)))
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/nn/modules/activation.py |
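The AGLU forward pass above reduces to exp((1/λ) · softplus_β=-1(κx − ln λ)) with λ clamped away from zero. A minimal sketch with fixed (non-learned) parameter values, writing the β = -1 softplus out explicitly:

import torch

x = torch.tensor([-1.0, 0.0, 1.0])
lambd, kappa = torch.tensor(0.5), torch.tensor(1.0)  # illustrative values; learned nn.Parameters in AGLU

lam = torch.clamp(lambd, min=0.0001)
z = kappa * x - torch.log(lam)
softplus_neg = -torch.log1p(torch.exp(-z))  # nn.Softplus(beta=-1.0) written out, valid for moderate inputs
out = torch.exp((1 / lam) * softplus_neg)
print(out)  # matches AGLU's forward pass for these parameter values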
Create documentation for each function signature | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from copy import copy, deepcopy
from pathlib import Path
import torch
from ultralytics.data import YOLOConcatDataset, build_yolo_dataset
from ultralytics.data.augment import LoadVisualPrompt
from ultralytics.models.yolo.detect import DetectionTrainer, DetectionValidator
from ultralytics.nn.tasks import YOLOEModel
from ultralytics.utils import DEFAULT_CFG, LOGGER, RANK
from ultralytics.utils.torch_utils import unwrap_model
from ..world.train_world import WorldTrainerFromScratch
from .val import YOLOEDetectValidator
class YOLOETrainer(DetectionTrainer):
def __init__(self, cfg=DEFAULT_CFG, overrides: dict | None = None, _callbacks: dict | None = None):
if overrides is None:
overrides = {}
assert not overrides.get("compile"), f"Training with 'model={overrides['model']}' requires 'compile=False'"
overrides["overlap_mask"] = False
super().__init__(cfg, overrides, _callbacks)
def get_model(self, cfg=None, weights=None, verbose: bool = True):
# NOTE: This `nc` here is the max number of different text samples in one image, rather than the actual `nc`.
# NOTE: Following the official config, nc hard-coded to 80 for now.
model = YOLOEModel(
cfg["yaml_file"] if isinstance(cfg, dict) else cfg,
ch=self.data["channels"],
nc=min(self.data["nc"], 80),
verbose=verbose and RANK == -1,
)
if weights:
model.load(weights)
return model
def get_validator(self):
self.loss_names = "box", "cls", "dfl"
return YOLOEDetectValidator(
self.test_loader, save_dir=self.save_dir, args=copy(self.args), _callbacks=self.callbacks
)
def build_dataset(self, img_path: str, mode: str = "train", batch: int | None = None):
gs = max(int(unwrap_model(self.model).stride.max() if self.model else 0), 32)
return build_yolo_dataset(
self.args, img_path, batch, self.data, mode=mode, rect=mode == "val", stride=gs, multi_modal=mode == "train"
)
class YOLOEPETrainer(DetectionTrainer):
def get_model(self, cfg=None, weights=None, verbose: bool = True):
# NOTE: This `nc` here is the max number of different text samples in one image, rather than the actual `nc`.
# NOTE: Following the official config, nc hard-coded to 80 for now.
model = YOLOEModel(
cfg["yaml_file"] if isinstance(cfg, dict) else cfg,
ch=self.data["channels"],
nc=self.data["nc"],
verbose=verbose and RANK == -1,
)
del model.model[-1].savpe
assert weights is not None, "Pretrained weights must be provided for linear probing."
if weights:
model.load(weights)
model.eval()
names = list(self.data["names"].values())
# NOTE: `get_text_pe` related to text model and YOLOEDetect.reprta,
# it'd get correct results as long as loading proper pretrained weights.
tpe = model.get_text_pe(names)
model.set_classes(names, tpe)
model.model[-1].fuse(model.pe) # fuse text embeddings to classify head
model.model[-1].cv3[0][2] = deepcopy(model.model[-1].cv3[0][2]).requires_grad_(True)
model.model[-1].cv3[1][2] = deepcopy(model.model[-1].cv3[1][2]).requires_grad_(True)
model.model[-1].cv3[2][2] = deepcopy(model.model[-1].cv3[2][2]).requires_grad_(True)
if getattr(model.model[-1], "one2one_cv3", None) is not None:
model.model[-1].one2one_cv3[0][2] = deepcopy(model.model[-1].cv3[0][2]).requires_grad_(True)
model.model[-1].one2one_cv3[1][2] = deepcopy(model.model[-1].cv3[1][2]).requires_grad_(True)
model.model[-1].one2one_cv3[2][2] = deepcopy(model.model[-1].cv3[2][2]).requires_grad_(True)
model.train()
return model
class YOLOETrainerFromScratch(YOLOETrainer, WorldTrainerFromScratch):
def build_dataset(self, img_path: list[str] | str, mode: str = "train", batch: int | None = None):
return WorldTrainerFromScratch.build_dataset(self, img_path, mode, batch)
def generate_text_embeddings(self, texts: list[str], batch: int, cache_dir: Path):
model = unwrap_model(self.model).text_model
cache_path = cache_dir / f"text_embeddings_{model.replace(':', '_').replace('/', '_')}.pt"
if cache_path.exists():
LOGGER.info(f"Reading existed cache from '{cache_path}'")
txt_map = torch.load(cache_path, map_location=self.device)
if sorted(txt_map.keys()) == sorted(texts):
return txt_map
LOGGER.info(f"Caching text embeddings to '{cache_path}'")
txt_feats = unwrap_model(self.model).get_text_pe(texts, batch, without_reprta=True, cache_clip_model=False)
txt_map = dict(zip(texts, txt_feats.squeeze(0)))
torch.save(txt_map, cache_path)
return txt_map
class YOLOEPEFreeTrainer(YOLOEPETrainer, YOLOETrainerFromScratch):
def get_validator(self):
self.loss_names = "box", "cls", "dfl"
return DetectionValidator(
self.test_loader, save_dir=self.save_dir, args=copy(self.args), _callbacks=self.callbacks
)
def preprocess_batch(self, batch):
return DetectionTrainer.preprocess_batch(self, batch)
def set_text_embeddings(self, datasets, batch: int):
pass
class YOLOEVPTrainer(YOLOETrainerFromScratch):
def build_dataset(self, img_path: list[str] | str, mode: str = "train", batch: int | None = None):
dataset = super().build_dataset(img_path, mode, batch)
if isinstance(dataset, YOLOConcatDataset):
for d in dataset.datasets:
d.transforms.append(LoadVisualPrompt())
else:
dataset.transforms.append(LoadVisualPrompt())
return dataset
def _close_dataloader_mosaic(self):
super()._close_dataloader_mosaic()
if isinstance(self.train_loader.dataset, YOLOConcatDataset):
for d in self.train_loader.dataset.datasets:
d.transforms.append(LoadVisualPrompt())
else:
self.train_loader.dataset.transforms.append(LoadVisualPrompt()) | --- +++ @@ -19,8 +19,28 @@
class YOLOETrainer(DetectionTrainer):
+ """A trainer class for YOLOE object detection models.
+
+ This class extends DetectionTrainer to provide specialized training functionality for YOLOE models, including custom
+ model initialization, validation, and dataset building with multi-modal support.
+
+ Attributes:
+ loss_names (tuple): Names of loss components used during training.
+
+ Methods:
+ get_model: Initialize and return a YOLOEModel with specified configuration.
+ get_validator: Return a YOLOEDetectValidator for model validation.
+ build_dataset: Build YOLO dataset with multi-modal support for training.
+ """
def __init__(self, cfg=DEFAULT_CFG, overrides: dict | None = None, _callbacks: dict | None = None):
+ """Initialize the YOLOE Trainer with specified configurations.
+
+ Args:
+ cfg (dict): Configuration dictionary with default training settings from DEFAULT_CFG.
+ overrides (dict, optional): Dictionary of parameter overrides for the default configuration.
+ _callbacks (dict, optional): Dictionary of callback functions to be applied during training.
+ """
if overrides is None:
overrides = {}
assert not overrides.get("compile"), f"Training with 'model={overrides['model']}' requires 'compile=False'"
@@ -28,6 +48,22 @@
super().__init__(cfg, overrides, _callbacks)
def get_model(self, cfg=None, weights=None, verbose: bool = True):
+ """Return a YOLOEModel initialized with the specified configuration and weights.
+
+ Args:
+ cfg (dict | str, optional): Model configuration. Can be a dictionary containing a 'yaml_file' key, a direct
+ path to a YAML file, or None to use default configuration.
+ weights (str | Path, optional): Path to pretrained weights file to load into the model.
+ verbose (bool): Whether to display model information during initialization.
+
+ Returns:
+ (YOLOEModel): The initialized YOLOE model.
+
+ Notes:
+ - The number of classes (nc) is hard-coded to a maximum of 80 following the official configuration.
+ - The nc parameter here represents the maximum number of different text samples in one image,
+ rather than the actual number of classes.
+ """
# NOTE: This `nc` here is the max number of different text samples in one image, rather than the actual `nc`.
# NOTE: Following the official config, nc hard-coded to 80 for now.
model = YOLOEModel(
@@ -42,12 +78,23 @@
return model
def get_validator(self):
+ """Return a YOLOEDetectValidator for YOLOE model validation."""
self.loss_names = "box", "cls", "dfl"
return YOLOEDetectValidator(
self.test_loader, save_dir=self.save_dir, args=copy(self.args), _callbacks=self.callbacks
)
def build_dataset(self, img_path: str, mode: str = "train", batch: int | None = None):
+ """Build YOLO Dataset.
+
+ Args:
+ img_path (str): Path to the folder containing images.
+ mode (str): 'train' mode or 'val' mode, users are able to customize different augmentations for each mode.
+ batch (int, optional): Size of batches, this is for rectangular training.
+
+ Returns:
+ (Dataset): YOLO dataset configured for training or validation.
+ """
gs = max(int(unwrap_model(self.model).stride.max() if self.model else 0), 32)
return build_yolo_dataset(
self.args, img_path, batch, self.data, mode=mode, rect=mode == "val", stride=gs, multi_modal=mode == "train"
@@ -55,8 +102,26 @@
class YOLOEPETrainer(DetectionTrainer):
+ """Fine-tune YOLOE model using linear probing approach.
+
+ This trainer freezes most model layers and only trains specific projection layers for efficient fine-tuning on new
+ datasets while preserving pretrained features.
+
+ Methods:
+ get_model: Initialize YOLOEModel with frozen layers except projection layers.
+ """
def get_model(self, cfg=None, weights=None, verbose: bool = True):
+ """Return YOLOEModel initialized with specified config and weights.
+
+ Args:
+ cfg (dict | str, optional): Model configuration.
+ weights (str, optional): Path to pretrained weights.
+ verbose (bool): Whether to display model information.
+
+ Returns:
+ (YOLOEModel): Initialized model with frozen layers except for specific projection layers.
+ """
# NOTE: This `nc` here is the max number of different text samples in one image, rather than the actual `nc`.
# NOTE: Following the official config, nc hard-coded to 80 for now.
model = YOLOEModel(
@@ -94,11 +159,43 @@
class YOLOETrainerFromScratch(YOLOETrainer, WorldTrainerFromScratch):
+ """Train YOLOE models from scratch with text embedding support.
+
+ This trainer combines YOLOE training capabilities with world training features, enabling training from scratch with
+ text embeddings and grounding datasets.
+
+ Methods:
+ build_dataset: Build datasets for training with grounding support.
+ generate_text_embeddings: Generate and cache text embeddings for training.
+ """
def build_dataset(self, img_path: list[str] | str, mode: str = "train", batch: int | None = None):
+ """Build YOLO Dataset for training or validation.
+
+ This method constructs appropriate datasets based on the mode and input paths, handling both standard YOLO
+ datasets and grounding datasets with different formats.
+
+ Args:
+ img_path (list[str] | str): Path to the folder containing images or list of paths.
+ mode (str): 'train' mode or 'val' mode, allowing customized augmentations for each mode.
+ batch (int, optional): Size of batches, used for rectangular training/validation.
+
+ Returns:
+ (YOLOConcatDataset | Dataset): The constructed dataset for training or validation.
+ """
return WorldTrainerFromScratch.build_dataset(self, img_path, mode, batch)
def generate_text_embeddings(self, texts: list[str], batch: int, cache_dir: Path):
+ """Generate text embeddings for a list of text samples.
+
+ Args:
+ texts (list[str]): List of text samples to encode.
+ batch (int): Batch size for processing.
+ cache_dir (Path): Directory to save/load cached embeddings.
+
+ Returns:
+ (dict): Dictionary mapping text samples to their embeddings.
+ """
model = unwrap_model(self.model).text_model
cache_path = cache_dir / f"text_embeddings_{model.replace(':', '_').replace('/', '_')}.pt"
if cache_path.exists():
@@ -114,23 +211,60 @@
class YOLOEPEFreeTrainer(YOLOEPETrainer, YOLOETrainerFromScratch):
+ """Train prompt-free YOLOE model.
+
+ This trainer combines linear probing capabilities with from-scratch training for prompt-free YOLOE models that don't
+ require text prompts during inference.
+
+ Methods:
+ get_validator: Return standard DetectionValidator for validation.
+ preprocess_batch: Preprocess batches without text features.
+ set_text_embeddings: Set text embeddings for datasets (no-op for prompt-free).
+ """
def get_validator(self):
+ """Return a DetectionValidator for YOLO model validation."""
self.loss_names = "box", "cls", "dfl"
return DetectionValidator(
self.test_loader, save_dir=self.save_dir, args=copy(self.args), _callbacks=self.callbacks
)
def preprocess_batch(self, batch):
+ """Preprocess a batch of images for YOLOE training, adjusting formatting and dimensions as needed."""
return DetectionTrainer.preprocess_batch(self, batch)
def set_text_embeddings(self, datasets, batch: int):
+ """No-op override for prompt-free training that does not require text embeddings.
+
+ Args:
+ datasets (list[Dataset]): List of datasets containing category names to process.
+ batch (int): Batch size for processing text embeddings.
+ """
pass
class YOLOEVPTrainer(YOLOETrainerFromScratch):
+ """Train YOLOE model with visual prompts.
+
+ This trainer extends YOLOETrainerFromScratch to support visual prompt-based training, where visual cues are provided
+ alongside images to guide the detection process.
+
+ Methods:
+ build_dataset: Build dataset with visual prompt loading transforms.
+ """
def build_dataset(self, img_path: list[str] | str, mode: str = "train", batch: int | None = None):
+ """Build YOLO Dataset for training or validation with visual prompts.
+
+ Args:
+ img_path (list[str] | str): Path to the folder containing images or list of paths.
+ mode (str): 'train' mode or 'val' mode, allowing customized augmentations for each mode.
+ batch (int, optional): Size of batches, used for rectangular training/validation.
+
+ Returns:
+ (YOLOConcatDataset | Dataset): YOLO dataset configured for training or validation, with visual prompts for
+ training mode.
+ """
dataset = super().build_dataset(img_path, mode, batch)
if isinstance(dataset, YOLOConcatDataset):
for d in dataset.datasets:
@@ -140,9 +274,10 @@
return dataset
def _close_dataloader_mosaic(self):
+ """Close mosaic augmentation and add visual prompt loading to the training dataset."""
super()._close_dataloader_mosaic()
if isinstance(self.train_loader.dataset, YOLOConcatDataset):
for d in self.train_loader.dataset.datasets:
d.transforms.append(LoadVisualPrompt())
else:
- self.train_loader.dataset.transforms.append(LoadVisualPrompt())
+ self.train_loader.dataset.transforms.append(LoadVisualPrompt())
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/yolo/yoloe/train.py |
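generate_text_embeddings above is a cache-or-compute routine: embeddings are saved as a {text: tensor} map and reused only when the cached keys cover exactly the requested texts. A stripped-down sketch of that pattern, with a hypothetical encode callable standing in for get_text_pe:

from pathlib import Path
import torch

def cached_text_embeddings(texts, cache_path: Path, encode):
    """Return a {text: embedding} dict, recomputing only when the cache does not match the texts."""
    if cache_path.exists():
        cached = torch.load(cache_path)
        if sorted(cached.keys()) == sorted(texts):
            return cached  # cache covers exactly these texts, reuse it
    feats = encode(texts)  # hypothetical encoder returning an (N, D) tensor
    txt_map = dict(zip(texts, feats))
    torch.save(txt_map, cache_path)
    return txt_map

# Dummy encoder for illustration only
emb = cached_text_embeddings(["person", "car"], Path("text_embeddings_demo.pt"), lambda t: torch.randn(len(t), 512))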
Add docstrings for utility scripts | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from copy import copy, deepcopy
from ultralytics.models.yolo.segment import SegmentationTrainer
from ultralytics.nn.tasks import YOLOESegModel
from ultralytics.utils import RANK
from .train import YOLOETrainer, YOLOETrainerFromScratch, YOLOEVPTrainer
from .val import YOLOESegValidator
class YOLOESegTrainer(YOLOETrainer, SegmentationTrainer):
def get_model(self, cfg=None, weights=None, verbose=True):
# NOTE: This `nc` here is the max number of different text samples in one image, rather than the actual `nc`.
# NOTE: Following the official config, nc hard-coded to 80 for now.
model = YOLOESegModel(
cfg["yaml_file"] if isinstance(cfg, dict) else cfg,
ch=self.data["channels"],
nc=min(self.data["nc"], 80),
verbose=verbose and RANK == -1,
)
if weights:
model.load(weights)
return model
def get_validator(self):
self.loss_names = "box", "seg", "cls", "dfl"
return YOLOESegValidator(
self.test_loader, save_dir=self.save_dir, args=copy(self.args), _callbacks=self.callbacks
)
class YOLOEPESegTrainer(SegmentationTrainer):
def get_model(self, cfg=None, weights=None, verbose=True):
# NOTE: This `nc` here is the max number of different text samples in one image, rather than the actual `nc`.
# NOTE: Following the official config, nc hard-coded to 80 for now.
model = YOLOESegModel(
cfg["yaml_file"] if isinstance(cfg, dict) else cfg,
ch=self.data["channels"],
nc=self.data["nc"],
verbose=verbose and RANK == -1,
)
del model.model[-1].savpe
assert weights is not None, "Pretrained weights must be provided for linear probing."
if weights:
model.load(weights)
model.eval()
names = list(self.data["names"].values())
# NOTE: `get_text_pe` related to text model and YOLOEDetect.reprta,
# it'd get correct results as long as loading proper pretrained weights.
tpe = model.get_text_pe(names)
model.set_classes(names, tpe)
model.model[-1].fuse(model.pe)
model.model[-1].cv3[0][2] = deepcopy(model.model[-1].cv3[0][2]).requires_grad_(True)
model.model[-1].cv3[1][2] = deepcopy(model.model[-1].cv3[1][2]).requires_grad_(True)
model.model[-1].cv3[2][2] = deepcopy(model.model[-1].cv3[2][2]).requires_grad_(True)
if getattr(model.model[-1], "one2one_cv3", None) is not None:
model.model[-1].one2one_cv3[0][2] = deepcopy(model.model[-1].cv3[0][2]).requires_grad_(True)
model.model[-1].one2one_cv3[1][2] = deepcopy(model.model[-1].cv3[1][2]).requires_grad_(True)
model.model[-1].one2one_cv3[2][2] = deepcopy(model.model[-1].cv3[2][2]).requires_grad_(True)
model.train()
return model
class YOLOESegTrainerFromScratch(YOLOETrainerFromScratch, YOLOESegTrainer):
pass
class YOLOESegVPTrainer(YOLOEVPTrainer, YOLOESegTrainerFromScratch):
pass | --- +++ @@ -11,8 +11,28 @@
class YOLOESegTrainer(YOLOETrainer, SegmentationTrainer):
+ """Trainer class for YOLOE segmentation models.
+
+ This class combines YOLOETrainer and SegmentationTrainer to provide training functionality specifically for YOLOE
+ segmentation models, enabling both object detection and instance segmentation capabilities.
+
+ Attributes:
+ cfg (dict): Configuration dictionary with training parameters.
+ overrides (dict): Dictionary with parameter overrides.
+ _callbacks (dict): Dictionary of callback functions for training events.
+ """
def get_model(self, cfg=None, weights=None, verbose=True):
+ """Return YOLOESegModel initialized with specified config and weights.
+
+ Args:
+ cfg (dict | str, optional): Model configuration dictionary or YAML file path.
+ weights (str, optional): Path to pretrained weights file.
+ verbose (bool): Whether to display model information.
+
+ Returns:
+ (YOLOESegModel): Initialized YOLOE segmentation model.
+ """
# NOTE: This `nc` here is the max number of different text samples in one image, rather than the actual `nc`.
# NOTE: Following the official config, nc hard-coded to 80 for now.
model = YOLOESegModel(
@@ -27,6 +47,11 @@
return model
def get_validator(self):
+ """Create and return a validator for YOLOE segmentation model evaluation.
+
+ Returns:
+ (YOLOESegValidator): Validator for YOLOE segmentation models.
+ """
self.loss_names = "box", "seg", "cls", "dfl"
return YOLOESegValidator(
self.test_loader, save_dir=self.save_dir, args=copy(self.args), _callbacks=self.callbacks
@@ -34,8 +59,26 @@
class YOLOEPESegTrainer(SegmentationTrainer):
+ """Fine-tune YOLOESeg model in linear probing way.
+
+ This trainer specializes in fine-tuning YOLOESeg models using a linear probing approach, which involves freezing
+ most of the model and only training specific layers for efficient adaptation to new tasks.
+
+ Attributes:
+ data (dict): Dataset configuration containing channels, class names, and number of classes.
+ """
def get_model(self, cfg=None, weights=None, verbose=True):
+ """Return YOLOESegModel initialized with specified config and weights for linear probing.
+
+ Args:
+ cfg (dict | str, optional): Model configuration dictionary or YAML file path.
+ weights (str, optional): Path to pretrained weights file.
+ verbose (bool): Whether to display model information.
+
+ Returns:
+ (YOLOESegModel): Initialized YOLOE segmentation model configured for linear probing.
+ """
# NOTE: This `nc` here is the max number of different text samples in one image, rather than the actual `nc`.
# NOTE: Following the official config, nc hard-coded to 80 for now.
model = YOLOESegModel(
@@ -73,10 +116,12 @@
class YOLOESegTrainerFromScratch(YOLOETrainerFromScratch, YOLOESegTrainer):
+ """Trainer for YOLOE segmentation models trained from scratch without pretrained weights."""
pass
class YOLOESegVPTrainer(YOLOEVPTrainer, YOLOESegTrainerFromScratch):
+ """Trainer for YOLOE segmentation models with Vision Prompt (VP) capabilities."""
- pass
+ pass
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/yolo/yoloe/train_seg.py |
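The linear-probing trainers in this file (and in train.py above) keep the network frozen and re-enable gradients only on the final classification convolutions. The same idea in plain PyTorch, on a toy model with purely illustrative layers:

import torch.nn as nn

model = nn.Sequential(
    nn.Conv2d(3, 16, 3, padding=1), nn.ReLU(),   # stand-in "backbone"
    nn.AdaptiveAvgPool2d(1), nn.Flatten(),
    nn.Linear(16, 80),                           # stand-in classification head
)
model.requires_grad_(False)       # freeze everything
model[-1].requires_grad_(True)    # re-enable only the head, as linear probing does

print([n for n, p in model.named_parameters() if p.requires_grad])  # only the final Linear's weight/bias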
Improve my code by adding docstrings | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from copy import deepcopy
from pathlib import Path
from typing import Any
import torch
from torch.nn import functional as F
from ultralytics.data import YOLOConcatDataset, build_dataloader, build_yolo_dataset
from ultralytics.data.augment import LoadVisualPrompt
from ultralytics.data.utils import check_det_dataset
from ultralytics.models.yolo.detect import DetectionValidator
from ultralytics.models.yolo.segment import SegmentationValidator
from ultralytics.nn.modules.head import YOLOEDetect
from ultralytics.nn.tasks import YOLOEModel
from ultralytics.utils import LOGGER, TQDM
from ultralytics.utils.torch_utils import select_device, smart_inference_mode
class YOLOEDetectValidator(DetectionValidator):
@smart_inference_mode()
def get_visual_pe(self, dataloader: torch.utils.data.DataLoader, model: YOLOEModel) -> torch.Tensor:
assert isinstance(model, YOLOEModel)
names = [name.split("/", 1)[0] for name in list(dataloader.dataset.data["names"].values())]
visual_pe = torch.zeros(len(names), model.model[-1].embed, device=self.device)
cls_visual_num = torch.zeros(len(names))
desc = "Get visual prompt embeddings from samples"
# Count samples per class
for batch in dataloader:
cls = batch["cls"].squeeze(-1).to(torch.int).unique()
count = torch.bincount(cls, minlength=len(names))
cls_visual_num += count
cls_visual_num = cls_visual_num.to(self.device)
# Extract visual prompt embeddings
pbar = TQDM(dataloader, total=len(dataloader), desc=desc)
for batch in pbar:
batch = self.preprocess(batch)
preds = model.get_visual_pe(batch["img"], visual=batch["visuals"]) # (B, max_n, embed_dim)
batch_idx = batch["batch_idx"]
for i in range(preds.shape[0]):
cls = batch["cls"][batch_idx == i].squeeze(-1).to(torch.int).unique(sorted=True)
pad_cls = torch.ones(preds.shape[1], device=self.device) * -1
pad_cls[: cls.shape[0]] = cls
for c in cls:
visual_pe[c] += preds[i][pad_cls == c].sum(0) / cls_visual_num[c]
# Normalize embeddings for classes with samples, set others to zero
visual_pe[cls_visual_num != 0] = F.normalize(visual_pe[cls_visual_num != 0], dim=-1, p=2)
visual_pe[cls_visual_num == 0] = 0
return visual_pe.unsqueeze(0)
def get_vpe_dataloader(self, data: dict[str, Any]) -> torch.utils.data.DataLoader:
dataset = build_yolo_dataset(
self.args,
data.get(self.args.split, data.get("val")),
self.args.batch,
data,
mode="val",
rect=False,
)
if isinstance(dataset, YOLOConcatDataset):
for d in dataset.datasets:
d.transforms.append(LoadVisualPrompt())
else:
dataset.transforms.append(LoadVisualPrompt())
return build_dataloader(
dataset,
self.args.batch,
self.args.workers,
shuffle=False,
rank=-1,
)
@smart_inference_mode()
def __call__(
self,
trainer: Any | None = None,
model: YOLOEModel | str | None = None,
refer_data: str | None = None,
load_vp: bool = False,
) -> dict[str, Any]:
if trainer is not None:
self.device = trainer.device
model = trainer.ema.ema
names = [name.split("/", 1)[0] for name in list(self.dataloader.dataset.data["names"].values())]
if load_vp:
LOGGER.info("Validate using the visual prompt.")
self.args.half = False
# Directly use the same dataloader for visual embeddings extracted during training
vpe = self.get_visual_pe(self.dataloader, model)
model.set_classes(names, vpe)
else:
LOGGER.info("Validate using the text prompt.")
tpe = model.get_text_pe(names)
model.set_classes(names, tpe)
stats = super().__call__(trainer, model)
else:
if refer_data is not None:
assert load_vp, "Refer data is only used for visual prompt validation."
self.device = select_device(self.args.device, verbose=False)
if isinstance(model, (str, Path)):
from ultralytics.nn.tasks import load_checkpoint
model, _ = load_checkpoint(model, device=self.device) # model, ckpt
model.eval().to(self.device)
data = check_det_dataset(refer_data or self.args.data)
names = [name.split("/", 1)[0] for name in list(data["names"].values())]
if load_vp:
LOGGER.info("Validate using the visual prompt.")
self.args.half = False
# TODO: need to check if the names from refer data is consistent with the evaluated dataset
# could use same dataset or refer to extract visual prompt embeddings
dataloader = self.get_vpe_dataloader(data)
vpe = self.get_visual_pe(dataloader, model)
model.set_classes(names, vpe)
stats = super().__call__(model=deepcopy(model))
elif isinstance(model.model[-1], YOLOEDetect) and hasattr(model.model[-1], "lrpc"): # prompt-free
return super().__call__(trainer, model)
else:
LOGGER.info("Validate using the text prompt.")
tpe = model.get_text_pe(names)
model.set_classes(names, tpe)
stats = super().__call__(model=deepcopy(model))
return stats
class YOLOESegValidator(YOLOEDetectValidator, SegmentationValidator):
pass | --- +++ @@ -21,9 +21,47 @@
class YOLOEDetectValidator(DetectionValidator):
+ """A validator class for YOLOE detection models that handles both text and visual prompt embeddings.
+
+ This class extends DetectionValidator to provide specialized validation functionality for YOLOE models. It supports
+ validation using either text prompts or visual prompt embeddings extracted from training samples, enabling flexible
+ evaluation strategies for prompt-based object detection.
+
+ Attributes:
+ device (torch.device): The device on which validation is performed.
+ args (namespace): Configuration arguments for validation.
+ dataloader (DataLoader): DataLoader for validation data.
+
+ Methods:
+ get_visual_pe: Extract visual prompt embeddings from training samples.
+ preprocess: Preprocess batch data ensuring visuals are on the same device as images.
+ get_vpe_dataloader: Create a dataloader for LVIS training visual prompt samples.
+ __call__: Run validation using either text or visual prompt embeddings.
+
+ Examples:
+ Validate with text prompts
+ >>> validator = YOLOEDetectValidator()
+ >>> stats = validator(model=model, load_vp=False)
+
+ Validate with visual prompts
+ >>> stats = validator(model=model, refer_data="path/to/data.yaml", load_vp=True)
+ """
@smart_inference_mode()
def get_visual_pe(self, dataloader: torch.utils.data.DataLoader, model: YOLOEModel) -> torch.Tensor:
+ """Extract visual prompt embeddings from training samples.
+
+ This method processes a dataloader to compute visual prompt embeddings for each class using a YOLOE model. It
+ normalizes the embeddings and handles cases where no samples exist for a class by setting their embeddings to
+ zero.
+
+ Args:
+ dataloader (torch.utils.data.DataLoader): The dataloader providing training samples.
+ model (YOLOEModel): The YOLOE model from which to extract visual prompt embeddings.
+
+ Returns:
+ (torch.Tensor): Visual prompt embeddings with shape (1, num_classes, embed_dim).
+ """
assert isinstance(model, YOLOEModel)
names = [name.split("/", 1)[0] for name in list(dataloader.dataset.data["names"].values())]
visual_pe = torch.zeros(len(names), model.model[-1].embed, device=self.device)
@@ -59,6 +97,17 @@
return visual_pe.unsqueeze(0)
def get_vpe_dataloader(self, data: dict[str, Any]) -> torch.utils.data.DataLoader:
+ """Create a dataloader for LVIS training visual prompt samples.
+
+ This method prepares a dataloader for visual prompt embeddings (VPE) using the specified dataset. It applies
+ the necessary transformations, including LoadVisualPrompt, and configures the dataset for validation.
+
+ Args:
+ data (dict): Dataset configuration dictionary containing paths and settings.
+
+ Returns:
+ (torch.utils.data.DataLoader): The dataloader for visual prompt samples.
+ """
dataset = build_yolo_dataset(
self.args,
data.get(self.args.split, data.get("val")),
@@ -88,6 +137,21 @@
refer_data: str | None = None,
load_vp: bool = False,
) -> dict[str, Any]:
+ """Run validation on the model using either text or visual prompt embeddings.
+
+ This method validates the model using either text prompts or visual prompts, depending on the load_vp flag. It
+ supports validation during training (using a trainer object) or standalone validation with a provided model. For
+ visual prompts, reference data can be specified to extract embeddings from a different dataset.
+
+ Args:
+ trainer (object, optional): Trainer object containing the model and device.
+ model (YOLOEModel | str, optional): Model to validate. Required if trainer is not provided.
+ refer_data (str, optional): Path to reference data for visual prompts.
+ load_vp (bool): Whether to load visual prompts. If False, text prompts are used.
+
+ Returns:
+ (dict): Validation statistics containing metrics computed during validation.
+ """
if trainer is not None:
self.device = trainer.device
model = trainer.ema.ema
@@ -137,5 +201,6 @@
class YOLOESegValidator(YOLOEDetectValidator, SegmentationValidator):
-
- pass
+ """YOLOE segmentation validator that supports both text and visual prompt embeddings."""
+
+ pass
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/models/yolo/yoloe/val.py |
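get_visual_pe above boils down to: sum prompt embeddings per class, divide by the per-class sample count, L2-normalize the classes that were seen, and zero out the rest. A toy version of that aggregation, assuming nothing beyond plain PyTorch:

import torch
import torch.nn.functional as F

num_classes, embed_dim = 4, 8
embeddings = torch.randn(10, embed_dim)        # per-sample prompt embeddings
labels = torch.randint(0, num_classes, (10,))  # class id for each sample

pe = torch.zeros(num_classes, embed_dim)
counts = torch.bincount(labels, minlength=num_classes).float()
pe.index_add_(0, labels, embeddings)                       # per-class sums
pe[counts > 0] /= counts[counts > 0].unsqueeze(-1)         # per-class means
pe[counts > 0] = F.normalize(pe[counts > 0], dim=-1, p=2)  # unit-norm embeddings for seen classes
pe[counts == 0] = 0                                        # unseen classes stay at zero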
Generate docstrings for this script | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import copy
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import constant_, xavier_uniform_
from ultralytics.utils import NOT_MACOS14
from ultralytics.utils.tal import dist2bbox, dist2rbox, make_anchors
from ultralytics.utils.torch_utils import TORCH_1_11, fuse_conv_and_bn, smart_inference_mode
from .block import DFL, SAVPE, BNContrastiveHead, ContrastiveHead, Proto, Proto26, RealNVP, Residual, SwiGLUFFN
from .conv import Conv, DWConv
from .transformer import MLP, DeformableTransformerDecoder, DeformableTransformerDecoderLayer
from .utils import bias_init_with_prob, linear_init
__all__ = "OBB", "Classify", "Detect", "Pose", "RTDETRDecoder", "Segment", "YOLOEDetect", "YOLOESegment", "v10Detect"
class Detect(nn.Module):
dynamic = False # force grid reconstruction
export = False # export mode
format = None # export format
max_det = 300 # max_det
agnostic_nms = False
shape = None
anchors = torch.empty(0) # init
strides = torch.empty(0) # init
legacy = False # backward compatibility for v3/v5/v8/v9 models
xyxy = False # xyxy or xywh output
def __init__(self, nc: int = 80, reg_max=16, end2end=False, ch: tuple = ()):
super().__init__()
self.nc = nc # number of classes
self.nl = len(ch) # number of detection layers
self.reg_max = reg_max # DFL channels (ch[0] // 16 to scale 4/8/12/16/20 for n/s/m/l/x)
self.no = nc + self.reg_max * 4 # number of outputs per anchor
self.stride = torch.zeros(self.nl) # strides computed during build
c2, c3 = max((16, ch[0] // 4, self.reg_max * 4)), max(ch[0], min(self.nc, 100)) # channels
self.cv2 = nn.ModuleList(
nn.Sequential(Conv(x, c2, 3), Conv(c2, c2, 3), nn.Conv2d(c2, 4 * self.reg_max, 1)) for x in ch
)
self.cv3 = (
nn.ModuleList(nn.Sequential(Conv(x, c3, 3), Conv(c3, c3, 3), nn.Conv2d(c3, self.nc, 1)) for x in ch)
if self.legacy
else nn.ModuleList(
nn.Sequential(
nn.Sequential(DWConv(x, x, 3), Conv(x, c3, 1)),
nn.Sequential(DWConv(c3, c3, 3), Conv(c3, c3, 1)),
nn.Conv2d(c3, self.nc, 1),
)
for x in ch
)
)
self.dfl = DFL(self.reg_max) if self.reg_max > 1 else nn.Identity()
if end2end:
self.one2one_cv2 = copy.deepcopy(self.cv2)
self.one2one_cv3 = copy.deepcopy(self.cv3)
@property
def one2many(self):
return dict(box_head=self.cv2, cls_head=self.cv3)
@property
def one2one(self):
return dict(box_head=self.one2one_cv2, cls_head=self.one2one_cv3)
@property
def end2end(self):
return getattr(self, "_end2end", True) and hasattr(self, "one2one")
@end2end.setter
def end2end(self, value):
self._end2end = value
def forward_head(
self, x: list[torch.Tensor], box_head: torch.nn.Module = None, cls_head: torch.nn.Module = None
) -> dict[str, torch.Tensor]:
if box_head is None or cls_head is None: # for fused inference
return dict()
bs = x[0].shape[0] # batch size
boxes = torch.cat([box_head[i](x[i]).view(bs, 4 * self.reg_max, -1) for i in range(self.nl)], dim=-1)
scores = torch.cat([cls_head[i](x[i]).view(bs, self.nc, -1) for i in range(self.nl)], dim=-1)
return dict(boxes=boxes, scores=scores, feats=x)
def forward(
self, x: list[torch.Tensor]
) -> dict[str, torch.Tensor] | torch.Tensor | tuple[torch.Tensor, dict[str, torch.Tensor]]:
preds = self.forward_head(x, **self.one2many)
if self.end2end:
x_detach = [xi.detach() for xi in x]
one2one = self.forward_head(x_detach, **self.one2one)
preds = {"one2many": preds, "one2one": one2one}
if self.training:
return preds
y = self._inference(preds["one2one"] if self.end2end else preds)
if self.end2end:
y = self.postprocess(y.permute(0, 2, 1))
return y if self.export else (y, preds)
def _inference(self, x: dict[str, torch.Tensor]) -> torch.Tensor:
# Inference path
dbox = self._get_decode_boxes(x)
return torch.cat((dbox, x["scores"].sigmoid()), 1)
def _get_decode_boxes(self, x: dict[str, torch.Tensor]) -> torch.Tensor:
shape = x["feats"][0].shape # BCHW
if self.dynamic or self.shape != shape:
self.anchors, self.strides = (a.transpose(0, 1) for a in make_anchors(x["feats"], self.stride, 0.5))
self.shape = shape
dbox = self.decode_bboxes(self.dfl(x["boxes"]), self.anchors.unsqueeze(0)) * self.strides
return dbox
def bias_init(self):
for i, (a, b) in enumerate(zip(self.one2many["box_head"], self.one2many["cls_head"])): # from
a[-1].bias.data[:] = 2.0 # box
b[-1].bias.data[: self.nc] = math.log(
5 / self.nc / (640 / self.stride[i]) ** 2
) # cls (.01 objects, 80 classes, 640 img)
if self.end2end:
for i, (a, b) in enumerate(zip(self.one2one["box_head"], self.one2one["cls_head"])): # from
a[-1].bias.data[:] = 2.0 # box
b[-1].bias.data[: self.nc] = math.log(
5 / self.nc / (640 / self.stride[i]) ** 2
) # cls (.01 objects, 80 classes, 640 img)
def decode_bboxes(self, bboxes: torch.Tensor, anchors: torch.Tensor, xywh: bool = True) -> torch.Tensor:
return dist2bbox(
bboxes,
anchors,
xywh=xywh and not self.end2end and not self.xyxy,
dim=1,
)
def postprocess(self, preds: torch.Tensor) -> torch.Tensor:
boxes, scores = preds.split([4, self.nc], dim=-1)
scores, conf, idx = self.get_topk_index(scores, self.max_det)
boxes = boxes.gather(dim=1, index=idx.repeat(1, 1, 4))
return torch.cat([boxes, scores, conf], dim=-1)
def get_topk_index(self, scores: torch.Tensor, max_det: int) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
batch_size, anchors, nc = scores.shape # i.e. shape(16,8400,84)
# Use max_det directly during export for TensorRT compatibility (requires k to be constant),
# otherwise use min(max_det, anchors) for safety with small inputs during Python inference
k = max_det if self.export else min(max_det, anchors)
if self.agnostic_nms:
scores, labels = scores.max(dim=-1, keepdim=True)
scores, indices = scores.topk(k, dim=1)
labels = labels.gather(1, indices)
return scores, labels, indices
ori_index = scores.max(dim=-1)[0].topk(k)[1].unsqueeze(-1)
scores = scores.gather(dim=1, index=ori_index.repeat(1, 1, nc))
scores, index = scores.flatten(1).topk(k)
idx = ori_index[torch.arange(batch_size)[..., None], index // nc] # original index
return scores[..., None], (index % nc)[..., None].float(), idx
def fuse(self) -> None:
self.cv2 = self.cv3 = None
class Segment(Detect):
def __init__(self, nc: int = 80, nm: int = 32, npr: int = 256, reg_max=16, end2end=False, ch: tuple = ()):
super().__init__(nc, reg_max, end2end, ch)
self.nm = nm # number of masks
self.npr = npr # number of protos
self.proto = Proto(ch[0], self.npr, self.nm) # protos
c4 = max(ch[0] // 4, self.nm)
self.cv4 = nn.ModuleList(nn.Sequential(Conv(x, c4, 3), Conv(c4, c4, 3), nn.Conv2d(c4, self.nm, 1)) for x in ch)
if end2end:
self.one2one_cv4 = copy.deepcopy(self.cv4)
@property
def one2many(self):
return dict(box_head=self.cv2, cls_head=self.cv3, mask_head=self.cv4)
@property
def one2one(self):
return dict(box_head=self.one2one_cv2, cls_head=self.one2one_cv3, mask_head=self.one2one_cv4)
def forward(self, x: list[torch.Tensor]) -> tuple | list[torch.Tensor] | dict[str, torch.Tensor]:
outputs = super().forward(x)
preds = outputs[1] if isinstance(outputs, tuple) else outputs
proto = self.proto(x[0]) # mask protos
if isinstance(preds, dict): # training and validating during training
if self.end2end:
preds["one2many"]["proto"] = proto
preds["one2one"]["proto"] = proto.detach()
else:
preds["proto"] = proto
if self.training:
return preds
return (outputs, proto) if self.export else ((outputs[0], proto), preds)
def _inference(self, x: dict[str, torch.Tensor]) -> torch.Tensor:
preds = super()._inference(x)
return torch.cat([preds, x["mask_coefficient"]], dim=1)
def forward_head(
self, x: list[torch.Tensor], box_head: torch.nn.Module, cls_head: torch.nn.Module, mask_head: torch.nn.Module
) -> dict[str, torch.Tensor]:
preds = super().forward_head(x, box_head, cls_head)
if mask_head is not None:
bs = x[0].shape[0] # batch size
preds["mask_coefficient"] = torch.cat([mask_head[i](x[i]).view(bs, self.nm, -1) for i in range(self.nl)], 2)
return preds
def postprocess(self, preds: torch.Tensor) -> torch.Tensor:
boxes, scores, mask_coefficient = preds.split([4, self.nc, self.nm], dim=-1)
scores, conf, idx = self.get_topk_index(scores, self.max_det)
boxes = boxes.gather(dim=1, index=idx.repeat(1, 1, 4))
mask_coefficient = mask_coefficient.gather(dim=1, index=idx.repeat(1, 1, self.nm))
return torch.cat([boxes, scores, conf, mask_coefficient], dim=-1)
def fuse(self) -> None:
self.cv2 = self.cv3 = self.cv4 = None
class Segment26(Segment):
def __init__(self, nc: int = 80, nm: int = 32, npr: int = 256, reg_max=16, end2end=False, ch: tuple = ()):
super().__init__(nc, nm, npr, reg_max, end2end, ch)
self.proto = Proto26(ch, self.npr, self.nm, nc) # protos
def forward(self, x: list[torch.Tensor]) -> tuple | list[torch.Tensor] | dict[str, torch.Tensor]:
outputs = Detect.forward(self, x)
preds = outputs[1] if isinstance(outputs, tuple) else outputs
proto = self.proto(x) # mask protos
if isinstance(preds, dict): # training and validating during training
if self.end2end:
preds["one2many"]["proto"] = proto
preds["one2one"]["proto"] = (
tuple(p.detach() for p in proto) if isinstance(proto, tuple) else proto.detach()
)
else:
preds["proto"] = proto
if self.training:
return preds
return (outputs, proto) if self.export else ((outputs[0], proto), preds)
def fuse(self) -> None:
super().fuse()
if hasattr(self.proto, "fuse"):
self.proto.fuse()
class OBB(Detect):
def __init__(self, nc: int = 80, ne: int = 1, reg_max=16, end2end=False, ch: tuple = ()):
super().__init__(nc, reg_max, end2end, ch)
self.ne = ne # number of extra parameters
c4 = max(ch[0] // 4, self.ne)
self.cv4 = nn.ModuleList(nn.Sequential(Conv(x, c4, 3), Conv(c4, c4, 3), nn.Conv2d(c4, self.ne, 1)) for x in ch)
if end2end:
self.one2one_cv4 = copy.deepcopy(self.cv4)
@property
def one2many(self):
return dict(box_head=self.cv2, cls_head=self.cv3, angle_head=self.cv4)
@property
def one2one(self):
return dict(box_head=self.one2one_cv2, cls_head=self.one2one_cv3, angle_head=self.one2one_cv4)
def _inference(self, x: dict[str, torch.Tensor]) -> torch.Tensor:
# For decode_bboxes convenience
self.angle = x["angle"]
preds = super()._inference(x)
return torch.cat([preds, x["angle"]], dim=1)
def forward_head(
self, x: list[torch.Tensor], box_head: torch.nn.Module, cls_head: torch.nn.Module, angle_head: torch.nn.Module
) -> dict[str, torch.Tensor]:
preds = super().forward_head(x, box_head, cls_head)
if angle_head is not None:
bs = x[0].shape[0] # batch size
angle = torch.cat(
[angle_head[i](x[i]).view(bs, self.ne, -1) for i in range(self.nl)], 2
) # OBB theta logits
angle = (angle.sigmoid() - 0.25) * math.pi # [-pi/4, 3pi/4]
preds["angle"] = angle
return preds
def decode_bboxes(self, bboxes: torch.Tensor, anchors: torch.Tensor) -> torch.Tensor:
return dist2rbox(bboxes, self.angle, anchors, dim=1)
def postprocess(self, preds: torch.Tensor) -> torch.Tensor:
boxes, scores, angle = preds.split([4, self.nc, self.ne], dim=-1)
scores, conf, idx = self.get_topk_index(scores, self.max_det)
boxes = boxes.gather(dim=1, index=idx.repeat(1, 1, 4))
angle = angle.gather(dim=1, index=idx.repeat(1, 1, self.ne))
return torch.cat([boxes, scores, conf, angle], dim=-1)
def fuse(self) -> None:
self.cv2 = self.cv3 = self.cv4 = None
class OBB26(OBB):
def forward_head(
self, x: list[torch.Tensor], box_head: torch.nn.Module, cls_head: torch.nn.Module, angle_head: torch.nn.Module
) -> dict[str, torch.Tensor]:
preds = Detect.forward_head(self, x, box_head, cls_head)
if angle_head is not None:
bs = x[0].shape[0] # batch size
angle = torch.cat(
[angle_head[i](x[i]).view(bs, self.ne, -1) for i in range(self.nl)], 2
) # OBB theta logits (raw output without sigmoid transformation)
preds["angle"] = angle
return preds
class Pose(Detect):
def __init__(self, nc: int = 80, kpt_shape: tuple = (17, 3), reg_max=16, end2end=False, ch: tuple = ()):
super().__init__(nc, reg_max, end2end, ch)
self.kpt_shape = kpt_shape # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
self.nk = kpt_shape[0] * kpt_shape[1] # number of keypoints total
c4 = max(ch[0] // 4, self.nk)
self.cv4 = nn.ModuleList(nn.Sequential(Conv(x, c4, 3), Conv(c4, c4, 3), nn.Conv2d(c4, self.nk, 1)) for x in ch)
if end2end:
self.one2one_cv4 = copy.deepcopy(self.cv4)
@property
def one2many(self):
return dict(box_head=self.cv2, cls_head=self.cv3, pose_head=self.cv4)
@property
def one2one(self):
return dict(box_head=self.one2one_cv2, cls_head=self.one2one_cv3, pose_head=self.one2one_cv4)
def _inference(self, x: dict[str, torch.Tensor]) -> torch.Tensor:
preds = super()._inference(x)
return torch.cat([preds, self.kpts_decode(x["kpts"])], dim=1)
def forward_head(
self, x: list[torch.Tensor], box_head: torch.nn.Module, cls_head: torch.nn.Module, pose_head: torch.nn.Module
) -> dict[str, torch.Tensor]:
preds = super().forward_head(x, box_head, cls_head)
if pose_head is not None:
bs = x[0].shape[0] # batch size
preds["kpts"] = torch.cat([pose_head[i](x[i]).view(bs, self.nk, -1) for i in range(self.nl)], 2)
return preds
def postprocess(self, preds: torch.Tensor) -> torch.Tensor:
boxes, scores, kpts = preds.split([4, self.nc, self.nk], dim=-1)
scores, conf, idx = self.get_topk_index(scores, self.max_det)
boxes = boxes.gather(dim=1, index=idx.repeat(1, 1, 4))
kpts = kpts.gather(dim=1, index=idx.repeat(1, 1, self.nk))
return torch.cat([boxes, scores, conf, kpts], dim=-1)
def fuse(self) -> None:
self.cv2 = self.cv3 = self.cv4 = None
def kpts_decode(self, kpts: torch.Tensor) -> torch.Tensor:
ndim = self.kpt_shape[1]
bs = kpts.shape[0]
if self.export:
y = kpts.view(bs, *self.kpt_shape, -1)
a = (y[:, :, :2] * 2.0 + (self.anchors - 0.5)) * self.strides
if ndim == 3:
a = torch.cat((a, y[:, :, 2:3].sigmoid()), 2)
return a.view(bs, self.nk, -1)
else:
y = kpts.clone()
if ndim == 3:
if NOT_MACOS14:
y[:, 2::ndim].sigmoid_()
else: # Apple macOS14 MPS bug https://github.com/ultralytics/ultralytics/pull/21878
y[:, 2::ndim] = y[:, 2::ndim].sigmoid()
y[:, 0::ndim] = (y[:, 0::ndim] * 2.0 + (self.anchors[0] - 0.5)) * self.strides
y[:, 1::ndim] = (y[:, 1::ndim] * 2.0 + (self.anchors[1] - 0.5)) * self.strides
return y
class Pose26(Pose):
def __init__(self, nc: int = 80, kpt_shape: tuple = (17, 3), reg_max=16, end2end=False, ch: tuple = ()):
super().__init__(nc, kpt_shape, reg_max, end2end, ch)
self.flow_model = RealNVP()
c4 = max(ch[0] // 4, kpt_shape[0] * (kpt_shape[1] + 2))
self.cv4 = nn.ModuleList(nn.Sequential(Conv(x, c4, 3), Conv(c4, c4, 3)) for x in ch)
self.cv4_kpts = nn.ModuleList(nn.Conv2d(c4, self.nk, 1) for _ in ch)
self.nk_sigma = kpt_shape[0] * 2 # sigma_x, sigma_y for each keypoint
self.cv4_sigma = nn.ModuleList(nn.Conv2d(c4, self.nk_sigma, 1) for _ in ch)
if end2end:
self.one2one_cv4 = copy.deepcopy(self.cv4)
self.one2one_cv4_kpts = copy.deepcopy(self.cv4_kpts)
self.one2one_cv4_sigma = copy.deepcopy(self.cv4_sigma)
@property
def one2many(self):
return dict(
box_head=self.cv2,
cls_head=self.cv3,
pose_head=self.cv4,
kpts_head=self.cv4_kpts,
kpts_sigma_head=self.cv4_sigma,
)
@property
def one2one(self):
return dict(
box_head=self.one2one_cv2,
cls_head=self.one2one_cv3,
pose_head=self.one2one_cv4,
kpts_head=self.one2one_cv4_kpts,
kpts_sigma_head=self.one2one_cv4_sigma,
)
def forward_head(
self,
x: list[torch.Tensor],
box_head: torch.nn.Module,
cls_head: torch.nn.Module,
pose_head: torch.nn.Module,
kpts_head: torch.nn.Module,
kpts_sigma_head: torch.nn.Module,
) -> dict[str, torch.Tensor]:
preds = Detect.forward_head(self, x, box_head, cls_head)
if pose_head is not None:
bs = x[0].shape[0] # batch size
features = [pose_head[i](x[i]) for i in range(self.nl)]
preds["kpts"] = torch.cat([kpts_head[i](features[i]).view(bs, self.nk, -1) for i in range(self.nl)], 2)
if self.training:
preds["kpts_sigma"] = torch.cat(
[kpts_sigma_head[i](features[i]).view(bs, self.nk_sigma, -1) for i in range(self.nl)], 2
)
return preds
def fuse(self) -> None:
super().fuse()
self.cv4_kpts = self.cv4_sigma = self.flow_model = self.one2one_cv4_sigma = None
def kpts_decode(self, kpts: torch.Tensor) -> torch.Tensor:
ndim = self.kpt_shape[1]
bs = kpts.shape[0]
if self.export:
y = kpts.view(bs, *self.kpt_shape, -1)
# NCNN fix
a = (y[:, :, :2] + self.anchors) * self.strides
if ndim == 3:
a = torch.cat((a, y[:, :, 2:3].sigmoid()), 2)
return a.view(bs, self.nk, -1)
else:
y = kpts.clone()
if ndim == 3:
if NOT_MACOS14:
y[:, 2::ndim].sigmoid_()
else: # Apple macOS14 MPS bug https://github.com/ultralytics/ultralytics/pull/21878
y[:, 2::ndim] = y[:, 2::ndim].sigmoid()
y[:, 0::ndim] = (y[:, 0::ndim] + self.anchors[0]) * self.strides
y[:, 1::ndim] = (y[:, 1::ndim] + self.anchors[1]) * self.strides
return y
class Classify(nn.Module):
export = False # export mode
def __init__(self, c1: int, c2: int, k: int = 1, s: int = 1, p: int | None = None, g: int = 1):
super().__init__()
c_ = 1280 # efficientnet_b0 size
self.conv = Conv(c1, c_, k, s, p, g)
self.pool = nn.AdaptiveAvgPool2d(1) # to x(b,c_,1,1)
self.drop = nn.Dropout(p=0.0, inplace=True)
self.linear = nn.Linear(c_, c2) # to x(b,c2)
def forward(self, x: list[torch.Tensor] | torch.Tensor) -> torch.Tensor | tuple:
if isinstance(x, list):
x = torch.cat(x, 1)
x = self.linear(self.drop(self.pool(self.conv(x)).flatten(1)))
if self.training:
return x
y = x.softmax(1) # get final output
return y if self.export else (y, x)
class WorldDetect(Detect):
def __init__(
self,
nc: int = 80,
embed: int = 512,
with_bn: bool = False,
reg_max: int = 16,
end2end: bool = False,
ch: tuple = (),
):
super().__init__(nc, reg_max=reg_max, end2end=end2end, ch=ch)
c3 = max(ch[0], min(self.nc, 100))
self.cv3 = nn.ModuleList(nn.Sequential(Conv(x, c3, 3), Conv(c3, c3, 3), nn.Conv2d(c3, embed, 1)) for x in ch)
self.cv4 = nn.ModuleList(BNContrastiveHead(embed) if with_bn else ContrastiveHead() for _ in ch)
def forward(self, x: list[torch.Tensor], text: torch.Tensor) -> dict[str, torch.Tensor] | tuple:
feats = [xi.clone() for xi in x] # save original features for anchor generation
for i in range(self.nl):
x[i] = torch.cat((self.cv2[i](x[i]), self.cv4[i](self.cv3[i](x[i]), text)), 1)
self.no = self.nc + self.reg_max * 4 # self.nc could be changed when inference with different texts
bs = x[0].shape[0]
x_cat = torch.cat([xi.view(bs, self.no, -1) for xi in x], 2)
boxes, scores = x_cat.split((self.reg_max * 4, self.nc), 1)
preds = dict(boxes=boxes, scores=scores, feats=feats)
if self.training:
return preds
y = self._inference(preds)
return y if self.export else (y, preds)
def bias_init(self):
m = self # self.model[-1] # Detect() module
# cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1
# ncf = math.log(0.6 / (m.nc - 0.999999)) if cf is None else torch.log(cf / cf.sum()) # nominal class frequency
for a, b, s in zip(m.cv2, m.cv3, m.stride): # from
a[-1].bias.data[:] = 1.0 # box
# b[-1].bias.data[:] = math.log(5 / m.nc / (640 / s) ** 2) # cls (.01 objects, 80 classes, 640 img)
class LRPCHead(nn.Module):
def __init__(self, vocab: nn.Module, pf: nn.Module, loc: nn.Module, enabled: bool = True):
super().__init__()
self.vocab = self.conv2linear(vocab) if enabled else vocab
self.pf = pf
self.loc = loc
self.enabled = enabled
@staticmethod
def conv2linear(conv: nn.Conv2d) -> nn.Linear:
assert isinstance(conv, nn.Conv2d) and conv.kernel_size == (1, 1)
linear = nn.Linear(conv.in_channels, conv.out_channels)
linear.weight.data = conv.weight.view(conv.out_channels, -1).data
linear.bias.data = conv.bias.data
return linear
def forward(self, cls_feat: torch.Tensor, loc_feat: torch.Tensor, conf: float) -> tuple[tuple, torch.Tensor]:
if self.enabled:
pf_score = self.pf(cls_feat)[0, 0].flatten(0)
mask = pf_score.sigmoid() > conf
cls_feat = cls_feat.flatten(2).transpose(-1, -2)
cls_feat = self.vocab(cls_feat[:, mask] if conf else cls_feat * mask.unsqueeze(-1).int())
return self.loc(loc_feat), cls_feat.transpose(-1, -2), mask
else:
cls_feat = self.vocab(cls_feat)
loc_feat = self.loc(loc_feat)
return (
loc_feat,
cls_feat.flatten(2),
torch.ones(cls_feat.shape[2] * cls_feat.shape[3], device=cls_feat.device, dtype=torch.bool),
)
class YOLOEDetect(Detect):
is_fused = False
def __init__(
self, nc: int = 80, embed: int = 512, with_bn: bool = False, reg_max=16, end2end=False, ch: tuple = ()
):
super().__init__(nc, reg_max, end2end, ch)
c3 = max(ch[0], min(self.nc, 100))
assert c3 <= embed
assert with_bn
self.cv3 = (
nn.ModuleList(nn.Sequential(Conv(x, c3, 3), Conv(c3, c3, 3), nn.Conv2d(c3, embed, 1)) for x in ch)
if self.legacy
else nn.ModuleList(
nn.Sequential(
nn.Sequential(DWConv(x, x, 3), Conv(x, c3, 1)),
nn.Sequential(DWConv(c3, c3, 3), Conv(c3, c3, 1)),
nn.Conv2d(c3, embed, 1),
)
for x in ch
)
)
self.cv4 = nn.ModuleList(BNContrastiveHead(embed) if with_bn else ContrastiveHead() for _ in ch)
if end2end:
self.one2one_cv3 = copy.deepcopy(self.cv3) # overwrite with new cv3
self.one2one_cv4 = copy.deepcopy(self.cv4)
self.reprta = Residual(SwiGLUFFN(embed, embed))
self.savpe = SAVPE(ch, c3, embed)
self.embed = embed
@smart_inference_mode()
def fuse(self, txt_feats: torch.Tensor = None):
if txt_feats is None: # means eliminate one2many branch
self.cv2 = self.cv3 = self.cv4 = None
return
if self.is_fused:
return
assert not self.training
txt_feats = txt_feats.to(torch.float32).squeeze(0)
self._fuse_tp(txt_feats, self.cv3, self.cv4)
if self.end2end:
self._fuse_tp(txt_feats, self.one2one_cv3, self.one2one_cv4)
del self.reprta
self.reprta = nn.Identity()
self.is_fused = True
def _fuse_tp(self, txt_feats: torch.Tensor, cls_head: torch.nn.Module, bn_head: torch.nn.Module) -> None:
for cls_h, bn_h in zip(cls_head, bn_head):
assert isinstance(cls_h, nn.Sequential)
assert isinstance(bn_h, BNContrastiveHead)
conv = cls_h[-1]
assert isinstance(conv, nn.Conv2d)
logit_scale = bn_h.logit_scale
bias = bn_h.bias
norm = bn_h.norm
t = txt_feats * logit_scale.exp()
conv: nn.Conv2d = fuse_conv_and_bn(conv, norm)
w = conv.weight.data.squeeze(-1).squeeze(-1)
b = conv.bias.data
w = t @ w
b1 = (t @ b.reshape(-1).unsqueeze(-1)).squeeze(-1)
b2 = torch.ones_like(b1) * bias
conv = (
nn.Conv2d(
conv.in_channels,
w.shape[0],
kernel_size=1,
)
.requires_grad_(False)
.to(conv.weight.device)
)
conv.weight.data.copy_(w.unsqueeze(-1).unsqueeze(-1))
conv.bias.data.copy_(b1 + b2)
cls_h[-1] = conv
bn_h.fuse()
def get_tpe(self, tpe: torch.Tensor | None) -> torch.Tensor | None:
return None if tpe is None else F.normalize(self.reprta(tpe), dim=-1, p=2)
def get_vpe(self, x: list[torch.Tensor], vpe: torch.Tensor) -> torch.Tensor:
if vpe.shape[1] == 0: # no visual prompt embeddings
return torch.zeros(x[0].shape[0], 0, self.embed, device=x[0].device)
if vpe.ndim == 4: # (B, N, H, W)
vpe = self.savpe(x, vpe)
assert vpe.ndim == 3 # (B, N, D)
return vpe
def forward(self, x: list[torch.Tensor]) -> torch.Tensor | tuple:
if hasattr(self, "lrpc"): # for prompt-free inference
return self.forward_lrpc(x[:3])
return super().forward(x)
def forward_lrpc(self, x: list[torch.Tensor]) -> torch.Tensor | tuple:
boxes, scores, index = [], [], []
bs = x[0].shape[0]
cv2 = self.cv2 if not self.end2end else self.one2one_cv2
cv3 = self.cv3 if not self.end2end else self.one2one_cv3
for i in range(self.nl):
cls_feat = cv3[i](x[i])
loc_feat = cv2[i](x[i])
assert isinstance(self.lrpc[i], LRPCHead)
box, score, idx = self.lrpc[i](
cls_feat,
loc_feat,
0 if self.export and not self.dynamic else getattr(self, "conf", 0.001),
)
boxes.append(box.view(bs, self.reg_max * 4, -1))
scores.append(score)
index.append(idx)
preds = dict(boxes=torch.cat(boxes, 2), scores=torch.cat(scores, 2), feats=x, index=torch.cat(index))
y = self._inference(preds)
if self.end2end:
y = self.postprocess(y.permute(0, 2, 1))
return y if self.export else (y, preds)
def _get_decode_boxes(self, x):
dbox = super()._get_decode_boxes(x)
if hasattr(self, "lrpc"):
dbox = dbox if self.export and not self.dynamic else dbox[..., x["index"]]
return dbox
@property
def one2many(self):
return dict(box_head=self.cv2, cls_head=self.cv3, contrastive_head=self.cv4)
@property
def one2one(self):
return dict(box_head=self.one2one_cv2, cls_head=self.one2one_cv3, contrastive_head=self.one2one_cv4)
def forward_head(self, x, box_head, cls_head, contrastive_head):
assert len(x) == 4, f"Expected 4 features including 3 feature maps and 1 text embeddings, but got {len(x)}."
if box_head is None or cls_head is None: # for fused inference
return dict()
bs = x[0].shape[0] # batch size
boxes = torch.cat([box_head[i](x[i]).view(bs, 4 * self.reg_max, -1) for i in range(self.nl)], dim=-1)
self.nc = x[-1].shape[1]
scores = torch.cat(
[contrastive_head[i](cls_head[i](x[i]), x[-1]).reshape(bs, self.nc, -1) for i in range(self.nl)], dim=-1
)
self.no = self.nc + self.reg_max * 4 # self.nc could be changed when inference with different texts
return dict(boxes=boxes, scores=scores, feats=x[:3])
def bias_init(self):
for i, (a, b, c) in enumerate(
zip(self.one2many["box_head"], self.one2many["cls_head"], self.one2many["contrastive_head"])
):
a[-1].bias.data[:] = 2.0 # box
b[-1].bias.data[:] = 0.0
c.bias.data[:] = math.log(5 / self.nc / (640 / self.stride[i]) ** 2)
if self.end2end:
for i, (a, b, c) in enumerate(
zip(self.one2one["box_head"], self.one2one["cls_head"], self.one2one["contrastive_head"])
):
a[-1].bias.data[:] = 2.0 # box
b[-1].bias.data[:] = 0.0
c.bias.data[:] = math.log(5 / self.nc / (640 / self.stride[i]) ** 2)
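# A worked sketch of the algebra behind YOLOEDetect._fuse_tp (assuming the text embeddings passed in are
# already L2-normalized, as produced by get_tpe): the BNContrastiveHead score for class j is
#     score_j = exp(logit_scale) * t_j . BN(conv(feat)) + bias
# Folding the BatchNorm into the final 1x1 conv gives conv'(feat) = W @ feat + b per location, so with
# t_scaled = t * exp(logit_scale) the scores become
#     scores = (t_scaled @ W) @ feat + (t_scaled @ b + bias)
# which is exactly the replacement conv built in the loop (weight = t_scaled @ W, bias = t_scaled @ b + bias).
# After fusion the text branch is constant, so reprta is replaced by nn.Identity() and each BNContrastiveHead
# is told to fuse itself via bn_h.fuse().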
class YOLOESegment(YOLOEDetect):
def __init__(
self,
nc: int = 80,
nm: int = 32,
npr: int = 256,
embed: int = 512,
with_bn: bool = False,
reg_max=16,
end2end=False,
ch: tuple = (),
):
super().__init__(nc, embed, with_bn, reg_max, end2end, ch)
self.nm = nm
self.npr = npr
self.proto = Proto(ch[0], self.npr, self.nm)
c5 = max(ch[0] // 4, self.nm)
self.cv5 = nn.ModuleList(nn.Sequential(Conv(x, c5, 3), Conv(c5, c5, 3), nn.Conv2d(c5, self.nm, 1)) for x in ch)
if end2end:
self.one2one_cv5 = copy.deepcopy(self.cv5)
@property
def one2many(self):
return dict(box_head=self.cv2, cls_head=self.cv3, mask_head=self.cv5, contrastive_head=self.cv4)
@property
def one2one(self):
return dict(
box_head=self.one2one_cv2,
cls_head=self.one2one_cv3,
mask_head=self.one2one_cv5,
contrastive_head=self.one2one_cv4,
)
def forward_lrpc(self, x: list[torch.Tensor]) -> torch.Tensor | tuple:
boxes, scores, index = [], [], []
bs = x[0].shape[0]
cv2 = self.cv2 if not self.end2end else self.one2one_cv2
cv3 = self.cv3 if not self.end2end else self.one2one_cv3
cv5 = self.cv5 if not self.end2end else self.one2one_cv5
for i in range(self.nl):
cls_feat = cv3[i](x[i])
loc_feat = cv2[i](x[i])
assert isinstance(self.lrpc[i], LRPCHead)
box, score, idx = self.lrpc[i](
cls_feat,
loc_feat,
0 if self.export and not self.dynamic else getattr(self, "conf", 0.001),
)
boxes.append(box.view(bs, self.reg_max * 4, -1))
scores.append(score)
index.append(idx)
mc = torch.cat([cv5[i](x[i]).view(bs, self.nm, -1) for i in range(self.nl)], 2)
index = torch.cat(index)
preds = dict(
boxes=torch.cat(boxes, 2),
scores=torch.cat(scores, 2),
feats=x,
index=index,
mask_coefficient=mc * index.int() if self.export and not self.dynamic else mc[..., index],
)
y = self._inference(preds)
if self.end2end:
y = self.postprocess(y.permute(0, 2, 1))
return y if self.export else (y, preds)
def forward(self, x: list[torch.Tensor]) -> tuple | list[torch.Tensor] | dict[str, torch.Tensor]:
outputs = super().forward(x)
preds = outputs[1] if isinstance(outputs, tuple) else outputs
proto = self.proto(x[0]) # mask protos
if isinstance(preds, dict): # training and validating during training
if self.end2end:
preds["one2many"]["proto"] = proto
preds["one2one"]["proto"] = proto.detach()
else:
preds["proto"] = proto
if self.training:
return preds
return (outputs, proto) if self.export else ((outputs[0], proto), preds)
def _inference(self, x: dict[str, torch.Tensor]) -> torch.Tensor:
preds = super()._inference(x)
return torch.cat([preds, x["mask_coefficient"]], dim=1)
def forward_head(
self,
x: list[torch.Tensor],
box_head: torch.nn.Module,
cls_head: torch.nn.Module,
mask_head: torch.nn.Module,
contrastive_head: torch.nn.Module,
) -> dict[str, torch.Tensor]:
preds = super().forward_head(x, box_head, cls_head, contrastive_head)
if mask_head is not None:
bs = x[0].shape[0] # batch size
preds["mask_coefficient"] = torch.cat([mask_head[i](x[i]).view(bs, self.nm, -1) for i in range(self.nl)], 2)
return preds
def postprocess(self, preds: torch.Tensor) -> torch.Tensor:
boxes, scores, mask_coefficient = preds.split([4, self.nc, self.nm], dim=-1)
scores, conf, idx = self.get_topk_index(scores, self.max_det)
boxes = boxes.gather(dim=1, index=idx.repeat(1, 1, 4))
mask_coefficient = mask_coefficient.gather(dim=1, index=idx.repeat(1, 1, self.nm))
return torch.cat([boxes, scores, conf, mask_coefficient], dim=-1)
def fuse(self, txt_feats: torch.Tensor = None):
super().fuse(txt_feats)
if txt_feats is None: # means eliminate one2many branch
self.cv5 = None
if hasattr(self.proto, "fuse"):
self.proto.fuse()
return
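# Downstream mask assembly (a sketch of the standard YOLACT-style combination these heads are designed for,
# not code from this module): every detection carries nm mask coefficients and the Proto module emits nm
# prototype maps of shape (nm, H', W'); per-instance mask logits are a linear combination of the prototypes,
#
#   mask_logits = (coeffs @ protos.view(nm, -1)).view(-1, protos.shape[-2], protos.shape[-1])
#
# with coeffs of shape (num_detections, nm); a sigmoid or an equivalent threshold on the logits then yields
# the binary masks during post-processing. The heads above only predict coeffs and protos.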
class YOLOESegment26(YOLOESegment):
def __init__(
self,
nc: int = 80,
nm: int = 32,
npr: int = 256,
embed: int = 512,
with_bn: bool = False,
reg_max=16,
end2end=False,
ch: tuple = (),
):
YOLOEDetect.__init__(self, nc, embed, with_bn, reg_max, end2end, ch)
self.nm = nm
self.npr = npr
self.proto = Proto26(ch, self.npr, self.nm, nc) # protos
c5 = max(ch[0] // 4, self.nm)
self.cv5 = nn.ModuleList(nn.Sequential(Conv(x, c5, 3), Conv(c5, c5, 3), nn.Conv2d(c5, self.nm, 1)) for x in ch)
if end2end:
self.one2one_cv5 = copy.deepcopy(self.cv5)
def forward(self, x: list[torch.Tensor]) -> tuple | list[torch.Tensor] | dict[str, torch.Tensor]:
outputs = YOLOEDetect.forward(self, x)
preds = outputs[1] if isinstance(outputs, tuple) else outputs
proto = self.proto([xi.detach() for xi in x], return_semseg=False) # mask protos
if isinstance(preds, dict): # training and validating during training
if self.end2end and not hasattr(self, "lrpc"): # not prompt-free
preds["one2many"]["proto"] = proto
preds["one2one"]["proto"] = proto.detach()
else:
preds["proto"] = proto
if self.training:
return preds
return (outputs, proto) if self.export else ((outputs[0], proto), preds)
class RTDETRDecoder(nn.Module):
export = False # export mode
shapes = []
anchors = torch.empty(0)
valid_mask = torch.empty(0)
dynamic = False
def __init__(
self,
nc: int = 80,
ch: tuple = (512, 1024, 2048),
hd: int = 256, # hidden dim
nq: int = 300, # num queries
ndp: int = 4, # num decoder points
nh: int = 8, # num head
ndl: int = 6, # num decoder layers
d_ffn: int = 1024, # dim of feedforward
dropout: float = 0.0,
act: nn.Module = nn.ReLU(),
eval_idx: int = -1,
# Training args
nd: int = 100, # num denoising
label_noise_ratio: float = 0.5,
box_noise_scale: float = 1.0,
learnt_init_query: bool = False,
):
super().__init__()
self.hidden_dim = hd
self.nhead = nh
self.nl = len(ch) # num level
self.nc = nc
self.num_queries = nq
self.num_decoder_layers = ndl
# Backbone feature projection
self.input_proj = nn.ModuleList(nn.Sequential(nn.Conv2d(x, hd, 1, bias=False), nn.BatchNorm2d(hd)) for x in ch)
# NOTE: simplified version but it's not consistent with .pt weights.
# self.input_proj = nn.ModuleList(Conv(x, hd, act=False) for x in ch)
# Transformer module
decoder_layer = DeformableTransformerDecoderLayer(hd, nh, d_ffn, dropout, act, self.nl, ndp)
self.decoder = DeformableTransformerDecoder(hd, decoder_layer, ndl, eval_idx)
# Denoising part
self.denoising_class_embed = nn.Embedding(nc, hd)
self.num_denoising = nd
self.label_noise_ratio = label_noise_ratio
self.box_noise_scale = box_noise_scale
# Decoder embedding
self.learnt_init_query = learnt_init_query
if learnt_init_query:
self.tgt_embed = nn.Embedding(nq, hd)
self.query_pos_head = MLP(4, 2 * hd, hd, num_layers=2)
# Encoder head
self.enc_output = nn.Sequential(nn.Linear(hd, hd), nn.LayerNorm(hd))
self.enc_score_head = nn.Linear(hd, nc)
self.enc_bbox_head = MLP(hd, hd, 4, num_layers=3)
# Decoder head
self.dec_score_head = nn.ModuleList([nn.Linear(hd, nc) for _ in range(ndl)])
self.dec_bbox_head = nn.ModuleList([MLP(hd, hd, 4, num_layers=3) for _ in range(ndl)])
self._reset_parameters()
def forward(self, x: list[torch.Tensor], batch: dict | None = None) -> tuple | torch.Tensor:
from ultralytics.models.utils.ops import get_cdn_group
# Input projection and embedding
feats, shapes = self._get_encoder_input(x)
# Prepare denoising training
dn_embed, dn_bbox, attn_mask, dn_meta = get_cdn_group(
batch,
self.nc,
self.num_queries,
self.denoising_class_embed.weight,
self.num_denoising,
self.label_noise_ratio,
self.box_noise_scale,
self.training,
)
embed, refer_bbox, enc_bboxes, enc_scores = self._get_decoder_input(feats, shapes, dn_embed, dn_bbox)
# Decoder
dec_bboxes, dec_scores = self.decoder(
embed,
refer_bbox,
feats,
shapes,
self.dec_bbox_head,
self.dec_score_head,
self.query_pos_head,
attn_mask=attn_mask,
)
x = dec_bboxes, dec_scores, enc_bboxes, enc_scores, dn_meta
if self.training:
return x
# (bs, 300, 4+nc)
y = torch.cat((dec_bboxes.squeeze(0), dec_scores.squeeze(0).sigmoid()), -1)
return y if self.export else (y, x)
@staticmethod
def _generate_anchors(
shapes: list[list[int]],
grid_size: float = 0.05,
dtype: torch.dtype = torch.float32,
device: str = "cpu",
eps: float = 1e-2,
) -> tuple[torch.Tensor, torch.Tensor]:
anchors = []
for i, (h, w) in enumerate(shapes):
sy = torch.arange(end=h, dtype=dtype, device=device)
sx = torch.arange(end=w, dtype=dtype, device=device)
grid_y, grid_x = torch.meshgrid(sy, sx, indexing="ij") if TORCH_1_11 else torch.meshgrid(sy, sx)
grid_xy = torch.stack([grid_x, grid_y], -1) # (h, w, 2)
valid_WH = torch.tensor([w, h], dtype=dtype, device=device)
grid_xy = (grid_xy.unsqueeze(0) + 0.5) / valid_WH # (1, h, w, 2)
wh = torch.ones_like(grid_xy, dtype=dtype, device=device) * grid_size * (2.0**i)
anchors.append(torch.cat([grid_xy, wh], -1).view(-1, h * w, 4)) # (1, h*w, 4)
anchors = torch.cat(anchors, 1) # (1, h*w*nl, 4)
valid_mask = ((anchors > eps) & (anchors < 1 - eps)).all(-1, keepdim=True) # 1, h*w*nl, 1
anchors = torch.log(anchors / (1 - anchors))
anchors = anchors.masked_fill(~valid_mask, float("inf"))
return anchors, valid_mask
def _get_encoder_input(self, x: list[torch.Tensor]) -> tuple[torch.Tensor, list[list[int]]]:
# Get projection features
x = [self.input_proj[i](feat) for i, feat in enumerate(x)]
# Get encoder inputs
feats = []
shapes = []
for feat in x:
h, w = feat.shape[2:]
# [b, c, h, w] -> [b, h*w, c]
feats.append(feat.flatten(2).permute(0, 2, 1))
# [nl, 2]
shapes.append([h, w])
# [b, h*w, c]
feats = torch.cat(feats, 1)
return feats, shapes
def _get_decoder_input(
self,
feats: torch.Tensor,
shapes: list[list[int]],
dn_embed: torch.Tensor | None = None,
dn_bbox: torch.Tensor | None = None,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
bs = feats.shape[0]
if self.dynamic or self.shapes != shapes:
self.anchors, self.valid_mask = self._generate_anchors(shapes, dtype=feats.dtype, device=feats.device)
self.shapes = shapes
# Prepare input for decoder
features = self.enc_output(self.valid_mask * feats) # bs, h*w, 256
enc_outputs_scores = self.enc_score_head(features) # (bs, h*w, nc)
# Query selection
# (bs*num_queries,)
topk_ind = torch.topk(enc_outputs_scores.max(-1).values, self.num_queries, dim=1).indices.view(-1)
# (bs*num_queries,)
batch_ind = torch.arange(end=bs, dtype=topk_ind.dtype).unsqueeze(-1).repeat(1, self.num_queries).view(-1)
# (bs, num_queries, 256)
top_k_features = features[batch_ind, topk_ind].view(bs, self.num_queries, -1)
# (bs, num_queries, 4)
top_k_anchors = self.anchors[:, topk_ind].view(bs, self.num_queries, -1)
# Dynamic anchors + static content
refer_bbox = self.enc_bbox_head(top_k_features) + top_k_anchors
enc_bboxes = refer_bbox.sigmoid()
if dn_bbox is not None:
refer_bbox = torch.cat([dn_bbox, refer_bbox], 1)
enc_scores = enc_outputs_scores[batch_ind, topk_ind].view(bs, self.num_queries, -1)
embeddings = self.tgt_embed.weight.unsqueeze(0).repeat(bs, 1, 1) if self.learnt_init_query else top_k_features
if self.training:
refer_bbox = refer_bbox.detach()
if not self.learnt_init_query:
embeddings = embeddings.detach()
if dn_embed is not None:
embeddings = torch.cat([dn_embed, embeddings], 1)
return embeddings, refer_bbox, enc_bboxes, enc_scores
def _reset_parameters(self):
# Class and bbox head init
bias_cls = bias_init_with_prob(0.01) / 80 * self.nc
# NOTE: the weight initialization in `linear_init` would cause NaN when training with custom datasets.
# linear_init(self.enc_score_head)
constant_(self.enc_score_head.bias, bias_cls)
constant_(self.enc_bbox_head.layers[-1].weight, 0.0)
constant_(self.enc_bbox_head.layers[-1].bias, 0.0)
for cls_, reg_ in zip(self.dec_score_head, self.dec_bbox_head):
# linear_init(cls_)
constant_(cls_.bias, bias_cls)
constant_(reg_.layers[-1].weight, 0.0)
constant_(reg_.layers[-1].bias, 0.0)
linear_init(self.enc_output[0])
xavier_uniform_(self.enc_output[0].weight)
if self.learnt_init_query:
xavier_uniform_(self.tgt_embed.weight)
xavier_uniform_(self.query_pos_head.layers[0].weight)
xavier_uniform_(self.query_pos_head.layers[1].weight)
for layer in self.input_proj:
xavier_uniform_(layer[0].weight)
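# A small numerical sketch of the anchor encoding used by RTDETRDecoder._generate_anchors: anchors are built
# in normalized (0, 1) cxcywh space and stored through the logit (inverse sigmoid), so the bbox heads can add
# unconstrained offsets that are mapped back with .sigmoid() (see refer_bbox / enc_bboxes above):
#
#   a = torch.tensor([0.25, 0.50, 0.05, 0.05])   # normalized cx, cy, w, h for one grid cell
#   logit = torch.log(a / (1 - a))               # what _generate_anchors stores
#   assert torch.allclose(logit.sigmoid(), a)    # sigmoid inverts the encoding
#
# Anchors outside (eps, 1 - eps) are marked invalid and their features are zeroed via valid_mask before the
# top-k query selection in _get_decoder_input.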
class v10Detect(Detect):
end2end = True
def __init__(self, nc: int = 80, ch: tuple = ()):
super().__init__(nc, end2end=True, ch=ch)
c3 = max(ch[0], min(self.nc, 100)) # channels
# Light cls head
self.cv3 = nn.ModuleList(
nn.Sequential(
nn.Sequential(Conv(x, x, 3, g=x), Conv(x, c3, 1)),
nn.Sequential(Conv(c3, c3, 3, g=c3), Conv(c3, c3, 1)),
nn.Conv2d(c3, self.nc, 1),
)
for x in ch
)
self.one2one_cv3 = copy.deepcopy(self.cv3)
def fuse(self):
self.cv2 = self.cv3 = None |
---
+++
@@ -1,4 +1,5 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""Model head modules."""
from __future__ import annotations
@@ -23,6 +24,45 @@
class Detect(nn.Module):
+ """YOLO Detect head for object detection models.
+
+ This class implements the detection head used in YOLO models for predicting bounding boxes and class probabilities.
+ It supports both training and inference modes, with optional end-to-end detection capabilities.
+
+ Attributes:
+ dynamic (bool): Force grid reconstruction.
+ export (bool): Export mode flag.
+ format (str): Export format.
+ end2end (bool): End-to-end detection mode.
+ max_det (int): Maximum detections per image.
+ shape (tuple): Input shape.
+ anchors (torch.Tensor): Anchor points.
+ strides (torch.Tensor): Feature map strides.
+ legacy (bool): Backward compatibility for v3/v5/v8/v9/v11 models.
+ xyxy (bool): Output format, xyxy or xywh.
+ nc (int): Number of classes.
+ nl (int): Number of detection layers.
+ reg_max (int): DFL channels.
+ no (int): Number of outputs per anchor.
+ stride (torch.Tensor): Strides computed during build.
+ cv2 (nn.ModuleList): Convolution layers for box regression.
+ cv3 (nn.ModuleList): Convolution layers for classification.
+ dfl (nn.Module): Distribution Focal Loss layer.
+ one2one_cv2 (nn.ModuleList): One-to-one convolution layers for box regression.
+ one2one_cv3 (nn.ModuleList): One-to-one convolution layers for classification.
+
+ Methods:
+ forward: Perform forward pass and return predictions.
+ bias_init: Initialize detection head biases.
+ decode_bboxes: Decode bounding boxes from predictions.
+ postprocess: Post-process model predictions.
+
+ Examples:
+ Create a detection head for 80 classes
+ >>> detect = Detect(nc=80, ch=(256, 512, 1024))
+ >>> x = [torch.randn(1, 256, 80, 80), torch.randn(1, 512, 40, 40), torch.randn(1, 1024, 20, 20)]
+ >>> outputs = detect(x)
+ """
dynamic = False # force grid reconstruction
export = False # export mode
@@ -36,6 +76,14 @@ xyxy = False # xyxy or xywh output
def __init__(self, nc: int = 80, reg_max=16, end2end=False, ch: tuple = ()):
+ """Initialize the YOLO detection layer with specified number of classes and channels.
+
+ Args:
+ nc (int): Number of classes.
+ reg_max (int): Maximum number of DFL channels.
+ end2end (bool): Whether to use end-to-end NMS-free detection.
+ ch (tuple): Tuple of channel sizes from backbone feature maps.
+ """
super().__init__()
self.nc = nc # number of classes
self.nl = len(ch) # number of detection layers
@@ -66,23 +114,28 @@
@property
def one2many(self):
+ """Returns the one-to-many head components, here for v3/v5/v8/v9/v11 backward compatibility."""
return dict(box_head=self.cv2, cls_head=self.cv3)
@property
def one2one(self):
+ """Returns the one-to-one head components."""
return dict(box_head=self.one2one_cv2, cls_head=self.one2one_cv3)
@property
def end2end(self):
+ """Checks if the model has one2one for v3/v5/v8/v9/v11 backward compatibility."""
return getattr(self, "_end2end", True) and hasattr(self, "one2one")
@end2end.setter
def end2end(self, value):
+ """Override the end-to-end detection mode."""
self._end2end = value
def forward_head(
self, x: list[torch.Tensor], box_head: torch.nn.Module = None, cls_head: torch.nn.Module = None
) -> dict[str, torch.Tensor]:
+ """Concatenates and returns predicted bounding boxes and class probabilities."""
if box_head is None or cls_head is None: # for fused inference
return dict()
bs = x[0].shape[0] # batch size
@@ -93,6 +146,7 @@ def forward(
self, x: list[torch.Tensor]
) -> dict[str, torch.Tensor] | torch.Tensor | tuple[torch.Tensor, dict[str, torch.Tensor]]:
+ """Concatenates and returns predicted bounding boxes and class probabilities."""
preds = self.forward_head(x, **self.one2many)
if self.end2end:
x_detach = [xi.detach() for xi in x]
@@ -106,11 +160,20 @@ return y if self.export else (y, preds)
def _inference(self, x: dict[str, torch.Tensor]) -> torch.Tensor:
+ """Decode predicted bounding boxes and class probabilities based on multiple-level feature maps.
+
+ Args:
+ x (dict[str, torch.Tensor]): Dictionary of predictions from detection layers.
+
+ Returns:
+ (torch.Tensor): Concatenated tensor of decoded bounding boxes and class probabilities.
+ """
# Inference path
dbox = self._get_decode_boxes(x)
return torch.cat((dbox, x["scores"].sigmoid()), 1)
def _get_decode_boxes(self, x: dict[str, torch.Tensor]) -> torch.Tensor:
+ """Get decoded boxes based on anchors and strides."""
shape = x["feats"][0].shape # BCHW
if self.dynamic or self.shape != shape:
self.anchors, self.strides = (a.transpose(0, 1) for a in make_anchors(x["feats"], self.stride, 0.5))
@@ -120,6 +183,7 @@ return dbox
def bias_init(self):
+ """Initialize Detect() biases, WARNING: requires stride availability."""
for i, (a, b) in enumerate(zip(self.one2many["box_head"], self.one2many["cls_head"])): # from
a[-1].bias.data[:] = 2.0 # box
b[-1].bias.data[: self.nc] = math.log(
@@ -133,6 +197,7 @@ ) # cls (.01 objects, 80 classes, 640 img)
def decode_bboxes(self, bboxes: torch.Tensor, anchors: torch.Tensor, xywh: bool = True) -> torch.Tensor:
+ """Decode bounding boxes from predictions."""
return dist2bbox(
bboxes,
anchors,
@@ -141,12 +206,31 @@ )
def postprocess(self, preds: torch.Tensor) -> torch.Tensor:
+ """Post-processes YOLO model predictions.
+
+ Args:
+ preds (torch.Tensor): Raw predictions with shape (batch_size, num_anchors, 4 + nc) with last dimension
+ format [x1, y1, x2, y2, class_probs].
+
+ Returns:
+ (torch.Tensor): Processed predictions with shape (batch_size, min(max_det, num_anchors), 6) and last
+ dimension format [x1, y1, x2, y2, max_class_prob, class_index].
+ """
boxes, scores = preds.split([4, self.nc], dim=-1)
scores, conf, idx = self.get_topk_index(scores, self.max_det)
boxes = boxes.gather(dim=1, index=idx.repeat(1, 1, 4))
return torch.cat([boxes, scores, conf], dim=-1)
def get_topk_index(self, scores: torch.Tensor, max_det: int) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ """Get top-k indices from scores.
+
+ Args:
+ scores (torch.Tensor): Scores tensor with shape (batch_size, num_anchors, num_classes).
+ max_det (int): Maximum detections per image.
+
+ Returns:
+ (torch.Tensor, torch.Tensor, torch.Tensor): Top scores, class indices, and filtered indices.
+ """
batch_size, anchors, nc = scores.shape # i.e. shape(16,8400,84)
# Use max_det directly during export for TensorRT compatibility (requires k to be constant),
# otherwise use min(max_det, anchors) for safety with small inputs during Python inference
@@ -163,12 +247,42 @@ return scores[..., None], (index % nc)[..., None].float(), idx
def fuse(self) -> None:
+ """Remove the one2many head for inference optimization."""
self.cv2 = self.cv3 = None
class Segment(Detect):
+ """YOLO Segment head for segmentation models.
+
+ This class extends the Detect head to include mask prediction capabilities for instance segmentation tasks.
+
+ Attributes:
+ nm (int): Number of masks.
+ npr (int): Number of protos.
+ proto (Proto): Prototype generation module.
+ cv4 (nn.ModuleList): Convolution layers for mask coefficients.
+
+ Methods:
+ forward: Return model outputs and mask coefficients.
+
+ Examples:
+ Create a segmentation head
+ >>> segment = Segment(nc=80, nm=32, npr=256, ch=(256, 512, 1024))
+ >>> x = [torch.randn(1, 256, 80, 80), torch.randn(1, 512, 40, 40), torch.randn(1, 1024, 20, 20)]
+ >>> outputs = segment(x)
+ """
def __init__(self, nc: int = 80, nm: int = 32, npr: int = 256, reg_max=16, end2end=False, ch: tuple = ()):
+ """Initialize the YOLO model attributes such as the number of masks, prototypes, and the convolution layers.
+
+ Args:
+ nc (int): Number of classes.
+ nm (int): Number of masks.
+ npr (int): Number of protos.
+ reg_max (int): Maximum number of DFL channels.
+ end2end (bool): Whether to use end-to-end NMS-free detection.
+ ch (tuple): Tuple of channel sizes from backbone feature maps.
+ """
super().__init__(nc, reg_max, end2end, ch)
self.nm = nm # number of masks
self.npr = npr # number of protos
@@ -181,13 +295,16 @@
@property
def one2many(self):
+ """Returns the one-to-many head components, here for backward compatibility."""
return dict(box_head=self.cv2, cls_head=self.cv3, mask_head=self.cv4)
@property
def one2one(self):
+ """Returns the one-to-one head components."""
return dict(box_head=self.one2one_cv2, cls_head=self.one2one_cv3, mask_head=self.one2one_cv4)
def forward(self, x: list[torch.Tensor]) -> tuple | list[torch.Tensor] | dict[str, torch.Tensor]:
+ """Return model outputs and mask coefficients if training, otherwise return outputs and mask coefficients."""
outputs = super().forward(x)
preds = outputs[1] if isinstance(outputs, tuple) else outputs
proto = self.proto(x[0]) # mask protos
@@ -202,12 +319,14 @@ return (outputs, proto) if self.export else ((outputs[0], proto), preds)
def _inference(self, x: dict[str, torch.Tensor]) -> torch.Tensor:
+ """Decode predicted bounding boxes and class probabilities, concatenated with mask coefficients."""
preds = super()._inference(x)
return torch.cat([preds, x["mask_coefficient"]], dim=1)
def forward_head(
self, x: list[torch.Tensor], box_head: torch.nn.Module, cls_head: torch.nn.Module, mask_head: torch.nn.Module
) -> dict[str, torch.Tensor]:
+ """Concatenates and returns predicted bounding boxes, class probabilities, and mask coefficients."""
preds = super().forward_head(x, box_head, cls_head)
if mask_head is not None:
bs = x[0].shape[0] # batch size
@@ -215,6 +334,16 @@ return preds
def postprocess(self, preds: torch.Tensor) -> torch.Tensor:
+ """Post-process YOLO model predictions.
+
+ Args:
+ preds (torch.Tensor): Raw predictions with shape (batch_size, num_anchors, 4 + nc + nm) with last dimension
+ format [x1, y1, x2, y2, class_probs, mask_coefficient].
+
+ Returns:
+ (torch.Tensor): Processed predictions with shape (batch_size, min(max_det, num_anchors), 6 + nm) and last
+ dimension format [x1, y1, x2, y2, max_class_prob, class_index, mask_coefficient].
+ """
boxes, scores, mask_coefficient = preds.split([4, self.nc, self.nm], dim=-1)
scores, conf, idx = self.get_topk_index(scores, self.max_det)
boxes = boxes.gather(dim=1, index=idx.repeat(1, 1, 4))
@@ -222,16 +351,47 @@ return torch.cat([boxes, scores, conf, mask_coefficient], dim=-1)
def fuse(self) -> None:
+ """Remove the one2many head for inference optimization."""
self.cv2 = self.cv3 = self.cv4 = None
class Segment26(Segment):
+ """YOLO26 Segment head for segmentation models.
+
+ This class extends the Segment head with Proto26 for mask prediction in instance segmentation tasks.
+
+ Attributes:
+ nm (int): Number of masks.
+ npr (int): Number of protos.
+ proto (Proto26): Prototype generation module.
+ cv4 (nn.ModuleList): Convolution layers for mask coefficients.
+
+ Methods:
+ forward: Return model outputs and mask coefficients.
+
+ Examples:
+ Create a segmentation head
+ >>> segment = Segment26(nc=80, nm=32, npr=256, ch=(256, 512, 1024))
+ >>> x = [torch.randn(1, 256, 80, 80), torch.randn(1, 512, 40, 40), torch.randn(1, 1024, 20, 20)]
+ >>> outputs = segment(x)
+ """
def __init__(self, nc: int = 80, nm: int = 32, npr: int = 256, reg_max=16, end2end=False, ch: tuple = ()):
+ """Initialize the YOLO model attributes such as the number of masks, prototypes, and the convolution layers.
+
+ Args:
+ nc (int): Number of classes.
+ nm (int): Number of masks.
+ npr (int): Number of protos.
+ reg_max (int): Maximum number of DFL channels.
+ end2end (bool): Whether to use end-to-end NMS-free detection.
+ ch (tuple): Tuple of channel sizes from backbone feature maps.
+ """
super().__init__(nc, nm, npr, reg_max, end2end, ch)
self.proto = Proto26(ch, self.npr, self.nm, nc) # protos
def forward(self, x: list[torch.Tensor]) -> tuple | list[torch.Tensor] | dict[str, torch.Tensor]:
+ """Return model outputs and mask coefficients if training, otherwise return outputs and mask coefficients."""
outputs = Detect.forward(self, x)
preds = outputs[1] if isinstance(outputs, tuple) else outputs
proto = self.proto(x) # mask protos
@@ -248,14 +408,43 @@ return (outputs, proto) if self.export else ((outputs[0], proto), preds)
def fuse(self) -> None:
+ """Remove the one2many head and extra part of proto module for inference optimization."""
super().fuse()
if hasattr(self.proto, "fuse"):
self.proto.fuse()
class OBB(Detect):
+ """YOLO OBB detection head for detection with rotation models.
+
+ This class extends the Detect head to include oriented bounding box prediction with rotation angles.
+
+ Attributes:
+ ne (int): Number of extra parameters.
+ cv4 (nn.ModuleList): Convolution layers for angle prediction.
+ angle (torch.Tensor): Predicted rotation angles.
+
+ Methods:
+ forward: Concatenate and return predicted bounding boxes and class probabilities.
+ decode_bboxes: Decode rotated bounding boxes.
+
+ Examples:
+ Create an OBB detection head
+ >>> obb = OBB(nc=80, ne=1, ch=(256, 512, 1024))
+ >>> x = [torch.randn(1, 256, 80, 80), torch.randn(1, 512, 40, 40), torch.randn(1, 1024, 20, 20)]
+ >>> outputs = obb(x)
+ """
def __init__(self, nc: int = 80, ne: int = 1, reg_max=16, end2end=False, ch: tuple = ()):
+ """Initialize OBB with number of classes `nc` and layer channels `ch`.
+
+ Args:
+ nc (int): Number of classes.
+ ne (int): Number of extra parameters.
+ reg_max (int): Maximum number of DFL channels.
+ end2end (bool): Whether to use end-to-end NMS-free detection.
+ ch (tuple): Tuple of channel sizes from backbone feature maps.
+ """
super().__init__(nc, reg_max, end2end, ch)
self.ne = ne # number of extra parameters
@@ -266,13 +455,16 @@
@property
def one2many(self):
+ """Returns the one-to-many head components, here for backward compatibility."""
return dict(box_head=self.cv2, cls_head=self.cv3, angle_head=self.cv4)
@property
def one2one(self):
+ """Returns the one-to-one head components."""
return dict(box_head=self.one2one_cv2, cls_head=self.one2one_cv3, angle_head=self.one2one_cv4)
def _inference(self, x: dict[str, torch.Tensor]) -> torch.Tensor:
+ """Decode predicted bounding boxes and class probabilities, concatenated with rotation angles."""
# For decode_bboxes convenience
self.angle = x["angle"]
preds = super()._inference(x)
@@ -281,6 +473,7 @@ def forward_head(
self, x: list[torch.Tensor], box_head: torch.nn.Module, cls_head: torch.nn.Module, angle_head: torch.nn.Module
) -> dict[str, torch.Tensor]:
+ """Concatenates and returns predicted bounding boxes, class probabilities, and angles."""
preds = super().forward_head(x, box_head, cls_head)
if angle_head is not None:
bs = x[0].shape[0] # batch size
@@ -292,9 +485,20 @@ return preds
def decode_bboxes(self, bboxes: torch.Tensor, anchors: torch.Tensor) -> torch.Tensor:
+ """Decode rotated bounding boxes."""
return dist2rbox(bboxes, self.angle, anchors, dim=1)
def postprocess(self, preds: torch.Tensor) -> torch.Tensor:
+ """Post-process YOLO model predictions.
+
+ Args:
+ preds (torch.Tensor): Raw predictions with shape (batch_size, num_anchors, 4 + nc + ne) with last dimension
+ format [x, y, w, h, class_probs, angle].
+
+ Returns:
+ (torch.Tensor): Processed predictions with shape (batch_size, min(max_det, num_anchors), 7) and last
+ dimension format [x, y, w, h, max_class_prob, class_index, angle].
+ """
boxes, scores, angle = preds.split([4, self.nc, self.ne], dim=-1)
scores, conf, idx = self.get_topk_index(scores, self.max_det)
boxes = boxes.gather(dim=1, index=idx.repeat(1, 1, 4))
@@ -302,14 +506,34 @@ return torch.cat([boxes, scores, conf, angle], dim=-1)
def fuse(self) -> None:
+ """Remove the one2many head for inference optimization."""
self.cv2 = self.cv3 = self.cv4 = None
class OBB26(OBB):
+ """YOLO26 OBB detection head for detection with rotation models. This class extends the OBB head with modified angle
+ processing that outputs raw angle predictions without sigmoid transformation, compared to the original
+ OBB class.
+
+ Attributes:
+ ne (int): Number of extra parameters.
+ cv4 (nn.ModuleList): Convolution layers for angle prediction.
+ angle (torch.Tensor): Predicted rotation angles.
+
+ Methods:
+ forward_head: Concatenate and return predicted bounding boxes, class probabilities, and raw angles.
+
+ Examples:
+ Create an OBB26 detection head
+ >>> obb26 = OBB26(nc=80, ne=1, ch=(256, 512, 1024))
+ >>> x = [torch.randn(1, 256, 80, 80), torch.randn(1, 512, 40, 40), torch.randn(1, 1024, 20, 20)]
+ >>> outputs = obb26(x)
+ """
def forward_head(
self, x: list[torch.Tensor], box_head: torch.nn.Module, cls_head: torch.nn.Module, angle_head: torch.nn.Module
) -> dict[str, torch.Tensor]:
+ """Concatenates and returns predicted bounding boxes, class probabilities, and raw angles."""
preds = Detect.forward_head(self, x, box_head, cls_head)
if angle_head is not None:
bs = x[0].shape[0] # batch size
@@ -321,8 +545,36 @@
class Pose(Detect):
+ """YOLO Pose head for keypoints models.
+
+ This class extends the Detect head to include keypoint prediction capabilities for pose estimation tasks.
+
+ Attributes:
+ kpt_shape (tuple): Number of keypoints and dimensions (2 for x,y or 3 for x,y,visible).
+ nk (int): Total number of keypoint values.
+ cv4 (nn.ModuleList): Convolution layers for keypoint prediction.
+
+ Methods:
+ forward: Perform forward pass through YOLO model and return predictions.
+ kpts_decode: Decode keypoints from predictions.
+
+ Examples:
+ Create a pose detection head
+ >>> pose = Pose(nc=80, kpt_shape=(17, 3), ch=(256, 512, 1024))
+ >>> x = [torch.randn(1, 256, 80, 80), torch.randn(1, 512, 40, 40), torch.randn(1, 1024, 20, 20)]
+ >>> outputs = pose(x)
+ """
def __init__(self, nc: int = 80, kpt_shape: tuple = (17, 3), reg_max=16, end2end=False, ch: tuple = ()):
+ """Initialize YOLO network with default parameters and Convolutional Layers.
+
+ Args:
+ nc (int): Number of classes.
+ kpt_shape (tuple): Number of keypoints, number of dims (2 for x,y or 3 for x,y,visible).
+ reg_max (int): Maximum number of DFL channels.
+ end2end (bool): Whether to use end-to-end NMS-free detection.
+ ch (tuple): Tuple of channel sizes from backbone feature maps.
+ """
super().__init__(nc, reg_max, end2end, ch)
self.kpt_shape = kpt_shape # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
self.nk = kpt_shape[0] * kpt_shape[1] # number of keypoints total
@@ -334,19 +586,23 @@
@property
def one2many(self):
+ """Returns the one-to-many head components, here for backward compatibility."""
return dict(box_head=self.cv2, cls_head=self.cv3, pose_head=self.cv4)
@property
def one2one(self):
+ """Returns the one-to-one head components."""
return dict(box_head=self.one2one_cv2, cls_head=self.one2one_cv3, pose_head=self.one2one_cv4)
def _inference(self, x: dict[str, torch.Tensor]) -> torch.Tensor:
+ """Decode predicted bounding boxes and class probabilities, concatenated with keypoints."""
preds = super()._inference(x)
return torch.cat([preds, self.kpts_decode(x["kpts"])], dim=1)
def forward_head(
self, x: list[torch.Tensor], box_head: torch.nn.Module, cls_head: torch.nn.Module, pose_head: torch.nn.Module
) -> dict[str, torch.Tensor]:
+ """Concatenates and returns predicted bounding boxes, class probabilities, and keypoints."""
preds = super().forward_head(x, box_head, cls_head)
if pose_head is not None:
bs = x[0].shape[0] # batch size
@@ -354,6 +610,16 @@ return preds
def postprocess(self, preds: torch.Tensor) -> torch.Tensor:
+ """Post-process YOLO model predictions.
+
+ Args:
+ preds (torch.Tensor): Raw predictions with shape (batch_size, num_anchors, 4 + nc + nk) with last dimension
+ format [x1, y1, x2, y2, class_probs, keypoints].
+
+ Returns:
+ (torch.Tensor): Processed predictions with shape (batch_size, min(max_det, num_anchors), 6 + self.nk) and
+ last dimension format [x1, y1, x2, y2, max_class_prob, class_index, keypoints].
+ """
boxes, scores, kpts = preds.split([4, self.nc, self.nk], dim=-1)
scores, conf, idx = self.get_topk_index(scores, self.max_det)
boxes = boxes.gather(dim=1, index=idx.repeat(1, 1, 4))
@@ -361,9 +627,11 @@ return torch.cat([boxes, scores, conf, kpts], dim=-1)
def fuse(self) -> None:
+ """Remove the one2many head for inference optimization."""
self.cv2 = self.cv3 = self.cv4 = None
def kpts_decode(self, kpts: torch.Tensor) -> torch.Tensor:
+ """Decode keypoints from predictions."""
ndim = self.kpt_shape[1]
bs = kpts.shape[0]
if self.export:
@@ -385,8 +653,36 @@
class Pose26(Pose):
+ """YOLO26 Pose head for keypoints models.
+
+ This class extends the Pose head with a RealNVP normalizing flow for keypoint prediction in pose estimation tasks.
+
+ Attributes:
+ kpt_shape (tuple): Number of keypoints and dimensions (2 for x,y or 3 for x,y,visible).
+ nk (int): Total number of keypoint values.
+ cv4 (nn.ModuleList): Convolution layers for keypoint prediction.
+
+ Methods:
+ forward: Perform forward pass through YOLO model and return predictions.
+ kpts_decode: Decode keypoints from predictions.
+
+ Examples:
+ Create a pose detection head
+ >>> pose = Pose26(nc=80, kpt_shape=(17, 3), ch=(256, 512, 1024))
+ >>> x = [torch.randn(1, 256, 80, 80), torch.randn(1, 512, 40, 40), torch.randn(1, 1024, 20, 20)]
+ >>> outputs = pose(x)
+ """
def __init__(self, nc: int = 80, kpt_shape: tuple = (17, 3), reg_max=16, end2end=False, ch: tuple = ()):
+ """Initialize YOLO network with default parameters and Convolutional Layers.
+
+ Args:
+ nc (int): Number of classes.
+ kpt_shape (tuple): Number of keypoints, number of dims (2 for x,y or 3 for x,y,visible).
+ reg_max (int): Maximum number of DFL channels.
+ end2end (bool): Whether to use end-to-end NMS-free detection.
+ ch (tuple): Tuple of channel sizes from backbone feature maps.
+ """
super().__init__(nc, kpt_shape, reg_max, end2end, ch)
self.flow_model = RealNVP()
@@ -404,6 +700,7 @@
@property
def one2many(self):
+ """Returns the one-to-many head components, here for backward compatibility."""
return dict(
box_head=self.cv2,
cls_head=self.cv3,
@@ -414,6 +711,7 @@
@property
def one2one(self):
+ """Returns the one-to-one head components."""
return dict(
box_head=self.one2one_cv2,
cls_head=self.one2one_cv3,
@@ -431,6 +729,7 @@ kpts_head: torch.nn.Module,
kpts_sigma_head: torch.nn.Module,
) -> dict[str, torch.Tensor]:
+ """Concatenates and returns predicted bounding boxes, class probabilities, and keypoints."""
preds = Detect.forward_head(self, x, box_head, cls_head)
if pose_head is not None:
bs = x[0].shape[0] # batch size
@@ -443,10 +742,12 @@ return preds
def fuse(self) -> None:
+ """Remove the one2many head for inference optimization."""
super().fuse()
self.cv4_kpts = self.cv4_sigma = self.flow_model = self.one2one_cv4_sigma = None
def kpts_decode(self, kpts: torch.Tensor) -> torch.Tensor:
+ """Decode keypoints from predictions."""
ndim = self.kpt_shape[1]
bs = kpts.shape[0]
if self.export:
@@ -469,10 +770,40 @@
class Classify(nn.Module):
+ """YOLO classification head, i.e. x(b,c1,20,20) to x(b,c2).
+
+ This class implements a classification head that transforms feature maps into class predictions.
+
+ Attributes:
+ export (bool): Export mode flag.
+ conv (Conv): Convolutional layer for feature transformation.
+ pool (nn.AdaptiveAvgPool2d): Global average pooling layer.
+ drop (nn.Dropout): Dropout layer for regularization.
+ linear (nn.Linear): Linear layer for final classification.
+
+ Methods:
+ forward: Perform forward pass on input feature maps.
+
+ Examples:
+ Create a classification head
+ >>> classify = Classify(c1=1024, c2=1000)
+ >>> x = torch.randn(1, 1024, 20, 20)
+ >>> output = classify(x)
+ """
export = False # export mode
def __init__(self, c1: int, c2: int, k: int = 1, s: int = 1, p: int | None = None, g: int = 1):
+ """Initialize YOLO classification head to transform input tensor from (b,c1,20,20) to (b,c2) shape.
+
+ Args:
+ c1 (int): Number of input channels.
+ c2 (int): Number of output classes.
+ k (int): Kernel size.
+ s (int): Stride.
+ p (int, optional): Padding.
+ g (int): Groups.
+ """
super().__init__()
c_ = 1280 # efficientnet_b0 size
self.conv = Conv(c1, c_, k, s, p, g)
@@ -481,6 +812,7 @@ self.linear = nn.Linear(c_, c2) # to x(b,c2)
def forward(self, x: list[torch.Tensor] | torch.Tensor) -> torch.Tensor | tuple:
+ """Perform forward pass on input feature maps."""
if isinstance(x, list):
x = torch.cat(x, 1)
x = self.linear(self.drop(self.pool(self.conv(x)).flatten(1)))
@@ -491,6 +823,26 @@
class WorldDetect(Detect):
+ """Head for integrating YOLO detection models with semantic understanding from text embeddings.
+
+ This class extends the standard Detect head to incorporate text embeddings for enhanced semantic understanding in
+ object detection tasks.
+
+ Attributes:
+ cv3 (nn.ModuleList): Convolution layers for embedding features.
+ cv4 (nn.ModuleList): Contrastive head layers for text-vision alignment.
+
+ Methods:
+ forward: Concatenate and return predicted bounding boxes and class probabilities.
+ bias_init: Initialize detection head biases.
+
+ Examples:
+ Create a WorldDetect head
+ >>> world_detect = WorldDetect(nc=80, embed=512, with_bn=False, ch=(256, 512, 1024))
+ >>> x = [torch.randn(1, 256, 80, 80), torch.randn(1, 512, 40, 40), torch.randn(1, 1024, 20, 20)]
+ >>> text = torch.randn(1, 80, 512)
+ >>> outputs = world_detect(x, text)
+ """
def __init__(
self,
@@ -501,12 +853,23 @@ end2end: bool = False,
ch: tuple = (),
):
+ """Initialize YOLO detection layer with nc classes and layer channels ch.
+
+ Args:
+ nc (int): Number of classes.
+ embed (int): Embedding dimension.
+ with_bn (bool): Whether to use batch normalization in contrastive head.
+ reg_max (int): Maximum number of DFL channels.
+ end2end (bool): Whether to use end-to-end NMS-free detection.
+ ch (tuple): Tuple of channel sizes from backbone feature maps.
+ """
super().__init__(nc, reg_max=reg_max, end2end=end2end, ch=ch)
c3 = max(ch[0], min(self.nc, 100))
self.cv3 = nn.ModuleList(nn.Sequential(Conv(x, c3, 3), Conv(c3, c3, 3), nn.Conv2d(c3, embed, 1)) for x in ch)
self.cv4 = nn.ModuleList(BNContrastiveHead(embed) if with_bn else ContrastiveHead() for _ in ch)
def forward(self, x: list[torch.Tensor], text: torch.Tensor) -> dict[str, torch.Tensor] | tuple:
+ """Concatenate and return predicted bounding boxes and class probabilities."""
feats = [xi.clone() for xi in x] # save original features for anchor generation
for i in range(self.nl):
x[i] = torch.cat((self.cv2[i](x[i]), self.cv4[i](self.cv3[i](x[i]), text)), 1)
@@ -521,6 +884,7 @@ return y if self.export else (y, preds)
def bias_init(self):
+ """Initialize Detect() biases, WARNING: requires stride availability."""
m = self # self.model[-1] # Detect() module
# cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1
# ncf = math.log(0.6 / (m.nc - 0.999999)) if cf is None else torch.log(cf / cf.sum()) # nominal class frequency
@@ -530,8 +894,38 @@
class LRPCHead(nn.Module):
+ """Lightweight Region Proposal and Classification Head for efficient object detection.
+
+ This head combines region proposal filtering with classification to enable efficient detection with dynamic
+ vocabulary support.
+
+ Attributes:
+ vocab (nn.Module): Vocabulary/classification layer.
+ pf (nn.Module): Proposal filter module.
+ loc (nn.Module): Localization module.
+ enabled (bool): Whether the head is enabled.
+
+ Methods:
+ conv2linear: Convert a 1x1 convolutional layer to a linear layer.
+ forward: Process classification and localization features to generate detection proposals.
+
+ Examples:
+ Create an LRPC head
+ >>> vocab = nn.Conv2d(256, 80, 1)
+ >>> pf = nn.Conv2d(256, 1, 1)
+ >>> loc = nn.Conv2d(256, 4, 1)
+ >>> head = LRPCHead(vocab, pf, loc, enabled=True)
+ """
def __init__(self, vocab: nn.Module, pf: nn.Module, loc: nn.Module, enabled: bool = True):
+ """Initialize LRPCHead with vocabulary, proposal filter, and localization components.
+
+ Args:
+ vocab (nn.Module): Vocabulary/classification module.
+ pf (nn.Module): Proposal filter module.
+ loc (nn.Module): Localization module.
+ enabled (bool): Whether to enable the head functionality.
+ """
super().__init__()
self.vocab = self.conv2linear(vocab) if enabled else vocab
self.pf = pf
@@ -540,6 +934,7 @@
@staticmethod
def conv2linear(conv: nn.Conv2d) -> nn.Linear:
+ """Convert a 1x1 convolutional layer to a linear layer."""
assert isinstance(conv, nn.Conv2d) and conv.kernel_size == (1, 1)
linear = nn.Linear(conv.in_channels, conv.out_channels)
linear.weight.data = conv.weight.view(conv.out_channels, -1).data
@@ -547,6 +942,7 @@ return linear
def forward(self, cls_feat: torch.Tensor, loc_feat: torch.Tensor, conf: float) -> tuple[tuple, torch.Tensor]:
+ """Process classification and localization features to generate detection proposals."""
if self.enabled:
pf_score = self.pf(cls_feat)[0, 0].flatten(0)
mask = pf_score.sigmoid() > conf
@@ -564,12 +960,50 @@
class YOLOEDetect(Detect):
+ """Head for integrating YOLO detection models with semantic understanding from text embeddings.
+
+ This class extends the standard Detect head to support text-guided detection with enhanced semantic understanding
+ through text embeddings and visual prompt embeddings.
+
+ Attributes:
+ is_fused (bool): Whether the model is fused for inference.
+ cv3 (nn.ModuleList): Convolution layers for embedding features.
+ cv4 (nn.ModuleList): Contrastive head layers for text-vision alignment.
+ reprta (Residual): Residual block for text prompt embeddings.
+ savpe (SAVPE): Spatial-aware visual prompt embeddings module.
+ embed (int): Embedding dimension.
+
+ Methods:
+ fuse: Fuse text features with model weights for efficient inference.
+ get_tpe: Get text prompt embeddings with normalization.
+ get_vpe: Get visual prompt embeddings with spatial awareness.
+ forward_lrpc: Process features with fused text embeddings for prompt-free model.
+ forward: Process features with class prompt embeddings to generate detections.
+ bias_init: Initialize biases for detection heads.
+
+ Examples:
+ Create a YOLOEDetect head
+ >>> yoloe_detect = YOLOEDetect(nc=80, embed=512, with_bn=True, ch=(256, 512, 1024))
+ >>> x = [torch.randn(1, 256, 80, 80), torch.randn(1, 512, 40, 40), torch.randn(1, 1024, 20, 20)]
+ >>> cls_pe = torch.randn(1, 80, 512)
+ >>> outputs = yoloe_detect(x, cls_pe)
+ """
is_fused = False
def __init__(
self, nc: int = 80, embed: int = 512, with_bn: bool = False, reg_max=16, end2end=False, ch: tuple = ()
):
+ """Initialize YOLO detection layer with nc classes and layer channels ch.
+
+ Args:
+ nc (int): Number of classes.
+ embed (int): Embedding dimension.
+ with_bn (bool): Whether to use batch normalization in contrastive head.
+ reg_max (int): Maximum number of DFL channels.
+ end2end (bool): Whether to use end-to-end NMS-free detection.
+ ch (tuple): Tuple of channel sizes from backbone feature maps.
+ """
super().__init__(nc, reg_max, end2end, ch)
c3 = max(ch[0], min(self.nc, 100))
assert c3 <= embed
@@ -597,6 +1031,7 @@
@smart_inference_mode()
def fuse(self, txt_feats: torch.Tensor = None):
+ """Fuse text features with model weights for efficient inference."""
if txt_feats is None: # means eliminate one2many branch
self.cv2 = self.cv3 = self.cv4 = None
return
@@ -613,6 +1048,7 @@ self.is_fused = True
def _fuse_tp(self, txt_feats: torch.Tensor, cls_head: torch.nn.Module, bn_head: torch.nn.Module) -> None:
+ """Fuse text prompt embeddings with model weights for efficient inference."""
for cls_h, bn_h in zip(cls_head, bn_head):
assert isinstance(cls_h, nn.Sequential)
assert isinstance(bn_h, BNContrastiveHead)
@@ -649,9 +1085,11 @@ bn_h.fuse()
def get_tpe(self, tpe: torch.Tensor | None) -> torch.Tensor | None:
+ """Get text prompt embeddings with normalization."""
return None if tpe is None else F.normalize(self.reprta(tpe), dim=-1, p=2)
def get_vpe(self, x: list[torch.Tensor], vpe: torch.Tensor) -> torch.Tensor:
+ """Get visual prompt embeddings with spatial awareness."""
if vpe.shape[1] == 0: # no visual prompt embeddings
return torch.zeros(x[0].shape[0], 0, self.embed, device=x[0].device)
if vpe.ndim == 4: # (B, N, H, W)
@@ -660,11 +1098,13 @@ return vpe
def forward(self, x: list[torch.Tensor]) -> torch.Tensor | tuple:
+ """Process features with class prompt embeddings to generate detections."""
if hasattr(self, "lrpc"): # for prompt-free inference
return self.forward_lrpc(x[:3])
return super().forward(x)
def forward_lrpc(self, x: list[torch.Tensor]) -> torch.Tensor | tuple:
+ """Process features with fused text embeddings to generate detections for prompt-free model."""
boxes, scores, index = [], [], []
bs = x[0].shape[0]
cv2 = self.cv2 if not self.end2end else self.one2one_cv2
@@ -688,6 +1128,7 @@ return y if self.export else (y, preds)
def _get_decode_boxes(self, x):
+ """Decode predicted bounding boxes for inference."""
dbox = super()._get_decode_boxes(x)
if hasattr(self, "lrpc"):
dbox = dbox if self.export and not self.dynamic else dbox[..., x["index"]]
@@ -695,13 +1136,16 @@
@property
def one2many(self):
+ """Returns the one-to-many head components, here for v3/v5/v8/v9/v11 backward compatibility."""
return dict(box_head=self.cv2, cls_head=self.cv3, contrastive_head=self.cv4)
@property
def one2one(self):
+ """Returns the one-to-one head components."""
return dict(box_head=self.one2one_cv2, cls_head=self.one2one_cv3, contrastive_head=self.one2one_cv4)
def forward_head(self, x, box_head, cls_head, contrastive_head):
+ """Concatenates and returns predicted bounding boxes, class probabilities, and contrastive scores."""
assert len(x) == 4, f"Expected 4 features including 3 feature maps and 1 text embeddings, but got {len(x)}."
if box_head is None or cls_head is None: # for fused inference
return dict()
@@ -715,6 +1159,7 @@ return dict(boxes=boxes, scores=scores, feats=x[:3])
def bias_init(self):
+ """Initialize Detect() biases, WARNING: requires stride availability."""
for i, (a, b, c) in enumerate(
zip(self.one2many["box_head"], self.one2many["cls_head"], self.one2many["contrastive_head"])
):
@@ -731,6 +1176,27 @@
class YOLOESegment(YOLOEDetect):
+ """YOLO segmentation head with text embedding capabilities.
+
+ This class extends YOLOEDetect to include mask prediction capabilities for instance segmentation tasks with
+ text-guided semantic understanding.
+
+ Attributes:
+ nm (int): Number of masks.
+ npr (int): Number of protos.
+ proto (Proto): Prototype generation module.
+ cv5 (nn.ModuleList): Convolution layers for mask coefficients.
+
+ Methods:
+ forward: Return model outputs and mask coefficients.
+
+ Examples:
+ Create a YOLOESegment head
+ >>> yoloe_segment = YOLOESegment(nc=80, nm=32, npr=256, embed=512, with_bn=True, ch=(256, 512, 1024))
+ >>> x = [torch.randn(1, 256, 80, 80), torch.randn(1, 512, 40, 40), torch.randn(1, 1024, 20, 20)]
+ >>> text = torch.randn(1, 80, 512)
+ >>> outputs = yoloe_segment(x, text)
+ """
def __init__(
self,
@@ -743,6 +1209,18 @@ end2end=False,
ch: tuple = (),
):
+ """Initialize YOLOESegment with class count, mask parameters, and embedding dimensions.
+
+ Args:
+ nc (int): Number of classes.
+ nm (int): Number of masks.
+ npr (int): Number of protos.
+ embed (int): Embedding dimension.
+ with_bn (bool): Whether to use batch normalization in contrastive head.
+ reg_max (int): Maximum number of DFL channels.
+ end2end (bool): Whether to use end-to-end NMS-free detection.
+ ch (tuple): Tuple of channel sizes from backbone feature maps.
+ """
super().__init__(nc, embed, with_bn, reg_max, end2end, ch)
self.nm = nm
self.npr = npr
@@ -755,10 +1233,12 @@
@property
def one2many(self):
+ """Returns the one-to-many head components, here for v3/v5/v8/v9/v11 backward compatibility."""
return dict(box_head=self.cv2, cls_head=self.cv3, mask_head=self.cv5, contrastive_head=self.cv4)
@property
def one2one(self):
+ """Returns the one-to-one head components."""
return dict(
box_head=self.one2one_cv2,
cls_head=self.one2one_cv3,
@@ -767,6 +1247,7 @@ )
def forward_lrpc(self, x: list[torch.Tensor]) -> torch.Tensor | tuple:
+ """Process features with fused text embeddings to generate detections for prompt-free model."""
boxes, scores, index = [], [], []
bs = x[0].shape[0]
cv2 = self.cv2 if not self.end2end else self.one2one_cv2
@@ -799,6 +1280,7 @@ return y if self.export else (y, preds)
def forward(self, x: list[torch.Tensor]) -> tuple | list[torch.Tensor] | dict[str, torch.Tensor]:
+ """Return model outputs and mask coefficients if training, otherwise return outputs and mask coefficients."""
outputs = super().forward(x)
preds = outputs[1] if isinstance(outputs, tuple) else outputs
proto = self.proto(x[0]) # mask protos
@@ -813,6 +1295,7 @@ return (outputs, proto) if self.export else ((outputs[0], proto), preds)
def _inference(self, x: dict[str, torch.Tensor]) -> torch.Tensor:
+ """Decode predicted bounding boxes and class probabilities, concatenated with mask coefficients."""
preds = super()._inference(x)
return torch.cat([preds, x["mask_coefficient"]], dim=1)
@@ -824,6 +1307,7 @@ mask_head: torch.nn.Module,
contrastive_head: torch.nn.Module,
) -> dict[str, torch.Tensor]:
+ """Concatenates and returns predicted bounding boxes, class probabilities, and mask coefficients."""
preds = super().forward_head(x, box_head, cls_head, contrastive_head)
if mask_head is not None:
bs = x[0].shape[0] # batch size
@@ -831,6 +1315,16 @@ return preds
def postprocess(self, preds: torch.Tensor) -> torch.Tensor:
+ """Post-process YOLO model predictions.
+
+ Args:
+ preds (torch.Tensor): Raw predictions with shape (batch_size, num_anchors, 4 + nc + nm) with last dimension
+ format [x1, y1, x2, y2, class_probs, mask_coefficient].
+
+ Returns:
+ (torch.Tensor): Processed predictions with shape (batch_size, min(max_det, num_anchors), 6 + nm) and last
+ dimension format [x1, y1, x2, y2, max_class_prob, class_index, mask_coefficient].
+ """
boxes, scores, mask_coefficient = preds.split([4, self.nc, self.nm], dim=-1)
scores, conf, idx = self.get_topk_index(scores, self.max_det)
boxes = boxes.gather(dim=1, index=idx.repeat(1, 1, 4))
@@ -838,6 +1332,7 @@ return torch.cat([boxes, scores, conf, mask_coefficient], dim=-1)
def fuse(self, txt_feats: torch.Tensor = None):
+ """Fuse text features with model weights for efficient inference."""
super().fuse(txt_feats)
if txt_feats is None: # means eliminate one2many branch
self.cv5 = None
@@ -847,6 +1342,28 @@
class YOLOESegment26(YOLOESegment):
+ """YOLOE-style segmentation head module using Proto26 for mask generation.
+
+ This class extends the YOLOESegment functionality to include segmentation capabilities by integrating a Proto26
+ generation module and convolutional layers to predict mask coefficients.
+
+ Args:
+ nc (int): Number of classes. Defaults to 80.
+ nm (int): Number of masks. Defaults to 32.
+ npr (int): Number of prototype channels. Defaults to 256.
+ embed (int): Embedding dimensionality. Defaults to 512.
+ with_bn (bool): Whether to use Batch Normalization. Defaults to False.
+ reg_max (int): Maximum number of DFL channels. Defaults to 16.
+ end2end (bool): Whether to use end-to-end detection mode. Defaults to False.
+ ch (tuple[int, ...]): Input channels for each scale.
+
+ Attributes:
+ nm (int): Number of segmentation masks.
+ npr (int): Number of prototype channels.
+ proto (Proto26): Prototype generation module for segmentation.
+ cv5 (nn.ModuleList): Convolutional layers for generating mask coefficients from features.
+ one2one_cv5 (nn.ModuleList, optional): Deep copy of cv5 for end-to-end detection branches.
+ """
def __init__(
self,
@@ -859,6 +1376,7 @@ end2end=False,
ch: tuple = (),
):
+ """Initialize YOLOESegment26 with class count, mask parameters, and embedding dimensions."""
YOLOEDetect.__init__(self, nc, embed, with_bn, reg_max, end2end, ch)
self.nm = nm
self.npr = npr
@@ -870,6 +1388,7 @@ self.one2one_cv5 = copy.deepcopy(self.cv5)
def forward(self, x: list[torch.Tensor]) -> tuple | list[torch.Tensor] | dict[str, torch.Tensor]:
+ """Return model outputs and mask coefficients if training, otherwise return outputs and mask coefficients."""
outputs = YOLOEDetect.forward(self, x)
preds = outputs[1] if isinstance(outputs, tuple) else outputs
proto = self.proto([xi.detach() for xi in x], return_semseg=False) # mask protos
@@ -886,6 +1405,44 @@
class RTDETRDecoder(nn.Module):
+ """Real-Time Deformable Transformer Decoder (RTDETRDecoder) module for object detection.
+
+ This decoder module utilizes Transformer architecture along with deformable convolutions to predict bounding boxes
+ and class labels for objects in an image. It integrates features from multiple layers and runs through a series of
+ Transformer decoder layers to output the final predictions.
+
+ Attributes:
+ export (bool): Export mode flag.
+ hidden_dim (int): Dimension of hidden layers.
+ nhead (int): Number of heads in multi-head attention.
+ nl (int): Number of feature levels.
+ nc (int): Number of classes.
+ num_queries (int): Number of query points.
+ num_decoder_layers (int): Number of decoder layers.
+ input_proj (nn.ModuleList): Input projection layers for backbone features.
+ decoder (DeformableTransformerDecoder): Transformer decoder module.
+ denoising_class_embed (nn.Embedding): Class embeddings for denoising.
+ num_denoising (int): Number of denoising queries.
+ label_noise_ratio (float): Label noise ratio for training.
+ box_noise_scale (float): Box noise scale for training.
+ learnt_init_query (bool): Whether to learn initial query embeddings.
+ tgt_embed (nn.Embedding): Target embeddings for queries.
+ query_pos_head (MLP): Query position head.
+ enc_output (nn.Sequential): Encoder output layers.
+ enc_score_head (nn.Linear): Encoder score prediction head.
+ enc_bbox_head (MLP): Encoder bbox prediction head.
+ dec_score_head (nn.ModuleList): Decoder score prediction heads.
+ dec_bbox_head (nn.ModuleList): Decoder bbox prediction heads.
+
+ Methods:
+ forward: Run forward pass and return bounding box and classification scores.
+
+ Examples:
+ Create an RTDETRDecoder
+ >>> decoder = RTDETRDecoder(nc=80, ch=(512, 1024, 2048), hd=256, nq=300)
+ >>> x = [torch.randn(1, 512, 64, 64), torch.randn(1, 1024, 32, 32), torch.randn(1, 2048, 16, 16)]
+ >>> outputs = decoder(x)
+ """
export = False # export mode
shapes = []
@@ -912,6 +1469,25 @@ box_noise_scale: float = 1.0,
learnt_init_query: bool = False,
):
+ """Initialize the RTDETRDecoder module with the given parameters.
+
+ Args:
+ nc (int): Number of classes.
+ ch (tuple): Channels in the backbone feature maps.
+ hd (int): Dimension of hidden layers.
+ nq (int): Number of query points.
+ ndp (int): Number of decoder points.
+ nh (int): Number of heads in multi-head attention.
+ ndl (int): Number of decoder layers.
+ d_ffn (int): Dimension of the feed-forward networks.
+ dropout (float): Dropout rate.
+ act (nn.Module): Activation function.
+ eval_idx (int): Evaluation index.
+ nd (int): Number of denoising.
+ label_noise_ratio (float): Label noise ratio.
+ box_noise_scale (float): Box noise scale.
+ learnt_init_query (bool): Whether to learn initial query embeddings.
+ """
super().__init__()
self.hidden_dim = hd
self.nhead = nh
@@ -953,6 +1529,17 @@ self._reset_parameters()
def forward(self, x: list[torch.Tensor], batch: dict | None = None) -> tuple | torch.Tensor:
+ """Run the forward pass of the module, returning bounding box and classification scores for the input.
+
+ Args:
+ x (list[torch.Tensor]): List of feature maps from the backbone.
+ batch (dict, optional): Batch information for training.
+
+ Returns:
+ outputs (tuple | torch.Tensor): During training, returns a tuple of bounding boxes, scores, and other
+ metadata. During inference, returns a tensor of shape (bs, 300, 4+nc) containing bounding boxes and
+ class scores.
+ """
from ultralytics.models.utils.ops import get_cdn_group
# Input projection and embedding
@@ -998,6 +1585,19 @@ device: str = "cpu",
eps: float = 1e-2,
) -> tuple[torch.Tensor, torch.Tensor]:
+ """Generate anchor bounding boxes for given shapes with specific grid size and validate them.
+
+ Args:
+ shapes (list): List of feature map shapes.
+ grid_size (float, optional): Base size of grid cells.
+ dtype (torch.dtype, optional): Data type for tensors.
+ device (str, optional): Device to create tensors on.
+ eps (float, optional): Small value for numerical stability.
+
+ Returns:
+ anchors (torch.Tensor): Generated anchor boxes.
+ valid_mask (torch.Tensor): Valid mask for anchors.
+ """
anchors = []
for i, (h, w) in enumerate(shapes):
sy = torch.arange(end=h, dtype=dtype, device=device)
@@ -1017,6 +1617,15 @@ return anchors, valid_mask
def _get_encoder_input(self, x: list[torch.Tensor]) -> tuple[torch.Tensor, list[list[int]]]:
+ """Process and return encoder inputs by getting projection features from input and concatenating them.
+
+ Args:
+ x (list[torch.Tensor]): List of feature maps from the backbone.
+
+ Returns:
+ feats (torch.Tensor): Processed features.
+ shapes (list): List of feature map shapes.
+ """
# Get projection features
x = [self.input_proj[i](feat) for i, feat in enumerate(x)]
# Get encoder inputs
@@ -1040,6 +1649,20 @@ dn_embed: torch.Tensor | None = None,
dn_bbox: torch.Tensor | None = None,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
+ """Generate and prepare the input required for the decoder from the provided features and shapes.
+
+ Args:
+ feats (torch.Tensor): Processed features from encoder.
+ shapes (list): List of feature map shapes.
+ dn_embed (torch.Tensor, optional): Denoising embeddings.
+ dn_bbox (torch.Tensor, optional): Denoising bounding boxes.
+
+ Returns:
+ embeddings (torch.Tensor): Query embeddings for decoder.
+ refer_bbox (torch.Tensor): Reference bounding boxes.
+ enc_bboxes (torch.Tensor): Encoded bounding boxes.
+ enc_scores (torch.Tensor): Encoded scores.
+ """
bs = feats.shape[0]
if self.dynamic or self.shapes != shapes:
self.anchors, self.valid_mask = self._generate_anchors(shapes, dtype=feats.dtype, device=feats.device)
@@ -1079,6 +1702,7 @@ return embeddings, refer_bbox, enc_bboxes, enc_scores
def _reset_parameters(self):
+ """Initialize or reset the parameters of the model's various components with predefined weights and biases."""
# Class and bbox head init
bias_cls = bias_init_with_prob(0.01) / 80 * self.nc
# NOTE: the weight initialization in `linear_init` would cause NaN when training with custom datasets.
@@ -1103,10 +1727,39 @@
class v10Detect(Detect):
+ """v10 Detection head from https://arxiv.org/pdf/2405.14458.
+
+ This class implements the YOLOv10 detection head with dual-assignment training and consistent dual predictions for
+ improved efficiency and performance.
+
+ Attributes:
+ end2end (bool): End-to-end detection mode.
+ max_det (int): Maximum number of detections.
+ cv3 (nn.ModuleList): Light classification head layers.
+ one2one_cv3 (nn.ModuleList): One-to-one classification head layers.
+
+ Methods:
+ __init__: Initialize the v10Detect object with specified number of classes and input channels.
+ forward: Perform forward pass of the v10Detect module.
+ bias_init: Initialize biases of the Detect module.
+ fuse: Remove the one2many head for inference optimization.
+
+ Examples:
+ Create a v10Detect head
+ >>> v10_detect = v10Detect(nc=80, ch=(256, 512, 1024))
+ >>> x = [torch.randn(1, 256, 80, 80), torch.randn(1, 512, 40, 40), torch.randn(1, 1024, 20, 20)]
+ >>> outputs = v10_detect(x)
+ """
end2end = True
def __init__(self, nc: int = 80, ch: tuple = ()):
+ """Initialize the v10Detect object with the specified number of classes and input channels.
+
+ Args:
+ nc (int): Number of classes.
+ ch (tuple): Tuple of channel sizes from backbone feature maps.
+ """
super().__init__(nc, end2end=True, ch=ch)
c3 = max(ch[0], min(self.nc, 100)) # channels
# Light cls head
@@ -1121,4 +1774,5 @@ self.one2one_cv3 = copy.deepcopy(self.cv3)
def fuse(self):
- self.cv2 = self.cv3 = None
+ """Remove the one2many head for inference optimization."""
+ self.cv2 = self.cv3 = None
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/nn/modules/head.py |
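Note: the docstring diffs in this dataset follow the Google-style layout (one-line summary, then Args, Returns, and optional Attributes/Examples sections using `name (type): description` entries). Below is a minimal sketch of that convention applied to a hypothetical helper; the function is illustrative only and is not part of the Ultralytics codebase.
import torch
def make_grid(h: int, w: int, device: str = "cpu") -> torch.Tensor:
    """Build a dense (x, y) coordinate grid for a feature map.
    Args:
        h (int): Feature map height.
        w (int): Feature map width.
        device (str, optional): Device to create the grid on.
    Returns:
        (torch.Tensor): Grid of shape (h * w, 2) holding (x, y) pairs.
    Examples:
        >>> make_grid(2, 3).shape
        torch.Size([6, 2])
    """
    ys, xs = torch.meshgrid(torch.arange(h), torch.arange(w), indexing="ij")  # row/column indices
    return torch.stack([xs.flatten(), ys.flatten()], dim=-1).to(device)  # (h*w, 2) integer grid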
Add docstrings for internal functions | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import constant_, xavier_uniform_
from ultralytics.utils.torch_utils import TORCH_1_11
from .conv import Conv
from .utils import _get_clones, inverse_sigmoid, multi_scale_deformable_attn_pytorch
__all__ = (
"AIFI",
"MLP",
"DeformableTransformerDecoder",
"DeformableTransformerDecoderLayer",
"LayerNorm2d",
"MLPBlock",
"MSDeformAttn",
"TransformerBlock",
"TransformerEncoderLayer",
"TransformerLayer",
)
class TransformerEncoderLayer(nn.Module):
def __init__(
self,
c1: int,
cm: int = 2048,
num_heads: int = 8,
dropout: float = 0.0,
act: nn.Module = nn.GELU(),
normalize_before: bool = False,
):
super().__init__()
from ...utils.torch_utils import TORCH_1_9
if not TORCH_1_9:
raise ModuleNotFoundError(
"TransformerEncoderLayer() requires torch>=1.9 to use nn.MultiheadAttention(batch_first=True)."
)
self.ma = nn.MultiheadAttention(c1, num_heads, dropout=dropout, batch_first=True)
# Implementation of Feedforward model
self.fc1 = nn.Linear(c1, cm)
self.fc2 = nn.Linear(cm, c1)
self.norm1 = nn.LayerNorm(c1)
self.norm2 = nn.LayerNorm(c1)
self.dropout = nn.Dropout(dropout)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.act = act
self.normalize_before = normalize_before
@staticmethod
def with_pos_embed(tensor: torch.Tensor, pos: torch.Tensor | None = None) -> torch.Tensor:
return tensor if pos is None else tensor + pos
def forward_post(
self,
src: torch.Tensor,
src_mask: torch.Tensor | None = None,
src_key_padding_mask: torch.Tensor | None = None,
pos: torch.Tensor | None = None,
) -> torch.Tensor:
q = k = self.with_pos_embed(src, pos)
src2 = self.ma(q, k, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.fc2(self.dropout(self.act(self.fc1(src))))
src = src + self.dropout2(src2)
return self.norm2(src)
def forward_pre(
self,
src: torch.Tensor,
src_mask: torch.Tensor | None = None,
src_key_padding_mask: torch.Tensor | None = None,
pos: torch.Tensor | None = None,
) -> torch.Tensor:
src2 = self.norm1(src)
q = k = self.with_pos_embed(src2, pos)
src2 = self.ma(q, k, value=src2, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src2 = self.norm2(src)
src2 = self.fc2(self.dropout(self.act(self.fc1(src2))))
return src + self.dropout2(src2)
def forward(
self,
src: torch.Tensor,
src_mask: torch.Tensor | None = None,
src_key_padding_mask: torch.Tensor | None = None,
pos: torch.Tensor | None = None,
) -> torch.Tensor:
if self.normalize_before:
return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
return self.forward_post(src, src_mask, src_key_padding_mask, pos)
class AIFI(TransformerEncoderLayer):
def __init__(
self,
c1: int,
cm: int = 2048,
num_heads: int = 8,
dropout: float = 0,
act: nn.Module = nn.GELU(),
normalize_before: bool = False,
):
super().__init__(c1, cm, num_heads, dropout, act, normalize_before)
def forward(self, x: torch.Tensor) -> torch.Tensor:
c, h, w = x.shape[1:]
pos_embed = self.build_2d_sincos_position_embedding(w, h, c)
# Flatten [B, C, H, W] to [B, HxW, C]
x = super().forward(x.flatten(2).permute(0, 2, 1), pos=pos_embed.to(device=x.device, dtype=x.dtype))
return x.permute(0, 2, 1).view([-1, c, h, w]).contiguous()
@staticmethod
def build_2d_sincos_position_embedding(
w: int, h: int, embed_dim: int = 256, temperature: float = 10000.0
) -> torch.Tensor:
assert embed_dim % 4 == 0, "Embed dimension must be divisible by 4 for 2D sin-cos position embedding"
grid_w = torch.arange(w, dtype=torch.float32)
grid_h = torch.arange(h, dtype=torch.float32)
grid_w, grid_h = torch.meshgrid(grid_w, grid_h, indexing="ij") if TORCH_1_11 else torch.meshgrid(grid_w, grid_h)
pos_dim = embed_dim // 4
omega = torch.arange(pos_dim, dtype=torch.float32) / pos_dim
omega = 1.0 / (temperature**omega)
out_w = grid_w.flatten()[..., None] @ omega[None]
out_h = grid_h.flatten()[..., None] @ omega[None]
return torch.cat([torch.sin(out_w), torch.cos(out_w), torch.sin(out_h), torch.cos(out_h)], 1)[None]
class TransformerLayer(nn.Module):
def __init__(self, c: int, num_heads: int):
super().__init__()
self.q = nn.Linear(c, c, bias=False)
self.k = nn.Linear(c, c, bias=False)
self.v = nn.Linear(c, c, bias=False)
self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
self.fc1 = nn.Linear(c, c, bias=False)
self.fc2 = nn.Linear(c, c, bias=False)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x
return self.fc2(self.fc1(x)) + x
class TransformerBlock(nn.Module):
def __init__(self, c1: int, c2: int, num_heads: int, num_layers: int):
super().__init__()
self.conv = None
if c1 != c2:
self.conv = Conv(c1, c2)
self.linear = nn.Linear(c2, c2) # learnable position embedding
self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers)))
self.c2 = c2
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.conv is not None:
x = self.conv(x)
b, _, h, w = x.shape
p = x.flatten(2).permute(2, 0, 1)
return self.tr(p + self.linear(p)).permute(1, 2, 0).reshape(b, self.c2, h, w)
class MLPBlock(nn.Module):
def __init__(self, embedding_dim: int, mlp_dim: int, act=nn.GELU):
super().__init__()
self.lin1 = nn.Linear(embedding_dim, mlp_dim)
self.lin2 = nn.Linear(mlp_dim, embedding_dim)
self.act = act()
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.lin2(self.act(self.lin1(x)))
class MLP(nn.Module):
def __init__(
self,
input_dim: int,
hidden_dim: int,
output_dim: int,
num_layers: int,
act=nn.ReLU,
sigmoid: bool = False,
residual: bool = False,
out_norm: nn.Module = None,
):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim, *h], [*h, output_dim]))
self.sigmoid = sigmoid
self.act = act()
if residual and input_dim != output_dim:
raise ValueError("residual is only supported if input_dim == output_dim")
self.residual = residual
# whether to apply a normalization layer to the output
assert isinstance(out_norm, nn.Module) or out_norm is None
self.out_norm = out_norm or nn.Identity()
def forward(self, x: torch.Tensor) -> torch.Tensor:
orig_x = x
for i, layer in enumerate(self.layers):
x = getattr(self, "act", nn.ReLU())(layer(x)) if i < self.num_layers - 1 else layer(x)
if getattr(self, "residual", False):
x = x + orig_x
x = getattr(self, "out_norm", nn.Identity())(x)
return x.sigmoid() if getattr(self, "sigmoid", False) else x
class LayerNorm2d(nn.Module):
def __init__(self, num_channels: int, eps: float = 1e-6):
super().__init__()
self.weight = nn.Parameter(torch.ones(num_channels))
self.bias = nn.Parameter(torch.zeros(num_channels))
self.eps = eps
def forward(self, x: torch.Tensor) -> torch.Tensor:
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
return self.weight[:, None, None] * x + self.bias[:, None, None]
class MSDeformAttn(nn.Module):
def __init__(self, d_model: int = 256, n_levels: int = 4, n_heads: int = 8, n_points: int = 4):
super().__init__()
if d_model % n_heads != 0:
raise ValueError(f"d_model must be divisible by n_heads, but got {d_model} and {n_heads}")
_d_per_head = d_model // n_heads
# Better to set _d_per_head to a power of 2 which is more efficient in a CUDA implementation
assert _d_per_head * n_heads == d_model, "`d_model` must be divisible by `n_heads`"
self.im2col_step = 64
self.d_model = d_model
self.n_levels = n_levels
self.n_heads = n_heads
self.n_points = n_points
self.sampling_offsets = nn.Linear(d_model, n_heads * n_levels * n_points * 2)
self.attention_weights = nn.Linear(d_model, n_heads * n_levels * n_points)
self.value_proj = nn.Linear(d_model, d_model)
self.output_proj = nn.Linear(d_model, d_model)
self._reset_parameters()
def _reset_parameters(self):
constant_(self.sampling_offsets.weight.data, 0.0)
thetas = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads)
grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
grid_init = (
(grid_init / grid_init.abs().max(-1, keepdim=True)[0])
.view(self.n_heads, 1, 1, 2)
.repeat(1, self.n_levels, self.n_points, 1)
)
for i in range(self.n_points):
grid_init[:, :, i, :] *= i + 1
with torch.no_grad():
self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
constant_(self.attention_weights.weight.data, 0.0)
constant_(self.attention_weights.bias.data, 0.0)
xavier_uniform_(self.value_proj.weight.data)
constant_(self.value_proj.bias.data, 0.0)
xavier_uniform_(self.output_proj.weight.data)
constant_(self.output_proj.bias.data, 0.0)
def forward(
self,
query: torch.Tensor,
refer_bbox: torch.Tensor,
value: torch.Tensor,
value_shapes: list,
value_mask: torch.Tensor | None = None,
) -> torch.Tensor:
bs, len_q = query.shape[:2]
len_v = value.shape[1]
assert sum(s[0] * s[1] for s in value_shapes) == len_v
value = self.value_proj(value)
if value_mask is not None:
value = value.masked_fill(value_mask[..., None], float(0))
value = value.view(bs, len_v, self.n_heads, self.d_model // self.n_heads)
sampling_offsets = self.sampling_offsets(query).view(bs, len_q, self.n_heads, self.n_levels, self.n_points, 2)
attention_weights = self.attention_weights(query).view(bs, len_q, self.n_heads, self.n_levels * self.n_points)
attention_weights = F.softmax(attention_weights, -1).view(bs, len_q, self.n_heads, self.n_levels, self.n_points)
# N, Len_q, n_heads, n_levels, n_points, 2
num_points = refer_bbox.shape[-1]
if num_points == 2:
offset_normalizer = torch.as_tensor(value_shapes, dtype=query.dtype, device=query.device).flip(-1)
add = sampling_offsets / offset_normalizer[None, None, None, :, None, :]
sampling_locations = refer_bbox[:, :, None, :, None, :] + add
elif num_points == 4:
add = sampling_offsets / self.n_points * refer_bbox[:, :, None, :, None, 2:] * 0.5
sampling_locations = refer_bbox[:, :, None, :, None, :2] + add
else:
raise ValueError(f"Last dim of reference_points must be 2 or 4, but got {num_points}.")
output = multi_scale_deformable_attn_pytorch(value, value_shapes, sampling_locations, attention_weights)
return self.output_proj(output)
class DeformableTransformerDecoderLayer(nn.Module):
def __init__(
self,
d_model: int = 256,
n_heads: int = 8,
d_ffn: int = 1024,
dropout: float = 0.0,
act: nn.Module = nn.ReLU(),
n_levels: int = 4,
n_points: int = 4,
):
super().__init__()
# Self attention
self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(d_model)
# Cross attention
self.cross_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
self.dropout2 = nn.Dropout(dropout)
self.norm2 = nn.LayerNorm(d_model)
# FFN
self.linear1 = nn.Linear(d_model, d_ffn)
self.act = act
self.dropout3 = nn.Dropout(dropout)
self.linear2 = nn.Linear(d_ffn, d_model)
self.dropout4 = nn.Dropout(dropout)
self.norm3 = nn.LayerNorm(d_model)
@staticmethod
def with_pos_embed(tensor: torch.Tensor, pos: torch.Tensor | None) -> torch.Tensor:
return tensor if pos is None else tensor + pos
def forward_ffn(self, tgt: torch.Tensor) -> torch.Tensor:
tgt2 = self.linear2(self.dropout3(self.act(self.linear1(tgt))))
tgt = tgt + self.dropout4(tgt2)
return self.norm3(tgt)
def forward(
self,
embed: torch.Tensor,
refer_bbox: torch.Tensor,
feats: torch.Tensor,
shapes: list,
padding_mask: torch.Tensor | None = None,
attn_mask: torch.Tensor | None = None,
query_pos: torch.Tensor | None = None,
) -> torch.Tensor:
# Self attention
q = k = self.with_pos_embed(embed, query_pos)
tgt = self.self_attn(q.transpose(0, 1), k.transpose(0, 1), embed.transpose(0, 1), attn_mask=attn_mask)[
0
].transpose(0, 1)
embed = embed + self.dropout1(tgt)
embed = self.norm1(embed)
# Cross attention
tgt = self.cross_attn(
self.with_pos_embed(embed, query_pos), refer_bbox.unsqueeze(2), feats, shapes, padding_mask
)
embed = embed + self.dropout2(tgt)
embed = self.norm2(embed)
# FFN
return self.forward_ffn(embed)
class DeformableTransformerDecoder(nn.Module):
def __init__(self, hidden_dim: int, decoder_layer: nn.Module, num_layers: int, eval_idx: int = -1):
super().__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.hidden_dim = hidden_dim
self.eval_idx = eval_idx if eval_idx >= 0 else num_layers + eval_idx
def forward(
self,
embed: torch.Tensor, # decoder embeddings
refer_bbox: torch.Tensor, # anchor
feats: torch.Tensor, # image features
shapes: list, # feature shapes
bbox_head: nn.Module,
score_head: nn.Module,
pos_mlp: nn.Module,
attn_mask: torch.Tensor | None = None,
padding_mask: torch.Tensor | None = None,
):
output = embed
dec_bboxes = []
dec_cls = []
last_refined_bbox = None
refer_bbox = refer_bbox.sigmoid()
for i, layer in enumerate(self.layers):
output = layer(output, refer_bbox, feats, shapes, padding_mask, attn_mask, pos_mlp(refer_bbox))
bbox = bbox_head[i](output)
refined_bbox = torch.sigmoid(bbox + inverse_sigmoid(refer_bbox))
if self.training:
dec_cls.append(score_head[i](output))
if i == 0:
dec_bboxes.append(refined_bbox)
else:
dec_bboxes.append(torch.sigmoid(bbox + inverse_sigmoid(last_refined_bbox)))
elif i == self.eval_idx:
dec_cls.append(score_head[i](output))
dec_bboxes.append(refined_bbox)
break
last_refined_bbox = refined_bbox
refer_bbox = refined_bbox.detach() if self.training else refined_bbox
return torch.stack(dec_bboxes), torch.stack(dec_cls) | --- +++ @@ -1,4 +1,5 @@ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""Transformer modules."""
from __future__ import annotations
@@ -29,6 +30,23 @@
class TransformerEncoderLayer(nn.Module):
+ """A single layer of the transformer encoder.
+
+ This class implements a standard transformer encoder layer with multi-head attention and feedforward network,
+ supporting both pre-normalization and post-normalization configurations.
+
+ Attributes:
+ ma (nn.MultiheadAttention): Multi-head attention module.
+ fc1 (nn.Linear): First linear layer in the feedforward network.
+ fc2 (nn.Linear): Second linear layer in the feedforward network.
+ norm1 (nn.LayerNorm): Layer normalization after attention.
+ norm2 (nn.LayerNorm): Layer normalization after feedforward network.
+ dropout (nn.Dropout): Dropout layer for the feedforward network.
+ dropout1 (nn.Dropout): Dropout layer after attention.
+ dropout2 (nn.Dropout): Dropout layer after feedforward network.
+ act (nn.Module): Activation function.
+ normalize_before (bool): Whether to apply normalization before attention and feedforward.
+ """
def __init__(
self,
@@ -39,6 +57,16 @@ act: nn.Module = nn.GELU(),
normalize_before: bool = False,
):
+ """Initialize the TransformerEncoderLayer with specified parameters.
+
+ Args:
+ c1 (int): Input dimension.
+ cm (int): Hidden dimension in the feedforward network.
+ num_heads (int): Number of attention heads.
+ dropout (float): Dropout probability.
+ act (nn.Module): Activation function.
+ normalize_before (bool): Whether to apply normalization before attention and feedforward.
+ """
super().__init__()
from ...utils.torch_utils import TORCH_1_9
@@ -62,6 +90,7 @@
@staticmethod
def with_pos_embed(tensor: torch.Tensor, pos: torch.Tensor | None = None) -> torch.Tensor:
+ """Add position embeddings to the tensor if provided."""
return tensor if pos is None else tensor + pos
def forward_post(
@@ -71,6 +100,17 @@ src_key_padding_mask: torch.Tensor | None = None,
pos: torch.Tensor | None = None,
) -> torch.Tensor:
+ """Perform forward pass with post-normalization.
+
+ Args:
+ src (torch.Tensor): Input tensor.
+ src_mask (torch.Tensor, optional): Mask for the src sequence.
+ src_key_padding_mask (torch.Tensor, optional): Mask for the src keys per batch.
+ pos (torch.Tensor, optional): Positional encoding.
+
+ Returns:
+ (torch.Tensor): Output tensor after attention and feedforward.
+ """
q = k = self.with_pos_embed(src, pos)
src2 = self.ma(q, k, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
@@ -86,6 +126,17 @@ src_key_padding_mask: torch.Tensor | None = None,
pos: torch.Tensor | None = None,
) -> torch.Tensor:
+ """Perform forward pass with pre-normalization.
+
+ Args:
+ src (torch.Tensor): Input tensor.
+ src_mask (torch.Tensor, optional): Mask for the src sequence.
+ src_key_padding_mask (torch.Tensor, optional): Mask for the src keys per batch.
+ pos (torch.Tensor, optional): Positional encoding.
+
+ Returns:
+ (torch.Tensor): Output tensor after attention and feedforward.
+ """
src2 = self.norm1(src)
q = k = self.with_pos_embed(src2, pos)
src2 = self.ma(q, k, value=src2, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
@@ -101,12 +152,28 @@ src_key_padding_mask: torch.Tensor | None = None,
pos: torch.Tensor | None = None,
) -> torch.Tensor:
+ """Forward propagate the input through the encoder module.
+
+ Args:
+ src (torch.Tensor): Input tensor.
+ src_mask (torch.Tensor, optional): Mask for the src sequence.
+ src_key_padding_mask (torch.Tensor, optional): Mask for the src keys per batch.
+ pos (torch.Tensor, optional): Positional encoding.
+
+ Returns:
+ (torch.Tensor): Output tensor after transformer encoder layer.
+ """
if self.normalize_before:
return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
return self.forward_post(src, src_mask, src_key_padding_mask, pos)
class AIFI(TransformerEncoderLayer):
+ """AIFI transformer layer for 2D data with positional embeddings.
+
+ This class extends TransformerEncoderLayer to work with 2D feature maps by adding 2D sine-cosine positional
+ embeddings and handling the spatial dimensions appropriately.
+ """
def __init__(
self,
@@ -117,9 +184,27 @@ act: nn.Module = nn.GELU(),
normalize_before: bool = False,
):
+ """Initialize the AIFI instance with specified parameters.
+
+ Args:
+ c1 (int): Input dimension.
+ cm (int): Hidden dimension in the feedforward network.
+ num_heads (int): Number of attention heads.
+ dropout (float): Dropout probability.
+ act (nn.Module): Activation function.
+ normalize_before (bool): Whether to apply normalization before attention and feedforward.
+ """
super().__init__(c1, cm, num_heads, dropout, act, normalize_before)
def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Forward pass for the AIFI transformer layer.
+
+ Args:
+ x (torch.Tensor): Input tensor with shape [B, C, H, W].
+
+ Returns:
+ (torch.Tensor): Output tensor with shape [B, C, H, W].
+ """
c, h, w = x.shape[1:]
pos_embed = self.build_2d_sincos_position_embedding(w, h, c)
# Flatten [B, C, H, W] to [B, HxW, C]
@@ -130,6 +215,17 @@ def build_2d_sincos_position_embedding(
w: int, h: int, embed_dim: int = 256, temperature: float = 10000.0
) -> torch.Tensor:
+ """Build 2D sine-cosine position embedding.
+
+ Args:
+ w (int): Width of the feature map.
+ h (int): Height of the feature map.
+ embed_dim (int): Embedding dimension.
+ temperature (float): Temperature for the sine/cosine functions.
+
+ Returns:
+ (torch.Tensor): Position embedding with shape [1, h*w, embed_dim].
+ """
assert embed_dim % 4 == 0, "Embed dimension must be divisible by 4 for 2D sin-cos position embedding"
grid_w = torch.arange(w, dtype=torch.float32)
grid_h = torch.arange(h, dtype=torch.float32)
@@ -145,8 +241,15 @@
class TransformerLayer(nn.Module):
+ """Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)."""
def __init__(self, c: int, num_heads: int):
+ """Initialize a self-attention mechanism using linear transformations and multi-head attention.
+
+ Args:
+ c (int): Input and output channel dimension.
+ num_heads (int): Number of attention heads.
+ """
super().__init__()
self.q = nn.Linear(c, c, bias=False)
self.k = nn.Linear(c, c, bias=False)
@@ -156,13 +259,40 @@ self.fc2 = nn.Linear(c, c, bias=False)
def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Apply a transformer block to the input x and return the output.
+
+ Args:
+ x (torch.Tensor): Input tensor.
+
+ Returns:
+ (torch.Tensor): Output tensor after transformer layer.
+ """
x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x
return self.fc2(self.fc1(x)) + x
class TransformerBlock(nn.Module):
+ """Vision Transformer block based on https://arxiv.org/abs/2010.11929.
+
+ This class implements a complete transformer block with optional convolution layer for channel adjustment, learnable
+ position embedding, and multiple transformer layers.
+
+ Attributes:
+ conv (Conv, optional): Convolution layer if input and output channels differ.
+ linear (nn.Linear): Learnable position embedding.
+ tr (nn.Sequential): Sequential container of transformer layers.
+ c2 (int): Output channel dimension.
+ """
def __init__(self, c1: int, c2: int, num_heads: int, num_layers: int):
+ """Initialize a Transformer module with position embedding and specified number of heads and layers.
+
+ Args:
+ c1 (int): Input channel dimension.
+ c2 (int): Output channel dimension.
+ num_heads (int): Number of attention heads.
+ num_layers (int): Number of transformer layers.
+ """
super().__init__()
self.conv = None
if c1 != c2:
@@ -172,6 +302,14 @@ self.c2 = c2
def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Forward propagate the input through the transformer block.
+
+ Args:
+ x (torch.Tensor): Input tensor with shape [b, c1, h, w].
+
+ Returns:
+ (torch.Tensor): Output tensor with shape [b, c2, h, w].
+ """
if self.conv is not None:
x = self.conv(x)
b, _, h, w = x.shape
@@ -180,18 +318,45 @@
class MLPBlock(nn.Module):
+ """A single block of a multi-layer perceptron."""
def __init__(self, embedding_dim: int, mlp_dim: int, act=nn.GELU):
+ """Initialize the MLPBlock with specified embedding dimension, MLP dimension, and activation function.
+
+ Args:
+ embedding_dim (int): Input and output dimension.
+ mlp_dim (int): Hidden dimension.
+ act (type): Activation function class.
+ """
super().__init__()
self.lin1 = nn.Linear(embedding_dim, mlp_dim)
self.lin2 = nn.Linear(mlp_dim, embedding_dim)
self.act = act()
def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Forward pass for the MLPBlock.
+
+ Args:
+ x (torch.Tensor): Input tensor.
+
+ Returns:
+ (torch.Tensor): Output tensor after MLP block.
+ """
return self.lin2(self.act(self.lin1(x)))
class MLP(nn.Module):
+ """A simple multi-layer perceptron (also called FFN).
+
+ This class implements a configurable MLP with multiple linear layers, activation functions, and optional sigmoid
+ output activation.
+
+ Attributes:
+ num_layers (int): Number of layers in the MLP.
+ layers (nn.ModuleList): List of linear layers.
+ sigmoid (bool): Whether to apply sigmoid to the output.
+ act (nn.Module): Activation function.
+ """
def __init__(
self,
@@ -204,6 +369,18 @@ residual: bool = False,
out_norm: nn.Module = None,
):
+ """Initialize the MLP with specified input, hidden, output dimensions and number of layers.
+
+ Args:
+ input_dim (int): Input dimension.
+ hidden_dim (int): Hidden dimension.
+ output_dim (int): Output dimension.
+ num_layers (int): Number of layers.
+ act (type): Activation function class.
+ sigmoid (bool): Whether to apply sigmoid to the output.
+ residual (bool): Whether to use residual connections.
+ out_norm (nn.Module, optional): Normalization layer for the output.
+ """
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
@@ -218,6 +395,14 @@ self.out_norm = out_norm or nn.Identity()
def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Forward pass for the entire MLP.
+
+ Args:
+ x (torch.Tensor): Input tensor.
+
+ Returns:
+ (torch.Tensor): Output tensor after MLP.
+ """
orig_x = x
for i, layer in enumerate(self.layers):
x = getattr(self, "act", nn.ReLU())(layer(x)) if i < self.num_layers - 1 else layer(x)
@@ -228,14 +413,42 @@
class LayerNorm2d(nn.Module):
+ """2D Layer Normalization module inspired by Detectron2 and ConvNeXt implementations.
+
+ This class implements layer normalization for 2D feature maps, normalizing across the channel dimension while
+ preserving spatial dimensions.
+
+ Attributes:
+ weight (nn.Parameter): Learnable scale parameter.
+ bias (nn.Parameter): Learnable bias parameter.
+ eps (float): Small constant for numerical stability.
+
+ References:
+ https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py
+ https://github.com/facebookresearch/ConvNeXt/blob/main/models/convnext.py
+ """
def __init__(self, num_channels: int, eps: float = 1e-6):
+ """Initialize LayerNorm2d with the given parameters.
+
+ Args:
+ num_channels (int): Number of channels in the input.
+ eps (float): Small constant for numerical stability.
+ """
super().__init__()
self.weight = nn.Parameter(torch.ones(num_channels))
self.bias = nn.Parameter(torch.zeros(num_channels))
self.eps = eps
def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Perform forward pass for 2D layer normalization.
+
+ Args:
+ x (torch.Tensor): Input tensor.
+
+ Returns:
+ (torch.Tensor): Normalized output tensor.
+ """
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
@@ -243,8 +456,35 @@
class MSDeformAttn(nn.Module):
+ """Multiscale Deformable Attention Module based on Deformable-DETR and PaddleDetection implementations.
+
+ This module implements multiscale deformable attention that can attend to features at multiple scales with learnable
+ sampling locations and attention weights.
+
+ Attributes:
+ im2col_step (int): Step size for im2col operations.
+ d_model (int): Model dimension.
+ n_levels (int): Number of feature levels.
+ n_heads (int): Number of attention heads.
+ n_points (int): Number of sampling points per attention head per feature level.
+ sampling_offsets (nn.Linear): Linear layer for generating sampling offsets.
+ attention_weights (nn.Linear): Linear layer for generating attention weights.
+ value_proj (nn.Linear): Linear layer for projecting values.
+ output_proj (nn.Linear): Linear layer for projecting output.
+
+ References:
+ https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/ops/modules/ms_deform_attn.py
+ """
def __init__(self, d_model: int = 256, n_levels: int = 4, n_heads: int = 8, n_points: int = 4):
+ """Initialize MSDeformAttn with the given parameters.
+
+ Args:
+ d_model (int): Model dimension.
+ n_levels (int): Number of feature levels.
+ n_heads (int): Number of attention heads.
+ n_points (int): Number of sampling points per attention head per feature level.
+ """
super().__init__()
if d_model % n_heads != 0:
raise ValueError(f"d_model must be divisible by n_heads, but got {d_model} and {n_heads}")
@@ -267,6 +507,7 @@ self._reset_parameters()
def _reset_parameters(self):
+ """Reset module parameters."""
constant_(self.sampling_offsets.weight.data, 0.0)
thetas = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads)
grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
@@ -294,6 +535,23 @@ value_shapes: list,
value_mask: torch.Tensor | None = None,
) -> torch.Tensor:
+ """Perform forward pass for multiscale deformable attention.
+
+ Args:
+ query (torch.Tensor): Query tensor with shape [bs, query_length, C].
+ refer_bbox (torch.Tensor): Reference bounding boxes with shape [bs, query_length, n_levels, 2 or 4], range
+ in [0, 1], top-left (0,0), bottom-right (1, 1), including padding area.
+ value (torch.Tensor): Value tensor with shape [bs, value_length, C].
+ value_shapes (list): List with shape [n_levels, 2], [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})].
+ value_mask (torch.Tensor, optional): Mask tensor with shape [bs, value_length], True for padding elements,
+ False for non-padding elements.
+
+ Returns:
+ (torch.Tensor): Output tensor with shape [bs, Length_{query}, C].
+
+ References:
+ https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/transformers/deformable_transformer.py
+ """
bs, len_q = query.shape[:2]
len_v = value.shape[1]
assert sum(s[0] * s[1] for s in value_shapes) == len_v
@@ -321,6 +579,29 @@
class DeformableTransformerDecoderLayer(nn.Module):
+ """Deformable Transformer Decoder Layer inspired by PaddleDetection and Deformable-DETR implementations.
+
+ This class implements a single decoder layer with self-attention, cross-attention using multiscale deformable
+ attention, and a feedforward network.
+
+ Attributes:
+ self_attn (nn.MultiheadAttention): Self-attention module.
+ dropout1 (nn.Dropout): Dropout after self-attention.
+ norm1 (nn.LayerNorm): Layer normalization after self-attention.
+ cross_attn (MSDeformAttn): Cross-attention module.
+ dropout2 (nn.Dropout): Dropout after cross-attention.
+ norm2 (nn.LayerNorm): Layer normalization after cross-attention.
+ linear1 (nn.Linear): First linear layer in the feedforward network.
+ act (nn.Module): Activation function.
+ dropout3 (nn.Dropout): Dropout in the feedforward network.
+ linear2 (nn.Linear): Second linear layer in the feedforward network.
+ dropout4 (nn.Dropout): Dropout after the feedforward network.
+ norm3 (nn.LayerNorm): Layer normalization after the feedforward network.
+
+ References:
+ https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/transformers/deformable_transformer.py
+ https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/deformable_transformer.py
+ """
def __init__(
self,
@@ -332,6 +613,17 @@ n_levels: int = 4,
n_points: int = 4,
):
+ """Initialize the DeformableTransformerDecoderLayer with the given parameters.
+
+ Args:
+ d_model (int): Model dimension.
+ n_heads (int): Number of attention heads.
+ d_ffn (int): Dimension of the feedforward network.
+ dropout (float): Dropout probability.
+ act (nn.Module): Activation function.
+ n_levels (int): Number of feature levels.
+ n_points (int): Number of sampling points.
+ """
super().__init__()
# Self attention
@@ -354,9 +646,18 @@
@staticmethod
def with_pos_embed(tensor: torch.Tensor, pos: torch.Tensor | None) -> torch.Tensor:
+ """Add positional embeddings to the input tensor, if provided."""
return tensor if pos is None else tensor + pos
def forward_ffn(self, tgt: torch.Tensor) -> torch.Tensor:
+ """Perform forward pass through the Feed-Forward Network part of the layer.
+
+ Args:
+ tgt (torch.Tensor): Input tensor.
+
+ Returns:
+ (torch.Tensor): Output tensor after FFN.
+ """
tgt2 = self.linear2(self.dropout3(self.act(self.linear1(tgt))))
tgt = tgt + self.dropout4(tgt2)
return self.norm3(tgt)
@@ -371,6 +672,20 @@ attn_mask: torch.Tensor | None = None,
query_pos: torch.Tensor | None = None,
) -> torch.Tensor:
+ """Perform the forward pass through the entire decoder layer.
+
+ Args:
+ embed (torch.Tensor): Input embeddings.
+ refer_bbox (torch.Tensor): Reference bounding boxes.
+ feats (torch.Tensor): Feature maps.
+ shapes (list): Feature shapes.
+ padding_mask (torch.Tensor, optional): Padding mask.
+ attn_mask (torch.Tensor, optional): Attention mask.
+ query_pos (torch.Tensor, optional): Query position embeddings.
+
+ Returns:
+ (torch.Tensor): Output tensor after decoder layer.
+ """
# Self attention
q = k = self.with_pos_embed(embed, query_pos)
tgt = self.self_attn(q.transpose(0, 1), k.transpose(0, 1), embed.transpose(0, 1), attn_mask=attn_mask)[
@@ -391,8 +706,30 @@
class DeformableTransformerDecoder(nn.Module):
+ """Deformable Transformer Decoder based on PaddleDetection implementation.
+
+ This class implements a complete deformable transformer decoder with multiple decoder layers and prediction heads
+ for bounding box regression and classification.
+
+ Attributes:
+ layers (nn.ModuleList): List of decoder layers.
+ num_layers (int): Number of decoder layers.
+ hidden_dim (int): Hidden dimension.
+ eval_idx (int): Index of the layer to use during evaluation.
+
+ References:
+ https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/transformers/deformable_transformer.py
+ """
def __init__(self, hidden_dim: int, decoder_layer: nn.Module, num_layers: int, eval_idx: int = -1):
+ """Initialize the DeformableTransformerDecoder with the given parameters.
+
+ Args:
+ hidden_dim (int): Hidden dimension.
+ decoder_layer (nn.Module): Decoder layer module.
+ num_layers (int): Number of decoder layers.
+ eval_idx (int): Index of the layer to use during evaluation.
+ """
super().__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
@@ -411,6 +748,23 @@ attn_mask: torch.Tensor | None = None,
padding_mask: torch.Tensor | None = None,
):
+ """Perform the forward pass through the entire decoder.
+
+ Args:
+ embed (torch.Tensor): Decoder embeddings.
+ refer_bbox (torch.Tensor): Reference bounding boxes.
+ feats (torch.Tensor): Image features.
+ shapes (list): Feature shapes.
+ bbox_head (nn.Module): Bounding box prediction head.
+ score_head (nn.Module): Score prediction head.
+ pos_mlp (nn.Module): Position MLP.
+ attn_mask (torch.Tensor, optional): Attention mask.
+ padding_mask (torch.Tensor, optional): Padding mask.
+
+ Returns:
+ dec_bboxes (torch.Tensor): Decoded bounding boxes.
+ dec_cls (torch.Tensor): Decoded classification scores.
+ """
output = embed
dec_bboxes = []
dec_cls = []
@@ -436,4 +790,4 @@ last_refined_bbox = refined_bbox
refer_bbox = refined_bbox.detach() if self.training else refined_bbox
- return torch.stack(dec_bboxes), torch.stack(dec_cls)
+ return torch.stack(dec_bboxes), torch.stack(dec_cls)
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/nn/modules/transformer.py |
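The constructor and forward signatures documented in the diff above can be smoke-tested directly. A minimal sketch, assuming the ultralytics package with this module layout is installed; tensor shapes follow the [B, HxW, C] and [B, C, H, W] conventions noted in the docstrings.
import torch
from ultralytics.nn.modules.transformer import AIFI, TransformerEncoderLayer
layer = TransformerEncoderLayer(c1=256, cm=1024, num_heads=8)  # post-normalization by default
tokens = torch.randn(1, 64, 256)  # [B, HxW, C] token sequence
print(layer(tokens).shape)  # torch.Size([1, 64, 256])
aifi = AIFI(c1=256, cm=1024, num_heads=8)  # adds 2D sin-cos position embeddings internally
feat = torch.randn(1, 256, 8, 8)  # [B, C, H, W] feature map; C must be divisible by 4
print(aifi(feat).shape)  # torch.Size([1, 256, 8, 8])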
Auto-generate documentation strings for this file | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from pathlib import Path
from typing import Any
import numpy as np
import torch
import torch.nn as nn
from ultralytics.utils.checks import check_suffix
from ultralytics.utils.downloads import is_url
from .backends import (
AxeleraBackend,
CoreMLBackend,
ExecuTorchBackend,
MNNBackend,
NCNNBackend,
ONNXBackend,
ONNXIMXBackend,
OpenVINOBackend,
PaddleBackend,
PyTorchBackend,
RKNNBackend,
TensorFlowBackend,
TensorRTBackend,
TorchScriptBackend,
TritonBackend,
)
def check_class_names(names: list | dict) -> dict[int, str]:
if isinstance(names, list): # names is a list
names = dict(enumerate(names)) # convert to dict
if isinstance(names, dict):
# Convert 1) string keys to int, i.e. '0' to 0, and non-string values to strings, i.e. True to 'True'
names = {int(k): str(v) for k, v in names.items()}
n = len(names)
if max(names.keys()) >= n:
raise KeyError(
f"{n}-class dataset requires class indices 0-{n - 1}, but you have invalid class indices "
f"{min(names.keys())}-{max(names.keys())} defined in your dataset YAML."
)
if isinstance(names[0], str) and names[0].startswith("n0"): # imagenet class codes, i.e. 'n01440764'
from ultralytics.utils import ROOT, YAML
names_map = YAML.load(ROOT / "cfg/datasets/ImageNet.yaml")["map"] # human-readable names
names = {k: names_map[v] for k, v in names.items()}
return names
def default_class_names(data: str | Path | None = None) -> dict[int, str]:
if data:
try:
from ultralytics.utils import YAML
from ultralytics.utils.checks import check_yaml
return YAML.load(check_yaml(data))["names"]
except Exception:
pass
return {i: f"class{i}" for i in range(999)} # return default if above errors
class AutoBackend(nn.Module):
_BACKEND_MAP = {
"pt": PyTorchBackend,
"torchscript": TorchScriptBackend,
"onnx": ONNXBackend,
"dnn": ONNXBackend, # Special case: ONNX with DNN
"openvino": OpenVINOBackend,
"engine": TensorRTBackend,
"coreml": CoreMLBackend,
"saved_model": TensorFlowBackend,
"pb": TensorFlowBackend,
"tflite": TensorFlowBackend,
"edgetpu": TensorFlowBackend,
"paddle": PaddleBackend,
"mnn": MNNBackend,
"ncnn": NCNNBackend,
"imx": ONNXIMXBackend,
"rknn": RKNNBackend,
"triton": TritonBackend,
"executorch": ExecuTorchBackend,
"axelera": AxeleraBackend,
}
@torch.no_grad()
def __init__(
self,
model: str | torch.nn.Module = "yolo26n.pt",
device: torch.device = torch.device("cpu"),
dnn: bool = False,
data: str | Path | None = None,
fp16: bool = False,
fuse: bool = True,
verbose: bool = True,
):
super().__init__()
# Determine model format from path/URL
format = "pt" if isinstance(model, nn.Module) else self._model_type(model, dnn)
# Check if format supports FP16
fp16 &= format in {"pt", "torchscript", "onnx", "openvino", "engine", "triton"}
# Set device
if (
isinstance(device, torch.device)
and torch.cuda.is_available()
and device.type != "cpu"
and format not in {"pt", "torchscript", "engine", "onnx", "paddle"}
):
device = torch.device("cpu")
# Select and initialize the appropriate backend
backend_kwargs = {"device": device, "fp16": fp16}
if format == "tfjs":
raise NotImplementedError("Ultralytics TF.js inference is not currently supported.")
if format not in self._BACKEND_MAP:
from ultralytics.engine.exporter import export_formats
raise TypeError(
f"model='{model}' is not a supported model format. "
f"Ultralytics supports: {export_formats()['Format']}\n"
f"See https://docs.ultralytics.com/modes/predict for help."
)
if format == "pt":
backend_kwargs["fuse"] = fuse
backend_kwargs["verbose"] = verbose
elif format in {"saved_model", "pb", "tflite", "edgetpu", "dnn"}:
backend_kwargs["format"] = format
self.backend = self._BACKEND_MAP[format](model, **backend_kwargs)
self.nhwc = format in {"coreml", "saved_model", "pb", "tflite", "edgetpu", "rknn"}
self.format = format
# Ensure backend has names (fallback to default if not set by metadata)
if not self.backend.names:
self.backend.names = default_class_names(data)
self.backend.names = check_class_names(self.backend.names)
def __getattr__(self, name: str) -> Any:
if "backend" in self.__dict__ and hasattr(self.backend, name):
return getattr(self.backend, name)
return super().__getattr__(name)
def forward(
self,
im: torch.Tensor,
augment: bool = False,
visualize: bool = False,
embed: list | None = None,
**kwargs: Any,
) -> torch.Tensor | list[torch.Tensor]:
if self.nhwc:
im = im.permute(0, 2, 3, 1) # torch BCHW to numpy BHWC shape(1,320,192,3)
if self.backend.fp16 and im.dtype != torch.float16:
im = im.half()
# Build forward kwargs based on backend type
forward_kwargs = {}
if self.format == "pt":
forward_kwargs = {"augment": augment, "visualize": visualize, "embed": embed, **kwargs}
y = self.backend.forward(im, **forward_kwargs)
if isinstance(y, (list, tuple)):
if len(self.names) == 999 and (self.task == "segment" or len(y) == 2): # segments and names not defined
nc = y[0].shape[1] - y[1].shape[1] - 4 # y = (1, 116, 8400), (1, 32, 160, 160)
self.names = {i: f"class{i}" for i in range(nc)}
return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y]
else:
return self.from_numpy(y)
def from_numpy(self, x: np.ndarray | torch.Tensor) -> torch.Tensor:
return torch.tensor(x).to(self.device) if isinstance(x, np.ndarray) else x
def warmup(self, imgsz: tuple[int, int, int, int] = (1, 3, 640, 640)) -> None:
from ultralytics.utils.nms import non_max_suppression
if self.format in {"pt", "torchscript", "onnx", "engine", "saved_model", "pb", "triton"} and (
self.device.type != "cpu" or self.format == "triton"
):
im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input
for _ in range(2 if self.format == "torchscript" else 1):
self.forward(im) # warmup model
warmup_boxes = torch.rand(1, 84, 16, device=self.device) # 16 boxes works best empirically
warmup_boxes[:, :4] *= imgsz[-1]
non_max_suppression(warmup_boxes) # warmup NMS
@staticmethod
def _model_type(p: str = "path/to/model.pt", dnn: bool = False) -> str:
from ultralytics.engine.exporter import export_formats
sf = export_formats()["Suffix"]
if not is_url(p) and not isinstance(p, str):
check_suffix(p, sf)
name = Path(p).name
types = [s in name for s in sf]
types[5] |= name.endswith(".mlmodel")
types[8] &= not types[9]
format = next((f for i, f in enumerate(export_formats()["Argument"]) if types[i]), None)
if format == "-":
format = "pt"
elif format == "onnx" and dnn:
format = "dnn"
elif not any(types):
from urllib.parse import urlsplit
url = urlsplit(p)
if bool(url.netloc) and bool(url.path) and url.scheme in {"http", "grpc"}:
format = "triton"
return format
def eval(self) -> AutoBackend:
if hasattr(self.backend, "model") and hasattr(self.backend.model, "eval"):
self.backend.model.eval()
return super().eval()
def _apply(self, fn) -> AutoBackend:
self = super()._apply(fn)
if hasattr(self.backend, "model") and isinstance(self.backend.model, nn.Module):
self.backend.model._apply(fn)
self.backend.device = next(self.backend.model.parameters()).device # update device after move
return self | --- +++ @@ -32,6 +32,17 @@
def check_class_names(names: list | dict) -> dict[int, str]:
+ """Check class names and convert to dict format if needed.
+
+ Args:
+ names (list | dict): Class names as list or dict format.
+
+ Returns:
+ (dict): Class names in dict format with integer keys and string values.
+
+ Raises:
+ KeyError: If class indices are invalid for the dataset size.
+ """
if isinstance(names, list): # names is a list
names = dict(enumerate(names)) # convert to dict
if isinstance(names, dict):
@@ -52,6 +63,14 @@
def default_class_names(data: str | Path | None = None) -> dict[int, str]:
+ """Load class names from a YAML file or return numerical class names.
+
+ Args:
+ data (str | Path, optional): Path to YAML file containing class names.
+
+ Returns:
+ (dict): Dictionary mapping class indices to class names.
+ """
if data:
try:
from ultralytics.utils import YAML
@@ -64,6 +83,55 @@
class AutoBackend(nn.Module):
+ """Handle dynamic backend selection for running inference using Ultralytics YOLO models.
+
+ The AutoBackend class is designed to provide an abstraction layer for various inference engines. It supports a wide
+ range of formats, each with specific naming conventions as outlined below:
+
+ Supported Formats and Naming Conventions:
+ | Format | File Suffix |
+ | --------------------- | ----------------- |
+ | PyTorch | *.pt |
+ | TorchScript | *.torchscript |
+ | ONNX Runtime | *.onnx |
+ | ONNX OpenCV DNN | *.onnx (dnn=True) |
+ | OpenVINO | *openvino_model/ |
+ | CoreML | *.mlpackage |
+ | TensorRT | *.engine |
+ | TensorFlow SavedModel | *_saved_model/ |
+ | TensorFlow GraphDef | *.pb |
+ | TensorFlow Lite | *.tflite |
+ | TensorFlow Edge TPU | *_edgetpu.tflite |
+ | PaddlePaddle | *_paddle_model/ |
+ | MNN | *.mnn |
+ | NCNN | *_ncnn_model/ |
+ | IMX | *_imx_model/ |
+ | RKNN | *_rknn_model/ |
+ | Triton Inference | triton://model |
+ | ExecuTorch | *.pte |
+ | Axelera | *_axelera_model/ |
+
+ Attributes:
+ backend (BaseBackend): The loaded inference backend instance.
+ format (str): The model format (e.g., 'pt', 'onnx', 'engine').
+ model: The underlying model (nn.Module for PyTorch backends, backend instance otherwise).
+ device (torch.device): The device (CPU or GPU) on which the model is loaded.
+ task (str): The type of task the model performs (detect, segment, classify, pose).
+ names (dict): A dictionary of class names that the model can detect.
+ stride (int): The model stride, typically 32 for YOLO models.
+ fp16 (bool): Whether the model uses half-precision (FP16) inference.
+ nhwc (bool): Whether the model expects NHWC input format instead of NCHW.
+
+ Methods:
+ forward: Run inference on an input image.
+ from_numpy: Convert NumPy arrays to tensors on the model device.
+ warmup: Warm up the model with a dummy input.
+ _model_type: Determine the model type from file path.
+
+ Examples:
+ >>> model = AutoBackend(model="yolo26n.pt", device="cuda")
+ >>> results = model(img)
+ """
_BACKEND_MAP = {
"pt": PyTorchBackend,
@@ -98,6 +166,17 @@ fuse: bool = True,
verbose: bool = True,
):
+ """Initialize the AutoBackend for inference.
+
+ Args:
+ model (str | torch.nn.Module): Path to the model weights file or a module instance.
+ device (torch.device): Device to run the model on.
+ dnn (bool): Use OpenCV DNN module for ONNX inference.
+ data (str | Path, optional): Path to the additional data.yaml file containing class names.
+ fp16 (bool): Enable half-precision inference. Supported only on specific backends.
+ fuse (bool): Fuse Conv2D + BatchNorm layers for optimization.
+ verbose (bool): Enable verbose logging.
+ """
super().__init__()
# Determine model format from path/URL
format = "pt" if isinstance(model, nn.Module) else self._model_type(model, dnn)
@@ -143,6 +222,20 @@ self.backend.names = check_class_names(self.backend.names)
def __getattr__(self, name: str) -> Any:
+ """Delegate attribute access to the backend.
+
+ This allows AutoBackend to transparently expose backend attributes
+ without explicit copying.
+
+ Args:
+ name (str): Attribute name to look up.
+
+ Returns:
+ (Any): The attribute value from the backend.
+
+ Raises:
+ AttributeError: If the attribute is not found in the backend.
+ """
if "backend" in self.__dict__ and hasattr(self.backend, name):
return getattr(self.backend, name)
return super().__getattr__(name)
@@ -155,6 +248,18 @@ embed: list | None = None,
**kwargs: Any,
) -> torch.Tensor | list[torch.Tensor]:
+ """Run inference on an AutoBackend model.
+
+ Args:
+ im (torch.Tensor): The image tensor to perform inference on.
+ augment (bool): Whether to perform data augmentation during inference.
+ visualize (bool): Whether to visualize the output predictions.
+ embed (list, optional): A list of layer indices to return embeddings from.
+ **kwargs (Any): Additional keyword arguments for model configuration.
+
+ Returns:
+ (torch.Tensor | list[torch.Tensor]): The raw output tensor(s) from the model.
+ """
if self.nhwc:
im = im.permute(0, 2, 3, 1) # torch BCHW to numpy BHWC shape(1,320,192,3)
if self.backend.fp16 and im.dtype != torch.float16:
@@ -176,9 +281,22 @@ return self.from_numpy(y)
def from_numpy(self, x: np.ndarray | torch.Tensor) -> torch.Tensor:
+ """Convert a NumPy array to a torch tensor on the model device.
+
+ Args:
+ x (np.ndarray | torch.Tensor): Input array or tensor.
+
+ Returns:
+ (torch.Tensor): Tensor on `self.device`.
+ """
return torch.tensor(x).to(self.device) if isinstance(x, np.ndarray) else x
def warmup(self, imgsz: tuple[int, int, int, int] = (1, 3, 640, 640)) -> None:
+ """Warm up the model by running forward pass(es) with a dummy input.
+
+ Args:
+ imgsz (tuple[int, int, int, int]): Dummy input shape in (batch, channels, height, width) format.
+ """
from ultralytics.utils.nms import non_max_suppression
if self.format in {"pt", "torchscript", "onnx", "engine", "saved_model", "pb", "triton"} and (
@@ -193,6 +311,19 @@
@staticmethod
def _model_type(p: str = "path/to/model.pt", dnn: bool = False) -> str:
+ """Take a path to a model file and return the model format string.
+
+ Args:
+ p (str): Path to the model file.
+ dnn (bool): Whether to use OpenCV DNN module for ONNX inference.
+
+ Returns:
+ (str): Model format string (e.g., 'pt', 'onnx', 'engine', 'triton').
+
+ Examples:
+ >>> fmt = AutoBackend._model_type("path/to/model.onnx")
+ >>> assert fmt == "onnx"
+ """
from ultralytics.engine.exporter import export_formats
sf = export_formats()["Suffix"]
@@ -216,13 +347,27 @@ return format
def eval(self) -> AutoBackend:
+ """Set the backend model to evaluation mode if supported."""
if hasattr(self.backend, "model") and hasattr(self.backend.model, "eval"):
self.backend.model.eval()
return super().eval()
def _apply(self, fn) -> AutoBackend:
+ """Apply a function to backend.model parameters, buffers, and tensors.
+
+ This method extends the parent class's _apply by also applying the function to the wrapped backend model and
+ refreshing the backend device afterwards. It's typically used for operations like moving the model to a
+ different device or changing its precision.
+
+ Args:
+ fn (Callable): A function to be applied to the model's tensors. This is typically a method like to(), cpu(),
+ cuda(), half(), or float().
+
+ Returns:
+ (AutoBackend): The model instance with the function applied and updated attributes.
+ """
self = super()._apply(fn)
if hasattr(self.backend, "model") and isinstance(self.backend.model, nn.Module):
self.backend.model._apply(fn)
self.backend.device = next(self.backend.model.parameters()).device # update device after move
- return self
+ return self
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/nn/autobackend.py |
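Based on the AutoBackend constructor, forward, and warmup signatures shown above, here is a minimal CPU inference sketch; the weights path is illustrative and must exist locally (or be downloadable) for this to run.
import torch
from ultralytics.nn.autobackend import AutoBackend
model = AutoBackend(model="yolo26n.pt", device=torch.device("cpu"), fp16=False)  # PyTorch backend
model.eval()
model.warmup(imgsz=(1, 3, 640, 640))  # effectively a no-op on CPU for .pt models, useful on GPU/Triton
im = torch.zeros(1, 3, 640, 640)  # BCHW float input in [0, 1]
preds = model(im)  # raw head outputs; structure depends on task and export format
print(model.names[0])  # class-index -> name mapping resolved by check_class_names()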
Write docstrings for utility functions | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from itertools import cycle
from typing import Any
import cv2
import numpy as np
from ultralytics.solutions.solutions import BaseSolution, SolutionResults # Import a parent class
from ultralytics.utils import plt_settings
class Analytics(BaseSolution):
@plt_settings()
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
import matplotlib.pyplot as plt # scope for faster 'import ultralytics'
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.figure import Figure
self.type = self.CFG["analytics_type"] # Chart type: "line", "pie", "bar", or "area".
self.x_label = "Classes" if self.type in {"bar", "pie"} else "Frame#"
self.y_label = "Total Counts"
# Predefined data
self.bg_color = "#F3F3F3" # background color of frame
self.fg_color = "#111E68" # foreground color of frame
self.title = "Ultralytics Solutions" # window name
self.max_points = 45 # maximum points to be drawn on window
self.fontsize = 25 # text font size for display
figsize = self.CFG["figsize"] # Output size, e.g. (12.8, 7.2) -> 1280x720.
self.color_cycle = cycle(["#DD00BA", "#042AFF", "#FF4447", "#7D24FF", "#BD00FF"])
self.total_counts = 0 # Stores total counts for line charts.
self.clswise_count = {} # dictionary for class-wise counts
self.update_every = kwargs.get("update_every", 30) # Only update graph every 30 frames by default
self.last_plot_im = None # Cache of the last rendered chart
# Ensure line and area chart
if self.type in {"line", "area"}:
self.lines = {}
self.fig = Figure(facecolor=self.bg_color, figsize=figsize)
self.canvas = FigureCanvasAgg(self.fig) # Set common axis properties
self.ax = self.fig.add_subplot(111, facecolor=self.bg_color)
if self.type == "line":
(self.line,) = self.ax.plot([], [], color="cyan", linewidth=self.line_width)
elif self.type in {"bar", "pie"}:
# Initialize bar or pie plot
self.fig, self.ax = plt.subplots(figsize=figsize, facecolor=self.bg_color)
self.canvas = FigureCanvasAgg(self.fig) # Set common axis properties
self.ax.set_facecolor(self.bg_color)
self.color_mapping = {}
if self.type == "pie": # Ensure pie chart is circular
self.ax.axis("equal")
def process(self, im0: np.ndarray, frame_number: int) -> SolutionResults:
self.extract_tracks(im0) # Extract tracks
if self.type == "line":
for _ in self.boxes:
self.total_counts += 1
update_required = frame_number % self.update_every == 0 or self.last_plot_im is None
if update_required:
self.last_plot_im = self.update_graph(frame_number=frame_number)
plot_im = self.last_plot_im
self.total_counts = 0
elif self.type in {"pie", "bar", "area"}:
from collections import Counter
self.clswise_count = Counter(self.names[int(cls)] for cls in self.clss)
update_required = frame_number % self.update_every == 0 or self.last_plot_im is None
if update_required:
self.last_plot_im = self.update_graph(
frame_number=frame_number, count_dict=self.clswise_count, plot=self.type
)
plot_im = self.last_plot_im
else:
raise ValueError(f"Unsupported analytics_type='{self.type}'. Supported types: line, bar, pie, area.")
# Return results for downstream use.
return SolutionResults(plot_im=plot_im, total_tracks=len(self.track_ids), classwise_count=self.clswise_count)
def update_graph(
self, frame_number: int, count_dict: dict[str, int] | None = None, plot: str = "line"
) -> np.ndarray:
if count_dict is None:
# Single line update
x_data = np.append(self.line.get_xdata(), float(frame_number))
y_data = np.append(self.line.get_ydata(), float(self.total_counts))
if len(x_data) > self.max_points:
x_data, y_data = x_data[-self.max_points :], y_data[-self.max_points :]
self.line.set_data(x_data, y_data)
self.line.set_label("Counts")
self.line.set_color("#7b0068") # Pink color
self.line.set_marker("*")
self.line.set_markersize(self.line_width * 5)
else:
labels = list(count_dict.keys())
counts = list(count_dict.values())
if plot == "area":
color_cycle = cycle(["#DD00BA", "#042AFF", "#FF4447", "#7D24FF", "#BD00FF"])
# Multiple lines or area update
x_data = self.ax.lines[0].get_xdata() if self.ax.lines else np.array([])
y_data_dict = {key: np.array([]) for key in count_dict.keys()}
if self.ax.lines:
for line, key in zip(self.ax.lines, count_dict.keys()):
y_data_dict[key] = line.get_ydata()
x_data = np.append(x_data, float(frame_number))
max_length = len(x_data)
for key in count_dict.keys():
y_data_dict[key] = np.append(y_data_dict[key], float(count_dict[key]))
if len(y_data_dict[key]) < max_length:
y_data_dict[key] = np.pad(y_data_dict[key], (0, max_length - len(y_data_dict[key])))
if len(x_data) > self.max_points:
x_data = x_data[1:]
for key in count_dict.keys():
y_data_dict[key] = y_data_dict[key][1:]
self.ax.clear()
for key, y_data in y_data_dict.items():
color = next(color_cycle)
self.ax.fill_between(x_data, y_data, color=color, alpha=0.55)
self.ax.plot(
x_data,
y_data,
color=color,
linewidth=self.line_width,
marker="o",
markersize=self.line_width * 5,
label=f"{key} Data Points",
)
elif plot == "bar":
self.ax.clear() # clear bar data
for label in labels: # Map labels to colors
if label not in self.color_mapping:
self.color_mapping[label] = next(self.color_cycle)
colors = [self.color_mapping[label] for label in labels]
bars = self.ax.bar(labels, counts, color=colors)
for bar, count in zip(bars, counts):
self.ax.text(
bar.get_x() + bar.get_width() / 2,
bar.get_height(),
str(count),
ha="center",
va="bottom",
color=self.fg_color,
)
# Create the legend using labels from the bars
for bar, label in zip(bars, labels):
bar.set_label(label) # Assign label to each bar
self.ax.legend(loc="upper left", fontsize=13, facecolor=self.fg_color, edgecolor=self.fg_color)
elif plot == "pie":
total = sum(counts)
percentages = [size / total * 100 for size in counts]
self.ax.clear()
start_angle = 90
# Create pie chart and create legend labels with percentages
wedges, _ = self.ax.pie(
counts, labels=labels, startangle=start_angle, textprops={"color": self.fg_color}, autopct=None
)
legend_labels = [f"{label} ({percentage:.1f}%)" for label, percentage in zip(labels, percentages)]
# Assign the legend using the wedges and manually created labels
self.ax.legend(wedges, legend_labels, title="Classes", loc="center left", bbox_to_anchor=(1, 0, 0.5, 1))
self.fig.subplots_adjust(left=0.1, right=0.75) # Adjust layout to fit the legend
# Common plot settings
self.ax.set_facecolor("#f0f0f0") # Set to light gray or any other color you like
self.ax.grid(True, linestyle="--", linewidth=0.5, alpha=0.5) # Display grid for more data insights
self.ax.set_title(self.title, color=self.fg_color, fontsize=self.fontsize)
self.ax.set_xlabel(self.x_label, color=self.fg_color, fontsize=self.fontsize - 3)
self.ax.set_ylabel(self.y_label, color=self.fg_color, fontsize=self.fontsize - 3)
# Add and format legend
legend = self.ax.legend(loc="upper left", fontsize=13, facecolor=self.bg_color, edgecolor=self.bg_color)
for text in legend.get_texts():
text.set_color(self.fg_color)
# Redraw graph, update view, capture, and display the updated plot
self.ax.relim()
self.ax.autoscale_view()
self.canvas.draw()
im0 = np.array(self.canvas.renderer.buffer_rgba())
im0 = cv2.cvtColor(im0[:, :, :3], cv2.COLOR_RGBA2BGR)
self.display_output(im0)
return im0 # Return the image | --- +++ @@ -13,9 +13,43 @@
class Analytics(BaseSolution):
+ """A class for creating and updating various types of charts for visual analytics.
+
+ This class extends BaseSolution to provide functionality for generating line, bar, pie, and area charts based on
+ object detection and tracking data.
+
+ Attributes:
+ type (str): The type of analytics chart to generate ('line', 'bar', 'pie', or 'area').
+ x_label (str): Label for the x-axis.
+ y_label (str): Label for the y-axis.
+ bg_color (str): Background color of the chart frame.
+ fg_color (str): Foreground color of the chart frame.
+ title (str): Title of the chart window.
+ max_points (int): Maximum number of data points to display on the chart.
+ fontsize (int): Font size for text display.
+ color_cycle (cycle): Cyclic iterator for chart colors.
+ total_counts (int): Total count of detected objects (used for line charts).
+ clswise_count (dict[str, int]): Dictionary for class-wise object counts.
+ fig (Figure): Matplotlib figure object for the chart.
+ ax (Axes): Matplotlib axes object for the chart.
+ canvas (FigureCanvasAgg): Canvas for rendering the chart.
+ lines (dict): Dictionary to store line objects for area charts.
+ color_mapping (dict[str, str]): Dictionary mapping class labels to colors for consistent visualization.
+
+ Methods:
+ process: Process image data and update the chart.
+ update_graph: Update the chart with new data points.
+
+ Examples:
+ >>> analytics = Analytics(analytics_type="line")
+ >>> frame = cv2.imread("image.jpg")
+ >>> results = analytics.process(frame, frame_number=1)
+ >>> cv2.imshow("Analytics", results.plot_im)
+ """
@plt_settings()
def __init__(self, **kwargs: Any) -> None:
+ """Initialize Analytics class with various chart types for visual data representation."""
super().__init__(**kwargs)
import matplotlib.pyplot as plt # scope for faster 'import ultralytics'
@@ -59,6 +93,24 @@ self.ax.axis("equal")
def process(self, im0: np.ndarray, frame_number: int) -> SolutionResults:
+ """Process image data and run object tracking to update analytics charts.
+
+ Args:
+ im0 (np.ndarray): Input image for processing.
+ frame_number (int): Video frame number for plotting the data.
+
+ Returns:
+ (SolutionResults): Contains processed image `plot_im`, 'total_tracks' (int, total number of tracked objects)
+ and 'classwise_count' (dict, per-class object count).
+
+ Raises:
+ ValueError: If an unsupported chart type is specified.
+
+ Examples:
+ >>> analytics = Analytics(analytics_type="line")
+ >>> frame = np.zeros((480, 640, 3), dtype=np.uint8)
+ >>> results = analytics.process(frame, frame_number=1)
+ """
self.extract_tracks(im0) # Extract tracks
if self.type == "line":
for _ in self.boxes:
@@ -87,6 +139,23 @@ def update_graph(
self, frame_number: int, count_dict: dict[str, int] | None = None, plot: str = "line"
) -> np.ndarray:
+ """Update the graph with new data for single or multiple classes.
+
+ Args:
+ frame_number (int): The current frame number.
+ count_dict (dict[str, int], optional): Dictionary with class names as keys and counts as values for multiple
+ classes. If None, updates a single line graph.
+ plot (str): Type of the plot. Options are 'line', 'bar', 'pie', or 'area'.
+
+ Returns:
+ (np.ndarray): Updated image containing the graph.
+
+ Examples:
+ >>> analytics = Analytics(analytics_type="bar")
+ >>> frame_num = 10
+ >>> results_dict = {"person": 5, "car": 3}
+ >>> updated_image = analytics.update_graph(frame_num, results_dict, plot="bar")
+ """
if count_dict is None:
# Single line update
x_data = np.append(self.line.get_xdata(), float(frame_number))
@@ -192,4 +261,4 @@ im0 = cv2.cvtColor(im0[:, :, :3], cv2.COLOR_RGBA2BGR)
self.display_output(im0)
- return im0 # Return the image
+ return im0 # Return the image
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/solutions/analytics.py |
Create docstrings for each class method | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
import math
from typing import Any
import cv2
from ultralytics.solutions.solutions import BaseSolution, SolutionAnnotator, SolutionResults
from ultralytics.utils.plotting import colors
class DistanceCalculation(BaseSolution):
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
# Mouse event information
self.left_mouse_count = 0
self.selected_boxes: dict[int, list[float]] = {}
self.centroids: list[list[int]] = [] # Store centroids of selected objects
def mouse_event_for_distance(self, event: int, x: int, y: int, flags: int, param: Any) -> None:
if event == cv2.EVENT_LBUTTONDOWN:
self.left_mouse_count += 1
if self.left_mouse_count <= 2:
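# Select the clicked box; at most two objects can be selected for distance measurement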
for box, track_id in zip(self.boxes, self.track_ids):
if box[0] < x < box[2] and box[1] < y < box[3] and track_id not in self.selected_boxes:
self.selected_boxes[track_id] = box
elif event == cv2.EVENT_RBUTTONDOWN:
self.selected_boxes = {}
self.left_mouse_count = 0
def process(self, im0) -> SolutionResults:
self.extract_tracks(im0) # Extract tracks
annotator = SolutionAnnotator(im0, line_width=self.line_width) # Initialize annotator
pixels_distance = 0
# Iterate over bounding boxes, track ids and classes index
for box, track_id, cls, conf in zip(self.boxes, self.track_ids, self.clss, self.confs):
annotator.box_label(box, color=colors(int(cls), True), label=self.adjust_box_label(cls, conf, track_id))
# Update selected boxes if they're being tracked
if len(self.selected_boxes) == 2:
for trk_id in self.selected_boxes.keys():
if trk_id == track_id:
self.selected_boxes[track_id] = box
if len(self.selected_boxes) == 2:
# Calculate centroids of selected boxes
self.centroids.extend(
[[int((box[0] + box[2]) // 2), int((box[1] + box[3]) // 2)] for box in self.selected_boxes.values()]
)
# Calculate Euclidean distance between centroids
pixels_distance = math.sqrt(
(self.centroids[0][0] - self.centroids[1][0]) ** 2 + (self.centroids[0][1] - self.centroids[1][1]) ** 2
)
annotator.plot_distance_and_line(pixels_distance, self.centroids)
self.centroids = [] # Reset centroids for next frame
plot_im = annotator.result()
self.display_output(plot_im) # Display output with base class function
if self.CFG.get("show") and self.env_check:
cv2.setMouseCallback("Ultralytics Solutions", self.mouse_event_for_distance)
# Return SolutionResults with processed image and calculated metrics
return SolutionResults(plot_im=plot_im, pixels_distance=pixels_distance, total_tracks=len(self.track_ids)) | --- +++ @@ -10,8 +10,30 @@
class DistanceCalculation(BaseSolution):
+ """A class to calculate distance between two objects in a real-time video stream based on their tracks.
+
+ This class extends BaseSolution to provide functionality for selecting objects and calculating the distance between
+ them in a video stream using YOLO object detection and tracking.
+
+ Attributes:
+ left_mouse_count (int): Counter for left mouse button clicks.
+ selected_boxes (dict[int, list[float]]): Dictionary to store selected bounding boxes keyed by track ID.
+ centroids (list[list[int]]): List to store centroids of selected bounding boxes.
+
+ Methods:
+ mouse_event_for_distance: Handle mouse events for selecting objects in the video stream.
+ process: Process video frames and calculate the distance between selected objects.
+
+ Examples:
+ >>> distance_calc = DistanceCalculation()
+ >>> frame = cv2.imread("frame.jpg")
+ >>> results = distance_calc.process(frame)
+ >>> cv2.imshow("Distance Calculation", results.plot_im)
+ >>> cv2.waitKey(0)
+ """
def __init__(self, **kwargs: Any) -> None:
+ """Initialize the DistanceCalculation class for measuring object distances in video streams."""
super().__init__(**kwargs)
# Mouse event information
@@ -20,6 +42,19 @@ self.centroids: list[list[int]] = [] # Store centroids of selected objects
def mouse_event_for_distance(self, event: int, x: int, y: int, flags: int, param: Any) -> None:
+ """Handle mouse events to select regions in a real-time video stream for distance calculation.
+
+ Args:
+ event (int): Type of mouse event (e.g., cv2.EVENT_MOUSEMOVE, cv2.EVENT_LBUTTONDOWN).
+ x (int): X-coordinate of the mouse pointer.
+ y (int): Y-coordinate of the mouse pointer.
+ flags (int): Flags associated with the event (e.g., cv2.EVENT_FLAG_CTRLKEY, cv2.EVENT_FLAG_SHIFTKEY).
+ param (Any): Additional parameters passed to the function.
+
+ Examples:
+ >>> # Assuming 'dc' is an instance of DistanceCalculation
+ >>> cv2.setMouseCallback("window_name", dc.mouse_event_for_distance)
+ """
if event == cv2.EVENT_LBUTTONDOWN:
self.left_mouse_count += 1
if self.left_mouse_count <= 2:
@@ -32,6 +67,27 @@ self.left_mouse_count = 0
def process(self, im0) -> SolutionResults:
+ """Process a video frame and calculate the distance between two selected bounding boxes.
+
+ This method extracts tracks from the input frame, annotates bounding boxes, and calculates the distance between
+ two user-selected objects if they have been chosen.
+
+ Args:
+ im0 (np.ndarray): The input image frame to process.
+
+ Returns:
+ (SolutionResults): Contains processed image `plot_im`, `total_tracks` (int) representing the total number of
+ tracked objects, and `pixels_distance` (float) representing the distance between selected objects
+ in pixels.
+
+ Examples:
+ >>> import numpy as np
+ >>> from ultralytics.solutions import DistanceCalculation
+ >>> dc = DistanceCalculation()
+ >>> frame = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
+ >>> results = dc.process(frame)
+ >>> print(f"Distance: {results.pixels_distance:.2f} pixels")
+ """
self.extract_tracks(im0) # Extract tracks
annotator = SolutionAnnotator(im0, line_width=self.line_width) # Initialize annotator
@@ -64,4 +120,4 @@ cv2.setMouseCallback("Ultralytics Solutions", self.mouse_event_for_distance)
# Return SolutionResults with processed image and calculated metrics
- return SolutionResults(plot_im=plot_im, pixels_distance=pixels_distance, total_tracks=len(self.track_ids))
+ return SolutionResults(plot_im=plot_im, pixels_distance=pixels_distance, total_tracks=len(self.track_ids))
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/solutions/distance_calculation.py |
Create docstrings for all classes and functions | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from typing import Any
import cv2
import numpy as np
from ultralytics.solutions.object_counter import ObjectCounter
from ultralytics.solutions.solutions import SolutionAnnotator, SolutionResults
class Heatmap(ObjectCounter):
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.initialized = False # Flag for heatmap initialization
if self.region is not None: # Check if user provided the region coordinates
self.initialize_region()
# Store colormap
self.colormap = self.CFG["colormap"]
self.heatmap = None
def heatmap_effect(self, box: list[float]) -> None:
x0, y0, x1, y1 = map(int, box)
radius_squared = (min(x1 - x0, y1 - y0) // 2) ** 2
# Create a meshgrid with region of interest (ROI) for vectorized distance calculations
xv, yv = np.meshgrid(np.arange(x0, x1), np.arange(y0, y1))
# Calculate squared distances from the center
dist_squared = (xv - ((x0 + x1) // 2)) ** 2 + (yv - ((y0 + y1) // 2)) ** 2
# Create a mask of points within the radius
within_radius = dist_squared <= radius_squared
# Update only the values within the bounding box in a single vectorized operation
self.heatmap[y0:y1, x0:x1][within_radius] += 2
def process(self, im0: np.ndarray) -> SolutionResults:
if not self.initialized:
self.heatmap = np.zeros_like(im0, dtype=np.float32) * 0.99
self.initialized = True # Initialize heatmap only once
self.extract_tracks(im0) # Extract tracks
self.annotator = SolutionAnnotator(im0, line_width=self.line_width) # Initialize annotator
# Iterate over bounding boxes, track ids and classes index
for box, track_id, cls in zip(self.boxes, self.track_ids, self.clss):
# Apply heatmap effect for the bounding box
self.heatmap_effect(box)
if self.region is not None:
self.annotator.draw_region(reg_pts=self.region, color=(104, 0, 123), thickness=self.line_width * 2)
self.store_tracking_history(track_id, box) # Store track history
# Get previous position if available
prev_position = None
if len(self.track_history[track_id]) > 1:
prev_position = self.track_history[track_id][-2]
self.count_objects(self.track_history[track_id][-1], track_id, prev_position, cls) # object counting
plot_im = self.annotator.result()
if self.region is not None:
self.display_counts(plot_im) # Display the counts on the frame
# Normalize, apply colormap to heatmap and combine with original image
if self.track_data.is_track:
normalized_heatmap = cv2.normalize(self.heatmap, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
colored_heatmap = cv2.applyColorMap(normalized_heatmap, self.colormap)
plot_im = cv2.addWeighted(plot_im, 0.5, colored_heatmap, 0.5, 0)
self.display_output(plot_im) # Display output with base class function
# Return SolutionResults
return SolutionResults(
plot_im=plot_im,
in_count=self.in_count,
out_count=self.out_count,
classwise_count=dict(self.classwise_count),
total_tracks=len(self.track_ids),
) | --- +++ @@ -12,8 +12,34 @@
class Heatmap(ObjectCounter):
+ """A class to draw heatmaps in real-time video streams based on object tracks.
+
+ This class extends the ObjectCounter class to generate and visualize heatmaps of object movements in video
+ streams. It uses tracked object positions to create a cumulative heatmap effect over time.
+
+ Attributes:
+ initialized (bool): Flag indicating whether the heatmap has been initialized.
+ colormap (int): OpenCV colormap used for heatmap visualization.
+ heatmap (np.ndarray): Array storing the cumulative heatmap data.
+ annotator (SolutionAnnotator): Object for drawing annotations on the image.
+
+ Methods:
+ heatmap_effect: Calculate and update the heatmap effect for a given bounding box.
+ process: Generate and apply the heatmap effect to each frame.
+
+ Examples:
+ >>> from ultralytics.solutions import Heatmap
+ >>> heatmap = Heatmap(model="yolo26n.pt", colormap=cv2.COLORMAP_JET)
+ >>> frame = cv2.imread("frame.jpg")
+ >>> processed_frame = heatmap.process(frame)
+ """
def __init__(self, **kwargs: Any) -> None:
+ """Initialize the Heatmap class for real-time video stream heatmap generation based on object tracks.
+
+ Args:
+ **kwargs (Any): Keyword arguments passed to the parent ObjectCounter class.
+ """
super().__init__(**kwargs)
self.initialized = False # Flag for heatmap initialization
@@ -25,6 +51,11 @@ self.heatmap = None
def heatmap_effect(self, box: list[float]) -> None:
+ """Efficiently calculate heatmap area and effect location for applying colormap.
+
+ Args:
+ box (list[float]): Bounding box coordinates [x0, y0, x1, y1].
+ """
x0, y0, x1, y1 = map(int, box)
radius_squared = (min(x1 - x0, y1 - y0) // 2) ** 2
@@ -41,6 +72,16 @@ self.heatmap[y0:y1, x0:x1][within_radius] += 2
def process(self, im0: np.ndarray) -> SolutionResults:
+ """Generate heatmap for each frame using Ultralytics tracking.
+
+ Args:
+ im0 (np.ndarray): Input image array for processing.
+
+ Returns:
+ (SolutionResults): Contains processed image `plot_im`, 'in_count' (int, count of objects entering the
+ region), 'out_count' (int, count of objects exiting the region), 'classwise_count' (dict, per-class
+ object count), and 'total_tracks' (int, total number of tracked objects).
+ """
if not self.initialized:
self.heatmap = np.zeros_like(im0, dtype=np.float32) * 0.99
self.initialized = True # Initialize heatmap only once
@@ -81,4 +122,4 @@ out_count=self.out_count,
classwise_count=dict(self.classwise_count),
total_tracks=len(self.track_ids),
- )
+ )
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/solutions/heatmap.py |
Provide clean and structured docstrings | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
import contextlib
import pickle
import re
import types
from copy import deepcopy
from pathlib import Path
import torch
import torch.nn as nn
from ultralytics.nn.autobackend import check_class_names
from ultralytics.nn.modules import (
AIFI,
C1,
C2,
C2PSA,
C3,
C3TR,
ELAN1,
OBB,
OBB26,
PSA,
SPP,
SPPELAN,
SPPF,
A2C2f,
AConv,
ADown,
Bottleneck,
BottleneckCSP,
C2f,
C2fAttn,
C2fCIB,
C2fPSA,
C3Ghost,
C3k2,
C3x,
CBFuse,
CBLinear,
Classify,
Concat,
Conv,
Conv2,
ConvTranspose,
Detect,
DWConv,
DWConvTranspose2d,
Focus,
GhostBottleneck,
GhostConv,
HGBlock,
HGStem,
ImagePoolingAttn,
Index,
LRPCHead,
Pose,
Pose26,
RepC3,
RepConv,
RepNCSPELAN4,
RepVGGDW,
ResNetLayer,
RTDETRDecoder,
SCDown,
Segment,
Segment26,
TorchVision,
WorldDetect,
YOLOEDetect,
YOLOESegment,
YOLOESegment26,
v10Detect,
)
from ultralytics.utils import DEFAULT_CFG_DICT, LOGGER, WINDOWS, YAML, colorstr, emojis
from ultralytics.utils.checks import check_requirements, check_suffix, check_yaml
from ultralytics.utils.loss import (
E2ELoss,
PoseLoss26,
v8ClassificationLoss,
v8DetectionLoss,
v8OBBLoss,
v8PoseLoss,
v8SegmentationLoss,
)
from ultralytics.utils.ops import make_divisible
from ultralytics.utils.patches import torch_load
from ultralytics.utils.plotting import feature_visualization
from ultralytics.utils.torch_utils import (
fuse_conv_and_bn,
fuse_deconv_and_bn,
initialize_weights,
intersect_dicts,
model_info,
scale_img,
smart_inference_mode,
time_sync,
)
class BaseModel(torch.nn.Module):
def forward(self, x, *args, **kwargs):
if isinstance(x, dict): # for cases of training and validating while training.
return self.loss(x, *args, **kwargs)
return self.predict(x, *args, **kwargs)
def predict(self, x, profile=False, visualize=False, augment=False, embed=None):
if augment:
return self._predict_augment(x)
return self._predict_once(x, profile, visualize, embed)
def _predict_once(self, x, profile=False, visualize=False, embed=None):
y, dt, embeddings = [], [], [] # outputs
embed = frozenset(embed) if embed is not None else {-1}
max_idx = max(embed)
for m in self.model:
if m.f != -1: # if not from previous layer
x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers
if profile:
self._profile_one_layer(m, x, dt)
x = m(x) # run
y.append(x if m.i in self.save else None) # save output
if visualize:
feature_visualization(x, m.type, m.i, save_dir=visualize)
if m.i in embed:
embeddings.append(torch.nn.functional.adaptive_avg_pool2d(x, (1, 1)).squeeze(-1).squeeze(-1)) # flatten
if m.i == max_idx:
return torch.unbind(torch.cat(embeddings, 1), dim=0)
return x
def _predict_augment(self, x):
LOGGER.warning(
f"{self.__class__.__name__} does not support 'augment=True' prediction. "
f"Reverting to single-scale prediction."
)
return self._predict_once(x)
def _profile_one_layer(self, m, x, dt):
try:
import thop
except ImportError:
thop = None # conda support without 'ultralytics-thop' installed
c = m == self.model[-1] and isinstance(x, list) # is final layer list, copy input as inplace fix
flops = thop.profile(m, inputs=[x.copy() if c else x], verbose=False)[0] / 1e9 * 2 if thop else 0 # GFLOPs
t = time_sync()
for _ in range(10):
m(x.copy() if c else x)
dt.append((time_sync() - t) * 100)
if m == self.model[0]:
LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} module")
LOGGER.info(f"{dt[-1]:10.2f} {flops:10.2f} {m.np:10.0f} {m.type}")
if c:
LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total")
def fuse(self, verbose=True):
if not self.is_fused():
for m in self.model.modules():
if isinstance(m, (Conv, Conv2, DWConv)) and hasattr(m, "bn"):
if isinstance(m, Conv2):
m.fuse_convs()
m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv
delattr(m, "bn") # remove batchnorm
m.forward = m.forward_fuse # update forward
if isinstance(m, ConvTranspose) and hasattr(m, "bn"):
m.conv_transpose = fuse_deconv_and_bn(m.conv_transpose, m.bn)
delattr(m, "bn") # remove batchnorm
m.forward = m.forward_fuse # update forward
if isinstance(m, RepConv):
m.fuse_convs()
m.forward = m.forward_fuse # update forward
if isinstance(m, RepVGGDW):
m.fuse()
m.forward = m.forward_fuse
if isinstance(m, Detect) and getattr(m, "end2end", False):
m.fuse() # remove one2many head
self.info(verbose=verbose)
return self
def is_fused(self, thresh=10):
bn = tuple(v for k, v in torch.nn.__dict__.items() if "Norm" in k) # normalization layers, i.e. BatchNorm2d()
return sum(isinstance(v, bn) for v in self.modules()) < thresh # True if < 'thresh' BatchNorm layers in model
def info(self, detailed=False, verbose=True, imgsz=640):
return model_info(self, detailed=detailed, verbose=verbose, imgsz=imgsz)
def _apply(self, fn):
self = super()._apply(fn)
m = self.model[-1] # Detect()
if isinstance(
m, Detect
): # includes all Detect subclasses like Segment, Pose, OBB, WorldDetect, YOLOEDetect, YOLOESegment
m.stride = fn(m.stride)
m.anchors = fn(m.anchors)
m.strides = fn(m.strides)
return self
def load(self, weights, verbose=True):
model = weights["model"] if isinstance(weights, dict) else weights # torchvision models are not dicts
csd = model.float().state_dict() # checkpoint state_dict as FP32
updated_csd = intersect_dicts(csd, self.state_dict()) # intersect
self.load_state_dict(updated_csd, strict=False) # load
len_updated_csd = len(updated_csd)
first_conv = "model.0.conv.weight" # hard-coded to yolo models for now
# mostly used to boost multi-channel training
state_dict = self.state_dict()
if first_conv not in updated_csd and first_conv in state_dict:
c1, c2, h, w = state_dict[first_conv].shape
cc1, cc2, ch, cw = csd[first_conv].shape
if ch == h and cw == w:
c1, c2 = min(c1, cc1), min(c2, cc2)
state_dict[first_conv][:c1, :c2] = csd[first_conv][:c1, :c2]
len_updated_csd += 1
if verbose:
LOGGER.info(f"Transferred {len_updated_csd}/{len(self.model.state_dict())} items from pretrained weights")
def loss(self, batch, preds=None):
if getattr(self, "criterion", None) is None:
self.criterion = self.init_criterion()
if preds is None:
preds = self.forward(batch["img"])
return self.criterion(preds, batch)
def init_criterion(self):
raise NotImplementedError("compute_loss() needs to be implemented by task heads")
class DetectionModel(BaseModel):
def __init__(self, cfg="yolo26n.yaml", ch=3, nc=None, verbose=True):
super().__init__()
self.yaml = cfg if isinstance(cfg, dict) else yaml_model_load(cfg) # cfg dict
if self.yaml["backbone"][0][2] == "Silence":
LOGGER.warning(
"YOLOv9 `Silence` module is deprecated in favor of torch.nn.Identity. "
"Please delete local *.pt file and re-download the latest model checkpoint."
)
self.yaml["backbone"][0][2] = "nn.Identity"
# Define model
self.yaml["channels"] = ch # save channels
if nc and nc != self.yaml["nc"]:
LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
self.yaml["nc"] = nc # override YAML value
self.model, self.save = parse_model(deepcopy(self.yaml), ch=ch, verbose=verbose) # model, savelist
self.names = {i: f"{i}" for i in range(self.yaml["nc"])} # default names dict
self.inplace = self.yaml.get("inplace", True)
# Build strides
m = self.model[-1] # Detect()
if isinstance(m, Detect): # includes all Detect subclasses like Segment, Pose, OBB, YOLOEDetect, YOLOESegment
s = 256 # 2x min stride
m.inplace = self.inplace
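# The dummy forward pass below is used only to measure output strides; end2end heads nest training outputs under 'one2many'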
def _forward(x):
output = self.forward(x)
if self.end2end:
output = output["one2many"]
return output["feats"]
self.model.eval() # Avoid changing batch statistics until training begins
m.training = True # Setting it to True to properly return strides
m.stride = torch.tensor([s / x.shape[-2] for x in _forward(torch.zeros(1, ch, s, s))]) # forward
self.stride = m.stride
self.model.train() # Set model back to training(default) mode
m.bias_init() # only run once
else:
self.stride = torch.Tensor([32]) # default stride, e.g., RTDETR
# Init weights, biases
initialize_weights(self)
if verbose:
self.info()
LOGGER.info("")
@property
def end2end(self):
return getattr(self.model[-1], "end2end", False)
@end2end.setter
def end2end(self, value):
self.set_head_attr(end2end=value)
def set_head_attr(self, **kwargs):
head = self.model[-1]
for k, v in kwargs.items():
if not hasattr(head, k):
LOGGER.warning(f"Head has no attribute '{k}'.")
continue
setattr(head, k, v)
def _predict_augment(self, x):
if getattr(self, "end2end", False) or self.__class__.__name__ != "DetectionModel":
LOGGER.warning("Model does not support 'augment=True', reverting to single-scale prediction.")
return self._predict_once(x)
img_size = x.shape[-2:] # height, width
s = [1, 0.83, 0.67] # scales
f = [None, 3, None] # flips (2-ud, 3-lr)
y = [] # outputs
for si, fi in zip(s, f):
xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
yi = super().predict(xi)[0] # forward
yi = self._descale_pred(yi, fi, si, img_size)
y.append(yi)
y = self._clip_augmented(y) # clip augmented tails
return torch.cat(y, -1), None # augmented inference, train
@staticmethod
def _descale_pred(p, flips, scale, img_size, dim=1):
p[:, :4] /= scale # de-scale
x, y, wh, cls = p.split((1, 1, 2, p.shape[dim] - 4), dim)
if flips == 2:
y = img_size[0] - y # de-flip ud
elif flips == 3:
x = img_size[1] - x # de-flip lr
return torch.cat((x, y, wh, cls), dim)
def _clip_augmented(self, y):
nl = self.model[-1].nl # number of detection layers (P3-P5)
g = sum(4**x for x in range(nl)) # grid points
e = 1 # exclude layer count
i = (y[0].shape[-1] // g) * sum(4**x for x in range(e)) # indices
y[0] = y[0][..., :-i] # large
i = (y[-1].shape[-1] // g) * sum(4 ** (nl - 1 - x) for x in range(e)) # indices
y[-1] = y[-1][..., i:] # small
return y
def init_criterion(self):
return E2ELoss(self) if getattr(self, "end2end", False) else v8DetectionLoss(self)
class OBBModel(DetectionModel):
def __init__(self, cfg="yolo26n-obb.yaml", ch=3, nc=None, verbose=True):
super().__init__(cfg=cfg, ch=ch, nc=nc, verbose=verbose)
def init_criterion(self):
return E2ELoss(self, v8OBBLoss) if getattr(self, "end2end", False) else v8OBBLoss(self)
class SegmentationModel(DetectionModel):
def __init__(self, cfg="yolo26n-seg.yaml", ch=3, nc=None, verbose=True):
super().__init__(cfg=cfg, ch=ch, nc=nc, verbose=verbose)
def init_criterion(self):
return E2ELoss(self, v8SegmentationLoss) if getattr(self, "end2end", False) else v8SegmentationLoss(self)
class PoseModel(DetectionModel):
def __init__(self, cfg="yolo26n-pose.yaml", ch=3, nc=None, data_kpt_shape=(None, None), verbose=True):
if not isinstance(cfg, dict):
cfg = yaml_model_load(cfg) # load model YAML
if any(data_kpt_shape) and list(data_kpt_shape) != list(cfg["kpt_shape"]):
LOGGER.info(f"Overriding model.yaml kpt_shape={cfg['kpt_shape']} with kpt_shape={data_kpt_shape}")
cfg["kpt_shape"] = data_kpt_shape
super().__init__(cfg=cfg, ch=ch, nc=nc, verbose=verbose)
def init_criterion(self):
return E2ELoss(self, PoseLoss26) if getattr(self, "end2end", False) else v8PoseLoss(self)
class ClassificationModel(BaseModel):
def __init__(self, cfg="yolo26n-cls.yaml", ch=3, nc=None, verbose=True):
super().__init__()
self._from_yaml(cfg, ch, nc, verbose)
def _from_yaml(self, cfg, ch, nc, verbose):
self.yaml = cfg if isinstance(cfg, dict) else yaml_model_load(cfg) # cfg dict
# Define model
ch = self.yaml["channels"] = self.yaml.get("channels", ch) # input channels
if nc and nc != self.yaml["nc"]:
LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
self.yaml["nc"] = nc # override YAML value
elif not nc and not self.yaml.get("nc", None):
raise ValueError("nc not specified. Must specify nc in model.yaml or function arguments.")
self.model, self.save = parse_model(deepcopy(self.yaml), ch=ch, verbose=verbose) # model, savelist
self.stride = torch.Tensor([1]) # no stride constraints
self.names = {i: f"{i}" for i in range(self.yaml["nc"])} # default names dict
self.info()
@staticmethod
def reshape_outputs(model, nc):
name, m = list((model.model if hasattr(model, "model") else model).named_children())[-1] # last module
if isinstance(m, Classify): # YOLO Classify() head
if m.linear.out_features != nc:
m.linear = torch.nn.Linear(m.linear.in_features, nc)
elif isinstance(m, torch.nn.Linear): # ResNet, EfficientNet
if m.out_features != nc:
setattr(model, name, torch.nn.Linear(m.in_features, nc))
elif isinstance(m, torch.nn.Sequential):
types = [type(x) for x in m]
if torch.nn.Linear in types:
i = len(types) - 1 - types[::-1].index(torch.nn.Linear) # last torch.nn.Linear index
if m[i].out_features != nc:
m[i] = torch.nn.Linear(m[i].in_features, nc)
elif torch.nn.Conv2d in types:
i = len(types) - 1 - types[::-1].index(torch.nn.Conv2d) # last torch.nn.Conv2d index
if m[i].out_channels != nc:
m[i] = torch.nn.Conv2d(
m[i].in_channels, nc, m[i].kernel_size, m[i].stride, bias=m[i].bias is not None
)
def init_criterion(self):
return v8ClassificationLoss()
class RTDETRDetectionModel(DetectionModel):
def __init__(self, cfg="rtdetr-l.yaml", ch=3, nc=None, verbose=True):
super().__init__(cfg=cfg, ch=ch, nc=nc, verbose=verbose)
def _apply(self, fn):
self = super()._apply(fn)
m = self.model[-1]
m.anchors = fn(m.anchors)
m.valid_mask = fn(m.valid_mask)
return self
def init_criterion(self):
from ultralytics.models.utils.loss import RTDETRDetectionLoss
return RTDETRDetectionLoss(nc=self.nc, use_vfl=True)
def loss(self, batch, preds=None):
if not hasattr(self, "criterion"):
self.criterion = self.init_criterion()
img = batch["img"]
# NOTE: preprocess gt_bbox and gt_labels to list.
bs = img.shape[0]
batch_idx = batch["batch_idx"]
gt_groups = [(batch_idx == i).sum().item() for i in range(bs)]
targets = {
"cls": batch["cls"].to(img.device, dtype=torch.long).view(-1),
"bboxes": batch["bboxes"].to(device=img.device),
"batch_idx": batch_idx.to(img.device, dtype=torch.long).view(-1),
"gt_groups": gt_groups,
}
if preds is None:
preds = self.predict(img, batch=targets)
dec_bboxes, dec_scores, enc_bboxes, enc_scores, dn_meta = preds if self.training else preds[1]
if dn_meta is None:
dn_bboxes, dn_scores = None, None
else:
dn_bboxes, dec_bboxes = torch.split(dec_bboxes, dn_meta["dn_num_split"], dim=2)
dn_scores, dec_scores = torch.split(dec_scores, dn_meta["dn_num_split"], dim=2)
dec_bboxes = torch.cat([enc_bboxes.unsqueeze(0), dec_bboxes]) # (7, bs, 300, 4)
dec_scores = torch.cat([enc_scores.unsqueeze(0), dec_scores])
loss = self.criterion(
(dec_bboxes, dec_scores), targets, dn_bboxes=dn_bboxes, dn_scores=dn_scores, dn_meta=dn_meta
)
# NOTE: There are like 12 losses in RTDETR, backward with all losses but only show the main three losses.
return sum(loss.values()), torch.as_tensor(
[loss[k].detach() for k in ["loss_giou", "loss_class", "loss_bbox"]], device=img.device
)
def predict(self, x, profile=False, visualize=False, batch=None, augment=False, embed=None):
y, dt, embeddings = [], [], [] # outputs
embed = frozenset(embed) if embed is not None else {-1}
max_idx = max(embed)
for m in self.model[:-1]: # except the head part
if m.f != -1: # if not from previous layer
x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers
if profile:
self._profile_one_layer(m, x, dt)
x = m(x) # run
y.append(x if m.i in self.save else None) # save output
if visualize:
feature_visualization(x, m.type, m.i, save_dir=visualize)
if m.i in embed:
embeddings.append(torch.nn.functional.adaptive_avg_pool2d(x, (1, 1)).squeeze(-1).squeeze(-1)) # flatten
if m.i == max_idx:
return torch.unbind(torch.cat(embeddings, 1), dim=0)
head = self.model[-1]
x = head([y[j] for j in head.f], batch) # head inference
return x
class WorldModel(DetectionModel):
def __init__(self, cfg="yolov8s-world.yaml", ch=3, nc=None, verbose=True):
self.txt_feats = torch.randn(1, nc or 80, 512) # features placeholder
self.clip_model = None # CLIP model placeholder
super().__init__(cfg=cfg, ch=ch, nc=nc, verbose=verbose)
def set_classes(self, text, batch=80, cache_clip_model=True):
self.txt_feats = self.get_text_pe(text, batch=batch, cache_clip_model=cache_clip_model)
self.model[-1].nc = len(text)
def get_text_pe(self, text, batch=80, cache_clip_model=True):
from ultralytics.nn.text_model import build_text_model
device = next(self.model.parameters()).device
if not getattr(self, "clip_model", None) and cache_clip_model:
# For backwards compatibility of models lacking clip_model attribute
self.clip_model = build_text_model("clip:ViT-B/32", device=device)
model = self.clip_model if cache_clip_model else build_text_model("clip:ViT-B/32", device=device)
text_token = model.tokenize(text)
txt_feats = [model.encode_text(token).detach() for token in text_token.split(batch)]
txt_feats = txt_feats[0] if len(txt_feats) == 1 else torch.cat(txt_feats, dim=0)
return txt_feats.reshape(-1, len(text), txt_feats.shape[-1])
def predict(self, x, profile=False, visualize=False, txt_feats=None, augment=False, embed=None):
txt_feats = (self.txt_feats if txt_feats is None else txt_feats).to(device=x.device, dtype=x.dtype)
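# Broadcast text features across the batch when batch sizes differ or during export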
if txt_feats.shape[0] != x.shape[0] or self.model[-1].export:
txt_feats = txt_feats.expand(x.shape[0], -1, -1)
ori_txt_feats = txt_feats.clone()
y, dt, embeddings = [], [], [] # outputs
embed = frozenset(embed) if embed is not None else {-1}
max_idx = max(embed)
for m in self.model: # except the head part
if m.f != -1: # if not from previous layer
x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers
if profile:
self._profile_one_layer(m, x, dt)
if isinstance(m, C2fAttn):
x = m(x, txt_feats)
elif isinstance(m, WorldDetect):
x = m(x, ori_txt_feats)
elif isinstance(m, ImagePoolingAttn):
txt_feats = m(x, txt_feats)
else:
x = m(x) # run
y.append(x if m.i in self.save else None) # save output
if visualize:
feature_visualization(x, m.type, m.i, save_dir=visualize)
if m.i in embed:
embeddings.append(torch.nn.functional.adaptive_avg_pool2d(x, (1, 1)).squeeze(-1).squeeze(-1)) # flatten
if m.i == max_idx:
return torch.unbind(torch.cat(embeddings, 1), dim=0)
return x
def loss(self, batch, preds=None):
if not hasattr(self, "criterion"):
self.criterion = self.init_criterion()
if preds is None:
preds = self.forward(batch["img"], txt_feats=batch["txt_feats"])
return self.criterion(preds, batch)
class YOLOEModel(DetectionModel):
def __init__(self, cfg="yoloe-v8s.yaml", ch=3, nc=None, verbose=True):
super().__init__(cfg=cfg, ch=ch, nc=nc, verbose=verbose)
self.text_model = self.yaml.get("text_model", "mobileclip:blt")
@smart_inference_mode()
def get_text_pe(self, text, batch=80, cache_clip_model=False, without_reprta=False):
from ultralytics.nn.text_model import build_text_model
device = next(self.model.parameters()).device
if not getattr(self, "clip_model", None) and cache_clip_model:
# For backwards compatibility of models lacking clip_model attribute
self.clip_model = build_text_model(getattr(self, "text_model", "mobileclip:blt"), device=device)
model = (
self.clip_model
if cache_clip_model
else build_text_model(getattr(self, "text_model", "mobileclip:blt"), device=device)
)
text_token = model.tokenize(text)
txt_feats = [model.encode_text(token).detach() for token in text_token.split(batch)]
txt_feats = txt_feats[0] if len(txt_feats) == 1 else torch.cat(txt_feats, dim=0)
txt_feats = txt_feats.reshape(-1, len(text), txt_feats.shape[-1])
if without_reprta:
return txt_feats
head = self.model[-1]
assert isinstance(head, YOLOEDetect)
return head.get_tpe(txt_feats) # run auxiliary text head
@smart_inference_mode()
def get_visual_pe(self, img, visual):
return self(img, vpe=visual, return_vpe=True)
def set_vocab(self, vocab, names):
assert not self.training
head = self.model[-1]
assert isinstance(head, YOLOEDetect)
# Cache anchors for head
device = next(self.parameters()).device
self(torch.empty(1, 3, self.args["imgsz"], self.args["imgsz"]).to(device)) # warmup
cv3 = getattr(head, "one2one_cv3", head.cv3)
cv2 = getattr(head, "one2one_cv2", head.cv2)
# re-parameterization for prompt-free model
self.model[-1].lrpc = nn.ModuleList(
LRPCHead(cls, pf[-1], loc[-1], enabled=i != 2) for i, (cls, pf, loc) in enumerate(zip(vocab, cv3, cv2))
)
for loc_head, cls_head in zip(head.cv2, head.cv3):
assert isinstance(loc_head, nn.Sequential)
assert isinstance(cls_head, nn.Sequential)
del loc_head[-1]
del cls_head[-1]
self.model[-1].nc = len(names)
self.names = check_class_names(names)
def get_vocab(self, names):
assert not self.training
head = self.model[-1]
assert isinstance(head, YOLOEDetect)
assert not head.is_fused
tpe = self.get_text_pe(names)
self.set_classes(names, tpe)
device = next(self.model.parameters()).device
head.fuse(self.pe.to(device)) # fuse prompt embeddings to classify head
cv3 = getattr(head, "one2one_cv3", head.cv3)
vocab = nn.ModuleList()
for cls_head in cv3:
assert isinstance(cls_head, nn.Sequential)
vocab.append(cls_head[-1])
return vocab
def set_classes(self, names, embeddings):
assert not hasattr(self.model[-1], "lrpc"), (
"Prompt-free model does not support setting classes. Please try with Text/Visual prompt models."
)
assert embeddings.ndim == 3
self.pe = embeddings
self.model[-1].nc = len(names)
self.names = check_class_names(names)
def get_cls_pe(self, tpe, vpe):
all_pe = []
if tpe is not None:
assert tpe.ndim == 3
all_pe.append(tpe)
if vpe is not None:
assert vpe.ndim == 3
all_pe.append(vpe)
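# Fall back to cached class prompt embeddings (or a zero placeholder) when neither text nor visual prompts are given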
if not all_pe:
all_pe.append(getattr(self, "pe", torch.zeros(1, 80, 512)))
return torch.cat(all_pe, dim=1)
def predict(
self, x, profile=False, visualize=False, tpe=None, augment=False, embed=None, vpe=None, return_vpe=False
):
y, dt, embeddings = [], [], [] # outputs
b = x.shape[0]
embed = frozenset(embed) if embed is not None else {-1}
max_idx = max(embed)
for m in self.model: # except the head part
if m.f != -1: # if not from previous layer
x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers
if profile:
self._profile_one_layer(m, x, dt)
if isinstance(m, YOLOEDetect):
vpe = m.get_vpe(x, vpe) if vpe is not None else None
if return_vpe:
assert vpe is not None
assert not self.training
return vpe
cls_pe = self.get_cls_pe(m.get_tpe(tpe), vpe).to(device=x[0].device, dtype=x[0].dtype)
if cls_pe.shape[0] != b or m.export:
cls_pe = cls_pe.expand(b, -1, -1)
x.append(cls_pe) # adding cls embedding
x = m(x) # run
y.append(x if m.i in self.save else None) # save output
if visualize:
feature_visualization(x, m.type, m.i, save_dir=visualize)
if m.i in embed:
embeddings.append(torch.nn.functional.adaptive_avg_pool2d(x, (1, 1)).squeeze(-1).squeeze(-1)) # flatten
if m.i == max_idx:
return torch.unbind(torch.cat(embeddings, 1), dim=0)
return x
def loss(self, batch, preds=None):
if not hasattr(self, "criterion"):
from ultralytics.utils.loss import TVPDetectLoss
visual_prompt = batch.get("visuals", None) is not None # TODO
self.criterion = (
(E2ELoss(self, TVPDetectLoss) if getattr(self, "end2end", False) else TVPDetectLoss(self))
if visual_prompt
else self.init_criterion()
)
if preds is None:
preds = self.forward(
batch["img"],
tpe=None if "visuals" in batch else batch.get("txt_feats", None),
vpe=batch.get("visuals", None),
)
return self.criterion(preds, batch)
class YOLOESegModel(YOLOEModel, SegmentationModel):
def __init__(self, cfg="yoloe-v8s-seg.yaml", ch=3, nc=None, verbose=True):
super().__init__(cfg=cfg, ch=ch, nc=nc, verbose=verbose)
def loss(self, batch, preds=None):
if not hasattr(self, "criterion"):
from ultralytics.utils.loss import TVPSegmentLoss
visual_prompt = batch.get("visuals", None) is not None # TODO
self.criterion = (
(E2ELoss(self, TVPSegmentLoss) if getattr(self, "end2end", False) else TVPSegmentLoss(self))
if visual_prompt
else self.init_criterion()
)
if preds is None:
preds = self.forward(batch["img"], tpe=batch.get("txt_feats", None), vpe=batch.get("visuals", None))
return self.criterion(preds, batch)
class Ensemble(torch.nn.ModuleList):
def __init__(self):
super().__init__()
def forward(self, x, augment=False, profile=False, visualize=False):
y = [module(x, augment, profile, visualize)[0] for module in self]
# y = torch.stack(y).max(0)[0] # max ensemble
# y = torch.stack(y).mean(0) # mean ensemble
y = torch.cat(y, 2) # nms ensemble, y shape(B, HW, C*num_models)
return y, None # inference, train output
# Functions ------------------------------------------------------------------------------------------------------------
@contextlib.contextmanager
def temporary_modules(modules=None, attributes=None):
if modules is None:
modules = {}
if attributes is None:
attributes = {}
import sys
from importlib import import_module
try:
# Set attributes in sys.modules under their old name
for old, new in attributes.items():
old_module, old_attr = old.rsplit(".", 1)
new_module, new_attr = new.rsplit(".", 1)
setattr(import_module(old_module), old_attr, getattr(import_module(new_module), new_attr))
# Set modules in sys.modules under their old name
for old, new in modules.items():
sys.modules[old] = import_module(new)
yield
finally:
# Remove the temporary module paths
for old in modules:
if old in sys.modules:
del sys.modules[old]
class SafeClass:
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
pass
class SafeUnpickler(pickle.Unpickler):
def find_class(self, module, name):
safe_modules = (
"torch",
"collections",
"collections.abc",
"builtins",
"math",
"numpy",
# Add other modules considered safe
)
if module in safe_modules:
return super().find_class(module, name)
else:
return SafeClass
def torch_safe_load(weight, safe_only=False):
from ultralytics.utils.downloads import attempt_download_asset
check_suffix(file=weight, suffix=".pt")
file = attempt_download_asset(weight) # search online if missing locally
try:
with temporary_modules(
modules={
"ultralytics.yolo.utils": "ultralytics.utils",
"ultralytics.yolo.v8": "ultralytics.models.yolo",
"ultralytics.yolo.data": "ultralytics.data",
},
attributes={
"ultralytics.nn.modules.block.Silence": "torch.nn.Identity", # YOLOv9e
"ultralytics.nn.tasks.YOLOv10DetectionModel": "ultralytics.nn.tasks.DetectionModel", # YOLOv10
"ultralytics.utils.loss.v10DetectLoss": "ultralytics.utils.loss.E2EDetectLoss", # YOLOv10
# resolve cross-platform pathlib pickle incompatibility
**(
{"pathlib.PosixPath": "pathlib.WindowsPath"}
if WINDOWS
else {"pathlib.WindowsPath": "pathlib.PosixPath"}
),
},
):
if safe_only:
# Load via custom pickle module
safe_pickle = types.ModuleType("safe_pickle")
safe_pickle.Unpickler = SafeUnpickler
safe_pickle.load = lambda file_obj: SafeUnpickler(file_obj).load()
with open(file, "rb") as f:
ckpt = torch_load(f, pickle_module=safe_pickle)
else:
ckpt = torch_load(file, map_location="cpu")
except ModuleNotFoundError as e: # e.name is missing module name
if e.name == "models":
raise TypeError(
emojis(
f"ERROR ❌️ {weight} appears to be an Ultralytics YOLOv5 model originally trained "
f"with https://github.com/ultralytics/yolov5.\nThis model is NOT forwards compatible with "
f"YOLOv8 at https://github.com/ultralytics/ultralytics."
f"\nRecommend fixes are to train a new model using the latest 'ultralytics' package or to "
f"run a command with an official Ultralytics model, i.e. 'yolo predict model=yolo26n.pt'"
)
) from e
elif e.name == "numpy._core":
raise ModuleNotFoundError(
emojis(
f"ERROR ❌️ {weight} requires numpy>=1.26.1, however numpy=={__import__('numpy').__version__} is installed."
)
) from e
LOGGER.warning(
f"{weight} appears to require '{e.name}', which is not in Ultralytics requirements."
f"\nAutoInstall will run now for '{e.name}' but this feature will be removed in the future."
f"\nRecommend fixes are to train a new model using the latest 'ultralytics' package or to "
f"run a command with an official Ultralytics model, i.e. 'yolo predict model=yolo26n.pt'"
)
check_requirements(e.name) # install missing module
ckpt = torch_load(file, map_location="cpu")
if not isinstance(ckpt, dict):
# File is likely a YOLO instance saved with i.e. torch.save(model, "saved_model.pt")
LOGGER.warning(
f"The file '{weight}' appears to be improperly saved or formatted. "
f"For optimal results, use model.save('filename.pt') to correctly save YOLO models."
)
ckpt = {"model": ckpt.model}
return ckpt, file
def load_checkpoint(weight, device=None, inplace=True, fuse=False):
ckpt, weight = torch_safe_load(weight) # load ckpt
args = {**DEFAULT_CFG_DICT, **(ckpt.get("train_args", {}))} # combine model and default args, preferring model args
model = (ckpt.get("ema") or ckpt["model"]).float() # FP32 model
# Model compatibility updates
model.args = args # attach args to model
model.pt_path = str(weight) # attach *.pt file path to model as string (avoids WindowsPath pickle issues)
model.task = getattr(model, "task", guess_model_task(model))
if not hasattr(model, "stride"):
model.stride = torch.tensor([32.0])
model = (model.fuse() if fuse and hasattr(model, "fuse") else model).eval().to(device) # model in eval mode
# Module updates
for m in model.modules():
if hasattr(m, "inplace"):
m.inplace = inplace
elif isinstance(m, torch.nn.Upsample) and not hasattr(m, "recompute_scale_factor"):
m.recompute_scale_factor = None # torch 1.11.0 compatibility
# Return model and ckpt
return model, ckpt
def parse_model(d, ch, verbose=True):
import ast
# Args
legacy = True # backward compatibility for v3/v5/v8/v9 models
max_channels = float("inf")
nc, act, scales, end2end = (d.get(x) for x in ("nc", "activation", "scales", "end2end"))
reg_max = d.get("reg_max", 16)
depth, width, kpt_shape = (d.get(x, 1.0) for x in ("depth_multiple", "width_multiple", "kpt_shape"))
scale = d.get("scale")
if scales:
if not scale:
scale = next(iter(scales.keys()))
LOGGER.warning(f"no model scale passed. Assuming scale='{scale}'.")
depth, width, max_channels = scales[scale]
if act:
Conv.default_act = eval(act) # redefine default activation, i.e. Conv.default_act = torch.nn.SiLU()
if verbose:
LOGGER.info(f"{colorstr('activation:')} {act}") # print
if verbose:
LOGGER.info(f"\n{'':>3}{'from':>20}{'n':>3}{'params':>10} {'module':<45}{'arguments':<30}")
ch = [ch]
layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out
base_modules = frozenset(
{
Classify,
Conv,
ConvTranspose,
GhostConv,
Bottleneck,
GhostBottleneck,
SPP,
SPPF,
C2fPSA,
C2PSA,
DWConv,
Focus,
BottleneckCSP,
C1,
C2,
C2f,
C3k2,
RepNCSPELAN4,
ELAN1,
ADown,
AConv,
SPPELAN,
C2fAttn,
C3,
C3TR,
C3Ghost,
torch.nn.ConvTranspose2d,
DWConvTranspose2d,
C3x,
RepC3,
PSA,
SCDown,
C2fCIB,
A2C2f,
}
)
repeat_modules = frozenset( # modules with 'repeat' arguments
{
BottleneckCSP,
C1,
C2,
C2f,
C3k2,
C2fAttn,
C3,
C3TR,
C3Ghost,
C3x,
RepC3,
C2fPSA,
C2fCIB,
C2PSA,
A2C2f,
}
)
for i, (f, n, m, args) in enumerate(d["backbone"] + d["head"]): # from, number, module, args
m = (
getattr(torch.nn, m[3:])
if "nn." in m
else getattr(__import__("torchvision").ops, m[16:])
if "torchvision.ops." in m
else globals()[m]
) # get module
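# Resolve string args to local names (e.g. 'nc') when available, otherwise evaluate them as Python literals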
for j, a in enumerate(args):
if isinstance(a, str):
with contextlib.suppress(ValueError):
args[j] = locals()[a] if a in locals() else ast.literal_eval(a)
n = n_ = max(round(n * depth), 1) if n > 1 else n # depth gain
if m in base_modules:
c1, c2 = ch[f], args[0]
if c2 != nc: # if c2 != nc (e.g., Classify() output)
c2 = make_divisible(min(c2, max_channels) * width, 8)
if m is C2fAttn: # set 1) embed channels and 2) num heads
args[1] = make_divisible(min(args[1], max_channels // 2) * width, 8)
args[2] = int(max(round(min(args[2], max_channels // 2 // 32)) * width, 1) if args[2] > 1 else args[2])
args = [c1, c2, *args[1:]]
if m in repeat_modules:
args.insert(2, n) # number of repeats
n = 1
if m is C3k2: # for M/L/X sizes
legacy = False
if scale in "mlx":
args[3] = True
if m is A2C2f:
legacy = False
if scale in "lx": # for L/X sizes
args.extend((True, 1.2))
if m is C2fCIB:
legacy = False
elif m is AIFI:
args = [ch[f], *args]
elif m in frozenset({HGStem, HGBlock}):
c1, cm, c2 = ch[f], args[0], args[1]
args = [c1, cm, c2, *args[2:]]
if m is HGBlock:
args.insert(4, n) # number of repeats
n = 1
elif m is ResNetLayer:
c2 = args[1] if args[3] else args[1] * 4
elif m is torch.nn.BatchNorm2d:
args = [ch[f]]
elif m is Concat:
c2 = sum(ch[x] for x in f)
elif m in frozenset(
{
Detect,
WorldDetect,
YOLOEDetect,
Segment,
Segment26,
YOLOESegment,
YOLOESegment26,
Pose,
Pose26,
OBB,
OBB26,
}
):
args.extend([reg_max, end2end, [ch[x] for x in f]])
if m is Segment or m is YOLOESegment or m is Segment26 or m is YOLOESegment26:
args[2] = make_divisible(min(args[2], max_channels) * width, 8)
if m in {Detect, YOLOEDetect, Segment, Segment26, YOLOESegment, YOLOESegment26, Pose, Pose26, OBB, OBB26}:
m.legacy = legacy
elif m is v10Detect:
args.append([ch[x] for x in f])
elif m is ImagePoolingAttn:
args.insert(1, [ch[x] for x in f]) # channels as second arg
elif m is RTDETRDecoder: # special case, channels arg must be passed in index 1
args.insert(1, [ch[x] for x in f])
elif m is CBLinear:
c2 = args[0]
c1 = ch[f]
args = [c1, c2, *args[1:]]
elif m is CBFuse:
c2 = ch[f[-1]]
elif m in frozenset({TorchVision, Index}):
c2 = args[0]
c1 = ch[f]
args = [*args[1:]]
else:
c2 = ch[f]
m_ = torch.nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module
t = str(m)[8:-2].replace("__main__.", "") # module type
m_.np = sum(x.numel() for x in m_.parameters()) # number params
m_.i, m_.f, m_.type = i, f, t # attach index, 'from' index, type
if verbose:
LOGGER.info(f"{i:>3}{f!s:>20}{n_:>3}{m_.np:10.0f} {t:<45}{args!s:<30}") # print
save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
layers.append(m_)
if i == 0:
ch = []
ch.append(c2)
return torch.nn.Sequential(*layers), sorted(save)
def yaml_model_load(path):
path = Path(path)
if path.stem in (f"yolov{d}{x}6" for x in "nsmlx" for d in (5, 8)):
new_stem = re.sub(r"(\d+)([nslmx])6(.+)?$", r"\1\2-p6\3", path.stem)
LOGGER.warning(f"Ultralytics YOLO P6 models now use -p6 suffix. Renaming {path.stem} to {new_stem}.")
path = path.with_name(new_stem + path.suffix)
unified_path = re.sub(r"(\d+)([nslmx])(.+)?$", r"\1\3", str(path)) # i.e. yolov8x.yaml -> yolov8.yaml
yaml_file = check_yaml(unified_path, hard=False) or check_yaml(path)
d = YAML.load(yaml_file) # model dict
d["scale"] = guess_model_scale(path)
d["yaml_file"] = str(path)
return d
def guess_model_scale(model_path):
try:
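# Extract the size character (n/s/m/l/x) from stems like 'yolov8n' or 'yolo11s'; no match raises AttributeError below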
return re.search(r"yolo(e-)?[v]?\d+([nslmx])", Path(model_path).stem).group(2)
except AttributeError:
return ""
def guess_model_task(model):
def cfg2task(cfg):
m = cfg["head"][-1][-2].lower() # output module name
if m in {"classify", "classifier", "cls", "fc"}:
return "classify"
if "detect" in m:
return "detect"
if "segment" in m:
return "segment"
if "pose" in m:
return "pose"
if "obb" in m:
return "obb"
# Guess from model cfg
if isinstance(model, dict):
with contextlib.suppress(Exception):
return cfg2task(model)
# Guess from PyTorch model
if isinstance(model, torch.nn.Module): # PyTorch model
for x in "model.args", "model.model.args", "model.model.model.args":
with contextlib.suppress(Exception):
return eval(x)["task"] # nosec B307: safe eval of known attribute paths
for x in "model.yaml", "model.model.yaml", "model.model.model.yaml":
with contextlib.suppress(Exception):
return cfg2task(eval(x)) # nosec B307: safe eval of known attribute paths
for m in model.modules():
if isinstance(m, (Segment, YOLOESegment)):
return "segment"
elif isinstance(m, Classify):
return "classify"
elif isinstance(m, Pose):
return "pose"
elif isinstance(m, OBB):
return "obb"
elif isinstance(m, (Detect, WorldDetect, YOLOEDetect, v10Detect)):
return "detect"
# Guess from model filename
if isinstance(model, (str, Path)):
model = Path(model)
if "-seg" in model.stem or "segment" in model.parts:
return "segment"
elif "-cls" in model.stem or "classify" in model.parts:
return "classify"
elif "-pose" in model.stem or "pose" in model.parts:
return "pose"
elif "-obb" in model.stem or "obb" in model.parts:
return "obb"
elif "detect" in model.parts:
return "detect"
# Unable to determine task from model
LOGGER.warning(
"Unable to automatically guess model task, assuming 'task=detect'. "
"Explicitly define task for your model, i.e. 'task=detect', 'segment', 'classify','pose' or 'obb'."
)
return "detect" # assume detect | --- +++ @@ -100,18 +100,76 @@
class BaseModel(torch.nn.Module):
+ """Base class for all YOLO models in the Ultralytics family.
+
+ This class provides common functionality for YOLO models including forward pass handling, model fusion, information
+ display, and weight loading capabilities.
+
+ Attributes:
+ model (torch.nn.Sequential): The neural network model.
+ save (list): List of layer indices to save outputs from.
+ stride (torch.Tensor): Model stride values.
+
+ Methods:
+ forward: Perform forward pass for training or inference.
+ predict: Perform inference on input tensor.
+ fuse: Fuse Conv/BatchNorm layers and reparameterize for optimization.
+ info: Print model information.
+ load: Load weights into the model.
+ loss: Compute loss for training.
+
+ Examples:
+ Create a BaseModel instance
+ >>> model = BaseModel()
+ >>> model.info() # Display model information
+ """
def forward(self, x, *args, **kwargs):
+ """Perform forward pass of the model for either training or inference.
+
+ If x is a dict, calculates and returns the loss for training. Otherwise, returns predictions for inference.
+
+ Args:
+ x (torch.Tensor | dict): Input tensor for inference, or dict with image tensor and labels for training.
+ *args (Any): Variable length argument list.
+ **kwargs (Any): Arbitrary keyword arguments.
+
+ Returns:
+ (torch.Tensor): Loss if x is a dict (training), or network predictions (inference).
+ """
if isinstance(x, dict): # for cases of training and validating while training.
return self.loss(x, *args, **kwargs)
return self.predict(x, *args, **kwargs)
def predict(self, x, profile=False, visualize=False, augment=False, embed=None):
+ """Perform a forward pass through the network.
+
+ Args:
+ x (torch.Tensor): The input tensor to the model.
+ profile (bool): Print the computation time of each layer if True.
+ visualize (bool): Save the feature maps of the model if True.
+ augment (bool): Augment image during prediction.
+ embed (list, optional): A list of layer indices to return embeddings from.
+
+ Returns:
+ (torch.Tensor): The last output of the model.
+ """
if augment:
return self._predict_augment(x)
return self._predict_once(x, profile, visualize, embed)
def _predict_once(self, x, profile=False, visualize=False, embed=None):
+ """Perform a forward pass through the network.
+
+ Args:
+ x (torch.Tensor): The input tensor to the model.
+ profile (bool): Print the computation time of each layer if True.
+ visualize (bool): Save the feature maps of the model if True.
+ embed (list, optional): A list of layer indices to return embeddings from.
+
+ Returns:
+ (torch.Tensor): The last output of the model.
+ """
y, dt, embeddings = [], [], [] # outputs
embed = frozenset(embed) if embed is not None else {-1}
max_idx = max(embed)
@@ -131,6 +189,7 @@ return x
def _predict_augment(self, x):
+ """Perform augmentations on input image x and return augmented inference."""
LOGGER.warning(
f"{self.__class__.__name__} does not support 'augment=True' prediction. "
f"Reverting to single-scale prediction."
@@ -138,6 +197,13 @@ return self._predict_once(x)
def _profile_one_layer(self, m, x, dt):
+ """Profile the computation time and FLOPs of a single layer of the model on a given input.
+
+ Args:
+ m (torch.nn.Module): The layer to be profiled.
+ x (torch.Tensor): The input data to the layer.
+ dt (list): A list to store the computation time of the layer.
+ """
try:
import thop
except ImportError:
@@ -156,6 +222,14 @@ LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total")
def fuse(self, verbose=True):
+ """Fuse Conv/ConvTranspose and BatchNorm layers, and reparameterize RepConv/RepVGGDW for improved efficiency.
+
+ Args:
+ verbose (bool): Whether to print model information after fusion.
+
+ Returns:
+ (torch.nn.Module): The fused model.
+ """
if not self.is_fused():
for m in self.model.modules():
if isinstance(m, (Conv, Conv2, DWConv)) and hasattr(m, "bn"):
@@ -181,13 +255,36 @@ return self
def is_fused(self, thresh=10):
+ """Check if the model has less than a certain threshold of normalization layers.
+
+ Args:
+ thresh (int, optional): The threshold number of normalization layers.
+
+ Returns:
+ (bool): True if the number of normalization layers in the model is less than the threshold, False otherwise.
+ """
bn = tuple(v for k, v in torch.nn.__dict__.items() if "Norm" in k) # normalization layers, i.e. BatchNorm2d()
return sum(isinstance(v, bn) for v in self.modules()) < thresh # True if < 'thresh' BatchNorm layers in model
def info(self, detailed=False, verbose=True, imgsz=640):
+ """Print model information.
+
+ Args:
+ detailed (bool): If True, prints out detailed information about the model.
+ verbose (bool): If True, prints out the model information.
+ imgsz (int): The size of the image used for computing model information.
+ """
return model_info(self, detailed=detailed, verbose=verbose, imgsz=imgsz)
def _apply(self, fn):
+ """Apply a function to all tensors in the model, including Detect head attributes like stride and anchors.
+
+ Args:
+ fn (function): The function to apply to the model.
+
+ Returns:
+ (BaseModel): An updated BaseModel object.
+ """
self = super()._apply(fn)
m = self.model[-1] # Detect()
if isinstance(
@@ -199,6 +296,12 @@ return self
def load(self, weights, verbose=True):
+ """Load weights into the model.
+
+ Args:
+ weights (dict | torch.nn.Module): The pre-trained weights to be loaded.
+ verbose (bool, optional): Whether to log the transfer progress.
+ """
model = weights["model"] if isinstance(weights, dict) else weights # torchvision models are not dicts
csd = model.float().state_dict() # checkpoint state_dict as FP32
updated_csd = intersect_dicts(csd, self.state_dict()) # intersect
@@ -218,6 +321,12 @@ LOGGER.info(f"Transferred {len_updated_csd}/{len(self.model.state_dict())} items from pretrained weights")
def loss(self, batch, preds=None):
+ """Compute loss.
+
+ Args:
+ batch (dict): Batch to compute loss on.
+ preds (torch.Tensor | list[torch.Tensor], optional): Predictions.
+ """
if getattr(self, "criterion", None) is None:
self.criterion = self.init_criterion()
@@ -226,12 +335,47 @@ return self.criterion(preds, batch)
def init_criterion(self):
+ """Initialize the loss criterion for the BaseModel."""
raise NotImplementedError("compute_loss() needs to be implemented by task heads")
class DetectionModel(BaseModel):
+ """YOLO detection model.
+
+ This class implements the YOLO detection architecture, handling model initialization, forward pass, augmented
+ inference, and loss computation for object detection tasks.
+
+ Attributes:
+ yaml (dict): Model configuration dictionary.
+ model (torch.nn.Sequential): The neural network model.
+ save (list): List of layer indices to save outputs from.
+ names (dict): Class names dictionary.
+ inplace (bool): Whether to use inplace operations.
+ end2end (bool): Whether the model uses end-to-end detection.
+ stride (torch.Tensor): Model stride values.
+
+ Methods:
+ __init__: Initialize the YOLO detection model.
+ _predict_augment: Perform augmented inference.
+ _descale_pred: De-scale predictions following augmented inference.
+ _clip_augmented: Clip YOLO augmented inference tails.
+ init_criterion: Initialize the loss criterion.
+
+ Examples:
+ Initialize a detection model
+ >>> model = DetectionModel("yolo26n.yaml", ch=3, nc=80)
+ >>> results = model.predict(image_tensor)
+ """
def __init__(self, cfg="yolo26n.yaml", ch=3, nc=None, verbose=True):
+ """Initialize the YOLO detection model with the given config and parameters.
+
+ Args:
+ cfg (str | dict): Model configuration file path or dictionary.
+ ch (int): Number of input channels.
+ nc (int, optional): Number of classes.
+ verbose (bool): Whether to display model information.
+ """
super().__init__()
self.yaml = cfg if isinstance(cfg, dict) else yaml_model_load(cfg) # cfg dict
if self.yaml["backbone"][0][2] == "Silence":
@@ -257,6 +401,7 @@ m.inplace = self.inplace
def _forward(x):
+ """Perform a forward pass through the model, handling different Detect subclass types accordingly."""
output = self.forward(x)
if self.end2end:
output = output["one2many"]
@@ -279,13 +424,20 @@
@property
def end2end(self):
+ """Return whether the model uses end-to-end NMS-free detection."""
return getattr(self.model[-1], "end2end", False)
@end2end.setter
def end2end(self, value):
+ """Override the end-to-end detection mode."""
self.set_head_attr(end2end=value)
def set_head_attr(self, **kwargs):
+ """Set attributes of the model head (last layer).
+
+ Args:
+ **kwargs (Any): Arbitrary keyword arguments representing attributes to set.
+ """
head = self.model[-1]
for k, v in kwargs.items():
if not hasattr(head, k):
@@ -294,6 +446,14 @@ setattr(head, k, v)
def _predict_augment(self, x):
+ """Perform augmentations on input image x and return augmented inference and train outputs.
+
+ Args:
+ x (torch.Tensor): Input image tensor.
+
+ Returns:
+ (tuple[torch.Tensor, None]): Augmented inference output and None for train output.
+ """
if getattr(self, "end2end", False) or self.__class__.__name__ != "DetectionModel":
LOGGER.warning("Model does not support 'augment=True', reverting to single-scale prediction.")
return self._predict_once(x)
@@ -311,6 +471,18 @@
@staticmethod
def _descale_pred(p, flips, scale, img_size, dim=1):
+ """De-scale predictions following augmented inference (inverse operation).
+
+ Args:
+ p (torch.Tensor): Predictions tensor.
+ flips (int | None): Flip type (None=none, 2=ud, 3=lr).
+ scale (float): Scale factor.
+ img_size (tuple): Original image size (height, width).
+ dim (int): Dimension to split at.
+
+ Returns:
+ (torch.Tensor): De-scaled predictions.
+ """
p[:, :4] /= scale # de-scale
x, y, wh, cls = p.split((1, 1, 2, p.shape[dim] - 4), dim)
if flips == 2:
@@ -320,6 +492,14 @@ return torch.cat((x, y, wh, cls), dim)
def _clip_augmented(self, y):
+ """Clip YOLO augmented inference tails.
+
+ Args:
+ y (list[torch.Tensor]): List of detection tensors.
+
+ Returns:
+ (list[torch.Tensor]): Clipped detection tensors.
+ """
nl = self.model[-1].nl # number of detection layers (P3-P5)
g = sum(4**x for x in range(nl)) # grid points
e = 1 # exclude layer count
@@ -330,30 +510,103 @@ return y
def init_criterion(self):
+ """Initialize the loss criterion for the DetectionModel."""
return E2ELoss(self) if getattr(self, "end2end", False) else v8DetectionLoss(self)
class OBBModel(DetectionModel):
+ """YOLO Oriented Bounding Box (OBB) model.
+
+ This class extends DetectionModel to handle oriented bounding box detection tasks, providing specialized loss
+ computation for rotated object detection.
+
+ Methods:
+ __init__: Initialize YOLO OBB model.
+ init_criterion: Initialize the loss criterion for OBB detection.
+
+ Examples:
+ Initialize an OBB model
+ >>> model = OBBModel("yolo26n-obb.yaml", ch=3, nc=80)
+ >>> results = model.predict(image_tensor)
+ """
def __init__(self, cfg="yolo26n-obb.yaml", ch=3, nc=None, verbose=True):
+ """Initialize YOLO OBB model with given config and parameters.
+
+ Args:
+ cfg (str | dict): Model configuration file path or dictionary.
+ ch (int): Number of input channels.
+ nc (int, optional): Number of classes.
+ verbose (bool): Whether to display model information.
+ """
super().__init__(cfg=cfg, ch=ch, nc=nc, verbose=verbose)
def init_criterion(self):
+ """Initialize the loss criterion for the model."""
return E2ELoss(self, v8OBBLoss) if getattr(self, "end2end", False) else v8OBBLoss(self)
class SegmentationModel(DetectionModel):
+ """YOLO segmentation model.
+
+ This class extends DetectionModel to handle instance segmentation tasks, providing specialized loss computation for
+ pixel-level object detection and segmentation.
+
+ Methods:
+ __init__: Initialize YOLO segmentation model.
+ init_criterion: Initialize the loss criterion for segmentation.
+
+ Examples:
+ Initialize a segmentation model
+ >>> model = SegmentationModel("yolo26n-seg.yaml", ch=3, nc=80)
+ >>> results = model.predict(image_tensor)
+ """
def __init__(self, cfg="yolo26n-seg.yaml", ch=3, nc=None, verbose=True):
+ """Initialize Ultralytics YOLO segmentation model with given config and parameters.
+
+ Args:
+ cfg (str | dict): Model configuration file path or dictionary.
+ ch (int): Number of input channels.
+ nc (int, optional): Number of classes.
+ verbose (bool): Whether to display model information.
+ """
super().__init__(cfg=cfg, ch=ch, nc=nc, verbose=verbose)
def init_criterion(self):
+ """Initialize the loss criterion for the SegmentationModel."""
return E2ELoss(self, v8SegmentationLoss) if getattr(self, "end2end", False) else v8SegmentationLoss(self)
class PoseModel(DetectionModel):
+ """YOLO pose model.
+
+ This class extends DetectionModel to handle human pose estimation tasks, providing specialized loss computation for
+ keypoint detection and pose estimation.
+
+ Attributes:
+ kpt_shape (tuple): Shape of keypoints data (num_keypoints, num_dimensions).
+
+ Methods:
+ __init__: Initialize YOLO pose model.
+ init_criterion: Initialize the loss criterion for pose estimation.
+
+ Examples:
+ Initialize a pose model
+ >>> model = PoseModel("yolo26n-pose.yaml", ch=3, nc=1, data_kpt_shape=(17, 3))
+ >>> results = model.predict(image_tensor)
+ """
def __init__(self, cfg="yolo26n-pose.yaml", ch=3, nc=None, data_kpt_shape=(None, None), verbose=True):
+ """Initialize Ultralytics YOLO Pose model.
+
+ Args:
+ cfg (str | dict): Model configuration file path or dictionary.
+ ch (int): Number of input channels.
+ nc (int, optional): Number of classes.
+ data_kpt_shape (tuple): Shape of keypoints data.
+ verbose (bool): Whether to display model information.
+ """
if not isinstance(cfg, dict):
cfg = yaml_model_load(cfg) # load model YAML
if any(data_kpt_shape) and list(data_kpt_shape) != list(cfg["kpt_shape"]):
@@ -362,16 +615,55 @@ super().__init__(cfg=cfg, ch=ch, nc=nc, verbose=verbose)
def init_criterion(self):
+ """Initialize the loss criterion for the PoseModel."""
return E2ELoss(self, PoseLoss26) if getattr(self, "end2end", False) else v8PoseLoss(self)
class ClassificationModel(BaseModel):
+ """YOLO classification model.
+
+ This class implements the YOLO classification architecture for image classification tasks, providing model
+ initialization, configuration, and output reshaping capabilities.
+
+ Attributes:
+ yaml (dict): Model configuration dictionary.
+ model (torch.nn.Sequential): The neural network model.
+ stride (torch.Tensor): Model stride values.
+ names (dict): Class names dictionary.
+
+ Methods:
+ __init__: Initialize ClassificationModel.
+ _from_yaml: Set model configurations and define architecture.
+ reshape_outputs: Update model to specified class count.
+ init_criterion: Initialize the loss criterion.
+
+ Examples:
+ Initialize a classification model
+ >>> model = ClassificationModel("yolo26n-cls.yaml", ch=3, nc=1000)
+ >>> results = model.predict(image_tensor)
+ """
def __init__(self, cfg="yolo26n-cls.yaml", ch=3, nc=None, verbose=True):
+ """Initialize ClassificationModel with YAML, channels, number of classes, verbose flag.
+
+ Args:
+ cfg (str | dict): Model configuration file path or dictionary.
+ ch (int): Number of input channels.
+ nc (int, optional): Number of classes.
+ verbose (bool): Whether to display model information.
+ """
super().__init__()
self._from_yaml(cfg, ch, nc, verbose)
def _from_yaml(self, cfg, ch, nc, verbose):
+ """Set Ultralytics YOLO model configurations and define the model architecture.
+
+ Args:
+ cfg (str | dict): Model configuration file path or dictionary.
+ ch (int): Number of input channels.
+ nc (int, optional): Number of classes.
+ verbose (bool): Whether to display model information.
+ """
self.yaml = cfg if isinstance(cfg, dict) else yaml_model_load(cfg) # cfg dict
# Define model
@@ -388,6 +680,12 @@
@staticmethod
def reshape_outputs(model, nc):
+ """Update a TorchVision classification model to class count 'nc' if required.
+
+ Args:
+ model (torch.nn.Module): Model to update.
+ nc (int): New number of classes.
+ """
name, m = list((model.model if hasattr(model, "model") else model).named_children())[-1] # last module
if isinstance(m, Classify): # YOLO Classify() head
if m.linear.out_features != nc:
@@ -409,15 +707,53 @@ )
def init_criterion(self):
+ """Initialize the loss criterion for the ClassificationModel."""
return v8ClassificationLoss()
class RTDETRDetectionModel(DetectionModel):
+ """RTDETR (Real-time DEtection and Tracking using Transformers) Detection Model class.
+
+ This class is responsible for constructing the RTDETR architecture, defining loss functions, and facilitating both
+ the training and inference processes. RTDETR is an object detection and tracking model that extends the
+ DetectionModel base class.
+
+ Attributes:
+ nc (int): Number of classes for detection.
+ criterion (RTDETRDetectionLoss): Loss function for training.
+
+ Methods:
+ __init__: Initialize the RTDETRDetectionModel.
+ init_criterion: Initialize the loss criterion.
+ loss: Compute loss for training.
+ predict: Perform forward pass through the model.
+
+ Examples:
+ Initialize an RTDETR model
+ >>> model = RTDETRDetectionModel("rtdetr-l.yaml", ch=3, nc=80)
+ >>> results = model.predict(image_tensor)
+ """
def __init__(self, cfg="rtdetr-l.yaml", ch=3, nc=None, verbose=True):
+ """Initialize the RTDETRDetectionModel.
+
+ Args:
+ cfg (str | dict): Configuration file name or path.
+ ch (int): Number of input channels.
+ nc (int, optional): Number of classes.
+ verbose (bool): Print additional information during initialization.
+ """
super().__init__(cfg=cfg, ch=ch, nc=nc, verbose=verbose)
def _apply(self, fn):
+ """Apply a function to all tensors in the model, including decoder anchors and valid mask.
+
+ Args:
+ fn (function): The function to apply to the model.
+
+ Returns:
+ (RTDETRDetectionModel): An updated RTDETRDetectionModel object.
+ """
self = super()._apply(fn)
m = self.model[-1]
m.anchors = fn(m.anchors)
@@ -425,11 +761,22 @@ return self
def init_criterion(self):
+ """Initialize the loss criterion for the RTDETRDetectionModel."""
from ultralytics.models.utils.loss import RTDETRDetectionLoss
return RTDETRDetectionLoss(nc=self.nc, use_vfl=True)
def loss(self, batch, preds=None):
+ """Compute the loss for the given batch of data.
+
+ Args:
+ batch (dict): Dictionary containing image and label data.
+ preds (tuple, optional): Precomputed model predictions.
+
+ Returns:
+ (torch.Tensor): Total loss value.
+ (torch.Tensor): Main three losses in a tensor.
+ """
if not hasattr(self, "criterion"):
self.criterion = self.init_criterion()
@@ -466,6 +813,19 @@ )
def predict(self, x, profile=False, visualize=False, batch=None, augment=False, embed=None):
+ """Perform a forward pass through the model.
+
+ Args:
+ x (torch.Tensor): The input tensor.
+ profile (bool): If True, profile the computation time for each layer.
+ visualize (bool): If True, save feature maps for visualization.
+ batch (dict, optional): Ground truth data for evaluation.
+ augment (bool): If True, perform data augmentation during inference.
+ embed (list, optional): A list of layer indices to return embeddings from.
+
+ Returns:
+ (torch.Tensor): Model's output tensor.
+ """
y, dt, embeddings = [], [], [] # outputs
embed = frozenset(embed) if embed is not None else {-1}
max_idx = max(embed)
@@ -488,17 +848,64 @@
class WorldModel(DetectionModel):
+ """YOLOv8 World Model.
+
+ This class implements the YOLOv8 World model for open-vocabulary object detection, supporting text-based class
+ specification and CLIP model integration for zero-shot detection capabilities.
+
+ Attributes:
+ txt_feats (torch.Tensor): Text feature embeddings for classes.
+ clip_model (torch.nn.Module): CLIP model for text encoding.
+
+ Methods:
+ __init__: Initialize YOLOv8 world model.
+ set_classes: Set classes for offline inference.
+ get_text_pe: Get text positional embeddings.
+ predict: Perform forward pass with text features.
+ loss: Compute loss with text features.
+
+ Examples:
+ Initialize a world model
+ >>> model = WorldModel("yolov8s-world.yaml", ch=3, nc=80)
+ >>> model.set_classes(["person", "car", "bicycle"])
+ >>> results = model.predict(image_tensor)
+ """
def __init__(self, cfg="yolov8s-world.yaml", ch=3, nc=None, verbose=True):
+ """Initialize YOLOv8 world model with given config and parameters.
+
+ Args:
+ cfg (str | dict): Model configuration file path or dictionary.
+ ch (int): Number of input channels.
+ nc (int, optional): Number of classes.
+ verbose (bool): Whether to display model information.
+ """
self.txt_feats = torch.randn(1, nc or 80, 512) # features placeholder
self.clip_model = None # CLIP model placeholder
super().__init__(cfg=cfg, ch=ch, nc=nc, verbose=verbose)
def set_classes(self, text, batch=80, cache_clip_model=True):
+ """Set classes in advance so that model could do offline-inference without clip model.
+
+ Args:
+ text (list[str]): List of class names.
+ batch (int): Batch size for processing text tokens.
+ cache_clip_model (bool): Whether to cache the CLIP model.
+ """
self.txt_feats = self.get_text_pe(text, batch=batch, cache_clip_model=cache_clip_model)
self.model[-1].nc = len(text)
def get_text_pe(self, text, batch=80, cache_clip_model=True):
+ """Get text positional embeddings using the CLIP model.
+
+ Args:
+ text (list[str]): List of class names.
+ batch (int): Batch size for processing text tokens.
+ cache_clip_model (bool): Whether to cache the CLIP model.
+
+ Returns:
+ (torch.Tensor): Text positional embeddings.
+ """
from ultralytics.nn.text_model import build_text_model
device = next(self.model.parameters()).device
@@ -512,6 +919,19 @@ return txt_feats.reshape(-1, len(text), txt_feats.shape[-1])
def predict(self, x, profile=False, visualize=False, txt_feats=None, augment=False, embed=None):
+ """Perform a forward pass through the model.
+
+ Args:
+ x (torch.Tensor): The input tensor.
+ profile (bool): If True, profile the computation time for each layer.
+ visualize (bool): If True, save feature maps for visualization.
+ txt_feats (torch.Tensor, optional): Precomputed text features to use if provided.
+ augment (bool): If True, perform data augmentation during inference.
+ embed (list, optional): A list of layer indices to return embeddings from.
+
+ Returns:
+ (torch.Tensor): Model's output tensor.
+ """
txt_feats = (self.txt_feats if txt_feats is None else txt_feats).to(device=x.device, dtype=x.dtype)
if txt_feats.shape[0] != x.shape[0] or self.model[-1].export:
txt_feats = txt_feats.expand(x.shape[0], -1, -1)
@@ -543,6 +963,12 @@ return x
def loss(self, batch, preds=None):
+ """Compute loss.
+
+ Args:
+ batch (dict): Batch to compute loss on.
+ preds (torch.Tensor | list[torch.Tensor], optional): Predictions.
+ """
if not hasattr(self, "criterion"):
self.criterion = self.init_criterion()
@@ -552,13 +978,57 @@
class YOLOEModel(DetectionModel):
+ """YOLOE detection model.
+
+ This class implements the YOLOE architecture for efficient object detection with text and visual prompts, supporting
+ both prompt-based and prompt-free inference modes.
+
+ Attributes:
+ pe (torch.Tensor): Prompt embeddings for classes.
+ clip_model (torch.nn.Module): CLIP model for text encoding.
+
+ Methods:
+ __init__: Initialize YOLOE model.
+ get_text_pe: Get text positional embeddings.
+ get_visual_pe: Get visual embeddings.
+ set_vocab: Set vocabulary for prompt-free model.
+ get_vocab: Get fused vocabulary layer.
+ set_classes: Set classes for offline inference.
+ get_cls_pe: Get class positional embeddings.
+ predict: Perform forward pass with prompts.
+ loss: Compute loss with prompts.
+
+ Examples:
+ Initialize a YOLOE model
+ >>> model = YOLOEModel("yoloe-v8s.yaml", ch=3, nc=80)
+ >>> results = model.predict(image_tensor, tpe=text_embeddings)
+ """
def __init__(self, cfg="yoloe-v8s.yaml", ch=3, nc=None, verbose=True):
+ """Initialize YOLOE model with given config and parameters.
+
+ Args:
+ cfg (str | dict): Model configuration file path or dictionary.
+ ch (int): Number of input channels.
+ nc (int, optional): Number of classes.
+ verbose (bool): Whether to display model information.
+ """
super().__init__(cfg=cfg, ch=ch, nc=nc, verbose=verbose)
self.text_model = self.yaml.get("text_model", "mobileclip:blt")
@smart_inference_mode()
def get_text_pe(self, text, batch=80, cache_clip_model=False, without_reprta=False):
+ """Get text positional embeddings using the CLIP model.
+
+ Args:
+ text (list[str]): List of class names.
+ batch (int): Batch size for processing text tokens.
+ cache_clip_model (bool): Whether to cache the CLIP model.
+ without_reprta (bool): Whether to return text embeddings without reprta module processing.
+
+ Returns:
+ (torch.Tensor): Text positional embeddings.
+ """
from ultralytics.nn.text_model import build_text_model
device = next(self.model.parameters()).device
@@ -584,9 +1054,24 @@
@smart_inference_mode()
def get_visual_pe(self, img, visual):
+ """Get visual positional embeddings.
+
+ Args:
+ img (torch.Tensor): Input image tensor.
+ visual (torch.Tensor): Visual features.
+
+ Returns:
+ (torch.Tensor): Visual positional embeddings.
+ """
return self(img, vpe=visual, return_vpe=True)
def set_vocab(self, vocab, names):
+ """Set vocabulary for the prompt-free model.
+
+ Args:
+ vocab (nn.ModuleList): List of vocabulary items.
+ names (list[str]): List of class names.
+ """
assert not self.training
head = self.model[-1]
assert isinstance(head, YOLOEDetect)
@@ -611,6 +1096,14 @@ self.names = check_class_names(names)
def get_vocab(self, names):
+ """Get fused vocabulary layer from the model.
+
+ Args:
+ names (list[str]): List of class names.
+
+ Returns:
+ (nn.ModuleList): List of vocabulary modules.
+ """
assert not self.training
head = self.model[-1]
assert isinstance(head, YOLOEDetect)
@@ -629,6 +1122,12 @@ return vocab
def set_classes(self, names, embeddings):
+ """Set classes in advance so that model could do offline-inference without clip model.
+
+ Args:
+ names (list[str]): List of class names.
+ embeddings (torch.Tensor): Embeddings tensor.
+ """
assert not hasattr(self.model[-1], "lrpc"), (
"Prompt-free model does not support setting classes. Please try with Text/Visual prompt models."
)
@@ -638,6 +1137,15 @@ self.names = check_class_names(names)
def get_cls_pe(self, tpe, vpe):
+ """Get class positional embeddings.
+
+ Args:
+ tpe (torch.Tensor | None): Text positional embeddings.
+ vpe (torch.Tensor | None): Visual positional embeddings.
+
+ Returns:
+ (torch.Tensor): Class positional embeddings.
+ """
all_pe = []
if tpe is not None:
assert tpe.ndim == 3
@@ -652,6 +1160,21 @@ def predict(
self, x, profile=False, visualize=False, tpe=None, augment=False, embed=None, vpe=None, return_vpe=False
):
+ """Perform a forward pass through the model.
+
+ Args:
+ x (torch.Tensor): The input tensor.
+ profile (bool): If True, profile the computation time for each layer.
+ visualize (bool): If True, save feature maps for visualization.
+ tpe (torch.Tensor, optional): Text positional embeddings.
+ augment (bool): If True, perform data augmentation during inference.
+ embed (list, optional): A list of layer indices to return embeddings from.
+ vpe (torch.Tensor, optional): Visual positional embeddings.
+ return_vpe (bool): If True, return visual positional embeddings.
+
+ Returns:
+ (torch.Tensor): Model's output tensor.
+ """
y, dt, embeddings = [], [], [] # outputs
b = x.shape[0]
embed = frozenset(embed) if embed is not None else {-1}
@@ -683,6 +1206,12 @@ return x
def loss(self, batch, preds=None):
+ """Compute loss.
+
+ Args:
+ batch (dict): Batch to compute loss on.
+ preds (torch.Tensor | list[torch.Tensor], optional): Predictions.
+ """
if not hasattr(self, "criterion"):
from ultralytics.utils.loss import TVPDetectLoss
@@ -702,11 +1231,39 @@
class YOLOESegModel(YOLOEModel, SegmentationModel):
+ """YOLOE segmentation model.
+
+ This class extends YOLOEModel to handle instance segmentation tasks with text and visual prompts, providing
+ specialized loss computation for pixel-level object detection and segmentation.
+
+ Methods:
+ __init__: Initialize YOLOE segmentation model.
+ loss: Compute loss with prompts for segmentation.
+
+ Examples:
+ Initialize a YOLOE segmentation model
+ >>> model = YOLOESegModel("yoloe-v8s-seg.yaml", ch=3, nc=80)
+ >>> results = model.predict(image_tensor, tpe=text_embeddings)
+ """
def __init__(self, cfg="yoloe-v8s-seg.yaml", ch=3, nc=None, verbose=True):
+ """Initialize YOLOE segmentation model with given config and parameters.
+
+ Args:
+ cfg (str | dict): Model configuration file path or dictionary.
+ ch (int): Number of input channels.
+ nc (int, optional): Number of classes.
+ verbose (bool): Whether to display model information.
+ """
super().__init__(cfg=cfg, ch=ch, nc=nc, verbose=verbose)
def loss(self, batch, preds=None):
+ """Compute loss.
+
+ Args:
+ batch (dict): Batch to compute loss on.
+ preds (torch.Tensor | list[torch.Tensor], optional): Predictions.
+ """
if not hasattr(self, "criterion"):
from ultralytics.utils.loss import TVPSegmentLoss
@@ -723,11 +1280,40 @@
class Ensemble(torch.nn.ModuleList):
+ """Ensemble of models.
+
+ This class allows combining multiple YOLO models into an ensemble for improved performance through model averaging
+ or other ensemble techniques.
+
+ Methods:
+ __init__: Initialize an ensemble of models.
+ forward: Generate predictions from all models in the ensemble.
+
+ Examples:
+ Create an ensemble of models
+ >>> ensemble = Ensemble()
+ >>> ensemble.append(model1)
+ >>> ensemble.append(model2)
+ >>> results = ensemble(image_tensor)
+ """
def __init__(self):
+ """Initialize an ensemble of models."""
super().__init__()
def forward(self, x, augment=False, profile=False, visualize=False):
+ """Run ensemble forward pass and concatenate predictions from all models.
+
+ Args:
+ x (torch.Tensor): Input tensor.
+ augment (bool): Whether to augment the input.
+ profile (bool): Whether to profile the model.
+ visualize (bool): Whether to visualize the features.
+
+ Returns:
+ (torch.Tensor): Concatenated predictions from all models.
+ (None): Always None for ensemble inference.
+ """
y = [module(x, augment, profile, visualize)[0] for module in self]
# y = torch.stack(y).max(0)[0] # max ensemble
# y = torch.stack(y).mean(0) # mean ensemble
@@ -740,6 +1326,26 @@
@contextlib.contextmanager
def temporary_modules(modules=None, attributes=None):
+ """Context manager for temporarily adding or modifying modules in Python's module cache (`sys.modules`).
+
+ This function can be used to change the module paths during runtime. It's useful when refactoring code, where you've
+ moved a module from one location to another, but you still want to support the old import paths for backwards
+ compatibility.
+
+ Args:
+ modules (dict, optional): A dictionary mapping old module paths to new module paths.
+ attributes (dict, optional): A dictionary mapping old module attributes to new module attributes.
+
+ Examples:
+ >>> with temporary_modules({"old.module": "new.module"}, {"old.module.attribute": "new.module.attribute"}):
+ >>> import old.module # this will now import new.module
+ >>> from old.module import attribute # this will now import new.module.attribute
+
+ Notes:
+ The changes are only in effect inside the context manager and are undone once the context manager exits.
+ Be aware that directly manipulating `sys.modules` can lead to unpredictable results, especially in larger
+ applications or libraries. Use this function with caution.
+ """
if modules is None:
modules = {}
if attributes is None:
@@ -767,17 +1373,30 @@
class SafeClass:
+ """A placeholder class to replace unknown classes during unpickling."""
def __init__(self, *args, **kwargs):
+ """Initialize SafeClass instance, ignoring all arguments."""
pass
def __call__(self, *args, **kwargs):
+ """Run SafeClass instance, ignoring all arguments."""
pass
class SafeUnpickler(pickle.Unpickler):
+ """Custom Unpickler that replaces unknown classes with SafeClass."""
def find_class(self, module, name):
+ """Attempt to find a class, returning SafeClass if not among safe modules.
+
+ Args:
+ module (str): Module name.
+ name (str): Class name.
+
+ Returns:
+ (type): Found class or SafeClass.
+ """
safe_modules = (
"torch",
"collections",
@@ -794,6 +1413,22 @@
def torch_safe_load(weight, safe_only=False):
+ """Attempt to load a PyTorch model with the torch.load() function. If a ModuleNotFoundError is raised, it catches
+ the error, logs a warning message, and attempts to install the missing module via the check_requirements()
+ function. After installation, the function again attempts to load the model using torch.load().
+
+ Args:
+ weight (str | Path): The file path of the PyTorch model.
+ safe_only (bool): If True, replace unknown classes with SafeClass during loading.
+
+ Returns:
+ (dict): The loaded model checkpoint.
+ (str): The loaded filename.
+
+ Examples:
+ >>> from ultralytics.nn.tasks import torch_safe_load
+ >>> ckpt, file = torch_safe_load("path/to/best.pt", safe_only=True)
+ """
from ultralytics.utils.downloads import attempt_download_asset
check_suffix(file=weight, suffix=".pt")
@@ -865,6 +1500,18 @@
def load_checkpoint(weight, device=None, inplace=True, fuse=False):
+ """Load single model weights.
+
+ Args:
+ weight (str | Path): Model weight path.
+ device (torch.device, optional): Device to load model to.
+ inplace (bool): Whether to do inplace operations.
+ fuse (bool): Whether to fuse model.
+
+ Returns:
+ (torch.nn.Module): Loaded model.
+ (dict): Model checkpoint dictionary.
+ """
ckpt, weight = torch_safe_load(weight) # load ckpt
args = {**DEFAULT_CFG_DICT, **(ckpt.get("train_args", {}))} # combine model and default args, preferring model args
model = (ckpt.get("ema") or ckpt["model"]).float() # FP32 model
@@ -890,6 +1537,17 @@
def parse_model(d, ch, verbose=True):
+ """Parse a YOLO model.yaml dictionary into a PyTorch model.
+
+ Args:
+ d (dict): Model dictionary.
+ ch (int): Input channels.
+ verbose (bool): Whether to print model details.
+
+ Returns:
+ (torch.nn.Sequential): PyTorch model.
+ (list): Sorted list of layer indices whose outputs need to be saved.
+ """
import ast
# Args
@@ -1074,6 +1732,14 @@
def yaml_model_load(path):
+ """Load a YOLO model from a YAML file.
+
+ Args:
+ path (str | Path): Path to the YAML file.
+
+ Returns:
+ (dict): Model dictionary.
+ """
path = Path(path)
if path.stem in (f"yolov{d}{x}6" for x in "nsmlx" for d in (5, 8)):
new_stem = re.sub(r"(\d+)([nslmx])6(.+)?$", r"\1\2-p6\3", path.stem)
@@ -1089,6 +1755,14 @@
def guess_model_scale(model_path):
+ """Extract the size character n, s, m, l, or x of the model's scale from the model path.
+
+ Args:
+ model_path (str | Path): The path to the YOLO model's YAML file.
+
+ Returns:
+ (str): The size character of the model's scale (n, s, m, l, or x), or empty string if not found.
+ """
try:
return re.search(r"yolo(e-)?[v]?\d+([nslmx])", Path(model_path).stem).group(2)
except AttributeError:
@@ -1096,8 +1770,17 @@
def guess_model_task(model):
+ """Guess the task of a PyTorch model from its architecture or configuration.
+
+ Args:
+ model (torch.nn.Module | dict | str | Path): PyTorch model, model configuration dict, or model file path.
+
+ Returns:
+ (str): Task of the model ('detect', 'segment', 'classify', 'pose', 'obb').
+ """
def cfg2task(cfg):
+ """Guess from YAML dictionary."""
m = cfg["head"][-1][-2].lower() # output module name
if m in {"classify", "classifier", "cls", "fc"}:
return "classify"
@@ -1153,4 +1836,4 @@ "Unable to automatically guess model task, assuming 'task=detect'. "
"Explicitly define task for your model, i.e. 'task=detect', 'segment', 'classify','pose' or 'obb'."
)
- return "detect" # assume detect+ return "detect" # assume detect
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/nn/tasks.py |
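A minimal sketch of how the task- and scale-guessing helpers documented above behave, assuming the ultralytics package is installed; the model names below are illustrative placeholders, not files that need to exist:
from ultralytics.nn.tasks import guess_model_scale, guess_model_task
# Pure string/path inspection, no checkpoint loading required
print(guess_model_scale("yolov8m.yaml"))     # 'm', matched by the regex on the filename stem
print(guess_model_task("yolo26n-seg.pt"))    # 'segment', inferred from the '-seg' suffix
print(guess_model_task("mystery_model.pt"))  # 'detect', the logged fallback when nothing matches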
Improve documentation using docstrings | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from abc import abstractmethod
from pathlib import Path
import torch
import torch.nn as nn
from PIL import Image
from ultralytics.utils import checks
from ultralytics.utils.torch_utils import smart_inference_mode
try:
import clip
except ImportError:
checks.check_requirements("git+https://github.com/ultralytics/CLIP.git")
import clip
class TextModel(nn.Module):
def __init__(self):
super().__init__()
@abstractmethod
def tokenize(self, texts):
pass
@abstractmethod
def encode_text(self, texts, dtype):
pass
class CLIP(TextModel):
def __init__(self, size: str, device: torch.device) -> None:
super().__init__()
self.model, self.image_preprocess = clip.load(size, device=device)
self.to(device)
self.device = device
self.eval()
def tokenize(self, texts: str | list[str], truncate: bool = True) -> torch.Tensor:
return clip.tokenize(texts, truncate=truncate).to(self.device)
@smart_inference_mode()
def encode_text(self, texts: torch.Tensor, dtype: torch.dtype = torch.float32) -> torch.Tensor:
txt_feats = self.model.encode_text(texts).to(dtype)
txt_feats = txt_feats / txt_feats.norm(p=2, dim=-1, keepdim=True)
return txt_feats
@smart_inference_mode()
def encode_image(self, image: Image.Image | torch.Tensor, dtype: torch.dtype = torch.float32) -> torch.Tensor:
if isinstance(image, Image.Image):
image = self.image_preprocess(image).unsqueeze(0).to(self.device)
img_feats = self.model.encode_image(image).to(dtype)
img_feats = img_feats / img_feats.norm(p=2, dim=-1, keepdim=True)
return img_feats
class MobileCLIP(TextModel):
config_size_map = {"s0": "s0", "s1": "s1", "s2": "s2", "b": "b", "blt": "b"}
def __init__(self, size: str, device: torch.device) -> None:
try:
import mobileclip
except ImportError:
# Ultralytics fork preferred since Apple MobileCLIP repo has incorrect version of torchvision
checks.check_requirements("git+https://github.com/ultralytics/mobileclip.git")
import mobileclip
super().__init__()
config = self.config_size_map[size]
file = f"mobileclip_{size}.pt"
if not Path(file).is_file():
from ultralytics import download
download(f"https://docs-assets.developer.apple.com/ml-research/datasets/mobileclip/{file}")
self.model = mobileclip.create_model_and_transforms(f"mobileclip_{config}", pretrained=file, device=device)[0]
self.tokenizer = mobileclip.get_tokenizer(f"mobileclip_{config}")
self.to(device)
self.device = device
self.eval()
def tokenize(self, texts: list[str]) -> torch.Tensor:
return self.tokenizer(texts).to(self.device)
@smart_inference_mode()
def encode_text(self, texts: torch.Tensor, dtype: torch.dtype = torch.float32) -> torch.Tensor:
text_features = self.model.encode_text(texts).to(dtype)
text_features /= text_features.norm(p=2, dim=-1, keepdim=True)
return text_features
class MobileCLIPTS(TextModel):
def __init__(self, device: torch.device, weight: str = "mobileclip_blt.ts"):
super().__init__()
from ultralytics.utils.downloads import attempt_download_asset
self.encoder = torch.jit.load(attempt_download_asset(weight), map_location=device)
self.tokenizer = clip.clip.tokenize
self.device = device
def tokenize(self, texts: list[str], truncate: bool = True) -> torch.Tensor:
return self.tokenizer(texts, truncate=truncate).to(self.device)
@smart_inference_mode()
def encode_text(self, texts: torch.Tensor, dtype: torch.dtype = torch.float32) -> torch.Tensor:
# NOTE: no need to do normalization here as it's embedded in the torchscript model
return self.encoder(texts).to(dtype)
def build_text_model(variant: str, device: torch.device = None) -> TextModel:
base, size = variant.split(":")
if base == "clip":
return CLIP(size, device)
elif base == "mobileclip":
return MobileCLIPTS(device)
elif base == "mobileclip2":
return MobileCLIPTS(device, weight="mobileclip2_b.ts")
else:
raise ValueError(f"Unrecognized base model '{base}'. Supported models are 'clip', 'mobileclip', 'mobileclip2'.") | --- +++ @@ -20,22 +20,65 @@
class TextModel(nn.Module):
+ """Abstract base class for text encoding models.
+
+ This class defines the interface for text encoding models used in vision-language tasks. Subclasses must implement
+ the tokenize and encode_text methods to provide text tokenization and encoding functionality.
+
+ Methods:
+ tokenize: Convert input texts to tokens for model processing.
+ encode_text: Encode tokenized texts into normalized feature vectors.
+ """
def __init__(self):
+ """Initialize the TextModel base class."""
super().__init__()
@abstractmethod
def tokenize(self, texts):
+ """Convert input texts to tokens for model processing."""
pass
@abstractmethod
def encode_text(self, texts, dtype):
+ """Encode tokenized texts into normalized feature vectors."""
pass
class CLIP(TextModel):
+ """Implements OpenAI's CLIP (Contrastive Language-Image Pre-training) text encoder.
+
+ This class provides a text encoder based on OpenAI's CLIP model, which can convert text into feature vectors that
+ are aligned with corresponding image features in a shared embedding space.
+
+ Attributes:
+ model (clip.model.CLIP): The loaded CLIP model.
+ image_preprocess (callable): Preprocessing transform for images.
+ device (torch.device): Device where the model is loaded.
+
+ Methods:
+ tokenize: Convert input texts to CLIP tokens.
+ encode_text: Encode tokenized texts into normalized feature vectors.
+
+ Examples:
+ >>> import torch
+ >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ >>> clip_model = CLIP(size="ViT-B/32", device=device)
+ >>> tokens = clip_model.tokenize(["a photo of a cat", "a photo of a dog"])
+ >>> text_features = clip_model.encode_text(tokens)
+ >>> print(text_features.shape)
+ """
def __init__(self, size: str, device: torch.device) -> None:
+ """Initialize the CLIP text encoder.
+
+ This class implements the TextModel interface using OpenAI's CLIP model for text encoding. It loads a
+ pre-trained CLIP model of the specified size and prepares it for text encoding tasks.
+
+ Args:
+ size (str): Model size identifier (e.g., 'ViT-B/32').
+ device (torch.device): Device to load the model on.
+ """
super().__init__()
self.model, self.image_preprocess = clip.load(size, device=device)
self.to(device)
@@ -43,16 +86,75 @@ self.eval()
def tokenize(self, texts: str | list[str], truncate: bool = True) -> torch.Tensor:
+ """Convert input texts to CLIP tokens.
+
+ Args:
+ texts (str | list[str]): Input text or list of texts to tokenize.
+ truncate (bool, optional): Whether to trim texts that exceed CLIP's context length. Defaults to True to
+ avoid RuntimeError from overly long inputs while still allowing explicit opt-out.
+
+ Returns:
+ (torch.Tensor): Tokenized text tensor with shape (batch_size, context_length) ready for model processing.
+
+ Examples:
+ >>> model = CLIP("ViT-B/32", device="cpu")
+ >>> tokens = model.tokenize("a photo of a cat")
+ >>> print(tokens.shape) # torch.Size([1, 77])
+ >>> strict_tokens = model.tokenize("a photo of a cat", truncate=False) # Enforce strict length checks
+ >>> print(strict_tokens.shape) # Same shape/content as tokens since the prompt is under 77 tokens
+ """
return clip.tokenize(texts, truncate=truncate).to(self.device)
@smart_inference_mode()
def encode_text(self, texts: torch.Tensor, dtype: torch.dtype = torch.float32) -> torch.Tensor:
+ """Encode tokenized texts into normalized feature vectors.
+
+ This method processes tokenized text inputs through the CLIP model to generate feature vectors, which are then
+ normalized to unit length. These normalized vectors can be used for text-image similarity comparisons.
+
+ Args:
+ texts (torch.Tensor): Tokenized text inputs, typically created using the tokenize() method.
+ dtype (torch.dtype, optional): Data type for output features.
+
+ Returns:
+ (torch.Tensor): Normalized text feature vectors with unit length (L2 norm = 1).
+
+ Examples:
+ >>> clip_model = CLIP("ViT-B/32", device="cuda")
+ >>> tokens = clip_model.tokenize(["a photo of a cat", "a photo of a dog"])
+ >>> features = clip_model.encode_text(tokens)
+ >>> features.shape
+ torch.Size([2, 512])
+ """
txt_feats = self.model.encode_text(texts).to(dtype)
txt_feats = txt_feats / txt_feats.norm(p=2, dim=-1, keepdim=True)
return txt_feats
@smart_inference_mode()
def encode_image(self, image: Image.Image | torch.Tensor, dtype: torch.dtype = torch.float32) -> torch.Tensor:
+ """Encode images into normalized feature vectors.
+
+ This method processes image inputs through the CLIP model to generate feature vectors, which are then
+ normalized to unit length. These normalized vectors can be used for text-image similarity comparisons.
+
+ Args:
+ image (PIL.Image | torch.Tensor): Image input as a PIL Image or preprocessed tensor. If a PIL Image is
+ provided, it will be converted to a tensor using the model's image preprocessing function.
+ dtype (torch.dtype, optional): Data type for output features.
+
+ Returns:
+ (torch.Tensor): Normalized image feature vectors with unit length (L2 norm = 1).
+
+ Examples:
+ >>> from ultralytics.nn.text_model import CLIP
+ >>> from PIL import Image
+ >>> clip_model = CLIP("ViT-B/32", device="cuda")
+ >>> image = Image.open("path/to/image.jpg")
+ >>> image_tensor = clip_model.image_preprocess(image).unsqueeze(0).to("cuda")
+ >>> features = clip_model.encode_image(image_tensor)
+ >>> features.shape
+ torch.Size([1, 512])
+ """
if isinstance(image, Image.Image):
image = self.image_preprocess(image).unsqueeze(0).to(self.device)
img_feats = self.model.encode_image(image).to(dtype)
@@ -61,10 +163,39 @@
class MobileCLIP(TextModel):
+ """Implement Apple's MobileCLIP text encoder for efficient text encoding.
+
+ This class implements the TextModel interface using Apple's MobileCLIP model, providing efficient text encoding
+ capabilities for vision-language tasks with reduced computational requirements compared to standard CLIP models.
+
+ Attributes:
+ model (mobileclip.model.MobileCLIP): The loaded MobileCLIP model.
+ tokenizer (callable): Tokenizer function for processing text inputs.
+ device (torch.device): Device where the model is loaded.
+ config_size_map (dict): Mapping from size identifiers to model configuration names.
+
+ Methods:
+ tokenize: Convert input texts to MobileCLIP tokens.
+ encode_text: Encode tokenized texts into normalized feature vectors.
+
+ Examples:
+ >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ >>> text_encoder = MobileCLIP(size="s0", device=device)
+ >>> tokens = text_encoder.tokenize(["a photo of a cat", "a photo of a dog"])
+ >>> features = text_encoder.encode_text(tokens)
+ """
config_size_map = {"s0": "s0", "s1": "s1", "s2": "s2", "b": "b", "blt": "b"}
def __init__(self, size: str, device: torch.device) -> None:
+ """Initialize the MobileCLIP text encoder.
+
+ This class implements the TextModel interface using Apple's MobileCLIP model for efficient text encoding.
+
+ Args:
+ size (str): Model size identifier (e.g., 's0', 's1', 's2', 'b', 'blt').
+ device (torch.device): Device to load the model on.
+ """
try:
import mobileclip
except ImportError:
@@ -86,18 +217,75 @@ self.eval()
def tokenize(self, texts: list[str]) -> torch.Tensor:
+ """Convert input texts to MobileCLIP tokens.
+
+ Args:
+ texts (list[str]): List of text strings to tokenize.
+
+ Returns:
+ (torch.Tensor): Tokenized text inputs with shape (batch_size, sequence_length).
+
+ Examples:
+ >>> model = MobileCLIP("s0", "cpu")
+ >>> tokens = model.tokenize(["a photo of a cat", "a photo of a dog"])
+ """
return self.tokenizer(texts).to(self.device)
@smart_inference_mode()
def encode_text(self, texts: torch.Tensor, dtype: torch.dtype = torch.float32) -> torch.Tensor:
+ """Encode tokenized texts into normalized feature vectors.
+
+ Args:
+ texts (torch.Tensor): Tokenized text inputs.
+ dtype (torch.dtype, optional): Data type for output features.
+
+ Returns:
+ (torch.Tensor): Normalized text feature vectors with L2 normalization applied.
+
+ Examples:
+ >>> model = MobileCLIP("s0", device="cpu")
+ >>> tokens = model.tokenize(["a photo of a cat", "a photo of a dog"])
+ >>> features = model.encode_text(tokens)
+ >>> features.shape
+ torch.Size([2, 512]) # Actual dimension depends on model size
+ """
text_features = self.model.encode_text(texts).to(dtype)
text_features /= text_features.norm(p=2, dim=-1, keepdim=True)
return text_features
class MobileCLIPTS(TextModel):
+ """Load a TorchScript traced version of MobileCLIP.
+
+ This class implements the TextModel interface using Apple's MobileCLIP model in TorchScript format, providing
+ efficient text encoding capabilities for vision-language tasks with optimized inference performance.
+
+ Attributes:
+ encoder (torch.jit.ScriptModule): The loaded TorchScript MobileCLIP text encoder.
+ tokenizer (callable): Tokenizer function for processing text inputs.
+ device (torch.device): Device where the model is loaded.
+
+ Methods:
+ tokenize: Convert input texts to MobileCLIP tokens.
+ encode_text: Encode tokenized texts into normalized feature vectors.
+
+ Examples:
+ >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ >>> text_encoder = MobileCLIPTS(device=device)
+ >>> tokens = text_encoder.tokenize(["a photo of a cat", "a photo of a dog"])
+ >>> features = text_encoder.encode_text(tokens)
+ """
def __init__(self, device: torch.device, weight: str = "mobileclip_blt.ts"):
+ """Initialize the MobileCLIP TorchScript text encoder.
+
+ This class implements the TextModel interface using Apple's MobileCLIP model in TorchScript format for efficient
+ text encoding with optimized inference performance.
+
+ Args:
+ device (torch.device): Device to load the model on.
+ weight (str): Path to the TorchScript model weights.
+ """
super().__init__()
from ultralytics.utils.downloads import attempt_download_asset
@@ -106,15 +294,61 @@ self.device = device
def tokenize(self, texts: list[str], truncate: bool = True) -> torch.Tensor:
+ """Convert input texts to MobileCLIP tokens.
+
+ Args:
+ texts (list[str]): List of text strings to tokenize.
+ truncate (bool, optional): Whether to trim texts that exceed the tokenizer context length. Defaults to True,
+ matching CLIP's behavior to prevent runtime failures on long captions.
+
+ Returns:
+ (torch.Tensor): Tokenized text inputs with shape (batch_size, sequence_length).
+
+ Examples:
+ >>> model = MobileCLIPTS(device=torch.device("cpu"))
+ >>> tokens = model.tokenize(["a photo of a cat", "a photo of a dog"])
+ >>> strict_tokens = model.tokenize(
+ ... ["a very long caption"], truncate=False
+ ... ) # Raises RuntimeError if the caption exceeds 77 tokens
+ """
return self.tokenizer(texts, truncate=truncate).to(self.device)
@smart_inference_mode()
def encode_text(self, texts: torch.Tensor, dtype: torch.dtype = torch.float32) -> torch.Tensor:
+ """Encode tokenized texts into normalized feature vectors.
+
+ Args:
+ texts (torch.Tensor): Tokenized text inputs.
+ dtype (torch.dtype, optional): Data type for output features.
+
+ Returns:
+ (torch.Tensor): Normalized text feature vectors with L2 normalization applied.
+
+ Examples:
+ >>> model = MobileCLIPTS(device="cpu")
+ >>> tokens = model.tokenize(["a photo of a cat", "a photo of a dog"])
+ >>> features = model.encode_text(tokens)
+ >>> features.shape
+ torch.Size([2, 512]) # Actual dimension depends on model size
+ """
# NOTE: no need to do normalization here as it's embedded in the torchscript model
return self.encoder(texts).to(dtype)
def build_text_model(variant: str, device: torch.device = None) -> TextModel:
+ """Build a text encoding model based on the specified variant.
+
+ Args:
+ variant (str): Model variant in format "base:size" (e.g., "clip:ViT-B/32" or "mobileclip:s0").
+ device (torch.device, optional): Device to load the model on.
+
+ Returns:
+ (TextModel): Instantiated text encoding model.
+
+ Examples:
+ >>> model = build_text_model("clip:ViT-B/32", device=torch.device("cuda"))
+ >>> model = build_text_model("mobileclip:s0", device=torch.device("cpu"))
+ """
base, size = variant.split(":")
if base == "clip":
return CLIP(size, device)
@@ -123,4 +357,4 @@ elif base == "mobileclip2":
return MobileCLIPTS(device, weight="mobileclip2_b.ts")
else:
- raise ValueError(f"Unrecognized base model '{base}'. Supported models are 'clip', 'mobileclip', 'mobileclip2'.")+ raise ValueError(f"Unrecognized base model '{base}'. Supported models are 'clip', 'mobileclip', 'mobileclip2'.")
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/nn/text_model.py |
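The encoders above share a tokenize/encode_text interface behind build_text_model, so switching backbones only changes the variant string. A minimal usage sketch based on the docstring examples above, assuming the CLIP weights can be fetched on first use:
import torch
from ultralytics.nn.text_model import build_text_model
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
encoder = build_text_model("clip:ViT-B/32", device=device)  # "mobileclip:blt" selects the TorchScript variant
tokens = encoder.tokenize(["a photo of a cat", "a photo of a dog"])
features = encoder.encode_text(tokens)  # L2-normalized text embeddings, (2, 512) for ViT-B/32
print(features.shape)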
Generate docstrings for exported functions | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import torch
from torch import optim
def zeropower_via_newtonschulz5(G: torch.Tensor, eps: float = 1e-7) -> torch.Tensor:
assert len(G.shape) == 2
X = G.bfloat16()
X /= X.norm() + eps # ensure top singular value <= 1
if G.size(0) > G.size(1):
X = X.T
for a, b, c in [ # num_steps fixed at 5
# original params
(3.4445, -4.7750, 2.0315),
(3.4445, -4.7750, 2.0315),
(3.4445, -4.7750, 2.0315),
(3.4445, -4.7750, 2.0315),
(3.4445, -4.7750, 2.0315),
]:
# for _ in range(steps):
A = X @ X.T
B = b * A + c * A @ A
X = a * X + B @ X
if G.size(0) > G.size(1):
X = X.T
return X
def muon_update(grad: torch.Tensor, momentum: torch.Tensor, beta: float = 0.95, nesterov: bool = True) -> torch.Tensor:
momentum.lerp_(grad, 1 - beta)
update = grad.lerp(momentum, beta) if nesterov else momentum
if update.ndim == 4: # for the case of conv filters
update = update.view(len(update), -1)
update = zeropower_via_newtonschulz5(update)
update *= max(1, grad.size(-2) / grad.size(-1)) ** 0.5
return update
class MuSGD(optim.Optimizer):
def __init__(
self,
params,
lr: float = 1e-3,
momentum: float = 0.0,
weight_decay: float = 0.0,
nesterov: bool = False,
use_muon: bool = False,
muon: float = 0.5,
sgd: float = 0.5,
):
defaults = dict(
lr=lr,
momentum=momentum,
weight_decay=weight_decay,
nesterov=nesterov,
use_muon=use_muon,
)
super().__init__(params, defaults)
self.muon = muon
self.sgd = sgd
@torch.no_grad()
def step(self, closure=None):
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
# Muon
if group["use_muon"]:
# generate weight updates in distributed fashion
for p in group["params"]:
lr = group["lr"]
if p.grad is None:
continue
grad = p.grad
state = self.state[p]
if len(state) == 0:
state["momentum_buffer"] = torch.zeros_like(p)
state["momentum_buffer_SGD"] = torch.zeros_like(p)
update = muon_update(
grad, state["momentum_buffer"], beta=group["momentum"], nesterov=group["nesterov"]
)
p.add_(update.reshape(p.shape), alpha=-(lr * self.muon))
# SGD update
if group["weight_decay"] != 0:
grad = grad.add(p, alpha=group["weight_decay"])
state["momentum_buffer_SGD"].mul_(group["momentum"]).add_(grad)
sgd_update = (
grad.add(state["momentum_buffer_SGD"], alpha=group["momentum"])
if group["nesterov"]
else state["momentum_buffer_SGD"]
)
p.add_(sgd_update, alpha=-(lr * self.sgd))
else: # SGD
for p in group["params"]:
lr = group["lr"]
if p.grad is None:
continue
grad = p.grad
if group["weight_decay"] != 0:
grad = grad.add(p, alpha=group["weight_decay"])
state = self.state[p]
if len(state) == 0:
state["momentum_buffer"] = torch.zeros_like(p)
state["momentum_buffer"].mul_(group["momentum"]).add_(grad)
update = (
grad.add(state["momentum_buffer"], alpha=group["momentum"])
if group["nesterov"]
else state["momentum_buffer"]
)
p.add_(update, alpha=-lr)
return loss
class Muon(optim.Optimizer):
def __init__(self, params, lr: float = 0.02, weight_decay: float = 0, momentum: float = 0.95):
defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum)
super().__init__(params, defaults)
@torch.no_grad()
def step(self, closure=None):
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
# continue
p.grad = torch.zeros_like(p) # Force synchronization
state = self.state[p]
if len(state) == 0:
state["momentum_buffer"] = torch.zeros_like(p)
update = muon_update(p.grad, state["momentum_buffer"], beta=group["momentum"])
p.mul_(1 - group["lr"] * group["weight_decay"])
p.add_(update.reshape(p.shape), alpha=-group["lr"])
return loss | --- +++ @@ -7,6 +7,33 @@
def zeropower_via_newtonschulz5(G: torch.Tensor, eps: float = 1e-7) -> torch.Tensor:
+ """Compute the zeroth power / orthogonalization of matrix G using Newton-Schulz iteration.
+
+ This function implements a quintic Newton-Schulz iteration to compute an approximate orthogonalization of the input
+ matrix G. The iteration coefficients are optimized to maximize convergence slope at zero, producing a result similar
+ to UV^T from SVD, where USV^T = G, but with relaxed convergence guarantees that empirically work well for
+ optimization purposes.
+
+ Args:
+ G (torch.Tensor): Input 2D tensor/matrix to orthogonalize.
+ eps (float, optional): Small epsilon value added to norm for numerical stability. Default: 1e-7.
+
+ Returns:
+ (torch.Tensor): Orthogonalized matrix with same shape as input G.
+
+ Examples:
+ >>> G = torch.randn(128, 64)
+ >>> G_ortho = zeropower_via_newtonschulz5(G)
+ >>> print(G_ortho.shape)
+ torch.Size([128, 64])
+
+ Notes:
+ - Uses bfloat16 precision for computation.
+ - Performs exactly 5 Newton-Schulz iteration steps with fixed coefficients.
+ - Automatically transposes for efficiency when rows > columns.
+ - Output approximates US'V^T where S' has diagonal entries ~ Uniform(0.5, 1.5).
+ - Does not produce exact UV^T but works well empirically for neural network optimization.
+ """
assert len(G.shape) == 2
X = G.bfloat16()
X /= X.norm() + eps # ensure top singular value <= 1
@@ -30,6 +57,36 @@
def muon_update(grad: torch.Tensor, momentum: torch.Tensor, beta: float = 0.95, nesterov: bool = True) -> torch.Tensor:
+ """Compute Muon optimizer update with momentum and orthogonalization.
+
+ This function applies momentum to the gradient, optionally uses Nesterov acceleration, and then orthogonalizes the
+ update using Newton-Schulz iterations. For convolutional filters (4D tensors), it reshapes before orthogonalization
+ and scales the final update based on parameter dimensions.
+
+ Args:
+ grad (torch.Tensor): Gradient tensor to update. Can be 2D or 4D (for conv filters).
+ momentum (torch.Tensor): Momentum buffer tensor, modified in-place via lerp.
+ beta (float, optional): Momentum coefficient for exponential moving average. Default: 0.95.
+ nesterov (bool, optional): Whether to use Nesterov momentum acceleration. Default: True.
+
+ Returns:
+ (torch.Tensor): Orthogonalized update tensor with same shape as input grad. For 4D inputs, returns reshaped
+ result matching original dimensions.
+
+ Examples:
+ >>> grad = torch.randn(64, 128)
+ >>> momentum = torch.zeros_like(grad)
+ >>> update = muon_update(grad, momentum, beta=0.95, nesterov=True)
+ >>> print(update.shape)
+ torch.Size([64, 128])
+
+ Notes:
+ - Momentum buffer is updated in-place: momentum = beta * momentum + (1-beta) * grad.
+ - With Nesterov: update = beta * momentum + (1-beta) * grad.
+ - Without Nesterov: update = momentum.
+ - 4D tensors (conv filters) are reshaped to 2D as (out_channels, in_channels*height*width) for orthogonalization.
+ - Final update is scaled by sqrt(max(1, dim[-2] / dim[-1])) to account for parameter dimensions.
+ """
momentum.lerp_(grad, 1 - beta)
update = grad.lerp(momentum, beta) if nesterov else momentum
if update.ndim == 4: # for the case of conv filters
@@ -40,6 +97,50 @@
class MuSGD(optim.Optimizer):
+ """Hybrid optimizer combining Muon and SGD updates for neural network training.
+
+ This optimizer implements a combination of Muon (a momentum-based optimizer with orthogonalization via Newton-Schulz
+ iterations) and standard SGD with momentum. It allows different parameter groups to use either the hybrid Muon+SGD
+ approach or pure SGD.
+
+ Args:
+ params (Iterable): Parameters to optimize or dicts defining parameter groups.
+ muon (float, optional): Weight factor for Muon updates in hybrid mode. Default: 0.5.
+ sgd (float, optional): Weight factor for SGD updates in hybrid mode. Default: 0.5.
+
+ Attributes:
+ muon (float): Scaling factor applied to Muon learning rate.
+ sgd (float): Scaling factor applied to SGD learning rate in hybrid mode.
+
+ Examples:
+ >>> param_groups = [
+ ... {
+ ... "params": model.conv_params,
+ ... "lr": 0.02,
+ ... "use_muon": True,
+ ... "momentum": 0.95,
+ ... "nesterov": True,
+ ... "weight_decay": 0.01,
+ ... },
+ ... {
+ ... "params": model.other_params,
+ ... "lr": 0.01,
+ ... "use_muon": False,
+ ... "momentum": 0.9,
+ ... "nesterov": False,
+ ... "weight_decay": 0,
+ ... },
+ ... ]
+ >>> optimizer = MuSGD(param_groups, muon=0.5, sgd=0.5)
+ >>> loss = model(data)
+ >>> loss.backward()
+ >>> optimizer.step()
+
+ Notes:
+ - Parameter groups with 'use_muon': True will receive both Muon and SGD updates.
+ - Parameter groups with 'use_muon': False will receive only SGD updates.
+ - The Muon update uses orthogonalization which works best for 2D+ parameter tensors.
+ """
def __init__(
self,
@@ -52,6 +153,18 @@ muon: float = 0.5,
sgd: float = 0.5,
):
+ """Initialize MuSGD optimizer with hybrid Muon and SGD capabilities.
+
+ Args:
+ params (Iterable): Iterable of parameters to optimize or dicts defining parameter groups.
+ lr (float): Learning rate.
+ momentum (float): Momentum factor for SGD.
+ weight_decay (float): Weight decay (L2 penalty).
+ nesterov (bool): Whether to use Nesterov momentum.
+ use_muon (bool): Whether to enable Muon updates.
+ muon (float): Scaling factor for Muon component.
+ sgd (float): Scaling factor for SGD component.
+ """
defaults = dict(
lr=lr,
momentum=momentum,
@@ -65,6 +178,24 @@
@torch.no_grad()
def step(self, closure=None):
+ """Perform a single optimization step.
+
+ Applies either hybrid Muon+SGD updates or pure SGD updates depending on the
+ 'use_muon' flag in each parameter group. For Muon-enabled groups, parameters
+ receive both an orthogonalized Muon update and a standard SGD momentum update.
+
+ Args:
+ closure (Callable, optional): A closure that reevaluates the model
+ and returns the loss. Default: None.
+
+ Returns:
+ (torch.Tensor | None): The loss value if closure is provided, otherwise None.
+
+ Notes:
+ - Parameters with None gradients are skipped.
+ - Muon updates use Newton-Schulz orthogonalization and work best on 2D+ tensors.
+ - Weight decay is applied only to the SGD component in hybrid mode.
+ """
loss = None
if closure is not None:
with torch.enable_grad():
@@ -121,13 +252,72 @@
class Muon(optim.Optimizer):
+ """Muon optimizer for usage in non-distributed settings.
+
+ This optimizer implements the Muon algorithm, which combines momentum-based updates with orthogonalization via
+ Newton-Schulz iterations. It applies weight decay and learning rate scaling to parameter updates.
+
+ Args:
+ params (iterable): Iterable of parameters to optimize or dicts defining parameter groups.
+ lr (float, optional): Learning rate. Default: 0.02.
+ weight_decay (float, optional): Weight decay (L2 penalty) coefficient. Default: 0.
+ momentum (float, optional): Momentum coefficient for exponential moving average. Default: 0.95.
+
+ Attributes:
+ param_groups (list): List of parameter groups with their optimization settings.
+ state (dict): Dictionary containing optimizer state for each parameter.
+
+ Examples:
+ >>> model = YourModel()
+ >>> optimizer = Muon(model.parameters(), lr=0.02, weight_decay=0.01, momentum=0.95)
+ >>> loss = model(data)
+ >>> loss.backward()
+ >>> optimizer.step()
+
+ Notes:
+ - Designed for non-distributed training environments.
+ - Uses Muon updates with orthogonalization for all parameters.
+ - Weight decay is applied multiplicatively before parameter update.
+ - Parameters with None gradients are assigned zero gradients for synchronization.
+ """
def __init__(self, params, lr: float = 0.02, weight_decay: float = 0, momentum: float = 0.95):
+ """Initialize Muon optimizer with orthogonalization-based updates.
+
+ Args:
+ params (Iterable): Iterable of parameters to optimize or dicts defining parameter groups.
+ lr (float): Learning rate.
+ weight_decay (float): Weight decay factor applied multiplicatively.
+ momentum (float): Momentum factor for gradient accumulation.
+ """
defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum)
super().__init__(params, defaults)
@torch.no_grad()
def step(self, closure=None):
+ """Perform a single optimization step.
+
+ Applies Muon updates to all parameters, incorporating momentum and orthogonalization.
+ Weight decay is applied multiplicatively before the parameter update.
+
+ Args:
+ closure (Callable[[], torch.Tensor] | None, optional): A closure that reevaluates the model
+ and returns the loss. Default: None.
+
+ Returns:
+ (torch.Tensor | None): The loss value if closure is provided, otherwise None.
+
+ Examples:
+ >>> optimizer = Muon(model.parameters())
+ >>> loss = model(inputs)
+ >>> loss.backward()
+ >>> optimizer.step()
+
+ Notes:
+ - Parameters with None gradients are assigned zero gradients for synchronization.
+ - Weight decay is applied as: p *= (1 - lr * weight_decay).
+ - Muon update uses Newton-Schulz orthogonalization and works best on 2D+ tensors.
+ """
loss = None
if closure is not None:
with torch.enable_grad():
@@ -145,4 +335,4 @@ p.mul_(1 - group["lr"] * group["weight_decay"])
p.add_(update.reshape(p.shape), alpha=-group["lr"])
- return loss
+ return loss
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/optim/muon.py |
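The hunk above elides the body of the Newton-Schulz loop that the `zeropower_via_newtonschulz5` docstring describes. The sketch below is one possible quintic Newton-Schulz orthogonalization consistent with those notes; the coefficient triple (a, b, c), the fixed five steps, and the helper name are assumptions for illustration, not values copied from the Ultralytics source.

```python
import torch


def newton_schulz5_sketch(G: torch.Tensor, steps: int = 5, eps: float = 1e-7) -> torch.Tensor:
    """Illustrative quintic Newton-Schulz orthogonalization; coefficients are assumed, not taken from the source."""
    assert G.ndim == 2
    a, b, c = 3.4445, -4.7750, 2.0315  # assumed quintic coefficients chosen for a steep convergence slope at zero
    X = G.bfloat16()
    X /= X.norm() + eps  # keep the top singular value <= 1 so the iteration converges
    transposed = X.shape[0] > X.shape[1]
    if transposed:  # iterate on the wider orientation for efficiency, as the docstring notes describe
        X = X.T
    for _ in range(steps):
        A = X @ X.T
        X = a * X + (b * A + c * A @ A) @ X  # quintic polynomial update
    return X.T if transposed else X
```

Applied to a random 128x64 matrix this returns a matrix with approximately orthogonal columns, matching the relaxed UV^T behaviour the docstring notes describe rather than an exact SVD factor.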
Generate docstrings for this script | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from pathlib import Path
import numpy as np
import torch
from PIL import Image
from ultralytics.utils import LOGGER
from ultralytics.utils.checks import check_requirements
from .base import BaseBackend
class CoreMLBackend(BaseBackend):
def load_model(self, weight: str | Path) -> None:
check_requirements(["coremltools>=9.0", "numpy>=1.14.5,<=2.3.5"])
import coremltools as ct
LOGGER.info(f"Loading {weight} for CoreML inference...")
self.model = ct.models.MLModel(weight)
self.dynamic = self.model.get_spec().description.input[0].type.HasField("multiArrayType")
# Load metadata
self.apply_metadata(dict(self.model.user_defined_metadata))
def forward(self, im: torch.Tensor) -> np.ndarray | list[np.ndarray]:
im = im.cpu().numpy()
h, w = im.shape[1:3]
im = im.transpose(0, 3, 1, 2) if self.dynamic else Image.fromarray((im[0] * 255).astype("uint8"))
y = self.model.predict({"image": im})
if "confidence" in y: # NMS included
from ultralytics.utils.ops import xywh2xyxy
box = xywh2xyxy(y["coordinates"] * [[w, h, w, h]])
cls = y["confidence"].argmax(1, keepdims=True)
y = np.concatenate((box, np.take_along_axis(y["confidence"], cls, axis=1), cls), 1)[None]
else:
y = list(y.values())
if len(y) == 2 and len(y[1].shape) != 4: # segmentation model
y = list(reversed(y))
return y | --- +++ @@ -15,8 +15,18 @@
class CoreMLBackend(BaseBackend):
+ """CoreML inference backend for Apple hardware.
+
+ Loads and runs inference with CoreML models (.mlpackage files) using the coremltools library. Supports both static
+ and dynamic input shapes and handles NMS-included model outputs.
+ """
def load_model(self, weight: str | Path) -> None:
+ """Load a CoreML model from a .mlpackage file.
+
+ Args:
+ weight (str | Path): Path to the .mlpackage model file.
+ """
check_requirements(["coremltools>=9.0", "numpy>=1.14.5,<=2.3.5"])
import coremltools as ct
@@ -28,6 +38,14 @@ self.apply_metadata(dict(self.model.user_defined_metadata))
def forward(self, im: torch.Tensor) -> np.ndarray | list[np.ndarray]:
+ """Run CoreML inference with automatic input format handling.
+
+ Args:
+ im (torch.Tensor): Input image tensor in BHWC format (converted from BCHW by AutoBackend).
+
+ Returns:
+ (np.ndarray | list[np.ndarray]): Model predictions as numpy array(s).
+ """
im = im.cpu().numpy()
h, w = im.shape[1:3]
@@ -43,4 +61,4 @@ y = list(y.values())
if len(y) == 2 and len(y[1].shape) != 4: # segmentation model
y = list(reversed(y))
- return y
+ return y
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/nn/backends/coreml.py |
Write documentation strings for class attributes | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from typing import Any
from ultralytics.engine.results import Results
from ultralytics.solutions.solutions import BaseSolution, SolutionResults
class InstanceSegmentation(BaseSolution):
def __init__(self, **kwargs: Any) -> None:
kwargs["model"] = kwargs.get("model", "yolo26n-seg.pt")
super().__init__(**kwargs)
self.show_conf = self.CFG.get("show_conf", True)
self.show_labels = self.CFG.get("show_labels", True)
self.show_boxes = self.CFG.get("show_boxes", True)
def process(self, im0) -> SolutionResults:
self.extract_tracks(im0) # Extract tracks (bounding boxes, classes, and masks)
self.masks = getattr(self.tracks, "masks", None)
# Iterate over detected classes, track IDs, and segmentation masks
if self.masks is None:
self.LOGGER.warning("No masks detected! Ensure you're using a supported Ultralytics segmentation model.")
plot_im = im0
else:
results = Results(im0, path=None, names=self.names, boxes=self.track_data.data, masks=self.masks.data)
plot_im = results.plot(
line_width=self.line_width,
boxes=self.show_boxes,
conf=self.show_conf,
labels=self.show_labels,
color_mode="instance",
)
self.display_output(plot_im) # Display the annotated output using the base class function
# Return SolutionResults
return SolutionResults(plot_im=plot_im, total_tracks=len(self.track_ids)) | --- +++ @@ -7,8 +7,40 @@
class InstanceSegmentation(BaseSolution):
+ """A class to manage instance segmentation in images or video streams.
+
+ This class extends the BaseSolution class and provides functionality for performing instance segmentation, including
+ drawing segmented masks with bounding boxes and labels.
+
+ Attributes:
+ model (str): The segmentation model to use for inference.
+ line_width (int): Width of the bounding box and text lines.
+ names (dict[int, str]): Dictionary mapping class indices to class names.
+ clss (list[int]): List of detected class indices.
+ track_ids (list[int]): List of track IDs for detected instances.
+ masks (list[np.ndarray]): List of segmentation masks for detected instances.
+ show_conf (bool): Whether to display confidence scores.
+ show_labels (bool): Whether to display class labels.
+ show_boxes (bool): Whether to display bounding boxes.
+
+ Methods:
+ process: Process the input image to perform instance segmentation and annotate results.
+ extract_tracks: Extract tracks including bounding boxes, classes, and masks from model predictions.
+
+ Examples:
+ >>> segmenter = InstanceSegmentation()
+ >>> frame = cv2.imread("frame.jpg")
+ >>> results = segmenter.process(frame)
+ >>> print(f"Total segmented instances: {results.total_tracks}")
+ """
def __init__(self, **kwargs: Any) -> None:
+ """Initialize the InstanceSegmentation class for detecting and annotating segmented instances.
+
+ Args:
+ **kwargs (Any): Keyword arguments passed to the BaseSolution parent class including:
+ - model (str): Model name or path, defaults to "yolo26n-seg.pt".
+ """
kwargs["model"] = kwargs.get("model", "yolo26n-seg.pt")
super().__init__(**kwargs)
@@ -17,6 +49,20 @@ self.show_boxes = self.CFG.get("show_boxes", True)
def process(self, im0) -> SolutionResults:
+ """Perform instance segmentation on the input image and annotate the results.
+
+ Args:
+ im0 (np.ndarray): The input image for segmentation.
+
+ Returns:
+ (SolutionResults): Object containing the annotated image and total number of tracked instances.
+
+ Examples:
+ >>> segmenter = InstanceSegmentation()
+ >>> frame = cv2.imread("image.jpg")
+ >>> summary = segmenter.process(frame)
+ >>> print(summary)
+ """
self.extract_tracks(im0) # Extract tracks (bounding boxes, classes, and masks)
self.masks = getattr(self.tracks, "masks", None)
@@ -37,4 +83,4 @@ self.display_output(plot_im) # Display the annotated output using the base class function
# Return SolutionResults
- return SolutionResults(plot_im=plot_im, total_tracks=len(self.track_ids))
+ return SolutionResults(plot_im=plot_im, total_tracks=len(self.track_ids))
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/solutions/instance_segmentation.py |
Document this code for team use | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
import copy
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import uniform_
__all__ = "inverse_sigmoid", "multi_scale_deformable_attn_pytorch"
def _get_clones(module, n):
return nn.ModuleList([copy.deepcopy(module) for _ in range(n)])
def bias_init_with_prob(prior_prob=0.01):
return float(-np.log((1 - prior_prob) / prior_prob)) # return bias_init
def linear_init(module):
bound = 1 / math.sqrt(module.weight.shape[0])
uniform_(module.weight, -bound, bound)
if hasattr(module, "bias") and module.bias is not None:
uniform_(module.bias, -bound, bound)
def inverse_sigmoid(x, eps=1e-5):
x = x.clamp(min=0, max=1)
x1 = x.clamp(min=eps)
x2 = (1 - x).clamp(min=eps)
return torch.log(x1 / x2)
def multi_scale_deformable_attn_pytorch(
value: torch.Tensor,
value_spatial_shapes: torch.Tensor,
sampling_locations: torch.Tensor,
attention_weights: torch.Tensor,
) -> torch.Tensor:
bs, _, num_heads, embed_dims = value.shape
_, num_queries, num_heads, num_levels, num_points, _ = sampling_locations.shape
value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes], dim=1)
sampling_grids = 2 * sampling_locations - 1
sampling_value_list = []
for level, (H_, W_) in enumerate(value_spatial_shapes):
# bs, H_*W_, num_heads, embed_dims ->
# bs, H_*W_, num_heads*embed_dims ->
# bs, num_heads*embed_dims, H_*W_ ->
# bs*num_heads, embed_dims, H_, W_
value_l_ = value_list[level].flatten(2).transpose(1, 2).reshape(bs * num_heads, embed_dims, H_, W_)
# bs, num_queries, num_heads, num_points, 2 ->
# bs, num_heads, num_queries, num_points, 2 ->
# bs*num_heads, num_queries, num_points, 2
sampling_grid_l_ = sampling_grids[:, :, :, level].transpose(1, 2).flatten(0, 1)
# bs*num_heads, embed_dims, num_queries, num_points
sampling_value_l_ = F.grid_sample(
value_l_, sampling_grid_l_, mode="bilinear", padding_mode="zeros", align_corners=False
)
sampling_value_list.append(sampling_value_l_)
# (bs, num_queries, num_heads, num_levels, num_points) ->
# (bs, num_heads, num_queries, num_levels, num_points) ->
# (bs*num_heads, 1, num_queries, num_levels*num_points)
attention_weights = attention_weights.transpose(1, 2).reshape(
bs * num_heads, 1, num_queries, num_levels * num_points
)
output = (
(torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights)
.sum(-1)
.view(bs, num_heads * embed_dims, num_queries)
)
return output.transpose(1, 2).contiguous() | --- +++ @@ -13,14 +13,61 @@
def _get_clones(module, n):
+ """Create a list of cloned modules from the given module.
+
+ Args:
+ module (nn.Module): The module to be cloned.
+ n (int): Number of clones to create.
+
+ Returns:
+ (nn.ModuleList): A ModuleList containing n clones of the input module.
+
+ Examples:
+ >>> import torch.nn as nn
+ >>> layer = nn.Linear(10, 10)
+ >>> clones = _get_clones(layer, 3)
+ >>> len(clones)
+ 3
+ """
return nn.ModuleList([copy.deepcopy(module) for _ in range(n)])
def bias_init_with_prob(prior_prob=0.01):
+ """Initialize conv/fc bias value according to a given probability value.
+
+ This function calculates the bias initialization value based on a prior probability using the inverse sigmoid
+ (logit) function. It's commonly used in object detection models to initialize classification layers with a specific
+ positive prediction probability.
+
+ Args:
+ prior_prob (float, optional): Prior probability for bias initialization.
+
+ Returns:
+ (float): Bias initialization value calculated from the prior probability.
+
+ Examples:
+ >>> bias = bias_init_with_prob(0.01)
+ >>> print(f"Bias initialization value: {bias:.4f}")
+ Bias initialization value: -4.5951
+ """
return float(-np.log((1 - prior_prob) / prior_prob)) # return bias_init
def linear_init(module):
+ """Initialize the weights and biases of a linear module.
+
+ This function initializes the weights of a linear module using a uniform distribution within bounds calculated from
+ the output dimension. If the module has a bias, it is also initialized.
+
+ Args:
+ module (nn.Module): Linear module to initialize.
+
+ Examples:
+ >>> import torch.nn as nn
+ >>> linear = nn.Linear(10, 5)
+ >>> linear_init(linear)
+ """
bound = 1 / math.sqrt(module.weight.shape[0])
uniform_(module.weight, -bound, bound)
if hasattr(module, "bias") and module.bias is not None:
@@ -28,6 +75,23 @@
def inverse_sigmoid(x, eps=1e-5):
+ """Calculate the inverse sigmoid function for a tensor.
+
+ This function applies the inverse of the sigmoid function to a tensor, which is useful in various neural network
+ operations, particularly in attention mechanisms and coordinate transformations.
+
+ Args:
+ x (torch.Tensor): Input tensor with values in range [0, 1].
+ eps (float, optional): Small epsilon value to prevent numerical instability.
+
+ Returns:
+ (torch.Tensor): Tensor after applying the inverse sigmoid function.
+
+ Examples:
+ >>> x = torch.tensor([0.2, 0.5, 0.8])
+ >>> inverse_sigmoid(x)
+ tensor([-1.3863, 0.0000, 1.3863])
+ """
x = x.clamp(min=0, max=1)
x1 = x.clamp(min=eps)
x2 = (1 - x).clamp(min=eps)
@@ -40,6 +104,25 @@ sampling_locations: torch.Tensor,
attention_weights: torch.Tensor,
) -> torch.Tensor:
+ """Implement multi-scale deformable attention in PyTorch.
+
+ This function performs deformable attention across multiple feature map scales, allowing the model to attend to
+ different spatial locations with learned offsets.
+
+ Args:
+ value (torch.Tensor): The value tensor with shape (bs, num_keys, num_heads, embed_dims).
+ value_spatial_shapes (torch.Tensor): Spatial shapes of the value tensor with shape (num_levels, 2).
+ sampling_locations (torch.Tensor): The sampling locations with shape (bs, num_queries, num_heads, num_levels,
+ num_points, 2).
+ attention_weights (torch.Tensor): The attention weights with shape (bs, num_queries, num_heads, num_levels,
+ num_points).
+
+ Returns:
+ (torch.Tensor): The output tensor with shape (bs, num_queries, num_heads * embed_dims).
+
+ References:
+ https://github.com/IDEA-Research/detrex/blob/main/detrex/layers/multi_scale_deform_attn.py
+ """
bs, _, num_heads, embed_dims = value.shape
_, num_queries, num_heads, num_levels, num_points, _ = sampling_locations.shape
value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes], dim=1)
@@ -71,4 +154,4 @@ .sum(-1)
.view(bs, num_heads * embed_dims, num_queries)
)
- return output.transpose(1, 2).contiguous()
+ return output.transpose(1, 2).contiguous()
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/nn/modules/utils.py |
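Unlike the other functions in this file, the `multi_scale_deformable_attn_pytorch` docstring lists tensor shapes but no usage example. The shape-checking sketch below uses made-up toy dimensions (two feature levels, eight heads); it passes the per-level spatial shapes as a plain list of (H, W) pairs, which the function iterates over the same way it would a tensor.

```python
import torch
from ultralytics.nn.modules.utils import multi_scale_deformable_attn_pytorch

# Toy dimensions for illustration only: 2 feature levels (8x8 and 4x4), 8 heads, 32 dims/head, 10 queries, 4 points.
bs, num_heads, embed_dims, num_queries, num_levels, num_points = 1, 8, 32, 10, 2, 4
spatial_shapes = [(8, 8), (4, 4)]
num_keys = sum(h * w for h, w in spatial_shapes)  # 64 + 16 = 80

value = torch.randn(bs, num_keys, num_heads, embed_dims)
sampling_locations = torch.rand(bs, num_queries, num_heads, num_levels, num_points, 2)  # normalized [0, 1] coords
attention_weights = torch.rand(bs, num_queries, num_heads, num_levels, num_points).softmax(-1)

out = multi_scale_deformable_attn_pytorch(value, spatial_shapes, sampling_locations, attention_weights)
print(out.shape)  # torch.Size([1, 10, 256]) == (bs, num_queries, num_heads * embed_dims)
```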
Write Python docstrings for this snippet | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from pathlib import Path
import numpy as np
import torch
from ultralytics.utils import LOGGER
from ultralytics.utils.checks import check_requirements
from .base import BaseBackend
class OpenVINOBackend(BaseBackend):
def load_model(self, weight: str | Path) -> None:
LOGGER.info(f"Loading {weight} for OpenVINO inference...")
check_requirements("openvino>=2024.0.0")
import openvino as ov
core = ov.Core()
device_name = "AUTO"
if isinstance(self.device, str) and self.device.startswith("intel"):
device_name = self.device.split(":")[1].upper()
self.device = torch.device("cpu")
if device_name not in core.available_devices:
LOGGER.warning(f"OpenVINO device '{device_name}' not available. Using 'AUTO' instead.")
device_name = "AUTO"
w = Path(weight)
if not w.is_file():
w = next(w.glob("*.xml"))
ov_model = core.read_model(model=str(w), weights=w.with_suffix(".bin"))
if ov_model.get_parameters()[0].get_layout().empty:
ov_model.get_parameters()[0].set_layout(ov.Layout("NCHW"))
# Load metadata
metadata_file = w.parent / "metadata.yaml"
if metadata_file.exists():
from ultralytics.utils import YAML
self.apply_metadata(YAML.load(metadata_file))
# Set inference mode
self.inference_mode = "CUMULATIVE_THROUGHPUT" if self.dynamic and self.batch > 1 else "LATENCY"
self.ov_compiled_model = core.compile_model(
ov_model,
device_name=device_name,
config={"PERFORMANCE_HINT": self.inference_mode},
)
LOGGER.info(
f"Using OpenVINO {self.inference_mode} mode for batch={self.batch} inference on "
f"{', '.join(self.ov_compiled_model.get_property('EXECUTION_DEVICES'))}..."
)
self.input_name = self.ov_compiled_model.input().get_any_name()
self.ov = ov
def forward(self, im: torch.Tensor) -> list[np.ndarray]:
im = im.cpu().numpy().astype(np.float32)
if self.inference_mode in {"THROUGHPUT", "CUMULATIVE_THROUGHPUT"}:
# Async inference for larger batch sizes
n = im.shape[0]
results = [None] * n
def callback(request, userdata):
results[userdata] = request.results
async_queue = self.ov.AsyncInferQueue(self.ov_compiled_model)
async_queue.set_callback(callback)
for i in range(n):
async_queue.start_async(inputs={self.input_name: im[i : i + 1]}, userdata=i)
async_queue.wait_all()
y = [list(r.values()) for r in results]
y = [np.concatenate(x) for x in zip(*y)]
else:
# Sync inference for LATENCY mode
y = list(self.ov_compiled_model(im).values())
return y | --- +++ @@ -14,8 +14,18 @@
class OpenVINOBackend(BaseBackend):
+ """Intel OpenVINO inference backend for Intel hardware acceleration.
+
+ Loads and runs inference with Intel OpenVINO IR models (*_openvino_model/ directories). Supports automatic device
+ selection, Intel-specific device targeting, and async inference for throughput optimization.
+ """
def load_model(self, weight: str | Path) -> None:
+ """Load an Intel OpenVINO IR model from a .xml/.bin file pair or model directory.
+
+ Args:
+ weight (str | Path): Path to the .xml file or directory containing OpenVINO model files.
+ """
LOGGER.info(f"Loading {weight} for OpenVINO inference...")
check_requirements("openvino>=2024.0.0")
import openvino as ov
@@ -61,6 +71,14 @@ self.ov = ov
def forward(self, im: torch.Tensor) -> list[np.ndarray]:
+ """Run Intel OpenVINO inference with sync or async execution based on inference mode.
+
+ Args:
+ im (torch.Tensor): Input image tensor in BCHW format, normalized to [0, 1].
+
+ Returns:
+ (list[np.ndarray]): Model predictions as a list of numpy arrays, one per output layer.
+ """
im = im.cpu().numpy().astype(np.float32)
if self.inference_mode in {"THROUGHPUT", "CUMULATIVE_THROUGHPUT"}:
@@ -69,6 +87,7 @@ results = [None] * n
def callback(request, userdata):
+ """Store async inference result in the preallocated results list at the given index."""
results[userdata] = request.results
async_queue = self.ov.AsyncInferQueue(self.ov_compiled_model)
@@ -83,4 +102,4 @@ else:
# Sync inference for LATENCY mode
y = list(self.ov_compiled_model(im).values())
- return y
+ return y
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/nn/backends/openvino.py |
Help me document legacy Python code | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from pathlib import Path
import numpy as np
import torch
from ultralytics.utils import ARM64, LOGGER
from ultralytics.utils.checks import check_requirements
from .base import BaseBackend
class PaddleBackend(BaseBackend):
def load_model(self, weight: str | Path) -> None:
cuda = isinstance(self.device, torch.device) and torch.cuda.is_available() and self.device.type != "cpu"
LOGGER.info(f"Loading {weight} for PaddlePaddle inference...")
if cuda:
check_requirements("paddlepaddle-gpu>=3.0.0,!=3.3.0")
elif ARM64:
check_requirements("paddlepaddle==3.0.0")
else:
check_requirements("paddlepaddle>=3.0.0,!=3.3.0")
import paddle.inference as pdi
w = Path(weight)
model_file, params_file = None, None
if w.is_dir():
model_file = next(w.rglob("*.json"), None)
params_file = next(w.rglob("*.pdiparams"), None)
elif w.suffix == ".pdiparams":
model_file = w.with_name("model.json")
params_file = w
if not (model_file and params_file and model_file.is_file() and params_file.is_file()):
raise FileNotFoundError(f"Paddle model not found in {w}. Both .json and .pdiparams files are required.")
config = pdi.Config(str(model_file), str(params_file))
if cuda:
config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=self.device.index or 0)
self.predictor = pdi.create_predictor(config)
self.input_handle = self.predictor.get_input_handle(self.predictor.get_input_names()[0])
self.output_names = self.predictor.get_output_names()
# Load metadata
metadata_file = (w if w.is_dir() else w.parent) / "metadata.yaml"
if metadata_file.exists():
from ultralytics.utils import YAML
self.apply_metadata(YAML.load(metadata_file))
def forward(self, im: torch.Tensor) -> list[np.ndarray]:
self.input_handle.copy_from_cpu(im.cpu().numpy().astype(np.float32))
self.predictor.run()
return [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names] | --- +++ @@ -14,8 +14,18 @@
class PaddleBackend(BaseBackend):
+ """Baidu PaddlePaddle inference backend.
+
+ Loads and runs inference with Baidu PaddlePaddle models (*_paddle_model/ directories). Supports both CPU and GPU
+ execution with automatic device configuration and memory pool initialization.
+ """
def load_model(self, weight: str | Path) -> None:
+ """Load a Baidu PaddlePaddle model from a directory containing .json and .pdiparams files.
+
+ Args:
+ weight (str | Path): Path to the model directory or .pdiparams file.
+ """
cuda = isinstance(self.device, torch.device) and torch.cuda.is_available() and self.device.type != "cpu"
LOGGER.info(f"Loading {weight} for PaddlePaddle inference...")
if cuda:
@@ -56,6 +66,14 @@ self.apply_metadata(YAML.load(metadata_file))
def forward(self, im: torch.Tensor) -> list[np.ndarray]:
+ """Run Baidu PaddlePaddle inference.
+
+ Args:
+ im (torch.Tensor): Input image tensor in BCHW format, normalized to [0, 1].
+
+ Returns:
+ (list[np.ndarray]): Model predictions as a list of numpy arrays, one per output handle.
+ """
self.input_handle.copy_from_cpu(im.cpu().numpy().astype(np.float32))
self.predictor.run()
- return [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names]
+ return [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names]
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/nn/backends/paddle.py |
Generate docstrings for each module | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from collections import defaultdict
from typing import Any
from ultralytics.solutions.solutions import BaseSolution, SolutionAnnotator, SolutionResults
class AIGym(BaseSolution):
def __init__(self, **kwargs: Any) -> None:
kwargs["model"] = kwargs.get("model", "yolo26n-pose.pt")
super().__init__(**kwargs)
self.states = defaultdict(lambda: {"angle": 0, "count": 0, "stage": "-"}) # Dict for count, angle and stage
# Extract details from CFG single time for usage later
self.up_angle = float(self.CFG["up_angle"]) # Pose up predefined angle to consider up pose
self.down_angle = float(self.CFG["down_angle"]) # Pose down predefined angle to consider down pose
self.kpts = self.CFG["kpts"] # User selected kpts of workouts storage for further usage
def process(self, im0) -> SolutionResults:
annotator = SolutionAnnotator(im0, line_width=self.line_width) # Initialize annotator
self.extract_tracks(im0) # Extract tracks (bounding boxes, classes, and masks)
if len(self.boxes):
kpt_data = self.tracks.keypoints.data
for i, k in enumerate(kpt_data):
state = self.states[self.track_ids[i]] # get state details
# Get keypoints and estimate the angle
state["angle"] = annotator.estimate_pose_angle(*[k[int(idx)] for idx in self.kpts])
annotator.draw_specific_kpts(k, self.kpts, radius=self.line_width * 3)
# Determine stage and count logic based on angle thresholds
if state["angle"] < self.down_angle:
if state["stage"] == "up":
state["count"] += 1
state["stage"] = "down"
elif state["angle"] > self.up_angle:
state["stage"] = "up"
# Display angle, count, and stage text
if self.show_labels:
annotator.plot_angle_and_count_and_stage(
angle_text=state["angle"], # angle text for display
count_text=state["count"], # count text for workouts
stage_text=state["stage"], # stage position text
center_kpt=k[int(self.kpts[1])], # center keypoint for display
)
plot_im = annotator.result()
self.display_output(plot_im) # Display output image, if environment support display
# Return SolutionResults
return SolutionResults(
plot_im=plot_im,
workout_count=[v["count"] for v in self.states.values()],
workout_stage=[v["stage"] for v in self.states.values()],
workout_angle=[v["angle"] for v in self.states.values()],
total_tracks=len(self.track_ids),
) | --- +++ @@ -7,8 +7,36 @@
class AIGym(BaseSolution):
+ """A class to manage gym steps of people in a real-time video stream based on their poses.
+
+ This class extends BaseSolution to monitor workouts using YOLO pose estimation models. It tracks and counts
+ repetitions of exercises based on predefined angle thresholds for up and down positions.
+
+ Attributes:
+ states (dict[int, dict[str, float | int | str]]): Per-track angle, rep count, and stage for workout monitoring.
+ up_angle (float): Angle threshold for considering the 'up' position of an exercise.
+ down_angle (float): Angle threshold for considering the 'down' position of an exercise.
+ kpts (list[int]): Indices of keypoints used for angle calculation.
+
+ Methods:
+ process: Process a frame to detect poses, calculate angles, and count repetitions.
+
+ Examples:
+ >>> gym = AIGym(model="yolo26n-pose.pt")
+ >>> image = cv2.imread("gym_scene.jpg")
+ >>> results = gym.process(image)
+ >>> processed_image = results.plot_im
+ >>> cv2.imshow("Processed Image", processed_image)
+ >>> cv2.waitKey(0)
+ """
def __init__(self, **kwargs: Any) -> None:
+ """Initialize AIGym for workout monitoring using pose estimation and predefined angles.
+
+ Args:
+ **kwargs (Any): Keyword arguments passed to the parent class constructor including:
+ - model (str): Model name or path, defaults to "yolo26n-pose.pt".
+ """
kwargs["model"] = kwargs.get("model", "yolo26n-pose.pt")
super().__init__(**kwargs)
self.states = defaultdict(lambda: {"angle": 0, "count": 0, "stage": "-"}) # Dict for count, angle and stage
@@ -19,6 +47,25 @@ self.kpts = self.CFG["kpts"] # User selected kpts of workouts storage for further usage
def process(self, im0) -> SolutionResults:
+ """Monitor workouts using Ultralytics YOLO Pose Model.
+
+ This function processes an input image to track and analyze human poses for workout monitoring. It uses the YOLO
+ Pose model to detect keypoints, estimate angles, and count repetitions based on predefined angle thresholds.
+
+ Args:
+ im0 (np.ndarray): Input image for processing.
+
+ Returns:
+ (SolutionResults): Contains processed image `plot_im`, 'workout_count' (list of completed reps),
+ 'workout_stage' (list of current stages), 'workout_angle' (list of angles), and 'total_tracks' (total
+ number of tracked individuals).
+
+ Examples:
+ >>> gym = AIGym()
+ >>> image = cv2.imread("workout.jpg")
+ >>> results = gym.process(image)
+ >>> processed_image = results.plot_im
+ """
annotator = SolutionAnnotator(im0, line_width=self.line_width) # Initialize annotator
self.extract_tracks(im0) # Extract tracks (bounding boxes, classes, and masks)
@@ -58,4 +105,4 @@ workout_stage=[v["stage"] for v in self.states.values()],
workout_angle=[v["angle"] for v in self.states.values()],
total_tracks=len(self.track_ids),
- )
+ )
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/solutions/ai_gym.py |
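The rep-counting logic in `process()` above is a small two-state machine driven by the estimated joint angle: a rep is counted when the angle drops below `down_angle` while the stage is "up". The standalone sketch below reproduces just that thresholding idea with assumed angle values, independent of the Ultralytics classes.

```python
def count_reps(angles, up_angle=145.0, down_angle=90.0):
    """Count repetitions from a sequence of joint angles using up/down thresholds (illustrative only)."""
    stage, count = "-", 0
    for angle in angles:
        if angle < down_angle:  # entered the 'down' position
            if stage == "up":  # a completed up -> down transition counts as one rep
                count += 1
            stage = "down"
        elif angle > up_angle:  # entered the 'up' position
            stage = "up"
    return count


print(count_reps([160, 150, 120, 80, 70, 150, 160, 85, 150]))  # 2
```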
Generate helpful docstrings for debugging | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from pathlib import Path
import numpy as np
import torch
from ultralytics.utils import LOGGER
from ultralytics.utils.checks import check_requirements
from .base import BaseBackend
class NCNNBackend(BaseBackend):
def load_model(self, weight: str | Path) -> None:
LOGGER.info(f"Loading {weight} for NCNN inference...")
check_requirements("ncnn", cmds="--no-deps")
import ncnn as pyncnn
self.pyncnn = pyncnn
self.net = pyncnn.Net()
# Setup Vulkan if available
if isinstance(self.device, str) and self.device.startswith("vulkan"):
self.net.opt.use_vulkan_compute = True
self.net.set_vulkan_device(int(self.device.split(":")[1]))
self.device = torch.device("cpu")
else:
self.net.opt.use_vulkan_compute = False
w = Path(weight)
if not w.is_file():
w = next(w.glob("*.param"))
self.net.load_param(str(w))
self.net.load_model(str(w.with_suffix(".bin")))
# Load metadata
metadata_file = w.parent / "metadata.yaml"
if metadata_file.exists():
from ultralytics.utils import YAML
self.apply_metadata(YAML.load(metadata_file))
def forward(self, im: torch.Tensor) -> list[np.ndarray]:
mat_in = self.pyncnn.Mat(im[0].cpu().numpy())
with self.net.create_extractor() as ex:
ex.input(self.net.input_names()[0], mat_in)
# Sort output names as temporary fix for pnnx issue
y = [np.array(ex.extract(x)[1])[None] for x in sorted(self.net.output_names())]
return y | --- +++ @@ -14,8 +14,18 @@
class NCNNBackend(BaseBackend):
+ """Tencent NCNN inference backend for mobile and embedded deployment.
+
+ Loads and runs inference with Tencent NCNN models (*_ncnn_model/ directories). Optimized for mobile platforms with
+ optional Vulkan GPU acceleration when available.
+ """
def load_model(self, weight: str | Path) -> None:
+ """Load an NCNN model from a .param/.bin file pair or model directory.
+
+ Args:
+ weight (str | Path): Path to the .param file or directory containing NCNN model files.
+ """
LOGGER.info(f"Loading {weight} for NCNN inference...")
check_requirements("ncnn", cmds="--no-deps")
import ncnn as pyncnn
@@ -46,9 +56,17 @@ self.apply_metadata(YAML.load(metadata_file))
def forward(self, im: torch.Tensor) -> list[np.ndarray]:
+ """Run inference using the NCNN runtime.
+
+ Args:
+ im (torch.Tensor): Input image tensor in BCHW format, normalized to [0, 1].
+
+ Returns:
+ (list[np.ndarray]): Model predictions as a list of numpy arrays, one per output layer.
+ """
mat_in = self.pyncnn.Mat(im[0].cpu().numpy())
with self.net.create_extractor() as ex:
ex.input(self.net.input_names()[0], mat_in)
# Sort output names as temporary fix for pnnx issue
y = [np.array(ex.extract(x)[1])[None] for x in sorted(self.net.output_names())]
- return y
+ return y
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/nn/backends/ncnn.py |
Write proper docstrings for these functions | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from pathlib import Path
import torch
from ultralytics.utils import LOGGER
from ultralytics.utils.checks import check_executorch_requirements
from .base import BaseBackend
class ExecuTorchBackend(BaseBackend):
def load_model(self, weight: str | Path) -> None:
LOGGER.info(f"Loading {weight} for ExecuTorch inference...")
check_executorch_requirements()
from executorch.runtime import Runtime
w = Path(weight)
if w.is_dir():
model_file = next(w.rglob("*.pte"))
metadata_file = w / "metadata.yaml"
else:
model_file = w
metadata_file = w.parent / "metadata.yaml"
program = Runtime.get().load_program(str(model_file))
self.model = program.load_method("forward")
# Load metadata
if metadata_file.exists():
from ultralytics.utils import YAML
self.apply_metadata(YAML.load(metadata_file))
def forward(self, im: torch.Tensor) -> list:
return self.model.execute([im]) | --- +++ @@ -13,8 +13,18 @@
class ExecuTorchBackend(BaseBackend):
+ """Meta ExecuTorch inference backend for on-device deployment.
+
+ Loads and runs inference with Meta ExecuTorch models (.pte files) using the ExecuTorch runtime. Supports both
+ standalone .pte files and directory-based model packages with metadata.
+ """
def load_model(self, weight: str | Path) -> None:
+ """Load an ExecuTorch model from a .pte file or directory.
+
+ Args:
+ weight (str | Path): Path to the .pte model file or directory containing the model.
+ """
LOGGER.info(f"Loading {weight} for ExecuTorch inference...")
check_executorch_requirements()
@@ -38,4 +48,12 @@ self.apply_metadata(YAML.load(metadata_file))
def forward(self, im: torch.Tensor) -> list:
- return self.model.execute([im])
+ """Run inference using the ExecuTorch runtime.
+
+ Args:
+ im (torch.Tensor): Input image tensor in BCHW format, normalized to [0, 1].
+
+ Returns:
+ (list): Model predictions as a list of ExecuTorch output values.
+ """
+ return self.model.execute([im])
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/nn/backends/executorch.py |
Write proper docstrings for these functions | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Any
import cv2
@dataclass
class SolutionConfig:
source: str | None = None
model: str | None = None
classes: list[int] | None = None
show_conf: bool = True
show_labels: bool = True
region: list[tuple[int, int]] | None = None
colormap: int | None = cv2.COLORMAP_DEEPGREEN
show_in: bool = True
show_out: bool = True
up_angle: float = 145.0
down_angle: int = 90
kpts: list[int] = field(default_factory=lambda: [6, 8, 10])
analytics_type: str = "line"
figsize: tuple[int, int] | None = (12.8, 7.2)
blur_ratio: float = 0.5
vision_point: tuple[int, int] = (20, 20)
crop_dir: str = "cropped-detections"
json_file: str = None
line_width: int = 2
records: int = 5
fps: float = 30.0
max_hist: int = 5
meter_per_pixel: float = 0.05
max_speed: int = 120
show: bool = False
iou: float = 0.7
conf: float = 0.25
device: str | None = None
max_det: int = 300
half: bool = False
tracker: str = "botsort.yaml"
verbose: bool = True
data: str = "images"
def update(self, **kwargs: Any):
for key, value in kwargs.items():
if hasattr(self, key):
setattr(self, key, value)
else:
url = "https://docs.ultralytics.com/solutions/#solutions-arguments"
raise ValueError(f"{key} is not a valid solution argument, see {url}")
return self | --- +++ @@ -10,6 +10,57 @@
@dataclass
class SolutionConfig:
+ """Manages configuration parameters for Ultralytics Vision AI solutions.
+
+ The SolutionConfig class serves as a centralized configuration container for all the Ultralytics solution modules:
+ https://docs.ultralytics.com/solutions/#solutions. It leverages Python `dataclass` for clear, type-safe, and
+ maintainable parameter definitions.
+
+ Attributes:
+ source (str, optional): Path to the input source (video, RTSP, etc.). Only usable with Solutions CLI.
+ model (str, optional): Path to the Ultralytics YOLO model to be used for inference.
+ classes (list[int], optional): List of class indices to filter detections.
+ show_conf (bool): Whether to show confidence scores on the visual output.
+ show_labels (bool): Whether to display class labels on visual output.
+ region (list[tuple[int, int]], optional): Polygonal region or line for object counting.
+ colormap (int, optional): OpenCV colormap constant for visual overlays (e.g., cv2.COLORMAP_JET).
+ show_in (bool): Whether to display count number for objects entering the region.
+ show_out (bool): Whether to display count number for objects leaving the region.
+ up_angle (float): Upper angle threshold used in pose-based workouts monitoring.
+ down_angle (int): Lower angle threshold used in pose-based workouts monitoring.
+ kpts (list[int]): Keypoint indices to monitor, e.g., for pose analytics.
+ analytics_type (str): Type of analytics to perform ("line", "area", "bar", "pie", etc.).
+ figsize (tuple[float, float], optional): Size of the matplotlib figure used for analytical plots (width,
+ height).
+ blur_ratio (float): Ratio used to blur objects in the video frames (0.0 to 1.0).
+ vision_point (tuple[int, int]): Reference point for directional tracking or perspective drawing.
+ crop_dir (str): Directory path to save cropped detection images.
+ json_file (str, optional): Path to a JSON file containing data for parking areas.
+ line_width (int): Width for visual display, e.g. bounding boxes, keypoints, and counts.
+ records (int): Number of detection records to send email alerts.
+ fps (float): Frame rate (Frames Per Second) for speed estimation calculation.
+ max_hist (int): Maximum number of historical points or states stored per tracked object for speed estimation.
+ meter_per_pixel (float): Scale for real-world measurement, used in speed or distance calculations.
+ max_speed (int): Maximum speed limit (e.g., km/h or mph) used in visual alerts or constraints.
+ show (bool): Whether to display the visual output on screen.
+ iou (float): Intersection-over-Union threshold for detection filtering.
+ conf (float): Confidence threshold for keeping predictions.
+ device (str, optional): Device to run inference on (e.g., 'cpu', '0' for CUDA GPU).
+ max_det (int): Maximum number of detections allowed per video frame.
+ half (bool): Whether to use FP16 precision (requires a supported CUDA device).
+ tracker (str): Path to tracking configuration YAML file (e.g., 'botsort.yaml').
+ verbose (bool): Enable verbose logging output for debugging or diagnostics.
+ data (str): Path to image directory used for similarity search.
+
+ Methods:
+ update: Update the configuration with user-defined keyword arguments and raise error on invalid keys.
+
+ Examples:
+ >>> from ultralytics.solutions.config import SolutionConfig
+ >>> cfg = SolutionConfig(model="yolo26n.pt", region=[(0, 0), (100, 0), (100, 100), (0, 100)])
+ >>> cfg.update(show=False, conf=0.3)
+ >>> print(cfg.model)
+ """
source: str | None = None
model: str | None = None
@@ -46,6 +97,7 @@ data: str = "images"
def update(self, **kwargs: Any):
+ """Update configuration parameters with new values provided as keyword arguments."""
for key, value in kwargs.items():
if hasattr(self, key):
setattr(self, key, value)
@@ -53,4 +105,4 @@ url = "https://docs.ultralytics.com/solutions/#solutions-arguments"
raise ValueError(f"{key} is not a valid solution argument, see {url}")
- return self
+ return self
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/solutions/config.py |
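A short usage sketch of the keyword-validated `update()` flow described above, assuming the import path shown in the class docstring; the misspelled key is hypothetical and only demonstrates the error branch.

```python
from ultralytics.solutions.config import SolutionConfig

cfg = SolutionConfig(model="yolo26n.pt")
cfg.update(conf=0.3, show=True)  # valid fields are set in place; the instance is returned for chaining
try:
    cfg.update(confidence=0.3)  # hypothetical misspelled key -> ValueError pointing to the solutions-arguments docs
except ValueError as err:
    print(err)
```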
Add minimal docstrings for each function | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import os
from pathlib import Path
import torch
from ultralytics.utils import LOGGER
from ultralytics.utils.checks import check_requirements
from .base import BaseBackend
class AxeleraBackend(BaseBackend):
def load_model(self, weight: str | Path) -> None:
if not os.environ.get("AXELERA_RUNTIME_DIR"):
LOGGER.warning(
"Axelera runtime environment is not activated.\n"
"Please run: source /opt/axelera/sdk/latest/axelera_activate.sh\n\n"
"If this fails, verify driver installation: "
"https://docs.ultralytics.com/integrations/axelera/#axelera-driver-installation"
)
try:
from axelera.runtime import op
except ImportError:
check_requirements(
"axelera_runtime2==0.1.2",
cmds="--extra-index-url https://software.axelera.ai/artifactory/axelera-runtime-pypi",
)
from axelera.runtime import op
w = Path(weight)
found = next(w.rglob("*.axm"), None)
if found is None:
raise FileNotFoundError(f"No .axm file found in: {w}")
self.model = op.load(str(found))
# Load metadata
metadata_file = found.parent / "metadata.yaml"
if metadata_file.exists():
from ultralytics.utils import YAML
self.apply_metadata(YAML.load(metadata_file))
def forward(self, im: torch.Tensor) -> list:
return self.model(im.cpu()) | --- +++ @@ -14,8 +14,18 @@
class AxeleraBackend(BaseBackend):
+ """Axelera AI inference backend for Axelera Metis AI accelerators.
+
+ Loads compiled Axelera models (.axm files) and runs inference using the Axelera AI runtime SDK. Requires the Axelera
+ runtime environment to be activated before use.
+ """
def load_model(self, weight: str | Path) -> None:
+ """Load an Axelera model from a directory containing a .axm file.
+
+ Args:
+ weight (str | Path): Path to the Axelera model directory containing the .axm binary.
+ """
if not os.environ.get("AXELERA_RUNTIME_DIR"):
LOGGER.warning(
"Axelera runtime environment is not activated.\n"
@@ -48,4 +58,12 @@ self.apply_metadata(YAML.load(metadata_file))
def forward(self, im: torch.Tensor) -> list:
- return self.model(im.cpu())
+ """Run inference on the Axelera hardware accelerator.
+
+ Args:
+ im (torch.Tensor): Input image tensor in BCHW format, normalized to [0, 1].
+
+ Returns:
+ (list): Model predictions as a list of output arrays.
+ """
+ return self.model(im.cpu())
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/nn/backends/axelera.py |
Create docstrings for each class method | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
from pathlib import Path
import numpy as np
import torch
from ultralytics.utils import LOGGER
from ultralytics.utils.checks import check_requirements
from .base import BaseBackend
class ONNXBackend(BaseBackend):
def __init__(self, weight: str | Path, device: torch.device, fp16: bool = False, format: str = "onnx"):
assert format in {"onnx", "dnn"}, f"Unsupported ONNX format: {format}."
self.format = format
super().__init__(weight, device, fp16)
def load_model(self, weight: str | Path) -> None:
cuda = isinstance(self.device, torch.device) and torch.cuda.is_available() and self.device.type != "cpu"
if self.format == "dnn":
# OpenCV DNN
LOGGER.info(f"Loading {weight} for ONNX OpenCV DNN inference...")
check_requirements("opencv-python>=4.5.4")
import cv2
self.net = cv2.dnn.readNetFromONNX(weight)
else:
# ONNX Runtime
LOGGER.info(f"Loading {weight} for ONNX Runtime inference...")
check_requirements(("onnx", "onnxruntime-gpu" if cuda else "onnxruntime"))
import onnxruntime
# Select execution provider
available = onnxruntime.get_available_providers()
if cuda and "CUDAExecutionProvider" in available:
providers = [("CUDAExecutionProvider", {"device_id": self.device.index}), "CPUExecutionProvider"]
elif self.device.type == "mps" and "CoreMLExecutionProvider" in available:
providers = ["CoreMLExecutionProvider", "CPUExecutionProvider"]
else:
providers = ["CPUExecutionProvider"]
if cuda:
LOGGER.warning("CUDA requested but CUDAExecutionProvider not available. Using CPU...")
self.device = torch.device("cpu")
cuda = False
LOGGER.info(
f"Using ONNX Runtime {onnxruntime.__version__} with "
f"{providers[0] if isinstance(providers[0], str) else providers[0][0]}"
)
self.session = onnxruntime.InferenceSession(weight, providers=providers)
self.output_names = [x.name for x in self.session.get_outputs()]
# Get metadata
metadata_map = self.session.get_modelmeta().custom_metadata_map
if metadata_map:
self.apply_metadata(dict(metadata_map))
# Check if dynamic shapes
self.dynamic = isinstance(self.session.get_outputs()[0].shape[0], str)
self.fp16 = "float16" in self.session.get_inputs()[0].type
# Setup IO binding for CUDA
self.use_io_binding = not self.dynamic and cuda
if self.use_io_binding:
self.io = self.session.io_binding()
self.bindings = []
for output in self.session.get_outputs():
out_fp16 = "float16" in output.type
y_tensor = torch.empty(output.shape, dtype=torch.float16 if out_fp16 else torch.float32).to(
self.device
)
self.io.bind_output(
name=output.name,
device_type=self.device.type,
device_id=self.device.index if cuda else 0,
element_type=np.float16 if out_fp16 else np.float32,
shape=tuple(y_tensor.shape),
buffer_ptr=y_tensor.data_ptr(),
)
self.bindings.append(y_tensor)
def forward(self, im: torch.Tensor) -> torch.Tensor | list[torch.Tensor] | np.ndarray:
if self.format == "dnn":
# OpenCV DNN
self.net.setInput(im.cpu().numpy())
return self.net.forward()
# ONNX Runtime
if self.use_io_binding:
if self.device.type == "cpu":
im = im.cpu()
self.io.bind_input(
name="images",
device_type=im.device.type,
device_id=im.device.index if im.device.type == "cuda" else 0,
element_type=np.float16 if self.fp16 else np.float32,
shape=tuple(im.shape),
buffer_ptr=im.data_ptr(),
)
self.session.run_with_iobinding(self.io)
return self.bindings
else:
return self.session.run(self.output_names, {self.session.get_inputs()[0].name: im.cpu().numpy()})
class ONNXIMXBackend(ONNXBackend):
def load_model(self, weight: str | Path) -> None:
check_requirements(("model-compression-toolkit>=2.4.1", "edge-mdt-cl<1.1.0", "onnxruntime-extensions"))
check_requirements(("onnx", "onnxruntime"))
import mct_quantizers as mctq
import onnxruntime
from edgemdt_cl.pytorch.nms import nms_ort # noqa - register custom NMS ops
w = Path(weight)
onnx_file = next(w.glob("*.onnx"))
LOGGER.info(f"Loading {onnx_file} for ONNX IMX inference...")
session_options = mctq.get_ort_session_options()
session_options.enable_mem_reuse = False
self.session = onnxruntime.InferenceSession(onnx_file, session_options, providers=["CPUExecutionProvider"])
self.output_names = [x.name for x in self.session.get_outputs()]
self.dynamic = isinstance(self.session.get_outputs()[0].shape[0], str)
self.fp16 = "float16" in self.session.get_inputs()[0].type
metadata_map = self.session.get_modelmeta().custom_metadata_map
if metadata_map:
self.apply_metadata(dict(metadata_map))
def forward(self, im: torch.Tensor) -> np.ndarray | list[np.ndarray] | tuple[np.ndarray, ...]:
y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im.cpu().numpy()})
if self.task == "detect":
# boxes, conf, cls
return np.concatenate([y[0], y[1][:, :, None], y[2][:, :, None]], axis=-1)
elif self.task == "pose":
# boxes, conf, kpts
return np.concatenate([y[0], y[1][:, :, None], y[2][:, :, None], y[3]], axis=-1, dtype=y[0].dtype)
elif self.task == "segment":
return (
np.concatenate([y[0], y[1][:, :, None], y[2][:, :, None], y[3]], axis=-1, dtype=y[0].dtype),
y[4],
)
return y | --- +++ @@ -14,13 +14,32 @@
class ONNXBackend(BaseBackend):
+ """Microsoft ONNX Runtime inference backend with optional OpenCV DNN support.
+
+ Loads and runs inference with ONNX models (.onnx files) using either Microsoft ONNX Runtime with CUDA/CoreML
+ execution providers, or OpenCV DNN for lightweight CPU inference. Supports IO binding for optimized GPU inference
+ with static input shapes.
+ """
def __init__(self, weight: str | Path, device: torch.device, fp16: bool = False, format: str = "onnx"):
+ """Initialize the ONNX backend.
+
+ Args:
+ weight (str | Path): Path to the .onnx model file.
+ device (torch.device): Device to run inference on.
+ fp16 (bool): Whether to use FP16 half-precision inference.
+ format (str): Inference engine, either "onnx" for ONNX Runtime or "dnn" for OpenCV DNN.
+ """
assert format in {"onnx", "dnn"}, f"Unsupported ONNX format: {format}."
self.format = format
super().__init__(weight, device, fp16)
def load_model(self, weight: str | Path) -> None:
+ """Load an ONNX model using ONNX Runtime or OpenCV DNN.
+
+ Args:
+ weight (str | Path): Path to the .onnx model file.
+ """
cuda = isinstance(self.device, torch.device) and torch.cuda.is_available() and self.device.type != "cpu"
if self.format == "dnn":
@@ -87,6 +106,14 @@ self.bindings.append(y_tensor)
def forward(self, im: torch.Tensor) -> torch.Tensor | list[torch.Tensor] | np.ndarray:
+ """Run ONNX inference using IO binding (CUDA) or standard session execution.
+
+ Args:
+ im (torch.Tensor): Input image tensor in BCHW format, normalized to [0, 1].
+
+ Returns:
+ (torch.Tensor | list[torch.Tensor] | np.ndarray): Model predictions as tensor(s) or numpy array(s).
+ """
if self.format == "dnn":
# OpenCV DNN
self.net.setInput(im.cpu().numpy())
@@ -111,8 +138,18 @@
class ONNXIMXBackend(ONNXBackend):
+ """ONNX IMX inference backend for NXP i.MX processors.
+
+ Extends `ONNXBackend` with support for quantized models targeting NXP i.MX edge devices. Uses MCT (Model Compression
+ Toolkit) quantizers and custom NMS operations for optimized inference.
+ """
def load_model(self, weight: str | Path) -> None:
+ """Load a quantized ONNX model from an IMX model directory.
+
+ Args:
+ weight (str | Path): Path to the IMX model directory containing the .onnx file.
+ """
check_requirements(("model-compression-toolkit>=2.4.1", "edge-mdt-cl<1.1.0", "onnxruntime-extensions"))
check_requirements(("onnx", "onnxruntime"))
import mct_quantizers as mctq
@@ -135,6 +172,14 @@ self.apply_metadata(dict(metadata_map))
def forward(self, im: torch.Tensor) -> np.ndarray | list[np.ndarray] | tuple[np.ndarray, ...]:
+ """Run IMX inference with task-specific output concatenation for detect, pose, and segment tasks.
+
+ Args:
+ im (torch.Tensor): Input image tensor in BCHW format, normalized to [0, 1].
+
+ Returns:
+ (np.ndarray | list[np.ndarray] | tuple[np.ndarray, ...]): Task-formatted model predictions.
+ """
y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im.cpu().numpy()})
if self.task == "detect":
@@ -148,4 +193,4 @@ np.concatenate([y[0], y[1][:, :, None], y[2][:, :, None], y[3]], axis=-1, dtype=y[0].dtype),
y[4],
)
- return y
+ return y
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/nn/backends/onnx.py |
Write docstrings that follow conventions | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import json
import os
from pathlib import Path
import torch
from ultralytics.utils import LOGGER
from ultralytics.utils.checks import check_requirements
from .base import BaseBackend
class MNNBackend(BaseBackend):
def load_model(self, weight: str | Path) -> None:
LOGGER.info(f"Loading {weight} for MNN inference...")
check_requirements("MNN")
import MNN
config = {"precision": "low", "backend": "CPU", "numThread": (os.cpu_count() + 1) // 2}
rt = MNN.nn.create_runtime_manager((config,))
self.net = MNN.nn.load_module_from_file(weight, [], [], runtime_manager=rt, rearrange=True)
self.expr = MNN.expr
# Load metadata from bizCode
info = self.net.get_info()
if "bizCode" in info:
try:
self.apply_metadata(json.loads(info["bizCode"]))
except json.JSONDecodeError:
pass
def forward(self, im: torch.Tensor) -> list:
input_var = self.expr.const(im.data_ptr(), im.shape)
output_var = self.net.onForward([input_var])
# NOTE: need this copy(), or it'd get incorrect results on ARM devices
return [x.read().copy() for x in output_var] | --- +++ @@ -15,8 +15,18 @@
class MNNBackend(BaseBackend):
+ """MNN (Mobile Neural Network) inference backend.
+
+ Loads and runs inference with MNN models (.mnn files) using the Alibaba MNN framework. Optimized for mobile and edge
+ deployment with configurable thread count and precision.
+ """
def load_model(self, weight: str | Path) -> None:
+ """Load an Alibaba MNN model from a .mnn file.
+
+ Args:
+ weight (str | Path): Path to the .mnn model file.
+ """
LOGGER.info(f"Loading {weight} for MNN inference...")
check_requirements("MNN")
import MNN
@@ -35,7 +45,15 @@ pass
def forward(self, im: torch.Tensor) -> list:
+ """Run inference using the MNN runtime.
+
+ Args:
+ im (torch.Tensor): Input image tensor in BCHW format, normalized to [0, 1].
+
+ Returns:
+ (list): Model predictions as a list of numpy arrays.
+ """
input_var = self.expr.const(im.data_ptr(), im.shape)
output_var = self.net.onForward([input_var])
# NOTE: need this copy(), or it'd get incorrect results on ARM devices
- return [x.read().copy() for x in output_var]
+ return [x.read().copy() for x in output_var]
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/nn/backends/mnn.py |
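Aside: the MNN row above recovers export metadata from the model's bizCode field with a tolerant json.loads. A minimal standalone sketch of that pattern follows; the payload below is a made-up example, not real export metadata.
```python
# Standalone sketch of the bizCode metadata-recovery pattern from the MNN row.
# The info payload is a fabricated example for illustration only.
import json

info = {"bizCode": '{"stride": 32, "task": "detect", "names": {"0": "person"}}'}

metadata = {}
if "bizCode" in info:
    try:
        metadata = json.loads(info["bizCode"])  # parse embedded JSON metadata
    except json.JSONDecodeError:
        pass  # some exports may store a non-JSON bizCode; ignore it silently

print(metadata.get("task"), metadata.get("stride"))  # detect 32
```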
Add docstrings for production code | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import ast
from abc import ABC, abstractmethod
import torch
class BaseBackend(ABC):
def __init__(self, weight: str | torch.nn.Module, device: torch.device | str, fp16: bool = False):
self.device = device
self.fp16 = fp16
self.nhwc = False
self.stride = 32
self.names = {}
self.task = None
self.batch = 1
self.channels = 3
self.end2end = False
self.dynamic = False
self.metadata = {}
self.model = None
self.load_model(weight)
@abstractmethod
def load_model(self, weight: str | torch.nn.Module) -> None:
raise NotImplementedError
@abstractmethod
def forward(self, im: torch.Tensor) -> torch.Tensor | list[torch.Tensor]:
raise NotImplementedError
def apply_metadata(self, metadata: dict | None) -> None:
if not metadata:
return
# Store raw metadata
self.metadata = metadata
# Process type conversions for known fields
for k, v in metadata.items():
if k in {"stride", "batch", "channels"}:
metadata[k] = int(v)
elif k in {"imgsz", "names", "kpt_shape", "kpt_names", "args", "end2end"} and isinstance(v, str):
metadata[k] = ast.literal_eval(v)
# Handle models exported with end-to-end NMS
metadata["end2end"] = metadata.get("end2end", False) or metadata.get("args", {}).get("nms", False)
metadata["dynamic"] = metadata.get("args", {}).get("dynamic", self.dynamic)
# Apply all metadata fields as backend attributes
for k, v in metadata.items():
setattr(self, k, v) | --- +++ @@ -9,8 +9,35 @@
class BaseBackend(ABC):
+ """Base class for all inference backends.
+
+ This abstract class defines the interface that all inference backends must implement. It provides common
+ functionality for model loading, metadata processing, and device management.
+
+ Attributes:
+ model: The underlying inference model or runtime session.
+ device (torch.device): The device to run inference on.
+ fp16 (bool): Whether to use FP16 (half-precision) inference.
+ nhwc (bool): Whether the model expects NHWC input format instead of NCHW.
+ stride (int): Model stride, typically 32 for YOLO models.
+ names (dict): Dictionary mapping class indices to class names.
+ task (str | None): The task type (detect, segment, classify, pose, obb).
+ batch (int): Batch size for inference.
+ imgsz (tuple): Input image size as (height, width).
+ channels (int): Number of input channels, typically 3 for RGB.
+ end2end (bool): Whether the model includes end-to-end NMS post-processing.
+ dynamic (bool): Whether the model supports dynamic input shapes.
+ metadata (dict): Model metadata dictionary containing export configuration.
+ """
def __init__(self, weight: str | torch.nn.Module, device: torch.device | str, fp16: bool = False):
+ """Initialize the base backend with common attributes and load the model.
+
+ Args:
+ weight (str | torch.nn.Module): Path to the model weights file or a PyTorch module instance.
+ device (torch.device | str): Device to run inference on (e.g., 'cpu', 'cuda:0').
+ fp16 (bool): Whether to use FP16 half-precision inference.
+ """
self.device = device
self.fp16 = fp16
self.nhwc = False
@@ -27,13 +54,34 @@
@abstractmethod
def load_model(self, weight: str | torch.nn.Module) -> None:
+ """Load the model from a weights file or module instance.
+
+ Args:
+ weight (str | torch.nn.Module): Path to model weights or a PyTorch module.
+ """
raise NotImplementedError
@abstractmethod
def forward(self, im: torch.Tensor) -> torch.Tensor | list[torch.Tensor]:
+ """Run inference on the input image tensor.
+
+ Args:
+ im (torch.Tensor): Input image tensor in BCHW format, normalized to [0, 1].
+
+ Returns:
+ (torch.Tensor | list[torch.Tensor]): Model output as a single tensor or list of tensors.
+ """
raise NotImplementedError
def apply_metadata(self, metadata: dict | None) -> None:
+ """Process and apply model metadata to backend attributes.
+
+ Handles type conversions for common metadata fields (e.g., stride, batch, names) and sets them as
+ instance attributes. Also resolves end-to-end NMS and dynamic shape settings from export args.
+
+ Args:
+ metadata (dict | None): Dictionary containing metadata key-value pairs from model export.
+ """
if not metadata:
return
@@ -53,4 +101,4 @@
# Apply all metadata fields as backend attributes
for k, v in metadata.items():
- setattr(self, k, v)+ setattr(self, k, v)
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/nn/backends/base.py |
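Aside: the BaseBackend row above documents the backend contract: subclasses implement load_model and forward, and may call apply_metadata to promote export metadata to attributes. Below is a minimal, hypothetical subclass sketch of that contract; DummyBackend and its echo forward pass are illustrative only, and the import path is assumed from the row's file URL, not guaranteed to exist in released packages.
```python
# Minimal illustrative sketch of the BaseBackend contract documented above.
# DummyBackend is hypothetical: it stores the weight path and echoes its input.
from __future__ import annotations

import torch

from ultralytics.nn.backends.base import BaseBackend  # assumed module path from the row's file URL


class DummyBackend(BaseBackend):
    def load_model(self, weight: str | torch.nn.Module) -> None:
        # A real backend would deserialize `weight` here; we only record it.
        self.model = weight
        # Keys mirror those handled by apply_metadata() in the row above.
        self.apply_metadata({"stride": "32", "names": "{0: 'person'}", "task": "detect"})

    def forward(self, im: torch.Tensor) -> torch.Tensor:
        # Identity pass-through stands in for real inference.
        return im


backend = DummyBackend(weight="dummy.bin", device="cpu", fp16=False)
print(backend.stride, backend.names, backend.task)  # 32 {0: 'person'} detect
print(backend.forward(torch.zeros(1, 3, 640, 640)).shape)  # torch.Size([1, 3, 640, 640])
```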
Create docstrings for all classes and functions | # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
from __future__ import annotations
import json
from collections import OrderedDict, namedtuple
from pathlib import Path
import numpy as np
import torch
from ultralytics.utils import IS_JETSON, LINUX, LOGGER, PYTHON_VERSION
from ultralytics.utils.checks import check_requirements, check_version
from .base import BaseBackend
class TensorRTBackend(BaseBackend):
def load_model(self, weight: str | Path) -> None:
LOGGER.info(f"Loading {weight} for TensorRT inference...")
if IS_JETSON and check_version(PYTHON_VERSION, "<=3.8.10"):
check_requirements("numpy==1.23.5")
try:
import tensorrt as trt
except ImportError:
if LINUX:
check_requirements("tensorrt>7.0.0,!=10.1.0")
import tensorrt as trt
check_version(trt.__version__, ">=7.0.0", hard=True)
check_version(trt.__version__, "!=10.1.0", msg="https://github.com/ultralytics/ultralytics/pull/14239")
if self.device.type == "cpu":
self.device = torch.device("cuda:0")
Binding = namedtuple("Binding", ("name", "dtype", "shape", "data", "ptr"))
logger = trt.Logger(trt.Logger.INFO)
# Read engine file
with open(weight, "rb") as f, trt.Runtime(logger) as runtime:
try:
meta_len = int.from_bytes(f.read(4), byteorder="little")
metadata = json.loads(f.read(meta_len).decode("utf-8"))
dla = metadata.get("dla", None)
if dla is not None:
runtime.DLA_core = int(dla)
except UnicodeDecodeError:
f.seek(0)
metadata = None
engine = runtime.deserialize_cuda_engine(f.read())
self.apply_metadata(metadata)
try:
self.context = engine.create_execution_context()
except Exception as e:
LOGGER.error("TensorRT model exported with a different version than expected\n")
raise e
# Setup bindings
self.bindings = OrderedDict()
self.output_names = []
self.fp16 = False
self.dynamic = False
self.is_trt10 = not hasattr(engine, "num_bindings")
num = range(engine.num_io_tensors) if self.is_trt10 else range(engine.num_bindings)
for i in num:
if self.is_trt10:
name = engine.get_tensor_name(i)
dtype = trt.nptype(engine.get_tensor_dtype(name))
is_input = engine.get_tensor_mode(name) == trt.TensorIOMode.INPUT
shape = tuple(engine.get_tensor_shape(name))
profile_shape = tuple(engine.get_tensor_profile_shape(name, 0)[2]) if is_input else None
else:
name = engine.get_binding_name(i)
dtype = trt.nptype(engine.get_binding_dtype(i))
is_input = engine.binding_is_input(i)
shape = tuple(engine.get_binding_shape(i))
profile_shape = tuple(engine.get_profile_shape(0, i)[1]) if is_input else None
if is_input:
if -1 in shape:
self.dynamic = True
if self.is_trt10:
self.context.set_input_shape(name, profile_shape)
else:
self.context.set_binding_shape(i, profile_shape)
if dtype == np.float16:
self.fp16 = True
else:
self.output_names.append(name)
shape = (
tuple(self.context.get_tensor_shape(name))
if self.is_trt10
else tuple(self.context.get_binding_shape(i))
)
im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(self.device)
self.bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr()))
self.binding_addrs = OrderedDict((n, d.ptr) for n, d in self.bindings.items())
self.model = engine
def forward(self, im: torch.Tensor) -> list[torch.Tensor]:
if self.dynamic and im.shape != self.bindings["images"].shape:
if self.is_trt10:
self.context.set_input_shape("images", im.shape)
self.bindings["images"] = self.bindings["images"]._replace(shape=im.shape)
for name in self.output_names:
self.bindings[name].data.resize_(tuple(self.context.get_tensor_shape(name)))
else:
i = self.model.get_binding_index("images")
self.context.set_binding_shape(i, im.shape)
self.bindings["images"] = self.bindings["images"]._replace(shape=im.shape)
for name in self.output_names:
i = self.model.get_binding_index(name)
self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i)))
s = self.bindings["images"].shape
assert im.shape == s, f"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}"
self.binding_addrs["images"] = int(im.data_ptr())
self.context.execute_v2(list(self.binding_addrs.values()))
return [self.bindings[x].data for x in sorted(self.output_names)] | --- +++ @@ -16,8 +16,18 @@
class TensorRTBackend(BaseBackend):
+ """NVIDIA TensorRT inference backend for GPU-accelerated deployment.
+
+ Loads and runs inference with NVIDIA TensorRT serialized engines (.engine files). Supports both TensorRT 7-9 and
+ TensorRT 10+ APIs, dynamic input shapes, FP16 precision, and DLA core offloading.
+ """
def load_model(self, weight: str | Path) -> None:
+ """Load an NVIDIA TensorRT engine from a serialized .engine file.
+
+ Args:
+ weight (str | Path): Path to the .engine file with optional embedded metadata.
+ """
LOGGER.info(f"Loading {weight} for TensorRT inference...")
if IS_JETSON and check_version(PYTHON_VERSION, "<=3.8.10"):
@@ -104,6 +114,14 @@ self.model = engine
def forward(self, im: torch.Tensor) -> list[torch.Tensor]:
+ """Run NVIDIA TensorRT inference with dynamic shape handling.
+
+ Args:
+ im (torch.Tensor): Input image tensor in BCHW format on the CUDA device.
+
+ Returns:
+ (list[torch.Tensor]): Model predictions as a list of tensors on the CUDA device.
+ """
if self.dynamic and im.shape != self.bindings["images"].shape:
if self.is_trt10:
self.context.set_input_shape("images", im.shape)
@@ -123,4 +141,4 @@
self.binding_addrs["images"] = int(im.data_ptr())
self.context.execute_v2(list(self.binding_addrs.values()))
- return [self.bindings[x].data for x in sorted(self.output_names)]
+ return [self.bindings[x].data for x in sorted(self.output_names)]
| https://raw.githubusercontent.com/ultralytics/ultralytics/HEAD/ultralytics/nn/backends/tensorrt.py |
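Aside: across these rows the generated forward docstrings describe the same input convention, a BCHW tensor normalized to [0, 1]. A hedged preprocessing sketch matching that convention is shown below; the 640x640 size and the random stand-in frame are placeholders, not values taken from the dataset.
```python
# Illustrative preprocessing sketch matching the BCHW, [0, 1] input convention
# described in the generated forward() docstrings. Sizes and data are placeholders.
import numpy as np
import torch

h, w = 640, 640
image = np.random.randint(0, 256, (h, w, 3), dtype=np.uint8)  # stand-in for a loaded frame

im = torch.from_numpy(image).permute(2, 0, 1).unsqueeze(0)  # HWC -> BCHW
im = im.float() / 255.0  # normalize pixel values to [0, 1]
print(im.shape, im.min().item(), im.max().item())  # ready for backend.forward(im)
```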