import logging

# Configure module-level logger
logger = logging.getLogger(__name__)


def get_model(task: str, model_key: str, device: str = "cpu"):
    """
    Dynamically imports and instantiates the model wrapper for the given task and model_key.

    Args:
        task (str): One of "detection", "segmentation", or "depth".
        model_key (str): Model identifier or variant.
        device (str): Device to run inference on ("cpu" or "cuda").

    Returns:
        object: Model wrapper instance for the requested task.
    """
    logger.info(f"Preparing model wrapper '{model_key}' for task '{task}' on device '{device}'")
    try:
        if task == "detection":
            # Import lazily so dependencies for unused tasks are never loaded
            from models.detection.detector import ObjectDetector
            return ObjectDetector(model_key=model_key, device=device)
        elif task == "segmentation":
            from models.segmentation.segmenter import Segmenter
            return Segmenter(model_key=model_key, device=device)
        elif task == "depth":
            from models.depth.depth_estimator import DepthEstimator
            return DepthEstimator(model_key=model_key, device=device)
        else:
            raise ValueError(f"Unsupported task '{task}'")
    except Exception as e:
        logger.error(f"Error loading model '{model_key}' for task '{task}': {e}")
        raise
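

# Usage sketch under stated assumptions: the model keys below ("yolov8n",
# "midas_small") are hypothetical examples, not values confirmed by this
# module; only the get_model() dispatch behavior shown above is assumed.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    # Build a detector wrapper on CPU; pass device="cuda" when a GPU is available.
    detector = get_model(task="detection", model_key="yolov8n", device="cpu")

    # An unsupported task name is logged and raised as ValueError.
    try:
        get_model(task="pose", model_key="midas_small")
    except ValueError as err:
        logger.warning(f"Rejected as expected: {err}")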