Spaces:
Running
on
Zero
Running
on
Zero
File size: 1,565 Bytes
b23251f |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 |
import logging

# Module-level logger named after this module, per stdlib convention.
logger = logging.getLogger(__name__)


def get_model(task: str, model_key: str, device: str = "cpu"):
    """
    Dynamically retrieve a model instance for the given task.

    Task-specific imports are deferred into the matching branch so that
    only the dependencies of the requested task are actually loaded.

    Args:
        task (str): One of "detection", "segmentation", or "depth".
        model_key (str): Model identifier or variant.
        device (str): Device to run inference on ("cpu" or "cuda").

    Returns:
        object: Initialized model ready for inference.

    Raises:
        ValueError: If ``task`` is not one of the supported tasks.
        Exception: Re-raised unchanged if the underlying model fails to load.
    """
    # Lazy %-style args: the message is only formatted if INFO is enabled.
    logger.info(
        "Request received to load model '%s' for task '%s' on device '%s'",
        model_key, task, device,
    )

    # Validate the task *before* the try block so an unsupported task is
    # logged exactly once. Previously the raised ValueError was immediately
    # re-caught by the broad `except Exception` below and logged twice.
    if task not in ("detection", "segmentation", "depth"):
        error_msg = (
            f"Unsupported task '{task}'. "
            "Valid options are: 'detection', 'segmentation', 'depth'."
        )
        logger.error(error_msg)
        raise ValueError(error_msg)

    try:
        if task == "detection":
            from models.detection.detector import ObjectDetector
            return ObjectDetector(model_key=model_key, device=device)
        if task == "segmentation":
            from models.segmentation.segmenter import Segmenter
            return Segmenter(model_key=model_key, device=device)
        # Only remaining valid option after the guard above.
        from models.depth.depth_estimator import DepthEstimator
        return DepthEstimator(model_key=model_key, device=device)
    except Exception:
        # logger.exception records the full traceback alongside the message,
        # unlike the original logger.error(f"... {e}") which dropped it.
        logger.exception(
            "Error while loading model '%s' for task '%s'", model_key, task
        )
        raise
|