# NOTE: Hugging Face Spaces status banner ("Spaces: Running") removed — it was
# page chrome captured during extraction, not part of the module source.
from typing import IO

import torch
from PIL import Image

from model_loader import models
class ImagePreprocessor:
    """Prepare uploaded images for the CLIP model.

    Borrows the preprocessing transform and target device from the shared
    ``models`` bundle (loaded once in ``model_loader``) so every request
    reuses the same pipeline.
    """

    def __init__(self) -> None:
        # Shared CLIP transform and device from the already-loaded bundle;
        # nothing heavy is constructed here.
        self.preprocess = models.clip_preprocess
        self.device = models.device

    def process(self, image_file: IO) -> torch.Tensor:
        """
        Opens an image file, preprocesses it, and returns it as a tensor.

        Args:
            image_file (IO): The image file object (e.g., from a file upload).

        Returns:
            torch.Tensor: The preprocessed image as a tensor, ready for the model.

        Raises:
            ValueError: If the file cannot be opened or decoded as an image.
        """
        try:
            # Open the image from the file-like object; force RGB so the
            # transform always sees 3 channels (uploads may be RGBA/grayscale).
            image = Image.open(image_file).convert("RGB")
        except Exception as e:
            print(f"Error opening image: {e}")
            # Chain the original exception so the root cause (e.g. a PIL
            # UnidentifiedImageError) survives in the traceback.
            raise ValueError("Invalid or corrupted image file.") from e
        # Apply the CLIP preprocessing transformations, add a batch
        # dimension (1, C, H, W), and move to the model's device.
        image_tensor = self.preprocess(image).unsqueeze(0).to(self.device)
        return image_tensor
# Module-level singleton: importers share one preprocessor instance, which in
# turn shares the CLIP transform/device already held by the models bundle.
preprocessor = ImagePreprocessor()