from typing import Dict

import numpy as np
import torch
from PIL import Image
from torchmetrics.multimodal.clip_score import CLIPScore
class CLIPMetric:
    def __init__(self, model_name_or_path: str = "openai/clip-vit-large-patch14"):
        # Prefer CUDA, then Apple MPS, then fall back to CPU.
        self.device = torch.device(
            "cuda"
            if torch.cuda.is_available()
            else "mps"
            if torch.backends.mps.is_available()
            else "cpu"
        )
        # Use the constructor argument instead of a hard-coded checkpoint name.
        self.metric = CLIPScore(model_name_or_path=model_name_or_path)
        self.metric.to(self.device)

    def name(self) -> str:
        return "clip"

    def compute_score(self, image: Image.Image, prompt: str) -> Dict[str, float]:
        # CLIPScore expects a (C, H, W) image tensor; convert the PIL image to one.
        image_tensor = torch.from_numpy(np.array(image)).permute(2, 0, 1).float()
        image_tensor = image_tensor.to(self.device)
        score = self.metric(image_tensor, prompt)
        return {"clip": score.item()}