import os
from dotenv import load_dotenv
from transformers import CLIPProcessor, CLIPModel
# Load environment variables (e.g., Hugging Face credentials) from a .env file
load_dotenv()
# Load CLIP model globally for reuse
clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch16")
clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch16")
def get_clip_model():
    # Return the shared model/processor pair instead of reloading per call
    return clip_model, clip_processor
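
# A minimal usage sketch, not part of the module's API: scoring an image
# against candidate captions with the shared model. Assumes torch is installed
# (required by CLIPModel) and that an image exists at the hypothetical path
# "photo.jpg".
if __name__ == "__main__":
    import torch
    from PIL import Image

    model, processor = get_clip_model()
    image = Image.open("photo.jpg")  # hypothetical example path
    inputs = processor(
        text=["a photo of a cat", "a photo of a dog"],
        images=image,
        return_tensors="pt",
        padding=True,
    )
    with torch.no_grad():
        outputs = model(**inputs)
    # Softmax over the image-text logits gives match probabilities per caption
    probs = outputs.logits_per_image.softmax(dim=-1)
    print(probs)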