from smolagents import Tool
from transformers import BlipProcessor, BlipForConditionalGeneration
from PIL import Image
import torch


class CaptionImageTool(Tool):
    name = "caption_image_tool"
    description = "Caption an image using a free Hugging Face model."
    inputs = {
        "image_path": {
            "type": "string",
            "description": "The path of the local image file to caption"
        }
    }
    output_type = "string"

    def __init__(self):
        super().__init__()
        # Run on GPU when available, otherwise fall back to CPU.
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        # Keep the checkpoint name in a local variable so it is not
        # overwritten when the loaded model is assigned to self.model.
        model_id = "Salesforce/blip-image-captioning-base"
        self.processor = BlipProcessor.from_pretrained(model_id)
        self.model = BlipForConditionalGeneration.from_pretrained(model_id).to(self.device)

    def forward(self, image_path: str) -> str:
        try:
            # Load the image and normalize it to RGB, as expected by BLIP.
            image = Image.open(image_path).convert("RGB")
            inputs = self.processor(image, return_tensors="pt").to(self.device)
            out = self.model.generate(**inputs)
            caption = self.processor.decode(out[0], skip_special_tokens=True)
            return "Image caption: " + caption
        except Exception as e:
            return f"Error: caption_image_tool failed ({e}); please skip this tool"
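A minimal usage sketch, not from the original source: instantiate the tool and invoke its forward method directly to test it outside an agent. The file name "photo.jpg" is a hypothetical placeholder for a local image path.

# Quick standalone test of the tool (hypothetical file path).
if __name__ == "__main__":
    tool = CaptionImageTool()
    # Calling forward() directly bypasses the agent loop, which is
    # convenient for verifying the model loads and captions correctly.
    print(tool.forward(image_path="photo.jpg"))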