import torch
from transformers import AutoModelForCausalLM, AutoTokenizer


class EndpointHandler:
    def __init__(self, path=""):
        # Load the tokenizer and model from the repository path provided by the endpoint.
        self.tokenizer = AutoTokenizer.from_pretrained(path)
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        # Use half precision on GPU to cut memory use; fall back to float32 on CPU.
        self.model = AutoModelForCausalLM.from_pretrained(
            path,
            torch_dtype=torch.float16 if self.device == "cuda" else torch.float32,
        )
        # Move the model to the target device (the original left it on CPU even
        # when loading float16 weights for CUDA) and switch to inference mode.
        self.model.to(self.device)
        self.model.eval()

    def __call__(self, data: dict):
        # Inference Endpoints pass the request payload as a dict with an "inputs" key.
        prompt = data.get("inputs", "")
        if not prompt:
            return {"error": "No input provided."}

        # Tokenize and move the tensors to the model's device; passing the full
        # encoding (input_ids and attention_mask) keeps generation well-defined.
        encoded = self.tokenizer(prompt, return_tensors="pt").to(self.device)
        with torch.no_grad():
            outputs = self.model.generate(**encoded, max_new_tokens=100)
        generated = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
        return {"generated_text": generated}
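

# Minimal local smoke test -- a sketch, not part of the Inference Endpoints
# contract. "gpt2" is an assumed example checkpoint; swap in the model this
# handler is actually deployed with.
if __name__ == "__main__":
    handler = EndpointHandler(path="gpt2")  # hypothetical checkpoint for local testing
    result = handler({"inputs": "Once upon a time"})
    print(result)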