"""Minimal Gradio chat demo backed by the tiny sshleifer/tiny-t5 model.

The model is a toy-sized T5 checkpoint intended for smoke tests, so the
answers are not expected to be meaningful — the point is a lightweight,
fast-loading end-to-end chat pipeline.
"""

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

import gradio as gr

# Load the ultralight model once at startup; both objects are reused for
# every request handled by the Gradio app.
MODEL_ID = "sshleifer/tiny-t5"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_ID)


def chat(message, history):
    """Generate a single-turn reply for *message*.

    Args:
        message: The user's latest input string.
        history: Prior (user, bot) turns supplied by gr.ChatInterface.
            NOTE: deliberately ignored — the tiny model has no capacity
            for multi-turn context, so each reply is stateless.

    Returns:
        The model's decoded reply with surrounding whitespace stripped.
    """
    prompt = "Answer this: " + message
    inputs = tokenizer(prompt, return_tensors="pt")
    # Cap generation length so a degenerate loop can't stall the UI.
    outputs = model.generate(**inputs, max_new_tokens=50)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response.strip()


def main():
    """Build and launch the Gradio chat UI (blocks until shut down)."""
    gr.ChatInterface(
        fn=chat,
        title="TinyT5 Chat",
        description=(
            "Ask basic questions. Powered by the super lightweight "
            "sshleifer/tiny-t5 model."
        ),
    ).launch()


# Guard the launch so importing this module (e.g. from tests) does not
# start a web server as a side effect.
if __name__ == "__main__":
    main()