import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline

MODEL_ID = "ogrnz/t5-chat-titles"
TOKENIZER_ID = "google-t5/t5-small"

# Load the fine-tuned title model together with its base T5 tokenizer.
tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_ID)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_ID)

# Use the first GPU if available, otherwise fall back to CPU (-1).
device = 0 if torch.cuda.is_available() else -1
summarizer = pipeline("summarization", model=model, tokenizer=tokenizer, device=device)

# Warm up the pipeline once so the first real request isn't slowed by lazy initialization.
summarizer("warm-up")


def make_title(prompt: str) -> str:
    # Generate a short title (3-16 tokens) from the user's prompt.
    return summarizer(prompt, max_length=16, min_length=3)[0]["summary_text"]


with gr.Blocks(title="Title Generator") as demo:
    gr.Markdown("# Title Generator")
    inp = gr.Textbox(label="Prompt", placeholder="Enter your message…")
    out = gr.Textbox(label="Generated Title")
    btn = gr.Button("Generate")
    btn.click(fn=make_title, inputs=inp, outputs=out)

demo.queue().launch(server_name="0.0.0.0", server_port=5000, show_api=True)
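
# --- Optional usage sketch (assumption, not part of the app above) ---
# A minimal way to call the running server programmatically with the `gradio_client`
# package. The endpoint name "/make_title" assumes a Gradio version that names
# unnamed events after the wired function; on older versions, pass fn_index=0 instead.
#
#   from gradio_client import Client
#
#   client = Client("http://localhost:5000")
#   title = client.predict("How do I reverse a list in Python?", api_name="/make_title")
#   print(title)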