# app.py — Gradio Space entry point (commit 6300e91)
import gradio as gr
from transformers import T5ForConditionalGeneration, T5Tokenizer
# Load the fine-tuned T5 model and its tokenizer from the Hugging Face Hub.
# Both come from the same repo so the tokenizer's vocabulary matches the model.
# NOTE(review): this downloads weights on first run — requires network access.
model = T5ForConditionalGeneration.from_pretrained("gcuomo/open-source-ai-t5-liar-lens")
tokenizer = T5Tokenizer.from_pretrained("gcuomo/open-source-ai-t5-liar-lens")
# Shared prediction function used by both the UI button and remote callers.
def classify(statement):
    """Return the model's predicted label (lowercased) for *statement*.

    The checkpoint was fine-tuned with a ``summarize:`` task prefix, so the
    raw statement is wrapped in that prompt before tokenization. Generation
    is capped at a few tokens because the output is a short class label.
    """
    encoded = tokenizer(
        f"summarize: {statement}",
        return_tensors="pt",
        padding=True,
        truncation=True,
        max_length=128,
    )
    generated = model.generate(**encoded, max_new_tokens=8)
    label = tokenizer.decode(generated[0], skip_special_tokens=True)
    return label.strip().lower()
# Build the demo UI with Blocks.
with gr.Blocks() as demo:
    # Heading emoji repaired from mojibake ("πŸ€₯" was a garbled 🤥).
    gr.Markdown("## 🤥 Open Source AI – LIAR Lens")
    with gr.Row():
        inp = gr.Textbox(
            label="Enter a statement",
            lines=2,
            placeholder="e.g. The book 'Open Source AI' explores Hugging Face and T5 models.",
        )
        out = gr.Textbox(label="Predicted label")
    btn = gr.Button("Classify")
    # api_name registers this event as a named endpoint so gradio_client can
    # call it remotely: Client(...).predict(text, api_name="/predict").
    btn.click(fn=classify, inputs=inp, outputs=out, api_name="predict")

# NOTE(review): assigning an attribute on the Blocks instance does NOT create
# a remote endpoint — gradio_client discovers endpoints from event listeners'
# api_name (set on btn.click above). Kept only for backward compatibility with
# any code that reads demo.predict directly.
demo.predict = classify
# Turn on request queueing (serializes concurrent predictions), then start
# the web server. Blocks.queue() returns the Blocks instance, so the two
# calls chain.
demo.queue().launch()