import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline

# Load model and tokenizer from local files in the root (already uploaded)
tokenizer = AutoTokenizer.from_pretrained(".", local_files_only=True)
model = AutoModelForSeq2SeqLM.from_pretrained(".", local_files_only=True)

pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer)

# Inference function
def predict(task, prompt, context, auto_cot):
    full_prompt = f"[TASK: {task.upper()}] {prompt}"
    if context:
        full_prompt += f" Context: {context}"
    if auto_cot:
        full_prompt += "\nLet's think step by step."
    output = pipe(full_prompt, max_new_tokens=128)[0]["generated_text"]
    return output

# Create Interface
demo = gr.Interface(
    fn=predict,
    inputs=["text", "text", "text", "checkbox"],
    outputs="text",
)
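
# Optional sanity check before launching the UI (a sketch, not part of the app's
# required logic; it assumes the local model files above loaded successfully, and
# the task label, prompt, and flags below are placeholder values):
print(predict("qa", "What is the capital of France?", "", True))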
# ⚠️ Do NOT set share=True or inbrowser=True
demo.launch()
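
# How a deployed copy of this Space could be queried programmatically
# (a sketch, kept as a comment so it is not executed inside the app itself;
# "your-username/your-space" is a hypothetical Space id, and "/predict" is
# the default api_name Gradio assigns to a single gr.Interface):
#
#   from gradio_client import Client
#   client = Client("your-username/your-space")
#   result = client.predict("qa", "What is 2 + 2?", "", False, api_name="/predict")
#   print(result)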