File size: 919 Bytes
3965925
 
 
1c6ad86
8318b40
 
3965925
 
1c6ad86
 
9d2acd8
3965925
9d2acd8
3965925
9d2acd8
 
 
8e19006
1c6ad86
ad63f3a
3965925
d9a9784
ad63f3a
3965925
 
1c6ad86
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline

# Load model and tokenizer from local files in the root (already uploaded).
# local_files_only=True forces offline loading — the weights/config must
# already exist at "." (e.g. uploaded alongside this script in a HF Space);
# no hub download is attempted.
tokenizer = AutoTokenizer.from_pretrained(".", local_files_only=True)
model = AutoModelForSeq2SeqLM.from_pretrained(".", local_files_only=True)
# Wrap model + tokenizer in a text2text-generation pipeline; `pipe` is the
# single entry point used by predict() below.
pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer)

# Inference function
def predict(task, prompt, context, auto_cot):
    """Build a task-tagged prompt and run it through the seq2seq pipeline.

    Args:
        task: Task label; uppercased into a leading ``[TASK: ...]`` tag.
        prompt: The user's main input text.
        context: Optional extra context; appended when non-empty.
        auto_cot: When truthy, appends a chain-of-thought trigger line.

    Returns:
        The generated text string from the pipeline (first candidate).
    """
    pieces = [f"[TASK: {task.upper()}] {prompt}"]
    if context:
        pieces.append(f" Context: {context}")
    if auto_cot:
        pieces.append("\nLet's think step by step.")
    # Cap generation length so the UI stays responsive.
    result = pipe("".join(pieces), max_new_tokens=128)
    return result[0]["generated_text"]

# Create Interface
# Inputs map positionally to predict(task, prompt, context, auto_cot):
# three free-text fields plus one checkbox toggling chain-of-thought.
# NOTE: the variable must stay named `demo` — Hugging Face Spaces looks
# for a module-level `demo` to serve.
demo = gr.Interface(
    fn=predict,
    inputs=["text", "text", "text", "checkbox"],
    outputs="text"
)

# ⚠️ Do NOT set share=True or inbrowser=True — the hosting environment
# (e.g. a HF Space) manages the public URL and browser itself.
demo.launch()