import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the PrisimAI T5 tokenizer and model from the Hugging Face Hub
model_name = "CJHauser/PrisimAI-t5"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

def answer_question(context, question):
    # Build a T5-style QA prompt: question and supporting context in one string
    input_text = f"question: {question} context: {context}"
    # Tokenize (truncating to the model's maximum input length) and generate an answer
    inputs = tokenizer.encode(input_text, return_tensors="pt", truncation=True)
    outputs = model.generate(inputs, max_length=128)
    answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return answer

# Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("# πŸ€– PrisimAI Q&A\nAsk questions based on a given context.")
    
    with gr.Row():
        context = gr.Textbox(label="Context", placeholder="Paste your reference text here...", lines=8)
    
    question = gr.Textbox(label="Your Question", placeholder="What do you want to know?")
    answer = gr.Textbox(label="Answer", interactive=False)
    
    # Wire the button to the QA function
    btn = gr.Button("Get Answer")
    btn.click(fn=answer_question, inputs=[context, question], outputs=answer)

demo.launch()