import os
import gradio as gr
from huggingface_hub import InferenceClient

# Get your Hugging Face token from environment variables (set this in your Space's secrets)
HF_TOKEN = os.getenv("HF_TOKEN")

# Initialize the inference client (the "hf-inference" provider routes requests to Hugging Face's serverless Inference API)
client = InferenceClient(
    provider="hf-inference",
    api_key=HF_TOKEN,
)

# Question-answering function that the Gradio interface calls
def answer_question(question, context):
    if not HF_TOKEN:
        return "HF_TOKEN not found. Please set it in the environment variables."

    try:
        result = client.question_answering(
            question=question,
            context=context,
            model="deepset/roberta-base-squad2",
        )
        return result["answer"]
    except Exception as e:
        return f"Error: {str(e)}"
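
# Illustrative usage (not run here): with a valid HF_TOKEN set,
# answer_question("Where do I live?", "My name is Sarah and I live in London.")
# should return something like "London". The raw result from question_answering
# also carries a confidence `score` and the `start`/`end` character offsets of
# the answer span within the context.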

# Gradio Interface
iface = gr.Interface(
    fn=answer_question,
    inputs=[
        gr.Textbox(label="Question"),
        gr.Textbox(label="Context", lines=5),
    ],
    outputs="text",
    title="Question Answering with RoBERTa",
    description="Enter a question and context. The model will find the answer from the context.",
)
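
# Optional: you could pass examples to gr.Interface above, e.g.
# examples=[["Where do I live?", "My name is Sarah and I live in London."]]
# (illustrative values), so visitors can try the demo with one click.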

# Launch the Gradio app (a Hugging Face Space runs this script as its entrypoint)
if __name__ == "__main__":
    iface.launch()