import gradio as gr
from transformers import BlipForQuestionAnswering, AutoProcessor


# Load the pre-trained BLIP visual question answering model and its processor
model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base")

def answer_question(image, question):
    """Answer a natural-language question about the given image."""
    # Preprocess the image and question into model-ready tensors
    inputs = processor(image, question, return_tensors="pt")
    # Generate the answer token ids and decode them to plain text
    out = model.generate(**inputs)
    answer = processor.decode(out[0], skip_special_tokens=True)
    return answer
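
# A minimal usage sketch (not executed here), assuming a local image file
# named "example.jpg" exists alongside this script:
#   from PIL import Image
#   print(answer_question(Image.open("example.jpg"), "What is in the picture?"))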


iface = gr.Interface(
    fn=answer_question,
    inputs=[
        # Use the top-level components (gr.Image, gr.Textbox) rather than the
        # deprecated gr.inputs namespace, which no longer exists in current Gradio
        gr.Image(type="pil", label="Upload Image"),
        gr.Textbox(label="Enter Your Question"),
    ],
    outputs="text",
    title="BLIP Question Answering",
    description="Upload an image and ask a question to get an answer."
)


# Start the Gradio web app
iface.launch()