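"""Visual Question Answering demo: BLIP (Salesforce/blip-vqa-base) served via Gradio.

Upload an image and ask a question about it; the model generates a short answer.
"""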
from transformers import BlipProcessor, BlipForQuestionAnswering
import torch
import gradio as gr
from PIL import Image

# Load the processor and model
processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")
model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")

# Set device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def vqa_answer(image, question):
    # Gradio passes None when no image was uploaded; fail gracefully instead of crashing
    if image is None or not question:
        return "Please upload an image and enter a question."

    # Preprocess the inputs
    inputs = processor(image, question, return_tensors="pt").to(device)
    
    # Generate the answer
    with torch.no_grad():
        generated_ids = model.generate(**inputs)
        answer = processor.decode(generated_ids[0], skip_special_tokens=True)
    
    return answer
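
# Optional local sanity check (the test image path below is illustrative):
# print(vqa_answer(Image.open("test.jpg"), "What is in the picture?"))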

# Define the input components
image_input = gr.Image(type="pil", label="Upload an Image")
question_input = gr.Textbox(lines=1, placeholder="Enter your question here...", label="Question")

# Define the output component
answer_output = gr.Textbox(label="Answer")

# Create the interface
iface = gr.Interface(
    fn=vqa_answer,
    inputs=[image_input, question_input],
    outputs=answer_output,
    title="Visual Question Answering App",
    description="Ask a question about the uploaded image.",
    article="This app uses the BLIP model to answer questions about images."
)

# Launch the app; share=True also serves it through a temporary public gradio.live link
iface.launch(share=True)
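
# Example interaction once the app is running (illustrative, not a recorded output):
#   upload a photo of a dog on a beach and ask "What animal is this?" -> the model
#   typically answers with a single word such as "dog".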