Update app.py
app.py CHANGED
@@ -1,5 +1,4 @@
 import gradio as gr
-from utils import generate_response
 from transformers import BlipProcessor, BlipForConditionalGeneration
 import torch
 from PIL import Image
@@ -9,12 +8,12 @@ processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base
 model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")

 # Function to generate description for an image using BLIP
-def describe_image(image):
+def describe_image(image: Image.Image):
     try:
-        #
+        # Preprocess the image and pass it to the model
        inputs = processor(images=image, return_tensors="pt")

-        # Generate
+        # Generate caption using the model
         out = model.generate(**inputs)
         description = processor.decode(out[0], skip_special_tokens=True)

@@ -25,8 +24,8 @@ def describe_image(image):
 # Define chatbot interaction function
 def chat(user_input, chat_history, image):
     try:
-        # Generate text response
-        response =
+        # Generate text response (text processing happens via your custom function)
+        response = f"AI Response: {user_input}"  # Placeholder response for now

         # If an image is uploaded, describe it using the image captioning model
         if image is not None:
@@ -35,11 +34,11 @@ def chat(user_input, chat_history, image):
         else:
             image_description = "No image uploaded."

-        # Update chat history
+        # Update chat history with both user input and AI response
         chat_history.append(("User", user_input))
         chat_history.append(("AI", response))

         # Format chat history for display
         formatted_history = "\n".join(
             [f"{role}: {message}" for role, message in chat_history]
         )
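
For reference, the captioning path that describe_image relies on can be exercised outside the app. A minimal standalone sketch: the "example.jpg" path is illustrative, while the checkpoint name and the processor/generate/decode calls mirror the ones in the diff.

# Standalone check of the BLIP captioning path used by describe_image
from transformers import BlipProcessor, BlipForConditionalGeneration
from PIL import Image

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")

image = Image.open("example.jpg").convert("RGB")  # illustrative path; BLIP expects an RGB image
inputs = processor(images=image, return_tensors="pt")  # -> pixel_values tensor
out = model.generate(**inputs)  # greedy decoding by default
print(processor.decode(out[0], skip_special_tokens=True))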
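
The hunks end before the Gradio wiring, so how chat is attached to the UI is not shown in this commit. Below is a minimal Blocks sketch under two assumptions: chat returns the formatted history string, and it mutates chat_history in place. The on_send wrapper and the component labels are hypothetical, not part of the diff.

import gradio as gr

def on_send(user_input, chat_history, image):
    # Hypothetical wrapper: assumes chat returns the formatted history string
    # and appends to chat_history in place, so the list is returned as well
    # to refresh the session State explicitly.
    formatted = chat(user_input, chat_history, image)
    return formatted, chat_history

with gr.Blocks() as demo:
    chat_state = gr.State([])  # list of (role, message) tuples
    user_input = gr.Textbox(label="Your message")
    image = gr.Image(type="pil", label="Optional image")
    history_box = gr.Textbox(label="Conversation", lines=10)
    send = gr.Button("Send")
    send.click(on_send,
               inputs=[user_input, chat_state, image],
               outputs=[history_box, chat_state])

demo.launch()

Returning chat_history alongside the formatted text keeps the gr.State value explicitly refreshed each turn rather than relying on in-place mutation alone.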