Update app.py
app.py CHANGED
@@ -1,43 +1,6 @@
-# import gradio as gr
-# from segmentation import segment_image
-# import numpy as np
-# import cv2
-
-# # Default test image
-# default_image_path = "./image.png"
-
-# def segment_and_display(image_path=default_image_path):
-#     # Call the segmentation function
-#     original_image, segmented_image = segment_image(image_path)
-
-#     # Return the images for display
-#     return original_image, segmented_image
-
-# # Load the default test image
-# default_original_image, default_segmented_image = segment_image(default_image_path)
-
-# # Gradio interface
-# iface = gr.Interface(
-#     fn=segment_and_display,
-#     inputs=gr.Image(type="filepath", label="Upload Image"),
-#     outputs=[
-#         gr.Image(type="numpy", label="Original Image"),
-#         gr.Image(type="numpy", label="Segmented Image")
-#     ],
-#     title="Image Segmentation with K-means (k=2)",
-#     description="Upload an image or use the default test image to see the segmentation result.",
-#     examples=[
-#         [default_image_path]
-#     ],
-#     live=True  # Show changes in real time
-# )
-
-# # Show the default test image when the interface opens
-# iface.launch(share=True, inline=True)
 import gradio as gr
 from segmentation import segment_image
 import os
-import os
 import requests
 import base64
 
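Note: `segment_image` is imported from a local `segmentation` module that is not part of this commit. A minimal sketch of what such a helper might look like, assuming the K-means (k=2) approach named in the old interface title (illustrative only, not the actual module):

import cv2
import numpy as np

def segment_image(image_path, k=2):
    """Cluster pixel colors with K-means and return (original, segmented) RGB arrays."""
    bgr = cv2.imread(image_path)
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)

    # Flatten to (N, 3) float32 samples for cv2.kmeans.
    pixels = rgb.reshape(-1, 3).astype(np.float32)
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    _, labels, centers = cv2.kmeans(pixels, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)

    # Paint every pixel with its cluster center to obtain the segmented image.
    segmented = centers[labels.flatten()].astype(np.uint8).reshape(rgb.shape)
    return rgb, segmented
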
@@ -52,19 +15,26 @@ def query_medgemma(image_path, question):
     encoded_image = base64.b64encode(image_bytes).decode("utf-8")
     encoded_image = f"data:image/png;base64,{encoded_image}"
 
-
-
-
-
-
+    payload = {
+        "inputs": [
+            {
+                "role": "user",
+                "content": [
+                    {"type": "text", "text": question},
+                    {"type": "image", "image": encoded_image}
+                ]
+            }
+        ]
     }
-    }
 
     response = requests.post(API_URL, headers=HEADERS, json=payload)
     if response.ok:
-
+        try:
+            return response.json()[0]["generated_text"][-1]["content"]
+        except Exception as e:
+            return f"Parse Error: {str(e)}\nFull Response: {response.json()}"
     else:
-        return f"Error: {response.text}"
+        return f"Error: {response.status_code} - {response.text}"
 
 # Default image path
 default_image_path = "./image.png"
@@ -86,11 +56,10 @@ with gr.Blocks() as demo:
 
     with gr.Column(scale=2):
         chatbot = gr.Textbox(label="Ask MedGemma", placeholder="Enter your medical question...")
-        image_for_analysis = gr.Image(type="filepath", label="Upload image for analysis
+        image_for_analysis = gr.Image(type="filepath", label="Upload image for analysis")
         analyze_button = gr.Button("Analyze")
         response_output = gr.Textbox(label="Response")
 
     analyze_button.click(fn=analyze_with_medgemma, inputs=[image_for_analysis, chatbot], outputs=response_output)
 
 demo.launch()
-
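
Note: `analyze_with_medgemma`, wired to the Analyze button above, is defined elsewhere in app.py and does not appear in these hunks. A hypothetical sketch of how such a handler could forward to `query_medgemma` (names and defaults are assumptions, not the actual code):

def analyze_with_medgemma(image_path, question):
    # Guard against empty inputs before calling the remote model.
    if image_path is None:
        return "Please upload an image before clicking Analyze."
    if not question or not question.strip():
        return "Please enter a question for MedGemma."
    # Delegate to query_medgemma, the function modified in the second hunk.
    return query_medgemma(image_path, question)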