# yusir4200's picture
# Update app.py
# b5fd219 verified
import gradio as gr
from segmentation import segment_image
import os
import requests
import base64
# Hugging Face API token (configured via the Space's Secrets settings).
HUGGINGFACE_API_TOKEN = os.getenv("HUGGINGFACE_TOKEN")
# Serverless Inference API endpoint for the MedGemma 4B instruction-tuned model.
API_URL = "https://api-inference.huggingface.co/models/google/medgemma-4b-it"
# Bearer-token auth header sent with every inference request.
HEADERS = {"Authorization": f"Bearer {HUGGINGFACE_API_TOKEN}"}
def query_medgemma(image_path, question):
    """Send a local image plus a text question to the MedGemma Inference API.

    Args:
        image_path: Path to a local image file to include in the prompt.
        question: The user's question about the image.

    Returns:
        The model's answer text on success, or a human-readable error
        string describing the HTTP, network, or response-parsing failure.
        (Errors are returned as strings, not raised, so the Gradio UI can
        display them directly.)
    """
    with open(image_path, "rb") as f:
        image_bytes = f.read()
    # The API expects the image embedded as a base64 data URI.
    encoded_image = base64.b64encode(image_bytes).decode("utf-8")
    encoded_image = f"data:image/png;base64,{encoded_image}"
    payload = {
        "inputs": [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": question},
                    {"type": "image", "image": encoded_image}
                ]
            }
        ]
    }
    try:
        # Timeout keeps a stalled inference request from hanging the
        # Gradio worker indefinitely.
        response = requests.post(API_URL, headers=HEADERS, json=payload, timeout=120)
    except requests.RequestException as e:
        # Network-level failure (DNS, connection refused, timeout, ...).
        return f"Error: request failed - {e}"
    if not response.ok:
        return f"Error: {response.status_code} - {response.text}"
    try:
        # Expected shape: [{"generated_text": [..., {"content": "<answer>"}]}]
        return response.json()[0]["generated_text"][-1]["content"]
    except Exception as e:
        # Use response.text here: if response.json() itself raised above
        # (non-JSON 200 body), calling .json() again in this handler would
        # raise a second, uncaught exception.
        return f"Parse Error: {str(e)}\nFull Response: {response.text}"
# Default image path.
# NOTE(review): not referenced by any visible code below — presumably kept
# for a demo default; confirm before removing.
default_image_path = "./image.png"
def segment_only(image_path):
    """Segment the image at *image_path* and return only the segmented output.

    ``segment_image`` returns a pair; the first element is not needed here.
    """
    result = segment_image(image_path)
    return result[1]
def analyze_with_medgemma(image, question):
    """Forward the uploaded image and question to the MedGemma API helper."""
    answer = query_medgemma(image, question)
    return answer
# Build the two-column Gradio UI: segmentation on the left, MedGemma Q&A on
# the right.  Component creation order determines the on-screen layout.
with gr.Blocks() as demo:
    with gr.Row():
        # Left column: upload an image and view its segmentation result.
        with gr.Column(scale=1):
            image_input = gr.Image(type="filepath", label="Upload Image")
            segmented_output = gr.Image(type="numpy", label="Segmented Image")
            # Re-run segmentation automatically whenever the upload changes.
            image_input.change(fn=segment_only, inputs=image_input, outputs=segmented_output)
        # Right column: ask MedGemma about a separately uploaded image.
        # NOTE(review): this column's image is independent of the left
        # column's upload — the user must upload the image twice to do both.
        with gr.Column(scale=2):
            chatbot = gr.Textbox(label="Ask MedGemma", placeholder="Enter your medical question...")
            image_for_analysis = gr.Image(type="filepath", label="Upload image for analysis")
            analyze_button = gr.Button("Analyze")
            response_output = gr.Textbox(label="Response")
            analyze_button.click(fn=analyze_with_medgemma, inputs=[image_for_analysis, chatbot], outputs=response_output)
# Launch the app (blocks until the server is stopped).
demo.launch()