import os
from dotenv import load_dotenv
import gradio as gr
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration
from openai import OpenAI
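
# Assumed runtime dependencies (not pinned in this file): gradio, transformers,
# torch, pillow, python-dotenv, openai
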
# Load environment variables from .env file
load_dotenv()
# Retrieve OpenAI credentials from environment
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
OPENAI_API_BASE = os.getenv("OPENAI_API_BASE", "https://models.inference.ai.azure.com")  # fallback endpoint if unset
# Create the OpenAI client with the configured credentials
client = OpenAI(api_key=OPENAI_API_KEY, base_url=OPENAI_API_BASE)
# Load the BLIP model and processor
processor = BlipProcessor.from_pretrained("nathansutton/generate-cxr")
model = BlipForConditionalGeneration.from_pretrained("nathansutton/generate-cxr")

def generate_report(image):
    """Generate a CXR report from the uploaded image."""
    inputs = processor(images=image, text="a chest x-ray", return_tensors="pt")
    output = model.generate(**inputs, max_length=512)
    report = processor.decode(output[0], skip_special_tokens=True)
    return report

def chat_with_openai(user_message, previous_report):
    """Chat with GPT-4o based on the generated report."""
    conversation = [
        {"role": "system", "content": "You are a helpful medical assistant."},
        {"role": "user", "content": f"Here is a medical report: {previous_report}. Now, {user_message}"},
    ]
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=conversation,
        temperature=1.0,
        top_p=1.0,
        max_tokens=1000,
    )
    return response.choices[0].message.content

def process_image_and_chat(image, user_message, chat_history):
    """Handle the full flow: generate the report, then chat about it."""
    if chat_history is None:
        chat_history = []
    # Step 1: Generate the CXR report
    report = generate_report(image)
    chat_history.append({"role": "assistant", "content": report})
    # Step 2: Chat based on the report
    response = chat_with_openai(user_message, report)
    chat_history.append({"role": "user", "content": user_message})
    chat_history.append({"role": "assistant", "content": response})
    return chat_history, chat_history

# Gradio Interface
iface = gr.Interface(
    fn=process_image_and_chat,
    inputs=[
        gr.Image(type="pil", label="Upload Chest X-ray Image"),
        gr.Textbox(label="Your Question", placeholder="Ask a question about the report..."),
        gr.State(value=[]),  # Memory for chat history
    ],
    outputs=[
        gr.Chatbot(label="Medical Assistant Chat", type="messages"),
        gr.State(),  # Return updated history
    ],
    title="Chest X-ray Assistant",
    description="Upload a chest X-ray image and ask questions about it. The assistant will generate a radiology report and answer your questions using GPT-4o.",
)

if __name__ == "__main__":
    iface.launch()