"""MedSynapticGPT – Streamlit UI"""
import os, io, tempfile, json, base64
from pathlib import Path
import streamlit as st
import openai, pydicom, numpy as np
from PIL import Image
from pydub import AudioSegment
import requests
openai.api_key = os.getenv("OPENAI_API_KEY")
st.set_page_config(page_title="MedSynapticGPT", layout="wide")
st.title("🩺 MedSynapticGPT – Multimodal Clinical Reasoner")
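
# Assumption: the Space exposes OPENAI_API_KEY as a secret. Warn early if it
# is missing so the model calls below don't fail opaquely.
if not openai.api_key:
    st.warning("OPENAI_API_KEY is not set – model calls will fail.")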
if "credits" not in st.session_state:
st.session_state.credits = 3 # daily free quota
def charge_credit():
    st.session_state.credits -= 1
    if st.session_state.credits < 0:
        st.error("Free quota reached. Upgrade for more cases.")
        st.stop()
# ───────── Helpers ─────────
@st.cache_data(show_spinner=False)
def gpt(system, user):
    resp = openai.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "system", "content": system},
            {"role": "user", "content": user},
        ],
        max_tokens=700,
    )
    return resp.choices[0].message.content.strip()
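
# gpt() above is text-only, but the Radiology tab needs to send pixels to the
# model. Minimal vision variant, assuming the OpenAI v1 image_url content
# format (a data URL carrying base64-encoded PNG bytes); gpt-4o-mini accepts
# image inputs.
@st.cache_data(show_spinner=False)
def gpt_vision(system, user, png_bytes):
    b64 = base64.b64encode(png_bytes).decode()
    resp = openai.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "system", "content": system},
            {"role": "user", "content": [
                {"type": "text", "text": user},
                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{b64}"}},
            ]},
        ],
        max_tokens=700,
    )
    return resp.choices[0].message.content.strip()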
@st.cache_data(show_spinner=False)
def whisper_transcribe(audio_bytes, suffix=".wav"):
    # Write to a temp file first; the transcription API infers the audio
    # format from the file extension, so keep the uploader's suffix.
    with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp:
        tmp.write(audio_bytes)
    try:
        with open(tmp.name, "rb") as f:
            return openai.audio.transcriptions.create(model="whisper-1", file=f).text
    finally:
        os.unlink(tmp.name)
@st.cache_data(show_spinner=False)
def dicom_to_png(ds):
    # Normalize the pixel data to 0–255 for display.
    arr = ds.pixel_array.astype(np.float32)
    arr -= arr.min()
    peak = arr.max()
    if peak > 0:  # avoid divide-by-zero on uniform images
        arr /= peak
    arr *= 255
    return Image.fromarray(arr.astype(np.uint8))
# ───────── Tabs ─────────
tabs = st.tabs(["Radiology AI", "Clinical Note Q&A", "Voice Triage", "UMLS Lookup", "GraphRAG"])
# Radiology
with tabs[0]:
    st.header("📷 Radiology AI")
    up = st.file_uploader("Upload DICOM or PNG/JPG", type=["dcm", "png", "jpg", "jpeg"])
    if up and st.button("Analyze"):
        charge_credit()
        if up.name.endswith(".dcm"):
            img = dicom_to_png(pydicom.dcmread(up))
        else:
            img = Image.open(up)
        st.image(img, caption="Preview", use_container_width=True)
        # Encode the preview as PNG so the model receives the image itself.
        buf = io.BytesIO()
        img.convert("RGB").save(buf, format="PNG")
        report = gpt_vision(
            "You are a board-certified radiologist. Provide an impression with SNOMED codes.",
            "Describe the findings and give TNM staging if cancer is present.",
            buf.getvalue(),
        )
        st.markdown("#### Report")
        st.write(report)
# Clinical Note Q&A
with tabs[1]:
    st.header("📄 Clinical Note Q&A")
    txt = st.text_area("Paste clinical note")
    q = st.text_input("Ask a question (or leave blank for summary)")
    if txt and st.button("Process"):
        charge_credit()
        prompt = f"Here is the clinical note:\n{txt}\n\nQuestion: {q or 'Summarize'}"
        ans = gpt("You are an expert physician.", prompt)
        st.write(ans)
# Voice Triage
with tabs[2]:
    st.header("🎤 Voice Triage")
    aud = st.file_uploader("Upload patient symptoms recording", type=["wav", "mp3", "m4a", "ogg"])
    if aud and st.button("Infer"):
        charge_credit()
        t = whisper_transcribe(aud.read(), Path(aud.name).suffix)
        st.write("**Transcript:**", t)
        diff = gpt(
            "You are a triage nurse.",
            f"Patient says: {t}\nProvide likely differentials with SNOMED codes and an urgency level.",
        )
        st.write(diff)
# UMLS Lookup
with tabs[3]:
    st.header("🔍 UMLS Concept Lookup")
    term = st.text_input("Term or CUI")
    if term:
        # UTS requires a registered API key; read it from the environment
        # (falls back to the original "sample" placeholder).
        resp = requests.get(
            "https://uts-ws.nlm.nih.gov/rest/search/current",
            params={"string": term, "apiKey": os.getenv("UMLS_API_KEY", "sample")},
        )
        if resp.ok:
            st.json(resp.json())
        else:
            st.error(f"UMLS lookup failed (HTTP {resp.status_code}).")
# GraphRAG Explorer (toy demo)
with tabs[4]:
    st.header("🧠 GraphRAG Explorer")
    st.info("Prototype biomedical KG Q&A – coming soon.")