# vedaMD/src/groq_medical_chatbot.py
"""
Groq Medical Chatbot Interface v2.0
FREE Groq Cloud API integration for advanced medical reasoning
"""
import gradio as gr
import time
import json
import os
from datetime import datetime
from typing import List, Tuple, Dict, Any
# Import our Groq medical RAG system
from groq_medical_rag import GroqMedicalRAG, MedicalResponse
class GroqMedicalChatbot:
"""Professional medical chatbot interface using Groq-powered RAG system"""
def __init__(self):
"""Initialize the Groq medical chatbot"""
self.rag_system = None
self.chat_history = []
self.session_stats = {
"queries_processed": 0,
"total_response_time": 0,
"avg_confidence": 0,
"llm_responses": 0,
"template_responses": 0,
"session_start": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
}
# Check for Groq API key
self.groq_api_key = os.getenv("GROQ_API_KEY")
# Initialize RAG system
self._initialize_rag_system()
def _initialize_rag_system(self):
"""Initialize the Groq RAG system"""
try:
if not self.groq_api_key:
print("⚠️ GROQ_API_KEY not found in environment variables")
print("πŸ”— Get your free API key at: https://console.groq.com/keys")
print("πŸ“ Set it with: export GROQ_API_KEY='your_api_key_here'")
self.rag_system = None
return
print("πŸš€ Initializing Groq Medical RAG System...")
self.rag_system = GroqMedicalRAG(groq_api_key=self.groq_api_key)
print("βœ… Groq Medical RAG System initialized successfully!")
except Exception as e:
print(f"❌ Error initializing Groq RAG system: {e}")
print("πŸ”„ Attempting to initialize without Groq...")
# Fallback to simple RAG system
try:
from simple_medical_rag import SimpleMedicalRAG
self.rag_system = SimpleMedicalRAG()
print("βœ… Fallback to simple medical RAG system")
except Exception as e2:
print(f"❌ Failed to initialize any RAG system: {e2}")
self.rag_system = None
def process_query(self, query: str, history: List[Tuple[str, str]], use_llm: bool = True) -> Tuple[List[Tuple[str, str]], str]:
"""Process medical query and return response"""
if not self.rag_system:
error_msg = "❌ **System Error**: Medical RAG system not initialized.\n\n"
error_msg += "Please ensure GROQ_API_KEY is set:\n"
error_msg += "1. Get free API key: https://console.groq.com/keys\n"
error_msg += "2. Set environment variable: `export GROQ_API_KEY='your_key'`\n"
error_msg += "3. Restart the application"
history.append((query, error_msg))
return history, ""
if not query.strip():
return history, ""
start_time = time.time()
try:
# Check if we have Groq integration
has_groq = hasattr(self.rag_system, 'groq_client')
# Process query with RAG system
if has_groq:
response = self.rag_system.query(query, k=5, use_llm=use_llm)
else:
response = self.rag_system.query(query, k=5)
response.llm_reasoning = False # Mark as template-based
# Format response for display
formatted_response = self._format_response_for_display(response, has_groq)
# Update session statistics
query_time = time.time() - start_time
self._update_session_stats(query_time, response.confidence, response.llm_reasoning)
# Add to chat history
history.append((query, formatted_response))
return history, ""
except Exception as e:
error_msg = f"❌ **Error processing query**: {str(e)}\n\n⚠️ Please try rephrasing your question or contact support."
history.append((query, error_msg))
return history, ""
def _format_response_for_display(self, response: MedicalResponse, has_groq: bool) -> str:
"""Format medical response for beautiful display in Gradio"""
# LLM vs Template indicator
reasoning_emoji = "πŸ€–" if response.llm_reasoning else "πŸ“‹"
reasoning_text = "AI Reasoning" if response.llm_reasoning else "Template-based"
# Confidence level indicator
confidence_emoji = "🟒" if response.confidence > 0.7 else "🟑" if response.confidence > 0.5 else "πŸ”΄"
confidence_text = f"{confidence_emoji} **Confidence: {response.confidence:.1%}**"
# Response type indicator
type_emoji = "πŸ’Š" if "dosage" in response.response_type else "🚨" if "emergency" in response.response_type else "πŸ₯"
# Main response
formatted_response = f"""
{type_emoji} **Medical Information**
{response.answer}
---
πŸ“Š **Response Details**
{reasoning_emoji} **Method**: {reasoning_text}
{confidence_text}
πŸ“š **Sources**: {len(response.sources)} documents referenced
"""
# Add top sources
if response.sources:
formatted_response += "πŸ“– **Primary Sources**:\n"
for i, source in enumerate(response.sources[:3], 1):
doc_name = source['document'].replace('.pdf', '').replace('-', ' ').title()
formatted_response += f"{i}. {doc_name} (Relevance: {source['relevance_score']:.1%})\n"
formatted_response += "\n"
# Add Groq status if applicable
if has_groq and response.llm_reasoning:
formatted_response += "🌟 **Powered by**: Groq Cloud (FREE tier) + Llama 3.1-8B\n\n"
elif has_groq and not response.llm_reasoning:
formatted_response += "⚑ **Fallback**: Template-based response (Groq available)\n\n"
elif not has_groq:
formatted_response += "⚠️ **Note**: Using template responses (Set GROQ_API_KEY for AI reasoning)\n\n"
# Add medical disclaimer
formatted_response += f"""
---
{response.medical_disclaimer}
πŸ”— **Note**: This response is based on Sri Lankan maternal health guidelines and should be used in conjunction with current clinical protocols.
"""
return formatted_response
def _update_session_stats(self, query_time: float, confidence: float, llm_reasoning: bool):
"""Update session statistics"""
self.session_stats["queries_processed"] += 1
self.session_stats["total_response_time"] += query_time
if llm_reasoning:
self.session_stats["llm_responses"] += 1
else:
self.session_stats["template_responses"] += 1
# Update average confidence
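# Incremental mean: new_avg = (old_avg * (n - 1) + latest) / n, so individual
# per-query confidence values never need to be stored.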
current_avg = self.session_stats["avg_confidence"]
queries = self.session_stats["queries_processed"]
self.session_stats["avg_confidence"] = ((current_avg * (queries - 1)) + confidence) / queries
def get_system_info(self) -> str:
"""Get system information for display"""
if not self.rag_system:
return """❌ **System Status**: Not initialized
πŸ”§ **Setup Required**:
1. Get free Groq API key: https://console.groq.com/keys
2. Set environment variable: `export GROQ_API_KEY='your_key'`
3. Restart the application
πŸ’‘ **Groq Benefits**: FREE LLM reasoning for medical queries"""
try:
stats = self.rag_system.get_system_stats()
has_groq = 'groq_integration' in stats
system_info = f"""
πŸ₯ **Sri Lankan Maternal Health Assistant v2.0**
πŸ“Š **System Status**: {stats['status'].upper()} βœ…
**Knowledge Base**:
β€’ πŸ“š Total Documents: {stats['vector_store']['total_chunks']:,} medical chunks
β€’ 🧠 Embedding Model: {stats['vector_store']['embedding_model']}
β€’ πŸ’Ύ Vector Store Size: {stats['vector_store']['vector_store_size_mb']} MB
β€’ ⚑ Approach: Simplified document-based retrieval
"""
# Add Groq integration info
if has_groq:
groq_info = stats['groq_integration']
system_info += f"""
**πŸ€– Groq Integration** (FREE):
β€’ Model: {groq_info['groq_model']}
β€’ Status: {groq_info['groq_api_status']} βœ…
β€’ LLM Reasoning: {groq_info['llm_reasoning']}
β€’ Daily Limit: 5,000 tokens (FREE tier)
"""
else:
system_info += """
**⚠️ Groq Integration**: Not available
β€’ Get free API key: https://console.groq.com/keys
β€’ Enables advanced medical reasoning
β€’ 5,000 free tokens/day
"""
# Add content distribution
system_info += "**Content Distribution**:\n"
for content_type, count in stats['vector_store']['content_type_distribution'].items():
percentage = (count / stats['vector_store']['total_chunks']) * 100
content_info = content_type.replace('_', ' ').title()
system_info += f"β€’ {content_info}: {count:,} chunks ({percentage:.1f}%)\n"
return system_info
except Exception as e:
return f"❌ **Error retrieving system info**: {str(e)}"
def get_session_stats(self) -> str:
"""Get session statistics for display"""
if self.session_stats["queries_processed"] == 0:
return "πŸ“ˆ **Session Statistics**: No queries processed yet"
avg_response_time = self.session_stats["total_response_time"] / self.session_stats["queries_processed"]
llm_percentage = (self.session_stats["llm_responses"] / self.session_stats["queries_processed"]) * 100
return f"""
πŸ“ˆ **Session Statistics**
πŸ• **Session Started**: {self.session_stats["session_start"]}
πŸ“ **Queries Processed**: {self.session_stats["queries_processed"]}
⚑ **Avg Response Time**: {avg_response_time:.2f} seconds
🎯 **Avg Confidence**: {self.session_stats["avg_confidence"]:.1%}
**Response Methods**:
πŸ€– **AI Reasoning**: {self.session_stats["llm_responses"]} ({llm_percentage:.1f}%)
πŸ“‹ **Template-based**: {self.session_stats["template_responses"]} ({100-llm_percentage:.1f}%)
"""
def clear_chat(self) -> Tuple[List, str]:
"""Clear chat history"""
self.chat_history = []
return [], ""
def get_example_queries(self) -> List[str]:
"""Get example medical queries"""
return [
"What is the dosage of magnesium sulfate for preeclampsia?",
"How to manage postpartum hemorrhage emergency?",
"Normal fetal heart rate during labor monitoring?",
"Management protocol for breech delivery?",
"Antenatal care schedule for high-risk pregnancies?",
"Signs and symptoms of preeclampsia?",
"When to perform cesarean delivery?",
"Postpartum care guidelines for new mothers?"
]
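# A minimal programmatic sketch (not used by the Gradio UI): assuming GROQ_API_KEY
# is set and the RAG modules above are importable, the chatbot class can be
# exercised without the interface roughly like this:
#
#     bot = GroqMedicalChatbot()
#     history, _ = bot.process_query("Signs and symptoms of preeclampsia?", [], use_llm=True)
#     print(history[-1][1])           # formatted answer for the latest turn
#     print(bot.get_session_stats())  # running session statistics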
def create_groq_medical_chatbot_interface():
"""Create the main Gradio interface"""
# Initialize chatbot
chatbot = GroqMedicalChatbot()
# Custom CSS for medical theme
css = """
.gradio-container {
font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
}
.main-header {
text-align: center;
color: #2c5aa0;
font-size: 2.2em;
font-weight: bold;
margin-bottom: 20px;
text-shadow: 2px 2px 4px rgba(0,0,0,0.1);
}
.description {
text-align: center;
font-size: 1.1em;
color: #555;
margin-bottom: 25px;
padding: 15px;
background: rgba(255,255,255,0.9);
border-radius: 10px;
box-shadow: 0 2px 10px rgba(0,0,0,0.1);
}
.tab-content {
background: white;
border-radius: 15px;
padding: 20px;
box-shadow: 0 4px 20px rgba(0,0,0,0.1);
}
.medical-disclaimer {
background: #fff3cd;
border: 1px solid #ffeaa7;
border-radius: 8px;
padding: 12px;
margin: 10px 0;
font-size: 0.9em;
color: #856404;
}
"""
with gr.Blocks(css=css, title="Sri Lankan Maternal Health Assistant") as interface:
# Header
gr.HTML("""
<div class="main-header">
πŸ₯ Sri Lankan Maternal Health Assistant v2.0
</div>
<div class="description">
<strong>πŸ€– Powered by FREE Groq Cloud API + Advanced Medical Reasoning</strong><br/>
Professional medical information based on Sri Lankan maternal health guidelines.<br/>
<em>Enhanced with Llama 3.1-8B for intelligent clinical responses</em>
</div>
""")
with gr.Tabs():
# Main Chat Interface
with gr.Tab("πŸ’¬ Medical Consultation"):
with gr.Row():
with gr.Column(scale=4):
chatbot_interface = gr.Chatbot(
label="Medical Assistant",
height=500,
placeholder="Welcome! Ask me about maternal health guidelines, medications, procedures, or emergency protocols."
)
with gr.Row():
msg = gr.Textbox(
label="Your Medical Question",
placeholder="E.g., What is the management protocol for preeclampsia?",
lines=2,
max_lines=5
)
submit_btn = gr.Button("🩺 Ask", variant="primary", scale=1)
with gr.Row():
use_llm_checkbox = gr.Checkbox(
label="πŸ€– Use AI Reasoning (Groq)",
value=True,
info="Uncheck for template-based responses"
)
clear_btn = gr.Button("πŸ—‘οΈ Clear Chat", variant="secondary")
with gr.Column(scale=1):
gr.HTML("<h3>πŸ“ Example Questions</h3>")
example_queries = chatbot.get_example_queries()
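# Clicking an example fills the question box and resets the chat display.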
for query in example_queries:
gr.Button(query, size="sm").click(
lambda x=query: (x, []),
outputs=[msg, chatbot_interface]
)
# System Information
with gr.Tab("πŸ“Š System Status"):
with gr.Row():
with gr.Column():
system_info = gr.Textbox(
label="System Information",
value=chatbot.get_system_info(),
lines=20,
max_lines=25,
interactive=False
)
refresh_btn = gr.Button("πŸ”„ Refresh System Info", variant="secondary")
with gr.Column():
session_stats = gr.Textbox(
label="Session Statistics",
value=chatbot.get_session_stats(),
lines=20,
max_lines=25,
interactive=False
)
stats_refresh_btn = gr.Button("πŸ“ˆ Refresh Stats", variant="secondary")
# Setup Instructions
with gr.Tab("βš™οΈ Setup & Configuration"):
gr.HTML("""
<div class="tab-content">
<h2>πŸš€ Quick Setup for FREE Groq Integration</h2>
<h3>1. Get Your FREE Groq API Key</h3>
<ol>
<li>Visit: <a href="https://console.groq.com/keys" target="_blank">https://console.groq.com/keys</a></li>
<li>Sign up for a free account</li>
<li>Create a new API key</li>
<li>Copy the API key</li>
</ol>
<h3>2. Set Environment Variable</h3>
<p><strong>macOS/Linux:</strong></p>
<code>export GROQ_API_KEY='your_api_key_here'</code>
<p><strong>Windows:</strong></p>
<code>set GROQ_API_KEY=your_api_key_here</code>
<h3>3. Restart the Application</h3>
<p>After setting the API key, restart this application to enable AI reasoning.</p>
<h3>🌟 Benefits of Groq Integration</h3>
<ul>
<li><strong>FREE:</strong> 5,000 tokens per day at no cost</li>
<li><strong>Fast:</strong> Ultra-fast inference with Groq's specialized hardware</li>
<li><strong>Smart:</strong> Llama 3.1-8B model with advanced medical reasoning</li>
<li><strong>Professional:</strong> Structured clinical responses</li>
</ul>
<div class="medical-disclaimer">
<strong>⚠️ Medical Disclaimer:</strong> This system provides educational information only and should not replace professional medical advice. Always consult qualified healthcare providers for clinical decisions.
</div>
</div>
""")
# Event handlers
def submit_query(query, history, use_llm):
return chatbot.process_query(query, history, use_llm)
def refresh_system():
return chatbot.get_system_info()
def refresh_stats():
return chatbot.get_session_stats()
def clear_chat_handler():
return chatbot.clear_chat()
# Connect events
submit_btn.click(
submit_query,
inputs=[msg, chatbot_interface, use_llm_checkbox],
outputs=[chatbot_interface, msg]
)
msg.submit(
submit_query,
inputs=[msg, chatbot_interface, use_llm_checkbox],
outputs=[chatbot_interface, msg]
)
clear_btn.click(
clear_chat_handler,
outputs=[chatbot_interface, msg]
)
refresh_btn.click(
refresh_system,
outputs=[system_info]
)
stats_refresh_btn.click(
refresh_stats,
outputs=[session_stats]
)
return interface
def main():
"""Launch the Groq medical chatbot"""
print("πŸš€ Starting Groq Medical Chatbot...")
interface = create_groq_medical_chatbot_interface()
# Launch with appropriate settings
interface.launch(
server_name="0.0.0.0",
server_port=7860,
share=False,
debug=False
)
if __name__ == "__main__":
main()