"""
Groq Medical Chatbot Interface v2.0
FREE Groq Cloud API integration for advanced medical reasoning
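
Run this module directly to launch the Gradio interface (served on 0.0.0.0:7860 by default).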
"""

import gradio as gr
import time
import json
import os
from datetime import datetime
from typing import List, Tuple, Dict, Any

# Import our Groq medical RAG system
from groq_medical_rag import GroqMedicalRAG, MedicalResponse

class GroqMedicalChatbot:
    """Professional medical chatbot interface using Groq-powered RAG system"""
    
    def __init__(self):
        """Initialize the Groq medical chatbot"""
        self.rag_system = None
        self.chat_history = []
        self.session_stats = {
            "queries_processed": 0,
            "total_response_time": 0,
            "avg_confidence": 0,
            "llm_responses": 0,
            "template_responses": 0,
            "session_start": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        }
        
        # Check for Groq API key
        self.groq_api_key = os.getenv("GROQ_API_KEY")
        
        # Initialize RAG system
        self._initialize_rag_system()
    
    def _initialize_rag_system(self):
        """Initialize the Groq RAG system"""
        try:
            if not self.groq_api_key:
                print("⚠️  GROQ_API_KEY not found in environment variables")
                print("πŸ”— Get your free API key at: https://console.groq.com/keys")
                print("πŸ“ Set it with: export GROQ_API_KEY='your_api_key_here'")
                self.rag_system = None
                return
            
            print("πŸš€ Initializing Groq Medical RAG System...")
            self.rag_system = GroqMedicalRAG(groq_api_key=self.groq_api_key)
            print("βœ… Groq Medical RAG System initialized successfully!")
            
        except Exception as e:
            print(f"❌ Error initializing Groq RAG system: {e}")
            print("πŸ”„ Attempting to initialize without Groq...")
            
            # Fallback to simple RAG system
            try:
                from simple_medical_rag import SimpleMedicalRAG
                self.rag_system = SimpleMedicalRAG()
                print("βœ… Fallback to simple medical RAG system")
            except Exception as e2:
                print(f"❌ Failed to initialize any RAG system: {e2}")
                self.rag_system = None
    
    def process_query(self, query: str, history: List[Tuple[str, str]], use_llm: bool = True) -> Tuple[List[Tuple[str, str]], str]:
        """Process medical query and return response"""
        if not self.rag_system:
            error_msg = "❌ **System Error**: Medical RAG system not initialized.\n\n"
            error_msg += "Please ensure GROQ_API_KEY is set:\n"
            error_msg += "1. Get free API key: https://console.groq.com/keys\n"
            error_msg += "2. Set environment variable: `export GROQ_API_KEY='your_key'`\n"
            error_msg += "3. Restart the application"
            
            history.append((query, error_msg))
            return history, ""
        
        if not query.strip():
            return history, ""
        
        start_time = time.time()
        
        try:
            # Check if we have Groq integration
            has_groq = hasattr(self.rag_system, 'groq_client')
            
            # Process query with RAG system
            if has_groq:
                response = self.rag_system.query(query, k=5, use_llm=use_llm)
            else:
                response = self.rag_system.query(query, k=5)
                response.llm_reasoning = False  # Mark as template-based
            
            # Format response for display
            formatted_response = self._format_response_for_display(response, has_groq)
            
            # Update session statistics
            query_time = time.time() - start_time
            self._update_session_stats(query_time, response.confidence, response.llm_reasoning)
            
            # Add to chat history
            history.append((query, formatted_response))
            
            return history, ""
            
        except Exception as e:
            error_msg = f"❌ **Error processing query**: {str(e)}\n\n⚠️ Please try rephrasing your question or contact support."
            history.append((query, error_msg))
            return history, ""
    
    def _format_response_for_display(self, response: MedicalResponse, has_groq: bool) -> str:
        """Format medical response for beautiful display in Gradio"""
        
        # LLM vs Template indicator
        reasoning_emoji = "🤖" if response.llm_reasoning else "📋"
        reasoning_text = "AI Reasoning" if response.llm_reasoning else "Template-based"
        
        # Confidence level indicator
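        # Thresholds: > 0.7 green, > 0.5 yellow, otherwise red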
        confidence_emoji = "🟢" if response.confidence > 0.7 else "🟡" if response.confidence > 0.5 else "🔴"
        confidence_text = f"{confidence_emoji} **Confidence: {response.confidence:.1%}**"
        
        # Response type indicator
        type_emoji = "💊" if "dosage" in response.response_type else "🚨" if "emergency" in response.response_type else "🏥"
        
        # Main response
        formatted_response = f"""
{type_emoji} **Medical Information**

{response.answer}

---

📊 **Response Details**
{reasoning_emoji} **Method**: {reasoning_text}
{confidence_text}
📚 **Sources**: {len(response.sources)} documents referenced

"""
        
        # Add top sources
        if response.sources:
            formatted_response += "πŸ“– **Primary Sources**:\n"
            for i, source in enumerate(response.sources[:3], 1):
                doc_name = source['document'].replace('.pdf', '').replace('-', ' ').title()
                formatted_response += f"{i}. {doc_name} (Relevance: {source['relevance_score']:.1%})\n"
            formatted_response += "\n"
        
        # Add Groq status if applicable
        if has_groq and response.llm_reasoning:
            formatted_response += "🌟 **Powered by**: Groq Cloud (FREE tier) + Llama 3.1-8B\n\n"
        elif has_groq and not response.llm_reasoning:
            formatted_response += "⚑ **Fallback**: Template-based response (Groq available)\n\n"
        elif not has_groq:
            formatted_response += "⚠️ **Note**: Using template responses (Set GROQ_API_KEY for AI reasoning)\n\n"
        
        # Add medical disclaimer
        formatted_response += f"""
---

{response.medical_disclaimer}

🔗 **Note**: This response is based on Sri Lankan maternal health guidelines and should be used in conjunction with current clinical protocols.
"""
        
        return formatted_response
    
    def _update_session_stats(self, query_time: float, confidence: float, llm_reasoning: bool):
        """Update session statistics"""
        self.session_stats["queries_processed"] += 1
        self.session_stats["total_response_time"] += query_time
        
        if llm_reasoning:
            self.session_stats["llm_responses"] += 1
        else:
            self.session_stats["template_responses"] += 1
        
        # Update average confidence
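        # Incremental mean: new_avg = (old_avg * (n - 1) + new_value) / n, so no per-query history is stored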
        current_avg = self.session_stats["avg_confidence"]
        queries = self.session_stats["queries_processed"]
        self.session_stats["avg_confidence"] = ((current_avg * (queries - 1)) + confidence) / queries
    
    def get_system_info(self) -> str:
        """Get system information for display"""
        if not self.rag_system:
            return """❌ **System Status**: Not initialized

🔧 **Setup Required**:
1. Get free Groq API key: https://console.groq.com/keys  
2. Set environment variable: `export GROQ_API_KEY='your_key'`
3. Restart the application

💡 **Groq Benefits**: FREE LLM reasoning for medical queries"""
        
        try:
            stats = self.rag_system.get_system_stats()
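            # Keys read below: 'status', 'vector_store' (total_chunks, embedding_model,
            # vector_store_size_mb, content_type_distribution) and, when Groq is active, 'groq_integration'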
            has_groq = 'groq_integration' in stats
            
            system_info = f"""
πŸ₯ **Sri Lankan Maternal Health Assistant v2.0**

πŸ“Š **System Status**: {stats['status'].upper()} βœ…

**Knowledge Base**:
β€’ πŸ“š Total Documents: {stats['vector_store']['total_chunks']:,} medical chunks
β€’ 🧠 Embedding Model: {stats['vector_store']['embedding_model']}
β€’ πŸ’Ύ Vector Store Size: {stats['vector_store']['vector_store_size_mb']} MB
β€’ ⚑ Approach: Simplified document-based retrieval

"""
            
            # Add Groq integration info
            if has_groq:
                groq_info = stats['groq_integration']
                system_info += f"""
**🤖 Groq Integration** (FREE):
• Model: {groq_info['groq_model']}
• Status: {groq_info['groq_api_status']} ✅
• LLM Reasoning: {groq_info['llm_reasoning']}
• Daily Limit: 5,000 tokens (FREE tier)

"""
            else:
                system_info += """
**⚠️ Groq Integration**: Not available
• Get free API key: https://console.groq.com/keys
• Enables advanced medical reasoning
• 5,000 free tokens/day

"""
            
            # Add content distribution
            system_info += "**Content Distribution**:\n"
            for content_type, count in stats['vector_store']['content_type_distribution'].items():
                percentage = (count / stats['vector_store']['total_chunks']) * 100
                content_info = content_type.replace('_', ' ').title()
                system_info += f"β€’ {content_info}: {count:,} chunks ({percentage:.1f}%)\n"
            
            return system_info
            
        except Exception as e:
            return f"❌ **Error retrieving system info**: {str(e)}"
    
    def get_session_stats(self) -> str:
        """Get session statistics for display"""
        if self.session_stats["queries_processed"] == 0:
            return "πŸ“ˆ **Session Statistics**: No queries processed yet"
        
        avg_response_time = self.session_stats["total_response_time"] / self.session_stats["queries_processed"]
        llm_percentage = (self.session_stats["llm_responses"] / self.session_stats["queries_processed"]) * 100
        
        return f"""
📈 **Session Statistics**

🕐 **Session Started**: {self.session_stats["session_start"]}
📝 **Queries Processed**: {self.session_stats["queries_processed"]}
⚡ **Avg Response Time**: {avg_response_time:.2f} seconds
🎯 **Avg Confidence**: {self.session_stats["avg_confidence"]:.1%}

**Response Methods**:
🤖 **AI Reasoning**: {self.session_stats["llm_responses"]} ({llm_percentage:.1f}%)
📋 **Template-based**: {self.session_stats["template_responses"]} ({100-llm_percentage:.1f}%)
"""
    
    def clear_chat(self) -> Tuple[List, str]:
        """Clear chat history"""
        self.chat_history = []
        return [], ""
    
    def get_example_queries(self) -> List[str]:
        """Get example medical queries"""
        return [
            "What is the dosage of magnesium sulfate for preeclampsia?",
            "How to manage postpartum hemorrhage emergency?",
            "Normal fetal heart rate during labor monitoring?",
            "Management protocol for breech delivery?",
            "Antenatal care schedule for high-risk pregnancies?",
            "Signs and symptoms of preeclampsia?",
            "When to perform cesarean delivery?",
            "Postpartum care guidelines for new mothers?"
        ]

def create_groq_medical_chatbot_interface():
    """Create the main Gradio interface"""
    
    # Initialize chatbot
    chatbot = GroqMedicalChatbot()
    
    # Custom CSS for medical theme
    css = """
    .gradio-container {
        font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    }
    .main-header {
        text-align: center;
        color: #2c5aa0;
        font-size: 2.2em;
        font-weight: bold;
        margin-bottom: 20px;
        text-shadow: 2px 2px 4px rgba(0,0,0,0.1);
    }
    .description {
        text-align: center;
        font-size: 1.1em;
        color: #555;
        margin-bottom: 25px;
        padding: 15px;
        background: rgba(255,255,255,0.9);
        border-radius: 10px;
        box-shadow: 0 2px 10px rgba(0,0,0,0.1);
    }
    .tab-content {
        background: white;
        border-radius: 15px;
        padding: 20px;
        box-shadow: 0 4px 20px rgba(0,0,0,0.1);
    }
    .medical-disclaimer {
        background: #fff3cd;
        border: 1px solid #ffeaa7;
        border-radius: 8px;
        padding: 12px;
        margin: 10px 0;
        font-size: 0.9em;
        color: #856404;
    }
    """
    
    with gr.Blocks(css=css, title="Sri Lankan Maternal Health Assistant") as interface:
        
        # Header
        gr.HTML("""
        <div class="main-header">
            πŸ₯ Sri Lankan Maternal Health Assistant v2.0
        </div>
        <div class="description">
            <strong>🤖 Powered by FREE Groq Cloud API + Advanced Medical Reasoning</strong><br/>
            Professional medical information based on Sri Lankan maternal health guidelines.<br/>
            <em>Enhanced with Llama 3.1-8B for intelligent clinical responses</em>
        </div>
        """)
        
        with gr.Tabs():
            # Main Chat Interface
            with gr.Tab("πŸ’¬ Medical Consultation"):
                with gr.Row():
                    with gr.Column(scale=4):
                        chatbot_interface = gr.Chatbot(
                            label="Medical Assistant",
                            height=500,
                            placeholder="Welcome! Ask me about maternal health guidelines, medications, procedures, or emergency protocols."
                        )
                        
                        with gr.Row():
                            msg = gr.Textbox(
                                label="Your Medical Question",
                                placeholder="E.g., What is the management protocol for preeclampsia?",
                                lines=2,
                                max_lines=5
                            )
                            submit_btn = gr.Button("🩺 Ask", variant="primary", scale=1)
                        
                        with gr.Row():
                            use_llm_checkbox = gr.Checkbox(
                                label="πŸ€– Use AI Reasoning (Groq)",
                                value=True,
                                info="Uncheck for template-based responses"
                            )
                            clear_btn = gr.Button("πŸ—‘οΈ Clear Chat", variant="secondary")
                    
                    with gr.Column(scale=1):
                        gr.HTML("<h3>πŸ“ Example Questions</h3>")
                        example_queries = chatbot.get_example_queries()
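                        # Bind each query via a default argument (x=query) so every button keeps
                        # its own text instead of all closing over the last value of the loop variable.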
                        for query in example_queries:
                            gr.Button(query, size="sm").click(
                                lambda x=query: (x, []), 
                                outputs=[msg, chatbot_interface]
                            )
            
            # System Information
            with gr.Tab("πŸ“Š System Status"):
                with gr.Row():
                    with gr.Column():
                        system_info = gr.Textbox(
                            label="System Information",
                            value=chatbot.get_system_info(),
                            lines=20,
                            max_lines=25,
                            interactive=False
                        )
                        refresh_btn = gr.Button("πŸ”„ Refresh System Info", variant="secondary")
                    
                    with gr.Column():
                        session_stats = gr.Textbox(
                            label="Session Statistics",
                            value=chatbot.get_session_stats(),
                            lines=20,
                            max_lines=25,
                            interactive=False
                        )
                        stats_refresh_btn = gr.Button("📈 Refresh Stats", variant="secondary")
            
            # Setup Instructions
            with gr.Tab("βš™οΈ Setup & Configuration"):
                gr.HTML("""
                <div class="tab-content">
                    <h2>🚀 Quick Setup for FREE Groq Integration</h2>
                    
                    <h3>1. Get Your FREE Groq API Key</h3>
                    <ol>
                        <li>Visit: <a href="https://console.groq.com/keys" target="_blank">https://console.groq.com/keys</a></li>
                        <li>Sign up for a free account</li>
                        <li>Create a new API key</li>
                        <li>Copy the API key</li>
                    </ol>
                    
                    <h3>2. Set Environment Variable</h3>
                    <p><strong>macOS/Linux:</strong></p>
                    <code>export GROQ_API_KEY='your_api_key_here'</code>
                    
                    <p><strong>Windows:</strong></p>
                    <code>set GROQ_API_KEY=your_api_key_here</code>
                    
                    <h3>3. Restart the Application</h3>
                    <p>After setting the API key, restart this application to enable AI reasoning.</p>
                    
                    <h3>🌟 Benefits of Groq Integration</h3>
                    <ul>
                        <li><strong>FREE:</strong> 5,000 tokens per day at no cost</li>
                        <li><strong>Fast:</strong> Ultra-fast inference with Groq's specialized hardware</li>
                        <li><strong>Smart:</strong> Llama 3.1-8B model with advanced medical reasoning</li>
                        <li><strong>Professional:</strong> Structured clinical responses</li>
                    </ul>
                    
                    <div class="medical-disclaimer">
                        <strong>⚠️ Medical Disclaimer:</strong> This system provides educational information only and should not replace professional medical advice. Always consult qualified healthcare providers for clinical decisions.
                    </div>
                </div>
                """)
        
        # Event handlers
        def submit_query(query, history, use_llm):
            return chatbot.process_query(query, history, use_llm)
        
        def refresh_system():
            return chatbot.get_system_info()
        
        def refresh_stats():
            return chatbot.get_session_stats()
        
        def clear_chat_handler():
            return chatbot.clear_chat()
        
        # Connect events
        submit_btn.click(
            submit_query,
            inputs=[msg, chatbot_interface, use_llm_checkbox],
            outputs=[chatbot_interface, msg]
        )
        
        msg.submit(
            submit_query,
            inputs=[msg, chatbot_interface, use_llm_checkbox],
            outputs=[chatbot_interface, msg]
        )
        
        clear_btn.click(
            clear_chat_handler,
            outputs=[chatbot_interface, msg]
        )
        
        refresh_btn.click(
            refresh_system,
            outputs=[system_info]
        )
        
        stats_refresh_btn.click(
            refresh_stats,
            outputs=[session_stats]
        )
    
    return interface

def main():
    """Launch the Groq medical chatbot"""
    print("πŸš€ Starting Groq Medical Chatbot...")
    
    interface = create_groq_medical_chatbot_interface()
    
    # Launch with appropriate settings
    interface.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
        debug=False
    )

if __name__ == "__main__":
    main()