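"""Mentor provider: chains a Hugging Face endpoint (the "expert") with a local
Ollama model (the "mentor") that critiques the expert's answer and appends
structured coaching insights to the final response."""
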
import time
import logging
from typing import List, Dict, Optional, Union
from src.llm.base_provider import LLMProvider
from src.llm.hf_provider import HuggingFaceProvider
from src.llm.ollama_provider import OllamaProvider
from core.session import session_manager
from utils.config import config

logger = logging.getLogger(__name__)

class MentorProvider(LLMProvider):
    """Mentor provider that uses HF as expert and Ollama as mentor/coach"""
    
    def __init__(self, model_name: str, timeout: int = 120, max_retries: int = 2):
        super().__init__(model_name, timeout, max_retries)
        self.hf_provider = None
        self.ollama_provider = None
        self.conversation_analyzer = ConversationAnalyzer()
        
        # Initialize providers
        try:
            if config.hf_token:
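                # Note: the expert model is pinned to a specific HF checkpoint
                # here rather than using the model_name passed to __init__.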
                self.hf_provider = HuggingFaceProvider(
                    model_name="DavidAU/OpenAi-GPT-oss-20b-abliterated-uncensored-NEO-Imatrix-gguf",
                    timeout=120
                )
        except Exception as e:
            logger.warning(f"Failed to initialize HF provider: {e}")
            
        try:
            if config.ollama_host:
                self.ollama_provider = OllamaProvider(
                    model_name=config.local_model_name,
                    timeout=60
                )
        except Exception as e:
            logger.warning(f"Failed to initialize Ollama provider: {e}")

    def generate(self, prompt: str, conversation_history: List[Dict]) -> Optional[str]:
        """Generate response using mentor approach"""
        try:
            # Step 1: Get expert response from HF Endpoint
            hf_response = self._get_expert_response(prompt, conversation_history)
            
            if not hf_response:
                raise Exception("HF Endpoint expert failed to provide response")
            
            # Step 2: Get mentor analysis from Ollama
            mentor_insights = self._get_mentor_analysis(
                prompt, 
                hf_response, 
                conversation_history
            )
            
            # Step 3: Combine expert response with mentor insights
            combined_response = self._combine_responses(hf_response, mentor_insights)
            
            # Step 4: Store interaction for learning
            self._store_interaction(prompt, hf_response, mentor_insights)
            
            return combined_response
            
        except Exception as e:
            logger.error(f"Mentor generation failed: {e}")
            
            # Fall back to the expert alone (effectively a single retry of HF)
            if self.hf_provider:
                try:
                    logger.info("Falling back to HF Endpoint only")
                    return self.hf_provider.generate(prompt, conversation_history)
                except Exception as fallback_error:
                    logger.error(f"HF fallback also failed: {fallback_error}")
            
            raise Exception(f"All providers failed: {str(e)}")

    def stream_generate(self, prompt: str, conversation_history: List[Dict]) -> Optional[Union[str, List[str]]]:
        """Stream response using mentor approach"""
        try:
            # Stream the HF expert response directly; mentor insights are not
            # appended in streaming mode.
            if self.hf_provider:
                return self.hf_provider.stream_generate(prompt, conversation_history)
            raise RuntimeError("No HF provider available for streaming")
        except Exception as e:
            logger.error(f"Mentor stream generation failed: {e}")
            raise

    def _get_expert_response(self, prompt: str, conversation_history: List[Dict]) -> Optional[str]:
        """Get expert response from HF Endpoint"""
        if not self.hf_provider:
            return None
            
        try:
            logger.info("🤖 Getting expert response from HF Endpoint...")
            response = self.hf_provider.generate(prompt, conversation_history)
            logger.info("✅ HF Endpoint expert response received")
            return response
        except Exception as e:
            logger.error(f"HF Endpoint expert failed: {e}")
            return None

    def _get_mentor_analysis(self, user_prompt: str, hf_response: str, conversation_history: List[Dict]) -> Dict:
        """Get mentor analysis and suggestions from Ollama"""
        if not self.ollama_provider:
            return {}
            
        try:
            logger.info("🐱 Getting mentor analysis from Ollama...")
            
            # Create mentor prompt for analysis
            mentor_prompt = self._create_mentor_prompt(user_prompt, hf_response, conversation_history)
            
            # Get mentor insights
            mentor_response = self.ollama_provider.generate(mentor_prompt, [])
            
            # Parse mentor response into structured insights
            insights = self.conversation_analyzer.parse_mentor_response(mentor_response)
            
            logger.info("✅ Ollama mentor analysis completed")
            return insights
            
        except Exception as e:
            logger.warning(f"Ollama mentor analysis failed: {e}")
            return {}

    def _create_mentor_prompt(self, user_prompt: str, hf_response: str, conversation_history: List[Dict]) -> str:
        """Create prompt for Ollama mentor to analyze interaction"""
        conversation_context = "\n".join([
            f"{msg['role']}: {msg['content']}" 
            for msg in conversation_history[-5:]  # Last 5 messages for context
        ])
        
        prompt = f"""
You are an AI mentor and conversation analyst. Your job is to analyze the interaction between a user and an expert AI, then provide insightful guidance.

ANALYZE THIS INTERACTION:
User Question: "{user_prompt}"
Expert Response: "{hf_response}"

Recent Conversation Context:
{conversation_context}

PROVIDE YOUR ANALYSIS IN THIS FORMAT:

<thinking_analysis>
Analyze the expert's reasoning approach, depth of analysis, and problem-solving methodology.
</thinking_analysis>

<goal_progress>
Assess how well this response advances toward the user's likely goals based on conversation history.
</goal_progress>

<follow_up_suggestions>
Provide 2-3 thoughtful follow-up questions or research directions that would deepen understanding.
</follow_up_suggestions>

<data_gathering>
Suggest what additional information or data would be valuable to collect.
</data_gathering>

<critical_insights>
Highlight any key insights, potential blind spots, or areas needing further exploration.
</critical_insights>

Keep your analysis concise but insightful. Focus on helping the user achieve their goals through better questioning and information gathering.
"""
        return prompt

    def _combine_responses(self, hf_response: str, mentor_insights: Dict) -> str:
        """Combine expert response with mentor insights"""
        if not mentor_insights:
            return hf_response

        # Render each insight section the mentor actually produced,
        # in a fixed display order.
        section_labels = [
            ('thinking_analysis', '🧠 **Thinking Analysis**'),
            ('goal_progress', '🎯 **Goal Progress**'),
            ('follow_up_suggestions', '🤔 **Follow-up Suggestions**'),
            ('data_gathering', '📋 **Data to Gather**'),
            ('critical_insights', '💡 **Critical Insights**'),
        ]
        rendered = [
            f"{label}\n{mentor_insights[key]}"
            for key, label in section_labels
            if mentor_insights.get(key)
        ]
        insights_section = "\n\n--- 🎓 Mentor Insights ---\n\n" + "\n\n".join(rendered)

        return f"{hf_response}{insights_section}"

    def _store_interaction(self, user_prompt: str, hf_response: str, mentor_insights: Dict):
        """Store interaction for learning and pattern recognition"""
        try:
            user_session = session_manager.get_session("default_user")
            interaction_log = user_session.get("interaction_log", [])
            
            # Create interaction record
            interaction = {
                "timestamp": time.time(),
                "user_prompt": user_prompt,
                "expert_response": hf_response,
                "mentor_insights": mentor_insights,
                "conversation_length": len(interaction_log)
            }
            
            # Keep last 20 interactions
            interaction_log.append(interaction)
            if len(interaction_log) > 20:
                interaction_log = interaction_log[-20:]
                
            user_session["interaction_log"] = interaction_log
            session_manager.update_session("default_user", user_session)
            
        except Exception as e:
            logger.warning(f"Failed to store interaction: {e}")

class ConversationAnalyzer:
    """Analyzes conversation patterns and provides insights"""
    
    def parse_mentor_response(self, mentor_response: str) -> Dict:
        """Parse mentor response into structured insights"""
        if not mentor_response:
            return {}
            
        insights = {}
        
        # Extract sections using simple parsing
        sections = {
            'thinking_analysis': self._extract_section(mentor_response, 'thinking_analysis'),
            'goal_progress': self._extract_section(mentor_response, 'goal_progress'),
            'follow_up_suggestions': self._extract_section(mentor_response, 'follow_up_suggestions'),
            'data_gathering': self._extract_section(mentor_response, 'data_gathering'),
            'critical_insights': self._extract_section(mentor_response, 'critical_insights')
        }
        
        # Keep only sections that came back non-empty (values are already
        # stripped by _extract_section)
        for key, value in sections.items():
            if value:
                insights[key] = value
                    
        return insights
    
    def _extract_section(self, text: str, section_name: str) -> Optional[str]:
        """Extract specific section from mentor response"""
        start_tag = f"<{section_name}>"
        end_tag = f"</{section_name}>"
        
        start_idx = text.find(start_tag)
        if start_idx == -1:
            return None
            
        start_idx += len(start_tag)
        end_idx = text.find(end_tag, start_idx)
        
        if end_idx == -1:
            return None
            
        return text[start_idx:end_idx].strip()

# Global instance
mentor_provider = MentorProvider("mentor_model")
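
# Minimal usage sketch (illustrative; assumes config.hf_token and
# config.ollama_host are set and both backends are reachable):
if __name__ == "__main__":
    history = [{"role": "user", "content": "How should I profile a slow Python service?"}]
    print(mentor_provider.generate(history[-1]["content"], history))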