"""Mistral LLM tools for CrewAI.

Each tool wraps a MistralModel client and exposes it to CrewAI agents
through the BaseTool interface.
"""

from typing import Optional

from crewai.tools import BaseTool
from pydantic import PrivateAttr

from models.mistral_model import MistralModel

|
class MistralChatTool(BaseTool):
    name: str = "mistral_chat"
    description: str = "Chat with Mistral AI for intelligent responses"
    # BaseTool is a Pydantic model, so the client is kept as a private
    # attribute instead of being assigned as an undeclared instance field.
    _model: MistralModel = PrivateAttr()

    def __init__(self):
        super().__init__()
        self._model = MistralModel()

    def _run(self, prompt: str, context: Optional[dict] = None) -> str:
        """Generate a response with Mistral, optionally grounded in context."""
        if context:
            full_prompt = f"""
            Context: {context}

            User Query: {prompt}

            Provide a thoughtful, compassionate response.
            """
        else:
            full_prompt = prompt

        return self._model.generate(full_prompt)
|
|
|
class GenerateAdviceTool(BaseTool):
    name: str = "generate_personalized_advice"
    description: str = "Generate personalized advice based on the user's situation"
    _model: MistralModel = PrivateAttr()

    def __init__(self):
        super().__init__()
        self._model = MistralModel()

    def _run(self, user_analysis: dict, wisdom_quotes: list) -> str:
        """Generate personalized advice from a user analysis and supporting quotes."""
        prompt = f"""
        Based on this user analysis:
        - Emotional state: {user_analysis.get('primary_emotion')}
        - Concerns: {user_analysis.get('concerns')}
        - Needs: {user_analysis.get('needs')}

        And these relevant wisdom quotes:
        {wisdom_quotes}

        Generate compassionate, personalized advice that:
        1. Acknowledges their feelings
        2. Offers practical guidance
        3. Includes relevant wisdom
        4. Suggests actionable steps
        5. Maintains hope and encouragement

        Be specific to their situation, not generic.
        """

        return self._model.generate(prompt, max_length=500)
|
|
|
class SummarizeTool(BaseTool):
    name: str = "summarize_conversation"
    description: str = "Summarize a conversation while preserving key insights"
    _model: MistralModel = PrivateAttr()

    def __init__(self):
        super().__init__()
        self._model = MistralModel()

    def _run(self, conversation: list) -> str:
        """Summarize the conversation history."""
        prompt = f"""
        Summarize this coaching conversation:
        {conversation}

        Include:
        1. Main concerns discussed
        2. Key insights shared
        3. Progress made
        4. Next steps suggested

        Keep it concise but meaningful.
        """

        return self._model.generate(prompt, max_length=200)
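

if __name__ == "__main__":
    # Illustrative usage sketch, not part of the module's API: it assumes the
    # `models.mistral_model.MistralModel` backend imported above is configured
    # and reachable. Inside a CrewAI crew, these tools would normally be passed
    # to an Agent via its `tools` parameter rather than invoked directly.
    chat_tool = MistralChatTool()
    print(chat_tool._run("How can I stay consistent with new habits?"))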