from crewai.tools import BaseTool
from pydantic import PrivateAttr

from models.tinygpt2_model import TinyGPT2Model



class MistralChatTool(BaseTool):
    name: str = "mistral_chat"
    description: str = "Generate an empathetic AI chat response."
    model_config = {"arbitrary_types_allowed": True}
    _model: TinyGPT2Model = PrivateAttr()

    def __init__(self, config=None):
        super().__init__()
        self._model = TinyGPT2Model()

    def _run(self, prompt: str, context: dict = None) -> str:
        # Prepend conversation context, when provided, so the model sees it alongside the prompt.
        msg = f"Context: {context}\nUser: {prompt}" if context else prompt
        return self._model.generate(msg)

class GenerateAdviceTool(BaseTool):
    name: str = "generate_advice"
    description: str = "Generate personalized advice."
    model_config = {"arbitrary_types_allowed": True}
    _model: TinyGPT2Model = PrivateAttr()

    def __init__(self, config=None):
        super().__init__()
        self._model = TinyGPT2Model()

    def _run(self, user_analysis: dict, wisdom_quotes: list) -> str:
        prompt = f"Advice for: {user_analysis}, with wisdom: {wisdom_quotes}"
        return self._model.generate(prompt, max_length=300)

class SummarizeConversationTool(BaseTool):
    name: str = "summarize_conversation"
    description: str = "Summarize chat with insights and next steps."
    model_config = {"arbitrary_types_allowed": True}
    _model: TinyGPT2Model = PrivateAttr()

    def __init__(self, config=None):
        super().__init__()
        self._model = TinyGPT2Model()

    def _run(self, conversation: list) -> str:
        prompt = f"Summarize: {conversation}"
        return self._model.generate(prompt, max_length=200)

class LLMTools:
    """Convenience bundle exposing the three LLM-backed tools under one object."""

    def __init__(self, config=None):
        self.mistral_chat = MistralChatTool(config)
        self.generate_advice = GenerateAdviceTool(config)
        self.summarize_conversation = SummarizeConversationTool(config)
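

# Minimal usage sketch: exercises each tool directly via its _run method, bypassing any
# agent framework. The inputs below are hypothetical examples, and this assumes
# TinyGPT2Model.generate(prompt, max_length=...) returns a string, as implied by the
# calls above.
if __name__ == "__main__":
    tools = LLMTools()

    # Empathetic chat reply for a user message (hypothetical prompt).
    reply = tools.mistral_chat._run("I'm feeling overwhelmed at work.")

    # Personalized advice from a (hypothetical) analysis dict and quote list.
    advice = tools.generate_advice._run(
        user_analysis={"mood": "stressed", "topic": "work"},
        wisdom_quotes=["This too shall pass."],
    )

    # Summary of the short exchange built above.
    summary = tools.summarize_conversation._run(
        conversation=[
            {"user": "I'm feeling overwhelmed at work."},
            {"assistant": reply},
        ]
    )

    print(reply, advice, summary, sep="\n---\n")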