from smolagents import CodeAgent, tool
from ollama import Client
import datetime
import pytz
import yaml
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI

@tool
def calculate(expression: str) -> str:
    """Evaluates a basic arithmetic expression.

    Args:
        expression: The arithmetic expression to evaluate, e.g. "2 * (3 + 4)".
    """
    # Empty builtins restrict eval to plain arithmetic expressions.
    result = eval(expression, {"__builtins__": {}}, {})
    return str(result)

@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """Fetches the current local time in a specified timezone.

    Args:
        timezone: A valid tz database name, e.g. "America/New_York".
    """
    try:
        tz = pytz.timezone(timezone)
        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        return f"Error fetching time for timezone '{timezone}': {str(e)}"

# Tool the agent uses to deliver its final response.
final_answer = FinalAnswerTool()

# Minimal adapter exposing a local Ollama server to smolagents. smolagents
# passes its model a list of chat messages, so generate() flattens lists into
# a single prompt string; the exact model interface varies across smolagents
# versions, so treat this as a sketch.
class OllamaModel:
    def __init__(self, model_name, max_tokens, temperature):
        self.model_name = model_name
        self.max_tokens = max_tokens
        self.temperature = temperature
        # Assumes an Ollama server on its default local port.
        self.client = Client(host='http://localhost:11434')

    def generate(self, prompt, **kwargs):
        # Flatten smolagents-style message dicts into one prompt string.
        if isinstance(prompt, list):
            prompt = "\n".join(
                m["content"] if isinstance(m.get("content"), str)
                else "\n".join(p.get("text", "") for p in m["content"])
                for m in prompt
            )
        response = self.client.generate(
            model=self.model_name,
            prompt=prompt,
            options={
                "num_predict": self.max_tokens,  # cap on generated tokens
                "temperature": self.temperature
            }
        )
        return response['response']

    # Some smolagents versions call the model object directly instead of
    # .generate(), so expose the same method under __call__.
    __call__ = generate
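# Quick smoke test for the adapter: a sketch assuming the Ollama server is
# running locally and `mistral:7b` has already been pulled
# (`ollama pull mistral:7b`). Uncomment to try it:
#
# m = OllamaModel(model_name='mistral:7b', max_tokens=64, temperature=0.0)
# print(m.generate("Reply with the single word: pong"))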

# Initialize model
model = OllamaModel(
    model_name='mistral:7b',  # any chat model pulled into the local Ollama server
    max_tokens=2048,
    temperature=0.5
)

# Load prompt templates
with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)
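# prompt_templates should map smolagents template names (e.g. "system_prompt")
# to template strings; the exact keys required depend on the installed
# smolagents version.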

# Initialize agent
agent = CodeAgent(
    model=model,
    tools=[final_answer, calculate, get_current_time_in_timezone],
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates
)

# Launch UI
GradioUI(agent).launch()
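# Expected setup (a sketch; adjust model names and paths for your machine):
#   ollama serve                 # start the local Ollama server
#   ollama pull mistral:7b       # fetch the model once
#   python app.py                # assuming this file is saved as app.py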