# Hugging Face Space entry point — smolagents CodeAgent with Langfuse/OpenTelemetry tracing.
# (Removed non-code residue "Spaces: Running Running" captured from the Spaces UI.)
import base64
import datetime
import os

import pytz
import requests
import yaml
from pydantic import BaseModel
from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, LiteLLMModel, load_tool, tool

from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI
# --- Langfuse / OpenTelemetry tracing setup --------------------------------
# Build the HTTP Basic credential from the Langfuse key pair.
# NOTE(review): if either env var is unset, os.getenv returns None and this
# encodes the literal string "None:None" — confirm the Space always defines both.
LANGFUSE_PUBLIC_KEY = os.getenv("LANGFUSE_PUBLIC_KEY")
LANGFUSE_SECRET_KEY = os.getenv("LANGFUSE_SECRET_KEY")
LANGFUSE_AUTH=base64.b64encode(f"{LANGFUSE_PUBLIC_KEY}:{LANGFUSE_SECRET_KEY}".encode()).decode()

#os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = "https://cloud.langfuse.com/api/public/otel" # EU data region
os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = "https://us.cloud.langfuse.com/api/public/otel" # US data region
os.environ["OTEL_EXPORTER_OTLP_HEADERS"] = f"Authorization=Basic {LANGFUSE_AUTH}"

# Imported here, after the OTEL_* env vars are set — presumably so the exporter
# picks them up when instantiated below; verify before hoisting to the top.
from opentelemetry.sdk.trace import TracerProvider
from openinference.instrumentation.smolagents import SmolagentsInstrumentor
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.trace.export import SimpleSpanProcessor

# Route every smolagents span synchronously through the OTLP exporter.
trace_provider = TracerProvider()
trace_provider.add_span_processor(SimpleSpanProcessor(OTLPSpanExporter()))
SmolagentsInstrumentor().instrument(tracer_provider=trace_provider)

# Credentials/endpoint for the board-game lookup API used below.
GAME_API_KEY = os.getenv("GAME_API_KEY")
GAME_API_BASE_URL = os.getenv("GAME_API_BASE_URL")
class Game_Response(BaseModel):
    """Structured board-game record returned by boardgame_lookup_tool."""

    name: str
    description: str
    year_published: int
    min_players: int               # minimum supported player count
    max_players: int               # maximum supported player count
    users_rated: int               # number of users who rated the game
    average_rating: float          # raw mean rating (API field 'average')
    bayes_adjusted_average: float  # Bayesian-smoothed rating (API field 'bayes_average')
    complexity: float              # weight/complexity score (API field 'weight')
# NOTE(review): this function is passed to CodeAgent(tools=[...]) below, which
# normally requires a smolagents Tool — it likely lost an `@tool` decorator in
# a paste; confirm and restore if so (`tool` is already imported at the top).
def boardgame_lookup_tool(q: str) -> list[Game_Response]:
    """A tool that fetches information about board games

    Args:
        q: a search term representing part or all of a board game's name or description, prefixed with either "name:" or "description:"
    """
    # BUG FIX: the original referenced an undefined name `search_type`,
    # raising NameError on every call. Derive it from the documented
    # "name:"/"description:" prefix instead.
    prefix, sep, term = q.partition(":")
    if sep and prefix in ("name", "description"):
        # assumes the API expects the bare term in 'q' once the prefix is
        # split out into 'search_type' — TODO confirm against the Game API
        search_type, query = prefix, term
    else:
        search_type, query = "name", q  # no recognized prefix: search by name

    url = f'{GAME_API_BASE_URL}/games'
    headers = {'x-api-key': f'{GAME_API_KEY}'}
    response = requests.get(
        url,
        headers=headers,
        params={'q': query, 'search_type': search_type},
        timeout=30,  # never hang the agent loop on a dead endpoint
    )
    response.raise_for_status()  # surface HTTP errors instead of failing inside .json()
    api_data_list = response.json()
    return [Game_Response(name=api_data['name'],
                          description=api_data['description'],
                          year_published=api_data['year_published'],
                          min_players=api_data['min_players'],
                          max_players=api_data['max_players'],
                          users_rated=api_data['users_rated'],
                          average_rating=api_data['average'],
                          bayes_adjusted_average=api_data['bayes_average'],
                          complexity=api_data['weight']) for api_data in api_data_list]
def get_current_time_in_timezone(timezone: str) -> str:
    """Return a human-readable current-time string for the given timezone.

    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').
    """
    try:
        # Resolve the zone and format "now" in that zone in one pass.
        now = datetime.datetime.now(pytz.timezone(timezone))
        stamp = now.strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {stamp}"
    except Exception as exc:
        # Invalid zone names (and anything else) are reported as text so the
        # agent can read the failure instead of crashing.
        return f"Error fetching time for timezone '{timezone}': {str(exc)}"
final_answer = FinalAnswerTool()

# If the agent does not answer, the model is overloaded; switch models or use
# the Hugging Face endpoint below, which also serves qwen2.5-coder:
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
#model = HfApiModel(
#    max_tokens=2096,
#    temperature=0.5,
#    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',  # may be overloaded
#    custom_role_conversions=None,
#)
#model = LiteLLMModel("groq/qwen-2.5-coder-32b", temperature=0.5)
model = LiteLLMModel("openrouter/qwen/qwen2.5-vl-32b-instruct:free", temperature=0.5)

# Import tool from Hub (disabled):
#image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)

# Load the prompt templates shipped alongside the app.
with open("prompts.yaml", 'r') as prompt_file:
    prompt_templates = yaml.safe_load(prompt_file)
# Assemble the agent: final_answer must always be in the tool list; the
# board-game lookup tool gives the model access to the external game API.
agent = CodeAgent(
    model=model,
    tools=[final_answer, boardgame_lookup_tool],
    max_steps=6,             # cap the reason/act loop
    verbosity_level=1,
    grammar=None,
    planning_interval=None,  # no periodic re-planning step
    name=None,
    description=None,
    prompt_templates=prompt_templates
)

# Serve the agent behind the Gradio chat UI (blocks until shut down).
GradioUI(agent).launch()