|
import os |
|
import time |
|
import json |
|
import requests |
|
from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, InferenceClientModel |
|
from tools import duckduckgo_search, wikipedia_search, summarize_text, load_memory, save_memory |
|
|
|
|
|
# Read the Hugging Face API token from the environment.
# Fixes three issues in the original setup:
#  - the token was printed to stdout (secret leak);
#  - a bogus os.getenv('HF_HOME/token') call whose result was discarded;
#  - `token` was assigned the literal string 'HUGGINGFACEHUB_API_TOKEN',
#    so the guard below could never fire and a bogus token reached the model.
token = os.environ.get('HUGGINGFACEHUB_API_TOKEN')

if not token:
    # Message now names the actual variable being read.
    raise ValueError(
        "Imposta la variabile d'ambiente HUGGINGFACEHUB_API_TOKEN"
    )
|
|
|
|
|
# Text-generation model served through the Hugging Face Inference API.
# NOTE(review): run_agent() below branches on isinstance(model,
# InferenceClientModel), which is always False for HfApiModel — confirm
# which model class is actually intended here.
model = HfApiModel(model_id="google/flan-t5-base", token=token)

# Tools the agent is allowed to call: live web search plus Wikipedia lookup.
tools = [DuckDuckGoSearchTool(), wikipedia_search]

# Code-writing agent; generated code may additionally import requests/bs4.
agent = CodeAgent(
    model=model,
    tools=tools,
    additional_authorized_imports=["requests", "bs4"],
)
|
|
|
def run_agent(query: str) -> str:
    """Answer *query* by summarizing DuckDuckGo and Wikipedia results.

    Successful answers are cached via load_memory()/save_memory(), so a
    repeated query is served from the cache without re-searching.  Any
    failure is reported as an error string instead of being raised.

    Args:
        query: The user's question, in natural language.

    Returns:
        The model's answer, a cached answer, a fallback message when the
        model produces an empty response, or an error description.
    """
    try:
        memory = load_memory()
        # Single lookup instead of `in` + indexing.
        cached = memory.get(query)
        if cached is not None:
            return cached

        # Gather raw context from both sources, then compress each so
        # the prompt stays short.
        duck_text = duckduckgo_search(query, max_results=3)
        wiki_text = wikipedia_search(query)
        duck_summary = summarize_text(duck_text, max_length=150)
        wiki_summary = summarize_text(wiki_text, max_length=150)

        prompt = (
            f"Domanda: {query}\n\n"
            f"Informazioni sintetizzate da Wikipedia:\n{wiki_summary}\n\n"
            f"Informazioni sintetizzate da DuckDuckGo:\n{duck_summary}\n\n"
            "Fornisci una risposta sintetica e chiara."
        )

        # InferenceClientModel takes chat-style messages; other model
        # classes take a plain prompt.
        if isinstance(model, InferenceClientModel):
            messages = [
                {"role": "system", "content": "Sei un assistente utile."},
                {"role": "user", "content": prompt},
            ]
            response = model.generate(messages=messages)
        else:
            response = model.generate(prompt=prompt, max_new_tokens=200)

        if not response.strip():
            # Bug fix: do NOT cache the fallback message.  The original
            # stored it in memory, so every later identical query was
            # answered with the fallback instead of retrying the model.
            return "Non ho capito la domanda, per favore riprova."

        # Persist only real answers.
        memory[query] = response
        save_memory(memory)
        return response

    except Exception as e:
        # Top-level boundary: report the failure instead of crashing
        # the caller.
        return f"Errore durante l'esecuzione dell'agent: {str(e)}"