Commit: agent ready to start

Files changed:
- .gitignore +4 -1
- agent.py +12 -5
- app.py +3 -1
- config.py +8 -1
- flow.py +18 -0
- logger.py +33 -0
- nodes.py +103 -0
- requirements.txt +3 -1
- utils.py +20 -1
.gitignore
CHANGED
@@ -171,4 +171,7 @@ cython_debug/
 .ruff_cache/
 
 # PyPI configuration file
-.pypirc
+.pypirc
+
+# agent's log files
+log.txt
agent.py
CHANGED
@@ -1,18 +1,25 @@
 
+from flow import createFlow
+from logger import Logger
+import config
+
 from assignment_utils import getQuestionByPos
 
 
 class Agent:
-    def __init__(self):
+    def __init__(self, logger):
+        self.logger = logger
 
     def invoke(self, question):
+        flow = createFlow(self.logger)
+        shared = { "question": question }
+        flow.run(shared)
+        return shared["answer"]
 
 
 if __name__ == "__main__":
+    logger = Logger(config.logLevel, config.logFile)
+    agent = Agent(logger)
     question = getQuestionByPos(0)
     print(question)
     response = agent.invoke(question['question'])
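
The new invoke() shows the contract this commit adopts: the caller seeds a plain shared dict, the flow's nodes read and write it, and the result comes back under shared["answer"]. A minimal sketch of that contract with a hypothetical stand-in flow (EchoFlow is illustrative only, not part of the commit):

# Hypothetical stand-in for the PocketFlow flow, showing the shared-dict contract.
class EchoFlow:
    def run(self, shared):
        # A real flow would route through the Decide/Search/Answer nodes.
        shared["answer"] = f"echo: {shared['question']}"

shared = {"question": "What is 2 + 2?"}
EchoFlow().run(shared)
print(shared["answer"])  # -> echo: What is 2 + 2?
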
app.py
CHANGED
@@ -6,6 +6,7 @@ import pandas as pd
 
 import config
 from agent import Agent
+from logger import Logger
 
 
 def run_and_submit_all( profile: gr.OAuthProfile | None):

@@ -25,7 +26,8 @@
 
     # 1. Instantiate Agent ( modify this part to create your agent)
     try:
+        logger = Logger(config.logLevel, config.logFile)
+        agent = Agent(logger)
     except Exception as e:
        print(f"Error instantiating agent: {e}")
        return f"Error initializing agent: {e}", None
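
For context, the rest of the course template (unchanged here, so not shown in the diff) fetches the question list and calls the agent once per task before posting the results to config.submitUrl. Roughly like the sketch below, where the payload field names are my assumption about the scoring API, not something in this commit:

import requests
import config

def collect_answers(agent):
    # "task_id", "question" and "submitted_answer" are assumed field names.
    questions = requests.get(config.questionsUrl, timeout=15).json()
    return [
        {"task_id": q["task_id"], "submitted_answer": agent.invoke(q["question"])}
        for q in questions
    ]
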
config.py
CHANGED
@@ -1,4 +1,11 @@
 
 baseApiUrl = "https://agents-course-unit4-scoring.hf.space"
 questionsUrl = f"{baseApiUrl}/questions"
-submitUrl = f"{baseApiUrl}/submit"
+submitUrl = f"{baseApiUrl}/submit"
+
+runLocal = True
+localModel = "llama3.2"
+hfModel = "(tbd)"
+
+logLevel = 'DEBUG'
+logFile = 'log.txt'
flow.py
ADDED
@@ -0,0 +1,18 @@
+
+from pocketflow import Flow
+
+from nodes import Decide, Search, Answer
+
+def createFlow(logger):
+    decide = Decide()
+    decide.setLogger(logger)
+    search = Search()
+    search.setLogger(logger)
+    answer = Answer()
+    answer.setLogger(logger)
+
+    decide - "search" >> search
+    decide - "answer" >> answer
+    search - "decide" >> decide
+
+    return Flow(start=decide)
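
The - and >> operators are PocketFlow's transition syntax (as I read the pocketflow API): a - "act" >> b wires node b to run whenever a.post() returns the string "act". Here that yields the loop decide -> search -> decide -> ... -> answer, since Decide.post() returns either "search" or "answer" and Search.post() always returns "decide". Driving the flow directly mirrors agent.py:

from flow import createFlow
from logger import Logger
import config

flow = createFlow(Logger(config.logLevel, config.logFile))
shared = {"question": "What is the capital of Burkina Faso?"}
flow.run(shared)           # loops decide -> search until Decide picks "answer"
print(shared["answer"])
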
logger.py
ADDED
@@ -0,0 +1,33 @@
+
+from datetime import datetime
+
+import config
+
+
+class Logger:
+    def __init__(self, logLevelName, logFile):
+        self.logLevels = {'DEBUG': 10, 'INFO': 20, 'ERROR': 30, 'CRITICAL': 40}
+        self.logLevel = self.logLevels[logLevelName]
+        self.logFile = logFile
+
+    def debug(self, message):
+        if self.logLevel <= self.logLevels['DEBUG']:
+            self.__log(f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')} DEBUG {message}")
+
+    def info(self, message):
+        if self.logLevel <= self.logLevels['INFO']:
+            self.__log(f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')} INFO {message}")
+
+    def error(self, message):
+        if self.logLevel <= self.logLevels['ERROR']:
+            self.__log(f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')} ERROR {message}")
+
+    def critical(self, message):
+        if self.logLevel <= self.logLevels['CRITICAL']:
+            self.__log(f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')} ** CRITICAL ** {message}")
+
+    def __log(self, message):
+        print(message)
+        if self.logFile:
+            with open(self.logFile, "a") as fh:
+                fh.write(message + "\n")
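
A message is emitted when the configured threshold is at or below the message's level: DEBUG (10) logs everything, while ERROR (30) silences debug() and info(). A quick illustration (passing None skips the log file, since logFile is falsy):

from logger import Logger

log = Logger('ERROR', None)  # threshold 30, print only
log.debug("hidden")          # 30 <= 10 is False -> suppressed
log.error("shown")           # 30 <= 30 is True  -> printed with a timestamp
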
nodes.py
ADDED
@@ -0,0 +1,103 @@
+
+import yaml
+from pocketflow import Node
+
+from utils import callLLM, callWebSearch
+
+class Decide(Node):
+    def setLogger(self, logger):
+        self.logger = logger
+
+    def prep(self, shared):
+        context = shared.get("context", "No previous search")
+        question = shared["question"]
+        return question, context
+
+    def exec(self, inputs):
+        question, context = inputs
+        prompt = f"""
+### CONTEXT
+You are a research assistant that can search the web.
+Question: {question}
+Previous research: {context}
+
+### ACTION SPACE
+[1] search
+Description: Look up more information on the web
+Parameters:
+- query (str): what to search for
+
+[2] answer
+Description: Answer the question with current knowledge
+Parameters:
+- answer (str): final answer to the question
+
+### NEXT ACTION
+Decide the next action based on the context and available actions.
+Return your response in the following format:
+```yaml
+thinking: |
+    <your step-by-step reasoning process>
+action: search OR answer
+reason: <why you chose this action>
+search_query: <specific search query if action is search>
+```
+IMPORTANT: Make sure to:
+1. Use proper indentation (4 spaces) for all multi-line fields
+2. Use the | character for multi-line text fields
+3. Keep single-line fields without the | character
+"""
+        response = callLLM(prompt)
+        self.logger.debug(f"=== CALLING LLM\n{prompt}\n=== LLM RESPONSE\n{response}\n\n==========\n\n")
+        yaml_str = response.replace("|", "")  # .split("```yaml")[1].split("```")[0].strip()
+        decision = yaml.safe_load(yaml_str)
+        return decision
+
+    def post(self, shared, prep_res, exec_res):
+        if exec_res["action"] == "search":
+            shared["search_query"] = exec_res["search_query"]
+        return exec_res["action"]
+
+
+class Search(Node):
+    def setLogger(self, logger):
+        self.logger = logger
+
+    def prep(self, shared):
+        return shared["search_query"]
+
+    def exec(self, search_query):
+        results = callWebSearch(search_query)
+        self.logger.debug(f"*** SEARCHING\n{search_query}\n*** SEARCH RESULTS\n{results}\n\n**********\n\n")
+        return results
+
+    def post(self, shared, prep_res, exec_res):
+        previous = shared.get("context", "")
+        shared["context"] = f"{previous}\n\nSEARCH: {shared['search_query']}\n\nRESULTS: {exec_res}"
+        return "decide"
+
+
+class Answer(Node):
+    def setLogger(self, logger):
+        self.logger = logger
+
+    def prep(self, shared):
+        return shared["question"], shared.get("context", "")
+
+    def exec(self, inputs):
+        question, context = inputs
+        prompt = f'''
+### CONTEXT
+Based on the following information, answer the question.
+Question: {question}
+Research: {context}
+
+### YOUR ANSWER
+Provide a comprehensive answer using research results.
+'''
+        response = callLLM(prompt)
+        self.logger.debug(f"### CALLING LLM\n{prompt}\n### LLM RESPONSE\n{response}\n\n##########\n\n")
+        return response
+
+    def post(self, shared, prep_res, exec_res):
+        shared["answer"] = exec_res
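
Decide.exec() asks the model for a fenced YAML block, then flattens any | block scalars with replace("|", "") before yaml.safe_load (a stricter fence-stripping variant is left commented out). A well-formed reply parses like this (the sample text is made up for illustration):

import yaml

sample = """
thinking: |
    The question needs current facts, so search first.
action: search
reason: no previous research is available yet
search_query: current president of France
"""

decision = yaml.safe_load(sample.replace("|", ""))
print(decision["action"])        # -> search
print(decision["search_query"])  # -> current president of France

Note this only works when the model omits the requested ```yaml fences; a fenced reply would need the commented-out split-based stripping instead.
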
requirements.txt
CHANGED
@@ -5,4 +5,6 @@ langgraph
 langchain_openai
 langchain_huggingface
 langchain-community
-duckduckgo-search
+duckduckgo-search
+pocketflow
+#ollama
utils.py
CHANGED
@@ -1,13 +1,32 @@
 
+import config
+if config.runLocal:
+    from ollama import chat as OllamaChat
+
 from langchain_community.tools import DuckDuckGoSearchRun
 
 
+def callWebSearch(query):
+    return DuckDuckGo(query)
+
+def callLLM(query):
+    if config.runLocal:
+        return callLocalLLM(query)
+    else:
+        return callHfLLM(query)
+
 def DuckDuckGo(query):
     search_tool = DuckDuckGoSearchRun()
     results = search_tool.invoke(query)
     return results
 
+def callLocalLLM(query):
+    response = OllamaChat(model=config.localModel, messages=[{'role': 'user', 'content': query}])
+    return response['message']['content']
+
+def callHfLLM(query):
+    return f"No way to call {config.hfModel} yet"
 
 if __name__ == "__main__":
+    response = callWebSearch("who is the president of France")
     print(response)
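
With runLocal = True, callLLM goes through Ollama's Python client, whose chat() response exposes the generated text at ['message']['content']; the guarded import means the deployed Space never needs the ollama package once the Hugging Face path is implemented. A quick local smoke test (assumes an Ollama server is running and llama3.2 has been pulled):

from utils import callLLM, callWebSearch

print(callLLM("In one sentence, what is an AI agent?"))
print(callWebSearch("current president of France"))
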