naman1102 commited on
Commit
45c7739
·
1 Parent(s): ebec9e2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +45 -45
app.py CHANGED
@@ -15,15 +15,15 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
15
  MODEL_API_URL = "https://api-inference.huggingface.co/models/Qwen/Qwen2.5-Coder-32B-Instruct"
16
  HF_TOKEN = os.getenv("HF_TOKEN") # Make sure to set this environment variable
17
 
18
- class AgentState(TypedDict):
19
- """Type definition for the agent's state."""
20
- question: str
21
- current_step: str
22
- tool_output: str
23
- final_answer: str
24
- history: List[Dict[str, str]]
25
- needs_more_info: bool
26
- search_query: str
27
 
28
  # --- Basic Agent Definition ---
29
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
@@ -63,7 +63,7 @@ class BasicAgent:
63
 
64
  def _analyze_question(self, state: AgentState) -> AgentState:
65
  """Analyze the question and determine the next step."""
66
- prompt = f"""Analyze this question and determine what needs to be done: {state['question']}
67
  Return your analysis in this format:
68
  {{
69
  "needs_calculation": true/false,
@@ -77,35 +77,35 @@ class BasicAgent:
77
  """
78
 
79
  analysis = eval(self._call_llm_api(prompt))
80
- state['needs_more_info'] = analysis.get('needs_search', False)
81
- state['search_query'] = analysis.get('search_query', '')
82
 
83
  if analysis.get('needs_calculation', False):
84
- state['current_step'] = 'calculator'
85
- state['tool_output'] = str(analysis['calculation'])
86
  elif analysis.get('needs_search', False):
87
- state['current_step'] = 'search'
88
  else:
89
- state['current_step'] = 'final_answer'
90
 
91
  return state
92
 
93
  def _use_calculator(self, state: AgentState) -> AgentState:
94
  """Use the calculator tool."""
95
  try:
96
- result = self.calculator.invoke({"input": eval(state['tool_output'])})
97
- state['history'].append({
98
  'step': 'calculator',
99
- 'input': state['tool_output'],
100
  'output': str(result['output'].result)
101
  })
102
- state['current_step'] = 'final_answer'
103
  except Exception as e:
104
- state['history'].append({
105
  'step': 'calculator_error',
106
  'error': str(e)
107
  })
108
- state['current_step'] = 'final_answer'
109
  return state
110
 
111
  def _use_search(self, state: AgentState) -> AgentState:
@@ -113,31 +113,31 @@ class BasicAgent:
113
  try:
114
  result = self.search_tool.invoke({
115
  "input": {
116
- "query": state['search_query'],
117
  "max_results": 3
118
  }
119
  })
120
- state['history'].append({
121
  'step': 'search',
122
- 'query': state['search_query'],
123
  'results': [str(r) for r in result['output'].results]
124
  })
125
- state['needs_more_info'] = False
126
- state['current_step'] = 'final_answer'
127
  except Exception as e:
128
- state['history'].append({
129
  'step': 'search_error',
130
  'error': str(e)
131
  })
132
- state['current_step'] = 'final_answer'
133
  return state
134
 
135
  def _generate_final_answer(self, state: AgentState) -> AgentState:
136
  """Generate the final answer based on all gathered information."""
137
  history_str = "\n".join([f"{h['step']}: {h.get('output', h.get('results', h.get('error', '')))}"
138
- for h in state['history']])
139
 
140
- prompt = f"""Based on the following information and history, provide a final answer to the question: {state['question']}
141
 
142
  History of steps taken:
143
  {history_str}
@@ -145,7 +145,7 @@ class BasicAgent:
145
  Provide a clear, concise answer that addresses the original question.
146
  """
147
 
148
- state['final_answer'] = self._call_llm_api(prompt)
149
  return state
150
 
151
  def _create_workflow(self) -> Graph:
@@ -167,11 +167,11 @@ class BasicAgent:
167
 
168
  # Define conditional edges
169
  def router(state: AgentState) -> str:
170
- if state['current_step'] == 'calculator':
171
  return 'calculator'
172
- elif state['current_step'] == 'search':
173
  return 'search'
174
- elif state['current_step'] == 'final_answer':
175
  return 'final_answer'
176
  return 'analyze'
177
 
@@ -197,19 +197,19 @@ class BasicAgent:
197
 
198
  try:
199
  # Initialize the state
200
- initial_state = {
201
- "question": question,
202
- "current_step": "analyze",
203
- "tool_output": "",
204
- "final_answer": "",
205
- "history": [],
206
- "needs_more_info": False,
207
- "search_query": ""
208
- }
209
 
210
  # Run the workflow
211
  final_state = self.workflow.invoke(initial_state)
212
- return final_state['final_answer']
213
 
214
  except Exception as e:
215
  print(f"Error in agent processing: {e}")
 
15
  MODEL_API_URL = "https://api-inference.huggingface.co/models/Qwen/Qwen2.5-Coder-32B-Instruct"
16
  HF_TOKEN = os.getenv("HF_TOKEN") # Make sure to set this environment variable
17
 
18
+ class AgentState(BaseModel):
19
+ """Schema for the agent's state."""
20
+ question: str = Field(..., description="The original question")
21
+ current_step: str = Field(default="analyze", description="Current step in the workflow")
22
+ tool_output: str = Field(default="", description="Output from the last tool used")
23
+ final_answer: str = Field(default="", description="The final answer to be returned")
24
+ history: List[Dict[str, str]] = Field(default_factory=list, description="History of operations performed")
25
+ needs_more_info: bool = Field(default=False, description="Whether more information is needed")
26
+ search_query: str = Field(default="", description="Current search query if any")
27
 
28
  # --- Basic Agent Definition ---
29
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
 
63
 
64
  def _analyze_question(self, state: AgentState) -> AgentState:
65
  """Analyze the question and determine the next step."""
66
+ prompt = f"""Analyze this question and determine what needs to be done: {state.question}
67
  Return your analysis in this format:
68
  {{
69
  "needs_calculation": true/false,
 
77
  """
78
 
79
  analysis = eval(self._call_llm_api(prompt))
80
+ state.needs_more_info = analysis.get('needs_search', False)
81
+ state.search_query = analysis.get('search_query', '')
82
 
83
  if analysis.get('needs_calculation', False):
84
+ state.current_step = 'calculator'
85
+ state.tool_output = str(analysis['calculation'])
86
  elif analysis.get('needs_search', False):
87
+ state.current_step = 'search'
88
  else:
89
+ state.current_step = 'final_answer'
90
 
91
  return state
92
 
93
  def _use_calculator(self, state: AgentState) -> AgentState:
94
  """Use the calculator tool."""
95
  try:
96
+ result = self.calculator.invoke({"input": eval(state.tool_output)})
97
+ state.history.append({
98
  'step': 'calculator',
99
+ 'input': state.tool_output,
100
  'output': str(result['output'].result)
101
  })
102
+ state.current_step = 'final_answer'
103
  except Exception as e:
104
+ state.history.append({
105
  'step': 'calculator_error',
106
  'error': str(e)
107
  })
108
+ state.current_step = 'final_answer'
109
  return state
110
 
111
  def _use_search(self, state: AgentState) -> AgentState:
 
113
  try:
114
  result = self.search_tool.invoke({
115
  "input": {
116
+ "query": state.search_query,
117
  "max_results": 3
118
  }
119
  })
120
+ state.history.append({
121
  'step': 'search',
122
+ 'query': state.search_query,
123
  'results': [str(r) for r in result['output'].results]
124
  })
125
+ state.needs_more_info = False
126
+ state.current_step = 'final_answer'
127
  except Exception as e:
128
+ state.history.append({
129
  'step': 'search_error',
130
  'error': str(e)
131
  })
132
+ state.current_step = 'final_answer'
133
  return state
134
 
135
  def _generate_final_answer(self, state: AgentState) -> AgentState:
136
  """Generate the final answer based on all gathered information."""
137
  history_str = "\n".join([f"{h['step']}: {h.get('output', h.get('results', h.get('error', '')))}"
138
+ for h in state.history])
139
 
140
+ prompt = f"""Based on the following information and history, provide a final answer to the question: {state.question}
141
 
142
  History of steps taken:
143
  {history_str}
 
145
  Provide a clear, concise answer that addresses the original question.
146
  """
147
 
148
+ state.final_answer = self._call_llm_api(prompt)
149
  return state
150
 
151
  def _create_workflow(self) -> Graph:
 
167
 
168
  # Define conditional edges
169
  def router(state: AgentState) -> str:
170
+ if state.current_step == 'calculator':
171
  return 'calculator'
172
+ elif state.current_step == 'search':
173
  return 'search'
174
+ elif state.current_step == 'final_answer':
175
  return 'final_answer'
176
  return 'analyze'
177
 
 
197
 
198
  try:
199
  # Initialize the state
200
+ initial_state = AgentState(
201
+ question=question,
202
+ current_step="analyze",
203
+ tool_output="",
204
+ final_answer="",
205
+ history=[],
206
+ needs_more_info=False,
207
+ search_query=""
208
+ )
209
 
210
  # Run the workflow
211
  final_state = self.workflow.invoke(initial_state)
212
+ return final_state.final_answer
213
 
214
  except Exception as e:
215
  print(f"Error in agent processing: {e}")