errchh committed
Commit 1b04af5 · Parent(s): eb88a24

changed llm
Files changed (2):
  1. __pycache__/agent.cpython-312.pyc +0 -0
  2. agent.py +25 -2

__pycache__/agent.cpython-312.pyc ADDED
Binary file (7.67 kB)
 
agent.py CHANGED
@@ -23,6 +23,7 @@ from langgraph.prebuilt import ToolNode, tools_condition
 # load environment variables
 load_dotenv()
 HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
+print(f"DEBUG: HUGGINGFACEHUB_API_TOKEN = {HUGGINGFACEHUB_API_TOKEN}")
 
 
 # maths tool
@@ -150,24 +151,35 @@ tools = [
 def build_graph():
     # llm
     llm = HuggingFaceEndpoint(
-        repo_id = "microsoft/Phi-4-reasoning-plus",
+        repo_id="deepseek-ai/DeepSeek-R1",
         huggingfacehub_api_token=HUGGINGFACEHUB_API_TOKEN,
     )
+    print(f"DEBUG: llm object = {llm}")
 
     chat = ChatHuggingFace(llm=llm, verbose=False)
+    print(f"DEBUG: chat object = {chat}")
 
     # bind tools to llm
     chat_with_tools = chat.bind_tools(tools)
+    print(f"DEBUG: chat_with_tools object = {chat_with_tools}")
 
     # generate AgentState and Agent graph
     class AgentState(TypedDict):
         messages: Annotated[list[AnyMessage], add_messages]
 
     def assistant(state: AgentState):
+        result = chat_with_tools.invoke(state["messages"])
+        # Ensure the result is always wrapped in a list, even if invoke returns a single message
+        # Add usage information if it's not already present
+        if isinstance(result, AIMessage) and result.usage_metadata is None:
+            # Add dummy usage metadata if none exists
+            result.usage_metadata = {"input_tokens": 0, "output_tokens": 0, "total_tokens": 0}
+
         return {
-            "messages": [chat_with_tools.invoke(state["messages"])],
+            "messages": [result]
         }
 
+
     # build graph
     builder = StateGraph(AgentState)
 
@@ -188,3 +200,14 @@ def build_graph():
         }
     )
     builder.add_edge("tools", "assistant")
+
+    return builder.compile()
+
+
+if __name__ == "__main__":
+    question = "When was a picture of St. Thomas Aquinas first added to the Wikipedia page on the Principle of double effect?"
+    graph = build_graph()
+    messages = [HumanMessage(content=question)]
+    messages = graph.invoke({"messages": messages})
+    for m in messages["messages"]:
+        m.pretty_print()
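
For reference, below is a minimal, self-contained sketch of roughly what agent.py does after this commit. It is not the repo's exact file: the multiply tool, the add_node/START wiring, and the bare tools_condition conditional edge (the diff only hints at a path-map dict via the closing "}" and ")" context lines) are assumptions reconstructed from the imports visible in the hunk headers, and the question in the __main__ block is swapped for a simpler one.

import os
from typing import Annotated, TypedDict

from dotenv import load_dotenv
from langchain_core.messages import AIMessage, AnyMessage, HumanMessage
from langchain_core.tools import tool
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
from langgraph.graph import START, StateGraph
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode, tools_condition

# load environment variables
load_dotenv()
HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")


@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    # stand-in for the repo's maths tool(s); the real tools list is not shown in the diff
    return a * b


tools = [multiply]


def build_graph():
    # chat model served through the Hugging Face Inference API, now pointing at DeepSeek-R1
    llm = HuggingFaceEndpoint(
        repo_id="deepseek-ai/DeepSeek-R1",
        huggingfacehub_api_token=HUGGINGFACEHUB_API_TOKEN,
    )
    chat = ChatHuggingFace(llm=llm, verbose=False)
    chat_with_tools = chat.bind_tools(tools)

    class AgentState(TypedDict):
        messages: Annotated[list[AnyMessage], add_messages]

    def assistant(state: AgentState):
        result = chat_with_tools.invoke(state["messages"])
        # backfill usage metadata so downstream code that reads it does not trip over None
        if isinstance(result, AIMessage) and result.usage_metadata is None:
            result.usage_metadata = {"input_tokens": 0, "output_tokens": 0, "total_tokens": 0}
        return {"messages": [result]}

    # wire assistant <-> tools into the usual ReAct-style loop
    builder = StateGraph(AgentState)
    builder.add_node("assistant", assistant)
    builder.add_node("tools", ToolNode(tools))
    builder.add_edge(START, "assistant")
    builder.add_conditional_edges("assistant", tools_condition)
    builder.add_edge("tools", "assistant")
    return builder.compile()


if __name__ == "__main__":
    graph = build_graph()
    out = graph.invoke({"messages": [HumanMessage(content="What is 6 * 7?")]})
    for m in out["messages"]:
        m.pretty_print()

One design note: doing the usage_metadata backfill inside the assistant node means every AIMessage that reaches the graph state carries token counts (zeros here), so any later accounting or logging code stays simple even when the endpoint does not report usage.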