Update langgraph_agent.py

langgraph_agent.py  (+13 −3)
@@ -6,6 +6,7 @@ from typing import Dict, List, Union
 
 from PIL import Image as PILImage
 from huggingface_hub import InferenceClient
+
 from langgraph.graph import START, StateGraph, MessagesState
 from langgraph.prebuilt import tools_condition, ToolNode
 from langchain_openai import ChatOpenAI
@@ -15,33 +16,38 @@ from langchain_core.messages import SystemMessage, HumanMessage
 from langchain_google_genai import ChatGoogleGenerativeAI
 from langchain_core.tools import tool
 
-# Correct import for GoogleSearchAPIWrapper
 from langchain_google_community import GoogleSearchAPIWrapper
 
 @tool
 def multiply(a: int, b: int) -> int:
+    """Multiply two integers."""
     return a * b
 
 @tool
 def add(a: int, b: int) -> int:
+    """Add two integers."""
     return a + b
 
 @tool
 def subtract(a: int, b: int) -> int:
+    """Subtract the second integer from the first."""
     return a - b
 
 @tool
 def divide(a: int, b: int) -> float:
+    """Divide first integer by second; error if divisor is zero."""
     if b == 0:
         raise ValueError("Cannot divide by zero.")
     return a / b
 
 @tool
 def modulus(a: int, b: int) -> int:
+    """Return the remainder of dividing first integer by second."""
     return a % b
 
 @tool
 def wiki_search(query: str) -> dict:
+    """Search Wikipedia for a query and return up to 2 documents."""
     try:
         docs = WikipediaLoader(query=query, load_max_docs=2, lang="en").load()
         if not docs:
@@ -55,13 +61,12 @@ def wiki_search(query: str) -> dict:
         print(f"Error in wiki_search tool: {e}")
         return {"wiki_results": f"Error occurred while searching Wikipedia for '{query}'. Details: {str(e)}"}
 
-# Instantiate GoogleSearchAPIWrapper
 search = GoogleSearchAPIWrapper()
 
 @tool
 def google_web_search(query: str) -> str:
+    """Perform a web search (via Google Custom Search) and return results."""
     try:
-        # Use the run method of the GoogleSearchAPIWrapper instance
         return search.run(query)
     except Exception as e:
         print(f"Error in google_web_search tool: {e}")
@@ -69,6 +74,7 @@ def google_web_search(query: str) -> str:
 
 @tool
 def arvix_search(query: str) -> dict:
+    """Search arXiv for a query and return up to 3 paper excerpts."""
     docs = ArxivLoader(query=query, load_max_docs=3).load()
     formatted = "\n\n---\n\n".join(
         f'<Document source="{d.metadata["source"]}"/>\n{d.page_content[:1000]}'
@@ -85,6 +91,7 @@ else:
 
 @tool
 def read_file_content(file_path: str) -> Dict[str, str]:
+    """Reads the content of a file and returns its primary information. For text/code/excel, returns content. For media, returns a prompt to use specific tools."""
     try:
         _, file_extension = os.path.splitext(file_path)
         file_extension = file_extension.lower()
@@ -110,6 +117,7 @@ def read_file_content(file_path: str) -> Dict[str, str]:
 
 @tool
 def python_interpreter(code: str) -> Dict[str, str]:
+    """Executes Python code and returns its standard output. If there's an error during execution, it returns the error message."""
     old_stdout = io.StringIO()
     with contextlib.redirect_stdout(old_stdout):
         try:
@@ -123,6 +131,7 @@ def python_interpreter(code: str) -> Dict[str, str]:
 
 @tool
 def describe_image(image_path: str) -> Dict[str, str]:
+    """Generates a textual description for an image file (JPEG, JPG, PNG) using an image-to-text model from the Hugging Face Inference API. Requires HF_API_TOKEN environment variable to be set."""
     if not HF_INFERENCE_CLIENT:
         return {"error": "Hugging Face API token not configured for image description. Cannot use this tool."}
     try:
@@ -137,6 +146,7 @@ def describe_image(image_path: str) -> Dict[str, str]:
 
 @tool
 def transcribe_audio(audio_path: str) -> Dict[str, str]:
+    """Transcribes an audio file (e.g., MP3) to text using an automatic speech recognition model from the Hugging Face Inference API. Requires HF_API_TOKEN environment variable to be set."""
     if not HF_INFERENCE_CLIENT:
         return {"error": "Hugging Face API token not configured for audio transcription. Cannot use this tool."}
     try:
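Taken together, the diff drops three comments that merely restated the code and gives every @tool a docstring. The docstrings are not cosmetic: LangChain's @tool decorator uses the function's docstring as the tool description sent to the model, and it refuses to wrap a function that has neither a docstring nor an explicit description.

For context, here is a minimal sketch of how tools like these are typically wired into a LangGraph agent, following the imports at the top of the file. The `llm` instantiation, the Gemini model name, and the node names are illustrative assumptions, not part of this diff:

# Minimal sketch (assumed wiring; not shown in this diff) of attaching
# the @tool functions above to a LangGraph agent.
from langgraph.graph import START, StateGraph, MessagesState
from langgraph.prebuilt import tools_condition, ToolNode
from langchain_google_genai import ChatGoogleGenerativeAI

tools = [
    multiply, add, subtract, divide, modulus,
    wiki_search, google_web_search, arvix_search,
    read_file_content, python_interpreter,
    describe_image, transcribe_audio,
]

llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash")  # assumed model choice
llm_with_tools = llm.bind_tools(tools)  # docstrings become the tool descriptions

def assistant(state: MessagesState):
    # Run the tool-aware model over the accumulated message history.
    return {"messages": [llm_with_tools.invoke(state["messages"])]}

builder = StateGraph(MessagesState)
builder.add_node("assistant", assistant)
builder.add_node("tools", ToolNode(tools))
builder.add_edge(START, "assistant")
# tools_condition routes to the "tools" node when the last message
# contains tool calls, and ends the graph otherwise.
builder.add_conditional_edges("assistant", tools_condition)
builder.add_edge("tools", "assistant")
agent = builder.compile()

Note also that `search = GoogleSearchAPIWrapper()` runs at import time, so GOOGLE_API_KEY and GOOGLE_CSE_ID must be present in the environment before the module loads, much as HF_API_TOKEN gates the image and audio tools.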