Spaces:
Running
Running
File size: 3,872 Bytes
ecb0096 ccfb934 ecb0096 ccfb934 ecb0096 ccfb934 e097cb4 ccfb934 e097cb4 ccfb934 e097cb4 ccfb934 e097cb4 ccfb934 e097cb4 ccfb934 e097cb4 ccfb934 e097cb4 ccfb934 e097cb4 ccfb934 e097cb4 ccfb934 78dd464 ccfb934 78dd464 ccfb934 78dd464 ccfb934 78dd464 ccfb934 78dd464 ccfb934 78dd464 ccfb934 78dd464 ccfb934 78dd464 ccfb934 78dd464 ccfb934 ecb0096 ccfb934 f0adc70 ccfb934 ecb0096 5367d81 ecb0096 ccfb934 ecb0096 9758d0b ecb0096 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 |
import gradio as gr
from langgraph.graph import START, StateGraph, MessagesState
from langgraph.prebuilt import tools_condition, ToolNode
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
from langchain_core.tools import tool
from langchain_community.document_loaders import WikipediaLoader, ArxivLoader
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
from langchain_core.runnables import RunnableConfig
@tool
def multiply(a: int, b: int) -> int:
    """Multiply two numbers."""
    product = a * b
    return product
@tool
def add(a: int, b: int) -> int:
    """Add two numbers."""
    total = a + b
    return total
@tool
def subtract(a: int, b: int) -> int:
    """Subtract two numbers."""
    difference = a - b
    return difference
@tool
def divide(a: int, b: int) -> float:
    """Divide two numbers."""
    # Guard clause first; error message kept exactly as callers may match on it.
    if b == 0:
        raise ValueError("Cannot divide by zero.")
    quotient = a / b
    return quotient
@tool
def modulus(a: int, b: int) -> int:
    """Get the modulus of two numbers."""
    remainder = a % b
    return remainder
@tool
def wiki_search(query: str) -> dict:
    """Search Wikipedia for a query and return maximum 2 results."""
    docs = WikipediaLoader(query=query, load_max_docs=2).load()
    # Render each hit as an XML-ish <Document> fragment, then join with a
    # visual separator so the LLM can tell the results apart.
    rendered = []
    for doc in docs:
        rendered.append(
            f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
        )
    return {"wiki_results": "\n\n---\n\n".join(rendered)}
@tool
def web_search(query: str) -> dict:
    """Search Tavily for a query and return maximum 3 results.

    Args:
        query: Free-text web search query.

    Returns:
        dict with key "web_results": results joined by a "---" separator,
        each rendered as a <Document> fragment.
    """
    # FIX: TavilySearchResults.invoke takes the query as its positional
    # input (Runnable.invoke(input, config=None)), not a `query=` kwarg,
    # and it returns a list of plain dicts (keys include "url" and
    # "content"), not Document objects — the original `.metadata[...]`
    # access could not work on those dicts.
    search_docs = TavilySearchResults(max_results=3).invoke(query)
    formatted_search_docs = "\n\n---\n\n".join(
        f'<Document source="{doc.get("url", "")}" page=""/>\n{doc.get("content", "")}\n</Document>'
        for doc in search_docs
    )
    return {"web_results": formatted_search_docs}
@tool
def arvix_search(query: str) -> dict:
    """Search Arxiv for a query and return maximum 3 results.

    Args:
        query: Free-text arXiv search query.

    Returns:
        dict with key "arvix_results": results joined by a "---" separator;
        each document's content is truncated to 1000 characters.
    """
    search_docs = ArxivLoader(query=query, load_max_docs=3).load()
    # FIX: ArxivLoader document metadata uses keys like "Published",
    # "Title", "Authors", "Summary" — there is no "source" key, so the
    # original doc.metadata["source"] was KeyError-prone. Use .get with a
    # Title fallback instead.
    formatted_search_docs = "\n\n---\n\n".join(
        f'<Document source="{doc.metadata.get("source", doc.metadata.get("Title", ""))}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content[:1000]}\n</Document>'
        for doc in search_docs
    )
    return {"arvix_results": formatted_search_docs}
# 🧰 All tools exposed to the agent: arithmetic helpers plus the three
# search tools (Wikipedia, Tavily web, arXiv).
tools = [multiply, add, subtract, divide, modulus, wiki_search, web_search, arvix_search]
# 📄 Load the system prompt from disk and wrap it as a SystemMessage.
with open("system_prompt.txt", encoding="utf-8") as prompt_file:
    system_prompt = prompt_file.read()
sys_msg = SystemMessage(content=system_prompt)
# 🧠 LLM setup (ChatHuggingFace via hosted endpoint)
# FIX: `endpoint_url` expects a full inference-endpoint URL; a bare hub id
# like "Qwen/Qwen2.5-Coder-32B-Instruct" must go in `repo_id` (this also
# matches how build_graph() below addresses the same model by hub id).
llm = ChatHuggingFace(
    llm=HuggingFaceEndpoint(
        repo_id="Qwen/Qwen2.5-Coder-32B-Instruct",
        # huggingfacehub_api_token="your_huggingface_token_here",  # or set HUGGINGFACEHUB_API_TOKEN
    ),
    temperature=0,
)
llm_with_tools = llm.bind_tools(tools)
# 🧠 Assistant node logic
def assistant(state: MessagesState):
    """Run the tool-bound LLM over the conversation and return its reply."""
    response = llm_with_tools.invoke(state["messages"])
    return {"messages": [response]}
# 🧠 LangGraph setup
def build_graph():
    """Construct a tool-bound chat model and the seed message list.

    Returns:
        tuple: ``(llm_with_tools, messages)`` where ``messages`` contains a
        single SystemMessage built from ``system_prompt.txt``.

    NOTE(review): despite the name, this does not assemble a StateGraph —
    callers receive the model and seed messages directly; confirm whether a
    graph was intended before extending.
    """
    # Removed the redundant local re-import of HumanMessage/SystemMessage:
    # both are already imported at module level and HumanMessage was unused.
    llm = ChatHuggingFace(
        llm=HuggingFaceEndpoint(
            model="Qwen/Qwen2.5-Coder-32B-Instruct",
        ),
        temperature=0,
        verbose=True,
    )
    llm_with_tools = llm.bind_tools(tools)
    with open("system_prompt.txt", "r", encoding="utf-8") as f:
        system_prompt = f.read()
    sys_msg = SystemMessage(content=system_prompt)
    messages = [sys_msg]
    return llm_with_tools, messages
|