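# app.py: "Alfred", the gala agent from the Hugging Face Agents Course (Unit 3).
# A LangGraph tool-calling agent served through a Gradio chat interface.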
import os
from typing import TypedDict, Annotated

import datasets
import gradio as gr
from huggingface_hub import login, list_models
from langchain.docstore.document import Document
from langchain.tools import Tool
from langchain_community.retrievers import BM25Retriever
from langchain_core.messages import AnyMessage, HumanMessage, AIMessage
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
from langgraph.graph import START, StateGraph
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode, tools_condition

from mytools import search_tool, weather_info_tool

# For local development, the token can come from a .env file instead:
# from dotenv import load_dotenv
# load_dotenv()
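
# Authenticate with the Hugging Face Hub; on a Space the token is provided as a
# repository secret (os.environ raises KeyError if it is missing).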
HUGGINGFACEHUB_API_TOKEN = os.environ["HUGGINGFACEHUB_API_TOKEN"]
login(token=HUGGINGFACEHUB_API_TOKEN, add_to_git_credential=True)
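
# The chat model: a serverless Inference endpoint wrapped in LangChain's chat
# interface so it can drive tool calling.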
llm = HuggingFaceEndpoint(
    # repo_id="HuggingFaceH4/zephyr-7b-beta",
    repo_id="Qwen/Qwen2.5-Coder-32B-Instruct",
    task="text-generation",
    max_new_tokens=512,
    do_sample=False,
    repetition_penalty=1.03,
    timeout=240,
)
model = ChatHuggingFace(llm=llm, verbose=True)

# Load the dataset
guest_dataset = datasets.load_dataset("agents-course/unit3-invitees", split="train")

# Convert dataset entries into Document objects
docs = [
    Document(
        page_content="\n".join([
            f"Name: {guest['name']}",
            f"Relation: {guest['relation']}",
            f"Description: {guest['description']}",
            f"Email: {guest['email']}",
        ]),
        metadata={"name": guest["name"]},
    )
    for guest in guest_dataset
]
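
# BM25 ranks documents by lexical overlap with the query, so no embedding model
# or vector store is needed for a guest list this small.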
bm25_retriever = BM25Retriever.from_documents(docs)

def extract_text(query: str) -> str:
    """Retrieves detailed information about gala guests based on their name or relation."""
    results = bm25_retriever.invoke(query)
    if results:
        return "\n\n".join([doc.page_content for doc in results[:3]])
    else:
        return "No matching guest information found."

guest_info_tool = Tool(
    name="guest_info_retriever",
    func=extract_text,
    description="Retrieves detailed information about gala guests based on their name or relation.",
)
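
# Hub statistics tool: looks up an author's most downloaded model via the
# public model-listing API.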
def get_hub_stats(author: str) -> str:
    """Fetches the most downloaded model from a specific author on the Hugging Face Hub."""
    try:
        # List models from the specified author, sorted by downloads (descending)
        models = list(list_models(author=author, sort="downloads", direction=-1, limit=1))
        if models:
            top_model = models[0]
            return f"The most downloaded model by {author} is {top_model.id} with {top_model.downloads:,} downloads."
        else:
            return f"No models found for author {author}."
    except Exception as e:
        return f"Error fetching models for {author}: {str(e)}"

# Initialize the tool
hub_stats_tool = Tool(
    name="get_hub_stats",
    func=get_hub_stats,
    description="Fetches the most downloaded model from a specific author on the Hugging Face Hub.",
)
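
# With type="messages", Gradio passes the chat history as a list of
# {"role": ..., "content": ...} dicts, which maps directly onto LangChain's
# HumanMessage / AIMessage classes.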
def predict(message, history):
    # Convert Gradio history to LangChain message format
    history_langchain_format = []
    for msg in history:
        if msg["role"] == "user":
            history_langchain_format.append(HumanMessage(content=msg["content"]))
        elif msg["role"] == "assistant":
            history_langchain_format.append(AIMessage(content=msg["content"]))

    # Add the new user message
    history_langchain_format.append(HumanMessage(content=message))

    # Invoke the Alfred agent with the full message history
    response = alfred.invoke(
        input={"messages": history_langchain_format},
        config={"recursion_limit": 100},
    )

    # Return the final assistant message
    return response["messages"][-1].content

# Set up the agent's tools and bind them to the chat model so it can emit
# structured tool calls
tools = [guest_info_tool, search_tool, weather_info_tool, hub_stats_tool]
# tools = [guest_info_tool]
chat_with_tools = model.bind_tools(tools)

# Define the agent state and the agent graph
class AgentState(TypedDict):
    # add_messages is a reducer: new messages are appended to the list rather
    # than replacing it on each graph step
    messages: Annotated[list[AnyMessage], add_messages]

def assistant(state: AgentState):
    # One LLM step: the tool-bound model either answers directly or requests a tool call
    return {
        "messages": [chat_with_tools.invoke(state["messages"])],
    }
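
# The graph forms a loop: assistant -> tools -> assistant, repeated until the
# model replies without requesting a tool, at which point tools_condition
# routes to END.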

## The graph
builder = StateGraph(AgentState)

# Define nodes: these do the work
builder.add_node("assistant", assistant)
builder.add_node("tools", ToolNode(tools))

# Define edges: these determine how the control flow moves
builder.add_edge(START, "assistant")
builder.add_conditional_edges(
    "assistant",
    # If the latest message requires a tool, route to tools;
    # otherwise, provide a direct response
    tools_condition,
)
builder.add_edge("tools", "assistant")
alfred = builder.compile()
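
# The compiled graph is directly runnable; for example (outside Gradio):
#   alfred.invoke({"messages": [HumanMessage(content="What's the weather like tonight?")]})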

demo = gr.ChatInterface(
    predict,
    type="messages",
)
demo.launch()