from typing import List, Dict, Tuple, Optional, Any

import gradio as gr
from langchain_core.language_models.llms import LLM
from langgraph.checkpoint.memory import MemorySaver

from ask_candid.utils import get_session_id
from ask_candid.graph import build_compute_graph
from ask_candid.base.config.constants import START_SYSTEM_PROMPT


def run_chat(
    thread_id: str,
    user_input: Dict[str, Any],
    history: List[Dict],
    llm: LLM,
    indices: Optional[List[str]] = None,
    premium_features: Optional[List[str]] = None,
) -> Tuple[gr.MultimodalTextbox, List[Dict[str, Any]], str]:
    """Runs one chat turn through the compute graph and returns the cleared
    input box, the updated message history, and the session thread ID.
    """
    if premium_features is None:
        premium_features = []

    # Seed a new conversation with the system prompt.
    if len(history) == 0:
        history.append({"role": "system", "content": START_SYSTEM_PROMPT})
    history.append({"role": "user", "content": user_input["text"]})
    inputs = {"messages": history}

    # thread_id can be an email
    # https://github.com/yurisasc/memory-enhanced-ai-assistant/blob/main/assistant.py
    thread_id = get_session_id(thread_id)
    config = {"configurable": {"thread_id": thread_id}}

    enable_recommendations = "Recommendation" in premium_features
    workflow = build_compute_graph(
        llm=llm,
        indices=indices,
        user_callback=gr.Info,
        enable_recommendations=enable_recommendations,
    )

    memory = MemorySaver()  # TODO: don't use for Prod
    graph = workflow.compile(checkpointer=memory)
    response = graph.invoke(inputs, config=config)
    messages = response["messages"]

    # Prefer the recommendation output if the graph produced one;
    # otherwise fall back to the chatbot's final message.
    recommendation = response.get("recommendation")
    if recommendation:
        ai_answer = recommendation
    else:
        ai_answer = messages[-1].content

    # Pull rendered source citations, if any, from the last two messages.
    sources_html = ""
    for message in messages[-2:]:
        if message.type == "HTML":
            sources_html = message.content

    history.append({"role": "assistant", "content": ai_answer})
    if sources_html:
        history.append({
            "role": "assistant",
            "content": sources_html,
            "metadata": {"title": "Sources HTML"},
        })

    return gr.MultimodalTextbox(value=None, interactive=True), history, thread_id
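

# A minimal usage sketch, not part of the module: it shows one way run_chat
# could be wired into a Gradio Blocks UI. The stand-in LLM and the empty
# initial thread_id below are assumptions for local testing only; substitute
# whatever model and session bootstrap your deployment actually uses.
if __name__ == "__main__":
    from langchain_core.language_models.fake import FakeListLLM

    # Deterministic stand-in LLM so the sketch runs without credentials.
    llm = FakeListLLM(responses=["Hello from the assistant."])

    with gr.Blocks() as demo:
        thread_id = gr.State("")  # assumes get_session_id mints an ID for ""
        chatbot = gr.Chatbot(type="messages")  # matches the dict-based history
        msg = gr.MultimodalTextbox(interactive=True)
        # Outputs mirror run_chat's return: cleared textbox, history, thread ID.
        msg.submit(
            run_chat,
            inputs=[thread_id, msg, chatbot, gr.State(llm)],
            outputs=[msg, chatbot, thread_id],
        )
    demo.launch()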