File size: 4,712 Bytes
a61aa8c
2aa4095
 
 
 
 
 
 
 
a61aa8c
2aa4095
 
 
 
 
 
 
 
 
 
 
 
 
 
95bf231
2aa4095
 
 
 
 
 
 
 
 
 
 
 
 
95bf231
 
2aa4095
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
95bf231
2aa4095
 
 
 
 
 
95bf231
2aa4095
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
import streamlit as st
from google import genai
from google.genai import types
from gemini_tools_defination import GeminiToolDefination
from gemini_llm import GeminiLLM
from gemini_history import ConversationBuilder
from gemini_tools import GeminiCodeExecutionTool, GeminiThinking, GoogleSearchTool
from utils import get_typing_indicator_html
from gemini_tool_selector import GeminiToolSelector

# Wire up the Gemini helper objects once at module import time.
geminillm=GeminiLLM()  # wraps the google-genai client and model selection
geminitooldefination = GeminiToolDefination(geminillm)  # function-calling tool declarations / chat config
conversationbuilder = ConversationBuilder()  # builds history entries to splice into the chat transcript
geminitoolselector = GeminiToolSelector()  # maps a tool name to its handler
client = geminillm.client  # underlying google-genai client used to create chat sessions

# Create the Gemini chat session once per browser session and keep it in
# Streamlit session state so it survives reruns.
if "chat" not in st.session_state:
    st.session_state.chat = client.chats.create(
        model=geminillm.model_id,
        config=geminitooldefination.config,
    )

# UI transcript: list of {"role", "content"} dicts rendered on every rerun.
st.session_state.setdefault("messages", [])

# Show a greeting until the first message arrives.
if not st.session_state.messages:
    with st.chat_message("assistant", avatar="src/images/ai.png"):
        st.markdown("👋 **Hi! I can help you think, code, or search real-time info. Just ask!**")

# Tool name map
tool_mapping = {
    "Auto": None,
    "Think": "GeminiThinking",
    "Search": "GoogleSearchTool",
    "Code": "GeminiCodeExecutionTool"
}

# display chat messages from history at every rerun  
for message in st.session_state.messages:
    avatar_path = (
        "src/images/person_15454011.png" if message["role"] == "user"
        else "src/images/ai.png"
    )
    with st.chat_message(message["role"], avatar=avatar_path):
        st.markdown(message["content"])

# Remember the sidebar choice across reruns; default to letting the model pick.
if "selected_tool" not in st.session_state:
    st.session_state.selected_tool = "Auto"

st.sidebar.title("Tools")
tool_labels = list(tool_mapping.keys())
selected_tool = st.sidebar.selectbox(
    "Choose a tool:",
    options=tool_labels,
    index=tool_labels.index(st.session_state.selected_tool),
)

# Persist the current selection for the next rerun.
st.session_state.selected_tool = selected_tool

prompt = st.chat_input("What is up?")
# None means "Auto": no tool is forced and the model decides.
selected_tool_name = tool_mapping[st.session_state.selected_tool]


if prompt:
    try:
        # Echo the user's message in the UI and persist it for future reruns.
        with st.chat_message("user", avatar="src/images/person_15454011.png"):
            st.markdown(prompt)
        st.session_state.messages.append({"role": "user", "content": prompt})

        # Stream / build the assistant response.
        full_response = ""
        with st.chat_message("assistant", avatar="src/images/ai.png"):
            response_placeholder = st.empty()

            if selected_tool_name:
                # A tool was forced via the sidebar: dispatch to it directly.
                # FIX: the original also opened a model stream here
                # (send_message_stream) that was never consumed on this path —
                # a wasted model call. The stream is now created only in the
                # "Auto" branch below.
                tool_args = {"query": prompt}
                full_response = geminitoolselector.handle_tool_response(selected_tool_name, tool_args, geminillm)
                # Splice the tool output into the chat's model-side history so
                # follow-up turns can reference it (the forced-tool path never
                # goes through the chat object itself).
                content_only_convo = conversationbuilder.build_with_content_only(full_response)
                st.session_state.chat.get_history().extend(content_only_convo)

            else:
                # "Auto": send the prompt to the model and stream the reply.
                response_stream = st.session_state.chat.send_message_stream(prompt)
                for chunk in response_stream:
                    if chunk.text is None:  # a None-text chunk signals a function call
                        # Hoist the nested access instead of repeating it.
                        function_call = chunk.candidates[0].content.parts[0].function_call
                        tool_name = function_call.name
                        tool_args = function_call.args

                        # The handler produces (and presumably renders — TODO
                        # confirm against GeminiToolSelector) the tool output.
                        full_response = geminitoolselector.handle_tool_response(tool_name, tool_args, geminillm)

                        # The streamed function-call turn carries no content,
                        # so record the tool result in the model-side history
                        # explicitly; otherwise it is missing from
                        # st.session_state.chat.get_history().
                        content_only_convo = conversationbuilder.build_with_content_only(full_response)
                        st.session_state.chat.get_history().extend(content_only_convo)
                    else:
                        full_response += chunk.text
                        # Incremental "typing" effect: rewrite the placeholder
                        # with the text accumulated so far.
                        response_placeholder.markdown(full_response)

        # Persist the assistant reply so the transcript survives reruns.
        st.session_state.messages.append({"role": "assistant", "content": full_response})

    except Exception as e:
        # Top-level UI boundary: surface any failure to the user instead of
        # crashing the Streamlit script.
        st.error(f"An error occurred: {e}")