# Spaces: Sleeping
# (Hugging Face Spaces status banner captured by the page scrape — not code)
# streamlit_app.py
import streamlit as st
import sys
import os

# Load variables from a local .env file *before* anything else reads
# os.environ (e.g. API keys consumed when app.py is imported below).
from dotenv import load_dotenv
load_dotenv()

# Make app.py importable regardless of the current working directory.
# NOTE(review): assumes app.py sits in the same directory as this file.
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
# Import the chat entry point from app.py. Importing app.py is expected to
# load the model, tokenizer, embedder, etc. as module-level side effects.
# (If respond relies on other globals, they can be imported here too:
#  from app import model, tokenizer, embedder, nlp, data, descriptions, embeddings, ...)
try:
    from app import respond, model_id
    print("Successfully imported respond function from app.py")
except ImportError as e:
    # Surface the failure in the UI and halt — the app cannot run
    # without its core logic.
    st.error(f"Error importing core logic from app.py: {e}")
    st.stop()
# Page chrome.
st.set_page_config(page_title="Business Q&A Assistant")
st.title(f"Business Q&A Assistant with {model_id}")
st.write("Ask questions about the business (details from Google Sheet) or general knowledge (via search).")

# Chat history lives in session state, which persists across Streamlit
# reruns within a single user session.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Re-render the full conversation on every rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
| # Accept user input | |
| if prompt := st.chat_input("Your Question"): | |
| # Add user message to chat history | |
| st.session_state.messages.append({"role": "user", "content": prompt}) | |
| # Display user message in chat message container | |
| with st.chat_message("user"): | |
| st.markdown(prompt) | |
| # Get the current chat history in the format your respond function expects | |
| # Gradio's history is [(user, bot), (user, bot), ...] | |
| # Streamlit's session state is a list of dicts [{"role": "user", "content": "..."}] | |
| # We need to convert Streamlit's history format to Gradio's format for your respond function | |
| gradio_chat_history = [] | |
| # Start from the second message if the first was from the system/initial state | |
| # Or just iterate through pairs, skipping the latest user prompt for history pass | |
| # The respond function expects history *before* the current turn | |
| history_for_respond = [] | |
| # Iterate through messages, excluding the very last user prompt which is the current input | |
| for i in range(len(st.session_state.messages) - 1): | |
| if st.session_state.messages[i]["role"] == "user" and st.session_state.messages[i+1]["role"] == "assistant": | |
| history_for_respond.append((st.session_state.messages[i]["content"], st.st |