"""GenAI Mentor Chat: a Streamlit app that answers Generative-AI questions
via a Hugging Face chat model, with a sidebar-selectable experience level."""

import os

import streamlit as st
from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage

# --- Hugging Face auth ---
# The token is read from the 'Data_science' environment variable.
# Fail fast with a clear message instead of the opaque TypeError that
# `os.environ[...] = None` would raise when the variable is unset.
hf = os.getenv('Data_science')
if not hf:
    raise RuntimeError(
        "Hugging Face token not found: set the 'Data_science' environment variable."
    )
os.environ['HUGGINGFACEHUB_API_TOKEN'] = hf
os.environ['HF_TOKEN'] = hf

# --- Config ---
# set_page_config must be the first Streamlit command executed.
st.set_page_config(page_title="GenAI Mentor Chat", layout="centered")
st.title("🤖 GenAI Mentor Chat")

# --- Sidebar for selections ---
st.sidebar.title("Mentor Preferences")
exp_options = ['Beginner', 'Intermediate', 'Experienced']
exp = st.sidebar.selectbox("Select your experience level:", exp_options)

# --- Initialize Chat Model ---
# All endpoint parameters live on HuggingFaceEndpoint; ChatHuggingFace only
# wraps the LLM, so duplicating repo_id/provider/etc. on it is redundant
# (and rejected by pydantic validation in newer langchain-huggingface versions).
genai_skeleton = HuggingFaceEndpoint(
    repo_id='google/gemma-2-9b-it',
    provider='nebius',
    temperature=0.7,
    max_new_tokens=150,
    task='conversational'
)
genai_chat = ChatHuggingFace(llm=genai_skeleton)

# --- Session State ---
# Chat history is a list of (user_message, bot_reply) tuples persisted
# across Streamlit reruns.
if "chat_history_genai" not in st.session_state:
    st.session_state.chat_history_genai = []

# --- Chat Form ---
with st.form(key="chat_form"):
    user_input = st.text_input("Ask your question:")
    submit = st.form_submit_button("Send")

# --- Chat Logic ---
if submit and user_input:
    # System context steers the model: stay on Generative AI topics,
    # match the user's selected experience level, keep answers short.
    system_prompt = (
        f"You are a Generative AI mentor with {exp.lower()} experience level. "
        f"Answer questions only related to Generative AI in a friendly tone and under 150 words. "
        f"If the question is not related to Generative AI, politely inform the user that it's out of scope."
    )

    # Create message list
    messages = [SystemMessage(content=system_prompt), HumanMessage(content=user_input)]

    # Get model response
    result = genai_chat.invoke(messages)

    # Append to history
    st.session_state.chat_history_genai.append((user_input, result.content))

# --- Display Chat History ---
st.subheader("🗨️ Chat History")
for user, bot in st.session_state.chat_history_genai:
    st.markdown(f"**You:** {user}")
    st.markdown(f"**Mentor:** {bot}")
    st.markdown("---")