import streamlit as st
import os
from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
from langchain_core.messages import HumanMessage, SystemMessage



hf_token = os.getenv('hf')  # Hugging Face token, expected as an 'hf' secret / env var
if not hf_token:
    st.error("Hugging Face token not found. Please set the 'hf' secret before running the app.")
    st.stop()
os.environ['HUGGINGFACEHUB_API_TOKEN'] = hf_token
os.environ['HF_TOKEN'] = hf_token
# --- Config ---
st.set_page_config(page_title="AI Mentor Chat", layout="centered")
st.title("🤖 AI Mentor Chat")

# --- Sidebar for selections ---
st.sidebar.title("Mentor Preferences")

# Experience options shown in the sidebar
experience_options = ['<1', '1', '2', '3', '4', '5', '5+']
exp = st.sidebar.selectbox("Select experience (years):", experience_options)

# Map the selection to a phrase used in the system prompt
experience_map = {
    '<1': 'less than 1 year',
    '1': '1 year', '2': '2 years', '3': '3 years', '4': '4 years', '5': '5 years',
    '5+': 'more than 5 years'
}
experience_label = experience_map[exp]

# --- Initialize Chat Model ---
llm_endpoint = HuggingFaceEndpoint(
    repo_id='Qwen/Qwen3-14B',
    provider='nebius',
    temperature=0.7,
    max_new_tokens=150,
    task='conversational'
)

# ChatHuggingFace only needs the wrapped endpoint; generation settings are
# configured on the HuggingFaceEndpoint above.
chat_model = ChatHuggingFace(llm=llm_endpoint)

# --- Session State ---
PAGE_KEY = "machine_learning_chat_history"
try:
    if PAGE_KEY not in st.session_state:
        st.session_state[PAGE_KEY] = []
    
    # --- Chat Form ---
    with st.form(key="chat_form"):
        user_input = st.text_input("Ask your question:")
        submit = st.form_submit_button("Send")
    
    # --- Chat Logic ---
    if submit and user_input:
        # Add system context
        system_prompt = f"""Act as a Machine learning mentor with {experience_label} years of experience. Teach in a friendly, approachable manner while following these strict rules:
        1. Only answer questions related to machine learning programming (including libraries, frameworks, and tools in the machine learning ecosystem)
        2. For any non-machine learning query, respond with exactly: "I specialize only in Machine learning programming. This appears to be a non-machine learning topic."
        3. Never suggest you can help with non-machine learning topics
        4. Keep explanations clear, practical, and beginner-friendly when appropriate
        5. Include practical examples when explaining concepts
        6. For advanced topics, assume the student has basic machine learning knowledge"""        
        # Create message list
        messages = [SystemMessage(content=system_prompt), HumanMessage(content=user_input)]
    
        # Get model response
        result = chat_model.invoke(messages)
    
        # Append to history
        st.session_state[PAGE_KEY].append((user_input, result.content))
    
    # --- Display Chat History ---
    st.subheader("🗨️ Chat History")
    for user, bot in st.session_state[PAGE_KEY]:
        st.markdown(f"**You:** {user}")
        st.markdown(f"**Mentor:** {bot}")
        st.markdown("---")
except Exception:
    st.warning('The API token limit may have been reached. Please revisit in 24 hours!')