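# --- Legacy HuggingFaceEndpoint experiment, kept commented out for reference ---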
# import os
# import langchain
# import langchain_huggingface
# from langchain_huggingface import HuggingFaceEndpoint, HuggingFacePipeline, ChatHuggingFace
# from langchain_core.messages import HumanMessage, SystemMessage, AIMessage
# os.environ["HF_TOKEN"] = os.getenv('Ayush')
# os.environ["HUGGINGFACEHUB_API_KEY"] = os.getenv('Ayush')
# llama_model = HuggingFaceEndpoint(repo_id="meta-llama/Llama-3.2-3B-Instruct", provider="nebius", temperature=0.6, max_new_tokens=70, task="conversational")
# model_d = ChatHuggingFace(llm=llama_model, repo_id="meta-llama/Llama-3.2-3B-Instruct", provider="nebius", temperature=0.6, max_new_tokens=70, task="conversational")
# message = [SystemMessage(content="Answer like you are a hardcore pc gamer"),
#            HumanMessage(content="Give me name of top 10 pc games of all time with description")]
# result = model_d.invoke(message)
# print(result.content)
import os
import streamlit as st
from langchain_community.chat_models import ChatHuggingFace
from langchain_community.llms import HuggingFaceHub
from langchain_core.messages import HumanMessage, SystemMessage
from fpdf import FPDF
# Set HuggingFace token from the environment (the Space secret is exposed as the "keys" env var);
# guard against a missing value so os.environ is never assigned None.
hf_token = os.getenv("keys")
if hf_token:
    os.environ["HUGGINGFACEHUB_API_TOKEN"] = hf_token
# Topic-wise base prompts and models
topic_config = {
    "Python": {
        "prompt": "Answer like a senior Python developer and coding mentor.",
        "model": "meta-llama/Llama-3.2-3B-Instruct"
    },
    "SQL": {
        "prompt": "Answer like a senior SQL engineer with industry experience.",
        "model": "google/gemma-3-27b-it"
    },
    "Power BI": {
        "prompt": "Answer like a Power BI expert helping a beginner.",
        "model": "mistralai/Mistral-7B-Instruct-v0.1"
    },
    "Statistics": {
        "prompt": "Answer like a statistics professor explaining key concepts to a student.",
        "model": "deepseek-ai/DeepSeek-R1"
    },
    "Machine Learning": {
        "prompt": "Answer like an ML mentor guiding a junior data scientist.",
        "model": "google/gemma-3-27b-it"
    },
    "Deep Learning": {
        "prompt": "Answer like a deep learning researcher with real-world insights.",
        "model": "meta-llama/Llama-3.2-3B-Instruct"
    },
    "Generative AI": {
        "prompt": "Answer like an expert in LLMs and Generative AI research.",
        "model": "deepseek-ai/DeepSeek-R1"
    }
}
# Experience level adjustments to the prompt
experience_prompts = {
    "Beginner": "Explain with simple language and clear examples for a beginner.",
    "Intermediate": "Provide a detailed answer suitable for an intermediate learner.",
    "Expert": "Give an in-depth and advanced explanation suitable for an expert."
}
# Streamlit app setup
st.set_page_config(page_title="Data Science Mentor", page_icon="📊")
st.title("📊 Data Science Mentor App")
if "chat_history" not in st.session_state: | |
st.session_state.chat_history = [] | |
# Multi-select topics
selected_topics = st.multiselect("Select one or more topics:", list(topic_config.keys()), default=["Python"])
# Select experience level
experience_level = st.selectbox("Select mentor experience level:", list(experience_prompts.keys()))
question = st.text_area("Ask your question here:")
if st.button("Get Answer"): | |
if not selected_topics: | |
st.warning("Please select at least one topic.") | |
elif not question.strip(): | |
st.warning("Please enter your question.") | |
    else:
        # Combine prompts from selected topics + experience level
        combined_prompt = ""
        models_used = []  # keep selection order so the model choice is deterministic
        for topic in selected_topics:
            base_prompt = topic_config[topic]["prompt"]
            combined_prompt += f"{base_prompt} "
            if topic_config[topic]["model"] not in models_used:
                models_used.append(topic_config[topic]["model"])
        combined_prompt += experience_prompts[experience_level]
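        # e.g. selecting ["Python"] at "Beginner" level yields:
        # "Answer like a senior Python developer and coding mentor.
        #  Explain with simple language and clear examples for a beginner."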
        # Use the model of the first selected topic (more advanced merging is possible)
        chosen_model = models_used[0]
        # Load model
        llm = HuggingFaceHub(
            repo_id=chosen_model,
            model_kwargs={"temperature": 0.6, "max_new_tokens": 150}
        )
        chat_model = ChatHuggingFace(llm=llm)
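        # NOTE: HuggingFaceHub and this ChatHuggingFace import are deprecated in
        # langchain_community. A rough sketch of the modern equivalent (assumes
        # the langchain-huggingface package is installed):
        # from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
        # llm = HuggingFaceEndpoint(repo_id=chosen_model, temperature=0.6, max_new_tokens=150)
        # chat_model = ChatHuggingFace(llm=llm)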
        messages = [
            SystemMessage(content=combined_prompt),
            HumanMessage(content=question)
        ]
        with st.spinner("Mentor is typing..."):
            response = chat_model.invoke(messages)
        st.markdown("### 🧠 Mentor's Response:")
        st.markdown(response.content)
        # Save chat
        st.session_state.chat_history.append((selected_topics, experience_level, question, response.content))
# Display chat history
if st.session_state.chat_history:
    st.markdown("---")
    st.subheader("📜 Chat History")
    for i, (topics, exp, q, a) in enumerate(st.session_state.chat_history, 1):
        st.markdown(f"**{i}. Topics:** {', '.join(topics)} | **Mentor Level:** {exp}")
        st.markdown(f"**You:** {q}")
        st.markdown(f"**Mentor:** {a}")
        st.markdown("---")
    # Download PDF
    if st.button("📄 Download PDF of this chat"):
        pdf = FPDF()
        pdf.add_page()
        pdf.set_font("Arial", size=12)
        pdf.cell(200, 10, txt="Data Science Mentor Chat History", ln=True, align="C")
        pdf.ln(10)
        for i, (topics, exp, q, a) in enumerate(st.session_state.chat_history, 1):
            entry = f"{i}. Topics: {', '.join(topics)} | Mentor Level: {exp}\nYou: {q}\nMentor: {a}\n\n"
            # FPDF's built-in fonts are latin-1 only; replace unsupported characters
            pdf.multi_cell(0, 10, entry.encode("latin-1", "replace").decode("latin-1"))
        pdf_path = "/tmp/mentor_chat.pdf"
        pdf.output(pdf_path)
        with open(pdf_path, "rb") as f:
            st.download_button("📥 Click to Download PDF", f, file_name="mentor_chat.pdf", mime="application/pdf")
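        # Alternative sketch (untested assumption, classic PyFPDF API): build the
        # PDF in memory instead of writing to /tmp:
        # pdf_bytes = pdf.output(dest="S").encode("latin-1")
        # st.download_button("📥 Click to Download PDF", pdf_bytes, file_name="mentor_chat.pdf", mime="application/pdf")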