Spaces:
Running
Running
File size: 8,114 Bytes
9d424da |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 |
import streamlit as st
import json
from mistralai import Mistral
# -----------------------
# Config
# -----------------------
# Typing any of these in the chat ends the interview without calling the model.
EXIT_KEYWORDS = {"exit", "quit", "bye", "stop", "end"}
# Local JSON file that accumulates every submitted candidate record.
STORAGE_FILE = "candidates.json"
# Use Mistral hosted API (instead of local Ollama)
MODEL_NAME = "mistral-small" # can also try "mistral-medium" or "mistral-large"
# Init Mistral LLM
# NOTE: the API key is read from Streamlit secrets (.streamlit/secrets.toml);
# this raises KeyError at startup when MISTRAL_API_KEY is not configured.
client = Mistral(api_key=st.secrets["MISTRAL_API_KEY"])
# -----------------------
# Helpers
# -----------------------
def save_candidate(data, path=None):
    """Append a candidate record to the local JSON storage file.

    Args:
        data: JSON-serializable dict of candidate details.
        path: Optional storage-file override; defaults to the module-level
            STORAGE_FILE. (Backward compatible: existing callers pass only
            ``data``.)
    """
    if path is None:
        path = STORAGE_FILE
    try:
        with open(path, "r") as f:
            db = json.load(f)
    except (FileNotFoundError, json.JSONDecodeError):
        # First run, or an empty/corrupted file: start fresh instead of
        # crashing (the original only handled FileNotFoundError).
        db = []
    if not isinstance(db, list):
        # Defensive: the file held valid JSON that wasn't a list.
        db = []
    db.append(data)
    with open(path, "w") as f:
        json.dump(db, f, indent=2)
def generate_questions(tech_stack):
    """Generate structured technical questions in ENGLISH using Mistral.

    Args:
        tech_stack: Comma-separated technology names from the candidate form.

    Returns:
        Dict mapping each technology to a list of 3 questions, or
        ``{"Error": [raw_model_output]}`` when the reply is not valid JSON.
    """
    prompt = f"""
Candidate tech stack: {tech_stack}.
For each technology, generate exactly 3 short interview questions in ENGLISH.
Respond strictly in JSON format like this:
{{
"Python": ["Question1", "Question2", "Question3"],
"Django": ["Question1", "Question2", "Question3"]
}}
No explanations, no extra text, only valid JSON.
"""
    response = client.chat.complete(
        model=MODEL_NAME,
        messages=[{"role": "user", "content": prompt}],
        temperature=0.2,  # low temperature keeps the JSON output stable
    )
    # mistralai v1 returns a message *object*; content is attribute access,
    # not a mapping lookup ("message['content']" raises TypeError).
    raw = response.choices[0].message.content
    # Try to parse clean JSON first.
    try:
        return json.loads(raw)
    except json.JSONDecodeError:
        pass
    # The model sometimes wraps its JSON in a ``` fence. Splitting on the
    # fence and taking the last piece yields "" for a trailing fence, so
    # instead extract the text between the first "{" and the last "}".
    start, end = raw.find("{"), raw.rfind("}")
    if start != -1 and end > start:
        try:
            return json.loads(raw[start:end + 1])
        except json.JSONDecodeError:
            pass
    return {"Error": [raw]}
def chat_response(user_input):
    """Answer a free-form candidate message via the Mistral chat model.

    Args:
        user_input: The candidate's chat message.

    Returns:
        The assistant's reply text.
    """
    prompt = f"""
You are TalentScout Hiring Assistant.
Always answer clearly in English.
Do NOT repeat or rephrase the user question, only provide the best possible answer.
Question: {user_input}
Answer:
"""
    response = client.chat.complete(
        model=MODEL_NAME,
        messages=[{"role": "user", "content": prompt}],
        temperature=0.2,  # keep answers focused and reproducible
    )
    # mistralai v1: content is an attribute on the message object, not a
    # dict key — subscripting raised TypeError at runtime.
    return response.choices[0].message.content
def estimate_skill_levels(qa_history, tech_stack):
    """Estimate skill levels (Beginner / Intermediate / Expert) based on candidate answers.

    Args:
        qa_history: List of (role, message) tuples; role is "You" for the
            candidate and "Assistant" for the bot.
        tech_stack: Comma-separated technology names from the candidate form.

    Returns:
        Dict mapping technology -> skill label, or ``{"Error": raw_output}``
        when the model reply is not valid JSON.
    """
    # Pair each assistant prompt with the candidate reply that follows it.
    # (The original comprehension labelled candidate messages as "Q:" with an
    # empty "A:", feeding the model mislabeled data.)
    lines = []
    last_assistant_msg = None
    for role, msg in qa_history:
        if role == "Assistant":
            last_assistant_msg = msg
        elif role == "You":
            lines.append(f"Q: {last_assistant_msg or '(free response)'}\nA: {msg}")
    answers = "\n".join(lines)
    prompt = f"""
Candidate tech stack: {tech_stack}
Candidate interview answers:
{answers}
Based on the answers, estimate the candidate's skill level for each technology
in this stack. Use only these labels: Beginner, Intermediate, Expert.
Respond strictly in JSON format like this:
{{
"Python": "Intermediate",
"Django": "Beginner"
}}
"""
    response = client.chat.complete(
        model=MODEL_NAME,
        messages=[{"role": "user", "content": prompt}],
        temperature=0.2,
    )
    # mistralai v1: attribute access, not subscripting (see chat_response).
    raw = response.choices[0].message.content
    try:
        return json.loads(raw)
    except json.JSONDecodeError:
        # Mirror generate_questions: salvage JSON wrapped in extra text.
        start, end = raw.find("{"), raw.rfind("}")
        if start != -1 and end > start:
            try:
                return json.loads(raw[start:end + 1])
            except json.JSONDecodeError:
                pass
        return {"Error": raw}
# -----------------------
# Streamlit UI
# -----------------------
# Page chrome: browser-tab title and icon.
st.set_page_config(page_title="TalentScout Hiring Assistant", page_icon="π€")
st.title("π€ TalentScout Hiring Assistant")
# Static welcome / capability overview shown at the top of the page.
st.write("""
Welcome to the **TalentScout Hiring Assistant**.
I will:
- Collect your professional details (experience, skills, and career interests)
- Generate tailored **technical interview questions** based on your tech stack
- Conduct a structured Q&A session where you can respond to questions or ask clarifications
- Provide a skill-level estimation (Beginner / Intermediate / Expert) for each technology
- Maintain a transcript of the conversation for review
At any time, type **exit**, **quit**, or **end** to finish the interview.
""")
# Session state: seed every key this app reads, once per browser session.
_SESSION_DEFAULTS = (
    ("qa_history", []),      # transcript of (role, message) tuples
    ("candidate_info", {}),  # last submitted form fields
    ("chat_input", ""),      # bound to the chat text_input widget
    ("skill_levels", {}),    # last skill-estimation result
)
for _key, _default in _SESSION_DEFAULTS:
    if _key not in st.session_state:
        st.session_state[_key] = _default
# -----------------------
# Candidate Info Form
# -----------------------
with st.form("candidate_form"):
    st.subheader("π Candidate Information")
    # Contact and profile fields.
    name = st.text_input("Full Name")
    email = st.text_input("Email Address")
    phone = st.text_input("Phone Number")
    exp = st.number_input("Years of Experience", min_value=0, step=1)
    position = st.text_input("Desired Position(s)")
    location = st.text_input("Current Location")
    tech_stack = st.text_area("Tech Stack (e.g., Python, Django, MySQL)")
    submitted = st.form_submit_button("Submit")
    if submitted:
        # Persist the submission both in session state and on disk.
        st.session_state.candidate_info = dict(
            name=name,
            email=email,
            phone=phone,
            experience=exp,
            position=position,
            location=location,
            tech_stack=tech_stack,
        )
        save_candidate(st.session_state.candidate_info)
        st.success("β Information saved! Scroll down to see your technical questions.")
# -----------------------
# Generate questions if candidate info available
# -----------------------
if st.session_state.candidate_info.get("tech_stack"):
    st.subheader("π§βπ» Technical Questions")
    with st.spinner("Generating questions..."):
        questions_dict = generate_questions(st.session_state.candidate_info["tech_stack"])
    if "Error" in questions_dict:
        # Parsing failed upstream; surface the raw model output for debugging.
        st.error("β οΈ Could not parse questions properly. Raw output:")
        st.write(questions_dict["Error"])
    else:
        # One numbered list per technology.
        for tech, question_list in questions_dict.items():
            st.markdown(f"**{tech}**")
            for idx, question in enumerate(question_list, start=1):
                st.write(f"{idx}. {question}")
# -----------------------
# Chatbox for conversation
# -----------------------
st.subheader("π¬ Chat with Assistant")

def process_input():
    """on_change callback for the chat box: handle one user turn."""
    message = st.session_state.chat_input.strip()
    if not message:
        return

    history = st.session_state.qa_history
    if message.lower() in EXIT_KEYWORDS:
        # Exit keywords end the interview without calling the model.
        history.append(("You", message))
        history.append(("Assistant", "π Thank you for your time! Weβll contact you with next steps."))
    else:
        # Normal turn: ask the model and record both sides of the exchange.
        with st.spinner("Assistant is typing..."):
            reply = chat_response(message)
        history.append(("You", message))
        history.append(("Assistant", reply))

    # Reset the widget so the box is empty on the next rerun.
    st.session_state.chat_input = ""

# Input box with callback
st.text_input(
    "Your message:",
    key="chat_input",
    on_change=process_input,
)
# Clear conversation button — wipes the transcript and any skill estimate.
if st.button("ποΈ Clear Conversation"):
    st.session_state.qa_history = []
    st.session_state.skill_levels = {}
    st.success("Conversation cleared!")

# Display transcript, one line per turn with a role icon.
for role, msg in st.session_state.qa_history:
    icon = "π§" if role == "You" else "π€"
    st.markdown(f"**{icon} {role}:** {msg}")
# -----------------------
# Skill Level Estimation
# -----------------------
if st.button("π Estimate Skill Levels"):
    has_answers = bool(st.session_state.qa_history)
    has_stack = bool(st.session_state.candidate_info.get("tech_stack"))
    if has_answers and has_stack:
        # Run the LLM-based estimation over the recorded transcript.
        with st.spinner("Analyzing candidate answers..."):
            st.session_state.skill_levels = estimate_skill_levels(
                st.session_state.qa_history,
                st.session_state.candidate_info["tech_stack"],
            )
        st.subheader("π Skill Level Estimation")
        st.json(st.session_state.skill_levels)
    else:
        st.warning("β οΈ Please complete candidate details and provide some answers first.")
|