# 1. IMPORTS AND SETUP
import os
import uuid
import gradio as gr
from typing import TypedDict, List, Optional
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.tools import tool
from pydantic import BaseModel, Field # CORRECTED LINE: Import directly from Pydantic
from langgraph.graph import StateGraph, END
import requests
from bs4 import BeautifulSoup
from pypdf import PdfReader # CORRECTED LINE: Import from 'pypdf' instead of 'PyPDF2'
from threading import Thread
print("--- Libraries imported. ---")
# Set API keys from Hugging Face Space Secrets (default to "" so a missing secret
# does not raise a TypeError at startup)
os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY", "")
os.environ["LANGCHAIN_API_KEY"] = os.getenv("LANGCHAIN_API_KEY", "")
os.environ["TAVILY_API_KEY"] = os.getenv("TAVILY_API_KEY", "")
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_PROJECT"] = "Deployed Career Navigator"
# 2. LANGGRAPH AGENT BACKEND (The "Brain" of the App)
class SkillAnalysis(BaseModel):
    technical_skills: List[str] = Field(description="List of top 5 technical skills.")
    soft_skills: List[str] = Field(description="List of top 3 soft skills.")
class ResumeFeedback(BaseModel):
    strengths: List[str] = Field(description="Resume strengths.")
    gaps: List[str] = Field(description="Missing skills or experiences.")
    suggestions: List[str] = Field(description="Suggestions for improvement.")
class CareerActionPlan(BaseModel):
    career_overview: str = Field(description="Overview of the chosen career.")
    skill_analysis: SkillAnalysis
    resume_feedback: ResumeFeedback
    learning_roadmap: str = Field(description="Markdown-formatted learning plan.")
    portfolio_plan: str = Field(description="Markdown-formatted portfolio project plan.")
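# These Pydantic models double as output schemas: each agent below calls
# llm.with_structured_output(<Model>) so the LLM response is parsed and validated
# into typed fields instead of free-form text.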
class TeamState(TypedDict):
    student_interests: str
    student_resume: str
    career_options: List[str]
    chosen_career: str
    market_analysis: Optional[SkillAnalysis]
    resume_analysis: Optional[ResumeFeedback]
    final_plan: Optional[CareerActionPlan]
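# TeamState is the shared LangGraph state. Each node receives the full state dict and
# returns only the keys it updates (e.g. {"market_analysis": ...}); LangGraph merges
# those partial updates into the state before the next node runs.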
@tool
def scrape_web_content(url: str) -> str:
"""Scrapes text content from a URL."""
try:
response = requests.get(url, timeout=10)
soup = BeautifulSoup(response.content, 'html.parser')
return soup.get_text(separator=' ', strip=True)[:10000]
except requests.RequestException:
return "Error: Could not scrape the URL."
llm = ChatOpenAI(model="gpt-4o", temperature=0)
def job_market_analyst_agent(state: TeamState):
print("--- π΅οΈ Agent: Job Market Analyst ---")
structured_llm = llm.with_structured_output(SkillAnalysis)
prompt = ChatPromptTemplate.from_template(
"You are an expert job market analyst. Based on the career of '{career}', identify the top 5 technical skills and top 3 soft skills required."
)
chain = prompt | structured_llm
analysis = chain.invoke({"career": state['chosen_career']})
return {"market_analysis": analysis}
def resume_reviewer_agent(state: TeamState):
print("--- π Agent: Resume Reviewer ---")
structured_llm = llm.with_structured_output(ResumeFeedback)
prompt = ChatPromptTemplate.from_messages([
("system", "You are an expert career coach. Compare the user's resume with the provided analysis of in-demand skills and provide feedback."),
("human", "User's Resume:\n{resume}\n\nRequired Skills Analysis:\n{skill_analysis}")
])
chain = prompt | structured_llm
feedback = chain.invoke({
"resume": state["student_resume"],
"skill_analysis": state["market_analysis"].dict()
})
return {"resume_analysis": feedback}
def lead_agent_node(state: TeamState):
print("--- π Agent: Lead Agent (Synthesizing & Planning) ---")
structured_llm = llm.with_structured_output(CareerActionPlan)
prompt = ChatPromptTemplate.from_template(
"You are the lead career strategist. Synthesize all the provided information into a comprehensive Career Action Plan. "
"Create a detailed 8-week learning roadmap and suggest 3 portfolio projects.\n\n"
"Chosen Career: {career}\n"
"Required Skills: {skills}\n"
"Resume Feedback: {resume_feedback}"
)
chain = prompt | structured_llm
final_plan = chain.invoke({
"career": state["chosen_career"],
"skills": state["market_analysis"].dict(),
"resume_feedback": state["resume_analysis"].dict()
})
return {"final_plan": final_plan}
graph_builder = StateGraph(TeamState)
graph_builder.add_node("analyze_market", job_market_analyst_agent)
graph_builder.add_node("review_resume", resume_reviewer_agent)
graph_builder.add_node("create_final_plan", lead_agent_node)
graph_builder.set_entry_point("analyze_market")
graph_builder.add_edge("analyze_market", "review_resume")
graph_builder.add_edge("review_resume", "create_final_plan")
graph_builder.add_edge("create_final_plan", END)
navigator_agent = graph_builder.compile()
print("--- LangGraph Agent Backend is ready. ---")
# 3. HELPER FUNCTIONS FOR GRADIO
def extract_text_from_pdf(pdf_file_obj):
    if not pdf_file_obj:
        return "", "Please upload a resume to begin."
    try:
        # gr.File may pass a filepath string or a file-like object depending on the Gradio version
        pdf_path = pdf_file_obj if isinstance(pdf_file_obj, str) else pdf_file_obj.name
        reader = PdfReader(pdf_path)
        text = "".join(page.extract_text() or "" for page in reader.pages)
        if not text.strip():
            return "", "Error: Could not extract text from the PDF. Please try a different file."
        return text, ""
    except Exception as e:
        return "", f"An error occurred while reading the PDF: {e}"
def run_agent_and_update_ui(resume_text, chosen_career):
    if not resume_text:
        # This function is a generator, so updates must be yielded rather than returned
        yield {
            output_col: gr.update(visible=True),
            output_overview: gr.update(value="<h3 style='color:red;'>Please upload a resume first.</h3>", visible=True)
        }
        return
    yield {
        output_col: gr.update(visible=True),
        input_row: gr.update(visible=False),
        output_overview: gr.update(value="### The AI agent team is analyzing your profile...", visible=True)
    }
    initial_state = {"student_resume": resume_text, "chosen_career": chosen_career}
    final_state = navigator_agent.invoke(initial_state)
    plan = final_state['final_plan']
    # Final update
    yield {
        output_plan_state: plan,
        output_overview: gr.update(value=f"## 1. Career Overview: {plan.career_overview}"),
        output_skills: gr.update(
            value=f"## 2. Job Market Skill Analysis\n**Top Technical Skills:** {', '.join(plan.skill_analysis.technical_skills)}\n\n**Top Soft Skills:** {', '.join(plan.skill_analysis.soft_skills)}",
            visible=True
        ),
        output_resume_feedback: gr.update(
            value=f"## 3. Your Resume Feedback\n**Strengths:** {', '.join(plan.resume_feedback.strengths)}\n\n**Gaps to Fill:** {', '.join(plan.resume_feedback.gaps)}\n\n**Suggestions:**\n" + "\n".join([f"- {s}" for s in plan.resume_feedback.suggestions]),
            visible=True
        ),
        output_learning_plan: gr.update(value=f"## 4. Your 8-Week Learning Roadmap\n{plan.learning_roadmap}", visible=True),
        output_portfolio_plan: gr.update(value=f"## 5. Your Portfolio Project Plan\n{plan.portfolio_plan}", visible=True),
        chat_row: gr.update(visible=True)
    }
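# This handler is a generator: each yield is a dict keyed by Gradio components, which
# lets the UI show the "analyzing..." message first and then replace it with the final
# plan once navigator_agent.invoke() returns. Every key used here must appear in the
# outputs list of submit_button.click() below.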
def chat_with_agent(message, history, plan_state):
    if not plan_state:
        return "An error occurred. Please generate a new plan."
    prompt = ChatPromptTemplate.from_messages([
        ("system", "You are a helpful career coach. The user has just received the following career action plan. Answer their follow-up questions based ONLY on this plan.\n\n--- CAREER PLAN ---\n{plan_text}"),
        ("user", "{user_question}")
    ])
    chat_chain = prompt | llm
    plan_text = f"Career: {plan_state.career_overview}\nSkills: {plan_state.skill_analysis.model_dump()}\nResume Feedback: {plan_state.resume_feedback.model_dump()}\nLearning Plan: {plan_state.learning_roadmap}\nPortfolio Plan: {plan_state.portfolio_plan}"
    response = chat_chain.invoke({"plan_text": plan_text, "user_question": message})
    return response.content
# 4. GRADIO UI DEFINITION
with gr.Blocks(theme=gr.themes.Soft(), css="footer {visibility: hidden}") as demo:
    output_plan_state = gr.State()
    resume_text_state = gr.State()
    error_text_state = gr.State()
    gr.Markdown("# Your AI Career Navigator")
    gr.Markdown("Upload your resume, select a target career, and get a personalized, data-driven action plan from a team of AI agents.")
    with gr.Row(visible=True) as input_row:
        with gr.Column(scale=2):
            input_pdf_resume = gr.File(label="Upload Your Resume (PDF)", file_types=[".pdf"])
            input_career_choice = gr.Dropdown(
                label="Select Your Target Career",
                choices=["Data Analyst", "Software Engineer", "Product Manager", "UX Designer", "AI/ML Engineer"],
                value="Data Analyst"
            )
        with gr.Column(scale=1, min_width=200):
            submit_button = gr.Button("Generate My Action Plan", variant="primary", scale=2)
    with gr.Column(visible=False) as output_col:
        output_overview = gr.Markdown(visible=False)
        output_skills = gr.Markdown(visible=False)
        output_resume_feedback = gr.Markdown(visible=False)
        output_learning_plan = gr.Markdown(visible=False)
        output_portfolio_plan = gr.Markdown(visible=False)
    with gr.Row(visible=False) as chat_row:
        chat_interface = gr.ChatInterface(
            chat_with_agent,
            chatbot=gr.Chatbot(height=500, label="Chat with your Career Coach"),
            additional_inputs=[output_plan_state],
            title="Ask Follow-up Questions",
            description="Ask any questions about your generated plan."
        )
    # Event Handling Logic
    input_pdf_resume.upload(
        fn=extract_text_from_pdf,
        inputs=[input_pdf_resume],
        outputs=[resume_text_state, error_text_state]
    )
    submit_button.click(
        fn=run_agent_and_update_ui,
        inputs=[resume_text_state, input_career_choice],
        outputs=[
            output_plan_state,
            output_overview,
            output_skills,
            output_resume_feedback,
            output_learning_plan,
            output_portfolio_plan,
            input_row,
            output_col,
            chat_row
        ]
    )
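    # gr.ChatInterface passes (message, history) plus additional_inputs, so chat_with_agent
    # receives the stored plan via output_plan_state and can answer follow-up questions
    # without re-running the graph.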
if __name__ == "__main__":
    demo.launch(debug=True)