Arshad112 committed on
Commit 3786f5d · verified · 1 Parent(s): 73b9b6a

Update app.py

Files changed (1)
  1. app.py +47 -75
app.py CHANGED
@@ -1,88 +1,60 @@
  import streamlit as st
- import pdfplumber
- import spacy
- import openai
  import os
  from dotenv import load_dotenv
- import subprocess
+ from groq import Groq

- # Load environment variables from .env file
+ # Load API Key from .env
  load_dotenv()
+ api_key = os.getenv("GROQ_API_KEY")

- # Set OpenAI API key
- openai.api_key = os.getenv("OPENAI_API_KEY")
+ # Initialize Groq Client
+ client = Groq(api_key=api_key)

- # Load spaCy model with error handling
- try:
-     nlp = spacy.load("en_core_web_sm")
- except OSError:
-     st.warning("Downloading 'en_core_web_sm' model. Please wait...")
-     subprocess.run(["python", "-m", "spacy", "download", "en_core_web_sm"])
-     nlp = spacy.load("en_core_web_sm")
+ # Chatbot System Prompt
+ system_prompt = """
+ You are an AI Resume Analyzer designed to provide feedback on resumes.
+ Your goal is to analyze resumes, identify missing keywords, rate resumes,
+ and suggest better skills to highlight.

- def extract_text_from_pdf(pdf_file):
-     """Extracts text from an uploaded PDF resume."""
-     text = ""
-     try:
-         with pdfplumber.open(pdf_file) as pdf:
-             for page in pdf.pages:
-                 text += page.extract_text() + "\n"
-     except Exception as e:
-         st.error(f"Error extracting text from PDF: {e}")
-     return text.strip()
-
- def extract_keywords(text):
-     """Extracts important keywords from resume text using spaCy."""
-     doc = nlp(text)
-     keywords = set()
-     for token in doc:
-         if token.pos_ in ["NOUN", "PROPN"]: # Extract nouns and proper nouns
-             keywords.add(token.text.lower())
-     return list(keywords)
-
- def analyze_resume(text):
-     """Uses OpenAI GPT to analyze resume and suggest improvements."""
-     prompt = f"""
-     You are a professional resume analyzer. Given the following resume text,
-     provide a rating (out of 10), identify missing keywords based on general job trends,
-     and suggest improvements.
-
-     Resume Text:
-     {text}
-
-     Your response should be in this structured format:
-     - Resume Rating: (score out of 10)
-     - Missing Keywords: (comma-separated list)
-     - Suggestions for Improvement: (bullet points)
-     """
-
-     response = openai.ChatCompletion.create(
-         model="gpt-4",
-         messages=[{"role": "user", "content": prompt}]
-     )
-     return response["choices"][0]["message"]["content"]
+ ### Response Format:
+ 1️⃣ **Resume Rating:** (Score out of 10)
+ 2️⃣ **Missing Keywords:** (Comma-separated list)
+ 3️⃣ **Suggested Skills:** (Bullet points)
+ 4️⃣ **Expert Improvement Tips:** (Clear and actionable advice)
+ """

  # Streamlit UI
- st.title("📝 AI-Powered Resume Analyzer")
- st.write("Upload your resume, and AI will provide feedback to improve it!")
+ st.title("📄 AI-Powered Resume Analyzer 🤖")
+ st.write("Upload your resume, and AI will analyze it for improvements!")

- uploaded_file = st.file_uploader("Upload your Resume (PDF format)", type=["pdf"])
+ # File Uploader for PDF
+ uploaded_file = st.file_uploader("Upload Your Resume (PDF)", type=["pdf"])

  if uploaded_file is not None:
-     with st.spinner("Processing your resume..."):
-         resume_text = extract_text_from_pdf(uploaded_file)
-
-         if resume_text:
-             keywords = extract_keywords(resume_text)
-             analysis_result = analyze_resume(resume_text)
-
-             st.subheader("🔍 Resume Analysis Report")
-             st.write(analysis_result)
-
-             st.subheader("📌 Extracted Keywords")
-             st.write(", ".join(keywords))
-
-             st.success("✅ Resume analyzed successfully!")
-         else:
-             st.error("Could not extract text from the PDF. Please try another file.")
-
+     with st.spinner("Analyzing your resume..."):
+         resume_text = uploaded_file.read().decode("utf-8") # Decode PDF text
+         user_message = f"Resume Content:\n{resume_text}\n\nAnalyze the resume based on the given criteria."
+
+         # Send request to Groq API
+         completion = client.chat.completions.create(
+             model="deepseek-r1-distill-llama-70b",
+             messages=[
+                 {"role": "system", "content": system_prompt},
+                 {"role": "user", "content": user_message},
+             ],
+             temperature=0.6,
+             max_tokens=4096,
+             top_p=0.95,
+             stream=True,
+         )
+
+         # Stream response
+         st.subheader("💡 Resume Analysis")
+         full_response = ""
+         for chunk in completion:
+             chunk_text = chunk.choices[0].delta.content or ""
+             full_response += chunk_text
+             st.write(chunk_text)
+
+         # Display final response
+         st.success("✅ Resume analyzed successfully!")
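
A note on the new upload handling: `uploaded_file.read().decode("utf-8")` decodes the PDF's raw bytes, not its extracted text, so a typical binary PDF will either raise a `UnicodeDecodeError` or feed compressed noise to the model. The sketch below shows how the text-extraction step could be restored while keeping the Groq streaming call; it assumes pdfplumber (which the previous version already used), the helper name `extract_resume_text` is illustrative rather than part of this commit, and the system prompt and page titles are trimmed for brevity.

```python
# Minimal sketch (not the committed code): real PDF text extraction plus
# streaming the Groq response into a single, continuously updated element.
import os

import pdfplumber          # used by the previous version of app.py
import streamlit as st
from dotenv import load_dotenv
from groq import Groq

load_dotenv()
client = Groq(api_key=os.getenv("GROQ_API_KEY"))


def extract_resume_text(uploaded_pdf) -> str:
    """Pull plain text out of the uploaded PDF instead of decoding raw bytes."""
    text = ""
    with pdfplumber.open(uploaded_pdf) as pdf:
        for page in pdf.pages:
            text += (page.extract_text() or "") + "\n"
    return text.strip()


uploaded_file = st.file_uploader("Upload Your Resume (PDF)", type=["pdf"])

if uploaded_file is not None:
    resume_text = extract_resume_text(uploaded_file)
    if not resume_text:
        st.error("Could not extract text from the PDF. Please try another file.")
    else:
        # Same streaming call shape as the commit, minus the system prompt.
        completion = client.chat.completions.create(
            model="deepseek-r1-distill-llama-70b",
            messages=[{"role": "user", "content": f"Resume Content:\n{resume_text}"}],
            stream=True,
        )
        placeholder = st.empty()   # one element, updated in place as chunks arrive
        full_response = ""
        for chunk in completion:
            full_response += chunk.choices[0].delta.content or ""
            placeholder.markdown(full_response)
```

Streaming into a single `st.empty()` placeholder keeps the analysis in one block, whereas calling `st.write()` once per chunk renders each fragment as a separate element.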