File size: 6,967 Bytes
8780fb9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
"""
FastAPI routes for resume review functionality
"""
from fastapi import APIRouter, UploadFile, File, Form, HTTPException
from fastapi.responses import JSONResponse
from app.embedding import CorrectedResumeJobMatcher
from app.supabase import supabase_service
from app.config import settings
import logging
import os

logger = logging.getLogger(__name__)

router = APIRouter()

@router.post("/match", response_model=dict)
async def match_resume_job(
    file: UploadFile = File(..., description="PDF resume file"),
    job_description: str = Form(..., description="Job description text", min_length=50)
):
    """
    Enhanced resume-job matching using the CorrectedResumeJobMatcher.

    Args:
        file: Uploaded resume file; content type must be in
            ``settings.ALLOWED_FILE_TYPES`` and size under
            ``settings.MAX_FILE_SIZE``.
        job_description: Job description as form data (min 50 chars).

    Returns:
        dict: Comprehensive matching results with detailed analysis.

    Raises:
        HTTPException: 400 for an invalid file type or oversized file,
            500 if the matching pipeline itself fails.
    """
    try:
        # Validate file type against the configured whitelist.
        if file.content_type not in settings.ALLOWED_FILE_TYPES:
            raise HTTPException(
                status_code=400,
                detail="Invalid file type. Only PDF files are allowed."
            )

        # Read the upload once; the bytes are reused for the size check,
        # the matcher, and (below) the stored resume text.
        file_content = await file.read()
        if len(file_content) > settings.MAX_FILE_SIZE:
            raise HTTPException(
                status_code=400,
                detail=f"File size exceeds maximum allowed size of {settings.MAX_FILE_SIZE/1024/1024}MB"
            )

        # Initialize the enhanced matcher; API keys are optional env vars.
        matcher = CorrectedResumeJobMatcher(
            groq_api_key=os.getenv("GROQ_API_KEY"),
            cohere_api_key=os.getenv("COHERE_API_KEY"),
            resume_bert_model=None  # Auto-select best model
        )

        # Perform the matching analysis on the raw PDF bytes.
        result = matcher.match(file_content, job_description)

        # Diagnostic terminal dump only; does not affect the response.
        _print_analysis(result)

        # Best-effort persistence: a storage failure must not fail the
        # request, so it is swallowed with a warning.
        try:
            resume_text = matcher.pdf_extractor.extract_text(file_content)
            feedback = f"Match Score: {result['final_similarity_percentage']:.1f}% - {result['similarity_category']}"
            await supabase_service.insert_resume_review(
                resume_text=resume_text,
                job_description=job_description,
                match_score=result['final_similarity_percentage'],
                feedback=feedback
            )
        except Exception as e:
            logger.warning(f"Failed to store results in database: {str(e)}")
            # Continue without failing the request

        return result

    except HTTPException:
        # Bug fix: propagate the deliberate 4xx validation errors raised
        # above instead of letting the generic handler re-wrap them as 500s.
        raise
    except Exception as e:
        logger.error(f"Resume matching failed: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=f"Failed to process resume matching: {str(e)}"
        )


def _print_analysis(result: dict) -> None:
    """Print a human-readable breakdown of a match result to the terminal."""
    print("\n" + "="*80)
    print("🔍 API REQUEST ANALYSIS RESULTS")
    print("="*80)

    final_score = result["final_similarity_score"]
    final_percentage = result["final_similarity_percentage"]
    category = result["similarity_category"]

    print(f"\n🎯 FINAL MATCH SCORE: {final_score:.4f} ({final_percentage:.2f}%)")
    print(f"📊 CATEGORY: {category}")
    print(f"🔍 CONFIDENCE: {result['confidence']:.3f}")
    print(f"⚠️  ANOMALY: {result['anomaly']}")

    # Component scores
    print(f"\n📊 COMPONENT SCORES:")
    print(f"   • Semantic Similarity: {result['semantic_score']:.3f} ({result['semantic_score']*100:.1f}%)")
    print(f"   • Skills Matching: {result['skills_score']:.3f} ({result['skills_score']*100:.1f}%)")
    print(f"   • Enhanced Skills: {result['enhanced_skills_score']:.3f} ({result['enhanced_skills_score']*100:.1f}%)")
    print(f"   • Resume-BERT: {result['resume_bert_score']:.3f} ({result['resume_bert_score']*100:.1f}%)")
    print(f"   • LLM Assessment: {result['llm_score']:.1f}/100")

    # LLM Details (optional key)
    if result.get('llm_details'):
        llm_details = result['llm_details']
        print(f"\n🧠 LLM ANALYSIS:")
        print(f"   • API Used: {llm_details.get('api_used', 'N/A')}")
        print(f"   • Response Time: {llm_details.get('response_time', 0):.2f}s")
        print(f"   • Compatibility: {llm_details.get('compatibility_score', 0)}/100")

        if llm_details.get('strengths'):
            print(f"   • Key Strengths: {len(llm_details['strengths'])} identified")
        if llm_details.get('gaps'):
            print(f"   • Areas for Improvement: {len(llm_details['gaps'])} identified")
        if llm_details.get('recommendations'):
            print(f"   • Recommendations: {len(llm_details['recommendations'])} provided")

    # Skills Analysis (optional key)
    if result.get('skills_analysis'):
        skills_analysis = result['skills_analysis']
        print(f"\n🔧 SKILLS ANALYSIS:")
        print(f"   • Coverage: {skills_analysis['coverage_percentage']*100:.1f}%")
        print(f"   • Direct Matches: {skills_analysis['direct_match_count']}/{skills_analysis['total_job_skills']}")
        print(f"   • Missing Skills: {len(skills_analysis['missing_skills'])}")
        print(f"   • Critical Skills Missing: {len(skills_analysis['critical_skills_missing'])}")

    # Model Info (optional key)
    if result.get('model_info'):
        model_info = result['model_info']
        print(f"\n🤖 MODEL INFO:")
        print(f"   • Primary Model: {model_info.get('primary_semantic_model', 'N/A')}")
        print(f"   • Resume Model: {model_info.get('resume_specific_model', 'N/A')}")
        print(f"   • Total Models: {model_info.get('total_models_loaded', 0)}")

    print("\n" + "="*80)

@router.get("/reviews")
async def get_recent_reviews():
    """
    Fetch the ten most recent resume reviews from storage.

    Returns:
        dict: ``{"reviews": [...]}`` containing the recent review records.

    Raises:
        HTTPException: 500 if the database query fails.
    """
    try:
        recent = await supabase_service.get_resume_reviews(limit=10)
    except Exception as exc:
        logger.error(f"Error retrieving reviews: {str(exc)}")
        raise HTTPException(
            status_code=500,
            detail="Failed to retrieve reviews"
        )
    return {"reviews": recent}

@router.get("/health")
async def health_check():
    """
    Report the review service's health and active model configuration.

    Returns:
        dict: Static "healthy" status, the configured embedding and LLM
        model names, and whether a Supabase client is available.
    """
    status_report = {
        "status": "healthy",
        "embedding_model": settings.EMBEDDING_MODEL_NAME,
        "llm_model": settings.LLM_MODEL_NAME,
        "supabase_connected": supabase_service.client is not None,
    }
    return status_report