File size: 2,242 Bytes
5420da2
11ba014
737aa03
11ba014
0194326
 
11ba014
 
0194326
11ba014
737aa03
 
11ba014
 
 
737aa03
 
 
 
11ba014
737aa03
11ba014
737aa03
0194326
 
 
 
737aa03
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0194326
737aa03
 
 
 
 
11ba014
737aa03
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
import json
from fastapi import APIRouter, HTTPException
from fastapi.responses import JSONResponse
from core.llm import LLMClient
from core.session import session_manager
import logging

# Router for the chat endpoint(s) below; mounted by the application elsewhere.
router = APIRouter()
# Module-level logger named after this module, per stdlib logging convention.
logger = logging.getLogger(__name__)

# Single shared LLM client reused across all requests in this module.
# NOTE(review): "fallback support" here refers to behavior inside LLMClient —
# the only fallback visible in this file is the canned reply in chat(); confirm.
llm_client = LLMClient()

@router.post("/chat")
async def chat(user_id: str, message: str):
    """
    Handle a chat request for *user_id*.

    Loads the user's session, appends the incoming message to its
    conversation history, generates an AI reply (falling back to a canned
    reply if the LLM fails), persists the updated history, and returns
    the reply as JSON.

    Args:
        user_id: Identifier of the chatting user; must be non-blank.
        message: The user's chat message; must be non-blank.

    Returns:
        JSONResponse with ``{"response": <ai_response>}`` and status 200.

    Raises:
        HTTPException: 400 if ``user_id`` or ``message`` is missing/blank;
            500 if session handling fails unexpectedly.
    """
    # Guard clauses: validate both inputs up front. The original checked
    # only `message`; a blank user_id would have silently created/used a
    # degenerate session.
    if not user_id or not user_id.strip():
        raise HTTPException(status_code=400, detail="user_id is required")
    if not message or not message.strip():
        raise HTTPException(status_code=400, detail="Message is required")

    try:
        # Fetch (or lazily create — depends on session_manager semantics,
        # TODO confirm) the per-user session and its history list.
        session = session_manager.get_session(user_id)
        conversation_history = session.get("conversation", [])

        conversation_history.append({
            "role": "user",
            "content": message,
        })

        # Generate the AI response; degrade gracefully on LLM failure so a
        # transient provider outage does not surface to the client as a 500.
        try:
            ai_response = llm_client.generate(
                prompt=message,
                conversation_history=conversation_history,
            )
            if not ai_response:
                # Narrow exception type (was bare Exception); caught just below.
                raise ValueError("Empty response from LLM")
        except Exception:
            # logger.exception records the traceback (logger.error with an
            # f-string did not); lazy %-args avoid eager formatting.
            logger.exception("LLM generation failed for user %s", user_id)
            ai_response = "I'm having trouble processing your request right now. Please try again."

        conversation_history.append({
            "role": "assistant",
            "content": ai_response,
        })

        # Persist the updated history back into the session store.
        session_manager.update_session(user_id, {
            "conversation": conversation_history,
        })

        logger.info("Successfully processed chat for user %s", user_id)
        return JSONResponse(
            content={"response": ai_response},
            status_code=200,
        )

    except HTTPException:
        # Let deliberate HTTP errors propagate untouched rather than being
        # re-wrapped as 500s by the handler below.
        raise
    except Exception as e:
        logger.exception("Chat processing failed for user %s", user_id)
        # Security fix: do not echo internal exception text to the client;
        # details stay in the server log only.
        raise HTTPException(
            status_code=500,
            detail="Failed to process chat",
        ) from e