import logging

from fastapi import APIRouter, HTTPException
from fastapi.responses import JSONResponse

from core.llm import LLMClient
from core.session import session_manager

router = APIRouter()
logger = logging.getLogger(__name__)

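# One LLM client shared by every request; assumes LLMClient is safe to reuse across calls.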
llm_client = LLMClient()


@router.post("/chat")
async def chat(user_id: str, message: str):
    """
    Handle a chat message: append it to the user's session history, generate a
    reply with the LLM, persist the updated conversation, and return the reply.
    """
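    # Reject empty or whitespace-only messages before touching the session.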
    if not message or not message.strip():
        raise HTTPException(status_code=400, detail="Message is required")

    try:
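        # Load the user's session and its running conversation history.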
        session = session_manager.get_session(user_id)
        conversation_history = session.get("conversation", [])

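        # Record the user's turn before calling the model so the history passed
        # to the LLM includes it.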
        conversation_history.append({
            "role": "user",
            "content": message
        })

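        # Ask the LLM for a reply; if the call fails or returns nothing,
        # fall back to a canned message instead of surfacing a 500.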
        try:
            ai_response = llm_client.generate(
                prompt=message,
                conversation_history=conversation_history
            )

            if not ai_response:
                raise ValueError("Empty response from LLM")

        except Exception as e:
            logger.error(f"LLM generation failed: {e}")
            ai_response = "I'm having trouble processing your request right now. Please try again."

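        # Append the assistant's reply (or the fallback) to the history.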
        conversation_history.append({
            "role": "assistant",
            "content": ai_response
        })

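        # Persist the updated conversation so the next request sees both turns.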
        session_manager.update_session(user_id, {
            "conversation": conversation_history
        })

        logger.info(f"Successfully processed chat for user {user_id}")
        return JSONResponse(
            content={"response": ai_response},
            status_code=200
        )

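    # Anything unexpected (session store failures, etc.) becomes a 500; note that
    # the raw error message is echoed back to the client in the response detail.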
    except Exception as e:
        logger.error(f"Chat processing failed for user {user_id}: {e}")
        raise HTTPException(
            status_code=500,
            detail=f"Failed to process chat: {str(e)}"
        )
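

# Example request: user_id and message are declared as plain scalars, so FastAPI
# treats them as query parameters. Host, port, and mount path are assumptions
# (assumes the router is included without a prefix):
#   curl -X POST "http://localhost:8000/chat?user_id=alice&message=Hello"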