File size: 1,437 Bytes
adf8222 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 |
from typing import List, Dict, Optional
from src.llm.factory import llm_factory, ProviderNotAvailableError
import logging
logger = logging.getLogger(__name__)
class ChatService:
    """Service for handling chat interactions with LLM providers.

    A single provider is resolved from ``llm_factory`` at construction
    time; ``self.provider`` is ``None`` when no provider is available,
    and both public methods raise ``ProviderNotAvailableError`` in that
    case.
    """

    def __init__(self):
        # Resolve the provider eagerly so unavailability surfaces (and is
        # logged) at startup rather than on the first request.
        try:
            self.provider = llm_factory.get_provider()
        except ProviderNotAvailableError:
            self.provider = None
            logger.error("No LLM providers available")

    def generate_response(self, prompt: str, conversation_history: List[Dict]) -> Optional[str]:
        """Generate a response using the configured provider.

        Args:
            prompt: The user prompt to send to the LLM.
            conversation_history: Prior conversation turns as dicts
                (schema is provider-defined — not validated here).

        Returns:
            Whatever the provider's ``generate`` returns.

        Raises:
            ProviderNotAvailableError: If no provider was configured.
            Exception: Any provider failure is logged, then re-raised.
        """
        if not self.provider:
            raise ProviderNotAvailableError("No LLM provider available")
        try:
            return self.provider.generate(prompt, conversation_history)
        except Exception:
            # logger.exception captures the traceback; no need to
            # interpolate the exception into the message manually.
            logger.exception("LLM generation failed")
            raise

    def stream_response(self, prompt: str, conversation_history: List[Dict]):
        """Stream a response using the configured provider.

        The provider check happens eagerly at call time (not at first
        iteration), preserving the original behavior for callers that
        expect an immediate ProviderNotAvailableError.

        Returns:
            An iterator over the provider's streamed chunks.

        Raises:
            ProviderNotAvailableError: If no provider was configured.
        """
        if not self.provider:
            raise ProviderNotAvailableError("No LLM provider available")
        # Fix: the original wrapped `return provider.stream_generate(...)`
        # in try/except, which only catches errors raised while *creating*
        # the stream — exceptions thrown mid-iteration escaped unlogged.
        # Delegating to a generator puts the try around consumption too.
        return self._stream_with_logging(prompt, conversation_history)

    def _stream_with_logging(self, prompt: str, conversation_history: List[Dict]):
        """Yield provider chunks, logging (then re-raising) any mid-stream failure."""
        try:
            yield from self.provider.stream_generate(prompt, conversation_history)
        except Exception:
            logger.exception("LLM stream generation failed")
            raise
# Global chat service instance, created at import time.
# NOTE(review): importing this module runs ChatService.__init__, which
# resolves a provider via llm_factory immediately (or logs an error and
# leaves provider as None) — a module-level side effect to be aware of.
chat_service = ChatService()
|