import logging
from typing import Dict, List, Optional

from src.llm.factory import llm_factory, ProviderNotAvailableError

logger = logging.getLogger(__name__)
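
# For reference, a minimal sketch of the provider interface this service
# assumes, inferred from the calls below; the real classes live behind
# src.llm.factory and may differ:
#
#     class LLMProvider(Protocol):
#         def generate(self, prompt: str, conversation_history: List[Dict]) -> str: ...
#         def stream_generate(self, prompt: str, conversation_history: List[Dict]) -> Iterator[str]: ...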

class ChatService:
    """Service for handling chat interactions with LLM providers."""

    def __init__(self):
        try:
            self.provider = llm_factory.get_provider()
        except ProviderNotAvailableError:
            # Degrade gracefully at import time; callers get a clear error on
            # first use instead of a crash when no provider is configured.
            self.provider = None
            logger.error("No LLM providers available")
    def generate_response(self, prompt: str, conversation_history: List[Dict]) -> Optional[str]:
        """Generate a complete response using the configured provider."""
        if not self.provider:
            raise ProviderNotAvailableError("No LLM provider available")
        try:
            return self.provider.generate(prompt, conversation_history)
        except Exception as e:
            logger.error(f"LLM generation failed: {e}")
            raise
    def stream_response(self, prompt: str, conversation_history: List[Dict]):
        """Stream a response using the configured provider."""
        if not self.provider:
            raise ProviderNotAvailableError("No LLM provider available")
        try:
            return self.provider.stream_generate(prompt, conversation_history)
        except Exception as e:
            logger.error(f"LLM stream generation failed: {e}")
            raise

# Global chat service instance
chat_service = ChatService()
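
# Usage sketch: assumes at least one provider (e.g. the Hugging Face backend)
# is configured so llm_factory.get_provider() succeeds; the role/content
# message shape below is an assumption, not taken from the provider code.
if __name__ == "__main__":
    history = [{"role": "user", "content": "Hello!"}]
    try:
        reply = chat_service.generate_response("How are you?", history)
        print(reply)
    except ProviderNotAvailableError:
        print("Configure an LLM provider before chatting.")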