import logging
from typing import Dict, List, Optional

from core.llm_factory import llm_factory, ProviderNotAvailableError

logger = logging.getLogger(__name__)


class LLMClient:
    """High-level LLM client that resolves a concrete provider via the factory."""

    def __init__(self, provider: Optional[str] = None):
        self.provider_name = provider
        try:
            self.provider = llm_factory.get_provider(provider)
        except ProviderNotAvailableError:
            self.provider = None
            logger.error("No LLM providers available")

    def generate(self, prompt: str, conversation_history: List[Dict], stream: bool = False):
        """Generate a response to `prompt` given `conversation_history`.

        Returns the provider's full response when `stream` is False, or the
        result of the provider's `stream_generate` when `stream` is True.
        Raises ProviderNotAvailableError if no provider could be resolved.
        """
        if not self.provider:
            raise ProviderNotAvailableError("No LLM provider available")
        try:
            if stream:
                return self.provider.stream_generate(prompt, conversation_history)
            return self.provider.generate(prompt, conversation_history)
        except Exception as e:
            logger.error(f"LLM generation failed: {e}")
            raise
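

# Usage sketch (illustrative only). It assumes a provider key such as
# "ollama" is registered with llm_factory and that stream_generate yields
# text chunks; both are assumptions, not guarantees made by this module.
#
#     client = LLMClient(provider="ollama")
#     reply = client.generate("Hello!", conversation_history=[])
#
#     for chunk in client.generate("Hello!", [], stream=True):
#         print(chunk, end="", flush=True)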


def send_to_ollama(prompt: str, conversation_history: List[Dict], ollama_url: str, model: str) -> Optional[str]:
    """Legacy wrapper kept for backward compatibility.

    Note: `ollama_url` is retained to preserve the old call signature but is
    not used; the provider is constructed from `model` alone.
    """
    try:
        from core.providers.ollama import OllamaProvider

        provider = OllamaProvider(model)
        return provider.generate(prompt, conversation_history)
    except Exception as e:
        logger.error(f"Ollama call failed: {e}")
        return None
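
# Legacy call sketch (arguments are illustrative; the URL is accepted but
# ignored by this wrapper, as noted in the docstring):
#
#     send_to_ollama("Hello!", [], "http://localhost:11434", "llama2")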


def send_to_hf(prompt: str, conversation_history: List[Dict]) -> Optional[str]:
    """Legacy wrapper kept for backward compatibility."""
    try:
        from core.providers.huggingface import HuggingFaceProvider

        provider = HuggingFaceProvider("meta-llama/Llama-2-7b-chat-hf")
        return provider.generate(prompt, conversation_history)
    except Exception as e:
        logger.error(f"Hugging Face call failed: {e}")
        return None
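

if __name__ == "__main__":
    # Minimal smoke test, a sketch that assumes at least one provider is
    # configured in llm_factory; it exits gracefully otherwise.
    logging.basicConfig(level=logging.INFO)
    client = LLMClient()
    try:
        print(client.generate("Say hello in one sentence.", conversation_history=[]))
    except ProviderNotAvailableError:
        print("No LLM provider is configured; skipping demo.")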