import logging
from typing import List, Dict, Optional

from core.llm_factory import llm_factory, ProviderNotAvailableError

logger = logging.getLogger(__name__)

class LLMClient:
    """High-level LLM client that uses the factory pattern."""

    def __init__(self, provider: Optional[str] = None):
        self.provider_name = provider
        try:
            self.provider = llm_factory.get_provider(provider)
        except ProviderNotAvailableError:
            # Degrade gracefully: callers get a clear error from generate()
            # rather than a failure at construction time.
            self.provider = None
            logger.error("No LLM providers available")

    def generate(self, prompt: str, conversation_history: List[Dict], stream: bool = False):
        """Generate a response, optionally as a stream."""
        if not self.provider:
            raise ProviderNotAvailableError("No LLM provider available")
        try:
            if stream:
                return self.provider.stream_generate(prompt, conversation_history)
            return self.provider.generate(prompt, conversation_history)
        except Exception as e:
            logger.error(f"LLM generation failed: {e}")
            raise
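
# Example (hypothetical): because __init__ degrades gracefully, callers can
# probe provider availability instead of wrapping construction in try/except:
#
#     client = LLMClient()          # factory picks its default provider
#     if client.provider is not None:
#         reply = client.generate("ping", [])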

def send_to_ollama(prompt: str, conversation_history: List[Dict], ollama_url: str, model: str) -> Optional[str]:
    """Legacy function for backward compatibility."""
    try:
        from core.providers.ollama import OllamaProvider

        # Note: ollama_url is accepted for signature compatibility but not
        # forwarded; OllamaProvider is assumed to resolve its own endpoint.
        provider = OllamaProvider(model)
        return provider.generate(prompt, conversation_history)
    except Exception as e:
        logger.error(f"Ollama call failed: {e}")
        return None

def send_to_hf(prompt: str, conversation_history: List[Dict]) -> Optional[str]:
    """Legacy function for backward compatibility."""
    try:
        from utils.config import config  # imported but unused; kept for backward compatibility
        from core.providers.huggingface import HuggingFaceProvider

        provider = HuggingFaceProvider("meta-llama/Llama-2-7b-chat-hf")
        return provider.generate(prompt, conversation_history)
    except Exception as e:
        logger.error(f"Hugging Face call failed: {e}")
        return None
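
# --- Usage sketch (hypothetical) ---
# A minimal demo of driving the client. The provider key "ollama" and the
# {"role": ..., "content": ...} message shape are assumptions about the
# factory and provider interfaces, not guarantees made by this module.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    client = LLMClient(provider="ollama")  # assumed provider key
    history = [{"role": "user", "content": "Hello!"}]  # assumed message shape

    try:
        # Non-streaming: returns the full completion at once.
        print(client.generate("Say hi back.", history))

        # Streaming: stream_generate is assumed to yield text chunks.
        for chunk in client.generate("Say hi back.", history, stream=True):
            print(chunk, end="", flush=True)
    except ProviderNotAvailableError:
        logger.error("Demo skipped: no LLM provider configured")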