# NOTE(review): removed extraction artifacts that preceded the code
# (a "File size" banner, a row of git-blame commit hashes, and a
# line-number gutter) — they were not Python and broke the module.
import logging
from core.llm_factory import llm_factory
from core.session import session_manager
logger = logging.getLogger(__name__)
class SimpleCoordinator:
    """Simplified coordinator without HF expert features.

    Every message is handled by the local Ollama provider obtained from
    ``llm_factory``; there is no remote-expert routing.
    """

    def __init__(self):
        pass

    def process_message(self, user_id: str, user_query: str) -> str:
        """Process a message with local Ollama only.

        Args:
            user_id: Key used to look up the caller's session in
                ``session_manager``.
            user_query: The user's message text.

        Returns:
            The assistant's reply, or a canned fallback string on any
            failure — this method never raises.
        """
        try:
            # Build the conversation once: prior turns plus the new user
            # message.  (The previous version copied the session history
            # twice and appended the user turn to each copy; one list is
            # both sent to the provider and persisted, so the two can
            # never drift apart.)
            session = session_manager.get_session(user_id)
            conversation = session.get("conversation", []).copy()
            conversation.append({"role": "user", "content": user_query})

            provider = llm_factory.get_provider('ollama')
            if not provider:
                raise Exception("Ollama provider not available")

            # NOTE(review): assumes provider.generate does not mutate the
            # history list it receives — confirm against the provider impl.
            response = provider.generate(user_query, conversation)

            # Store "" rather than None for an empty assistant reply so
            # the persisted history stays uniformly string-valued.
            conversation.append({"role": "assistant", "content": response or ""})
            session_manager.update_session(user_id, {"conversation": conversation})

            return response or "I'm processing your request..."
        except Exception:
            # Top-level boundary: log with traceback (logger.exception,
            # not logger.error(f"...")) and return a user-facing fallback.
            logger.exception("Message processing failed")
            return "Sorry, I couldn't process your request."
# Module-level singleton: importers share this one coordinator instance.
coordinator = SimpleCoordinator()
# NOTE(review): removed a stray "|" gutter character left by extraction.