import logging

from core.llm_factory import llm_factory
from core.session import session_manager

logger = logging.getLogger(__name__)


class SimpleCoordinator:
    """Simplified coordinator without HF expert features."""

    def __init__(self):
        pass

    def process_message(self, user_id: str, user_query: str) -> str:
        """Process a message with the local Ollama provider only."""
        try:
            # Build the prompt history from the stored session, working on a
            # copy so the session is only mutated via update_session below.
            session = session_manager.get_session(user_id)
            conversation = session.get("conversation", []).copy()
            conversation.append({"role": "user", "content": user_query})

            # Resolve the local provider; fail fast if it is unavailable.
            provider = llm_factory.get_provider("ollama")
            if not provider:
                raise RuntimeError("Ollama provider not available")

            # Generate a response against the accumulated history.
            response = provider.generate(user_query, conversation)

            # Persist both turns (user and assistant) back to the session.
            conversation.append({"role": "assistant", "content": response or ""})
            session_manager.update_session(user_id, {"conversation": conversation})

            return response or "I'm processing your request..."
        except Exception as e:
            logger.error(f"Message processing failed: {e}")
            return "Sorry, I couldn't process your request."


# Global instance
coordinator = SimpleCoordinator()
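

if __name__ == "__main__":
    # Minimal usage sketch, not part of the coordinator itself. It assumes
    # the core package is on the import path, llm_factory has a working
    # Ollama provider (i.e., an Ollama server is running locally), and
    # "demo-user" is a hypothetical session id used only for this check.
    reply = coordinator.process_message("demo-user", "Hello, are you there?")
    print(reply)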