Added the ability to configure custom prompts for the main lifestyle assistant, with methods for setting a custom prompt, resetting to the default, and retrieving the current one. Updated the Gradio interface with a new “Edit Prompts” tab that supports previewing changes, added prompt-customization instructions to the documentation, and updated the lifestyle profile handling to keep custom prompts in sync with the session.
2f80714
# core_classes.py - Core classes for Lifestyle Journey

import os
import json
from datetime import datetime
from dataclasses import dataclass
from typing import List, Dict, Optional

# Import AI client
from ai_client import UniversalAIClient, create_ai_client

from prompts import (
    # Active classifiers
    SYSTEM_PROMPT_ENTRY_CLASSIFIER,
    PROMPT_ENTRY_CLASSIFIER,
    SYSTEM_PROMPT_TRIAGE_EXIT_CLASSIFIER,
    PROMPT_TRIAGE_EXIT_CLASSIFIER,
    # Lifestyle Profile Update
    SYSTEM_PROMPT_LIFESTYLE_PROFILE_UPDATER,
    PROMPT_LIFESTYLE_PROFILE_UPDATE,
    # Main Lifestyle Assistant
    SYSTEM_PROMPT_MAIN_LIFESTYLE,
    PROMPT_MAIN_LIFESTYLE,
    # Soft medical triage
    SYSTEM_PROMPT_SOFT_MEDICAL_TRIAGE,
    PROMPT_SOFT_MEDICAL_TRIAGE,
    # Medical assistant
    SYSTEM_PROMPT_MEDICAL_ASSISTANT,
    PROMPT_MEDICAL_ASSISTANT
)

try:
    from app_config import API_CONFIG
except ImportError:
    API_CONFIG = {"gemini_model": "gemini-2.5-flash", "temperature": 0.3}
@dataclass
class ClinicalBackground:
    patient_id: str
    patient_name: str = ""
    patient_age: str = ""
    active_problems: List[str] = None
    past_medical_history: List[str] = None
    current_medications: List[str] = None
    allergies: str = ""
    vital_signs_and_measurements: List[str] = None
    laboratory_results: List[str] = None
    assessment_and_plan: str = ""
    critical_alerts: List[str] = None
    social_history: Dict = None
    recent_clinical_events: List[str] = None

    def __post_init__(self):
        if self.active_problems is None:
            self.active_problems = []
        if self.past_medical_history is None:
            self.past_medical_history = []
        if self.current_medications is None:
            self.current_medications = []
        if self.vital_signs_and_measurements is None:
            self.vital_signs_and_measurements = []
        if self.laboratory_results is None:
            self.laboratory_results = []
        if self.critical_alerts is None:
            self.critical_alerts = []
        if self.recent_clinical_events is None:
            self.recent_clinical_events = []
        if self.social_history is None:
            self.social_history = {}
@dataclass
class LifestyleProfile:
    patient_name: str
    patient_age: str
    conditions: List[str]
    primary_goal: str
    exercise_preferences: List[str]
    exercise_limitations: List[str]
    dietary_notes: List[str]
    personal_preferences: List[str]
    journey_summary: str
    last_session_summary: str
    next_check_in: str = "not set"
    progress_metrics: Dict[str, str] = None

    def __post_init__(self):
        if self.progress_metrics is None:
            self.progress_metrics = {}
@dataclass
class ChatMessage:
    timestamp: str
    role: str
    message: str
    mode: str
    metadata: Dict = None
@dataclass
class SessionState:
    current_mode: str
    is_active_session: bool
    session_start_time: Optional[str]
    last_controller_decision: Dict
    # New fields for lifecycle management
    lifestyle_session_length: int = 0
    last_triage_summary: str = ""
    entry_classification: Dict = None

    def __post_init__(self):
        if self.entry_classification is None:
            self.entry_classification = {}
class AIClientManager:
    """
    Manager for AI clients that provides backward compatibility with the old GeminiAPI interface
    while supporting multiple AI providers
    """
    def __init__(self):
        self._clients = {}  # Cache for AI clients
        self.call_counter = 0  # Backward compatibility with old GeminiAPI interface

    def get_client(self, agent_name: str) -> UniversalAIClient:
        """Get or create AI client for specific agent"""
        if agent_name not in self._clients:
            self._clients[agent_name] = create_ai_client(agent_name)
        return self._clients[agent_name]

    def generate_response(self, system_prompt: str, user_prompt: str, temperature: float = None,
                          call_type: str = "", agent_name: str = "DefaultAgent") -> str:
        """
        Generate response using appropriate AI client for the agent

        Args:
            system_prompt: System instruction
            user_prompt: User message
            temperature: Optional temperature override
            call_type: Type of call for logging
            agent_name: Name of the agent making the call

        Returns:
            AI-generated response
        """
        self.call_counter += 1  # Track total API calls for backward compatibility
        try:
            client = self.get_client(agent_name)
            response = client.generate_response(system_prompt, user_prompt, temperature, call_type)
            return response
        except Exception as e:
            error_msg = f"AI Client Error: {str(e)}"
            print(f"❌ {error_msg}")
            return error_msg

    def get_client_info(self, agent_name: str) -> Dict:
        """Get information about the client configuration for an agent"""
        try:
            client = self.get_client(agent_name)
            return client.get_client_info()
        except Exception as e:
            return {"error": str(e), "agent_name": agent_name}

    def get_all_clients_info(self) -> Dict:
        """Get information about all active clients"""
        info = {
            "total_calls": self.call_counter,
            "active_clients": len(self._clients),
            "clients": {}
        }
        for agent_name, client in self._clients.items():
            try:
                client_info = client.get_client_info()
                info["clients"][agent_name] = {
                    "provider": client_info.get("active_provider", "unknown"),
                    "model": client_info.get("active_model", "unknown"),
                    "using_fallback": client_info.get("using_fallback", False),
                    "calls": getattr(client.client or client.fallback_client, "call_counter", 0)
                }
            except Exception as e:
                info["clients"][agent_name] = {"error": str(e)}
        return info


# Backward compatibility alias
GeminiAPI = AIClientManager
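
# Illustrative usage (a minimal sketch, not part of the module's public flow): each agent
# name is routed to its own UniversalAIClient, created lazily on first use via
# create_ai_client(agent_name). Assumes provider credentials are configured for ai_client.
#
#   api = AIClientManager()
#   reply = api.generate_response(
#       system_prompt="You are a triage assistant.",
#       user_prompt="I feel dizzy after my morning walk.",
#       call_type="SOFT_MEDICAL_TRIAGE",
#       agent_name="SoftMedicalTriage",
#   )
#   print(api.get_all_clients_info())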
class PatientDataLoader:
    """Class for loading patient data from JSON files"""

    @staticmethod
    def load_clinical_background(file_path: str = "clinical_background.json") -> ClinicalBackground:
        """Loads clinical background from JSON file"""
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                data = json.load(f)
            patient_summary = data.get("patient_summary", {})
            vital_signs = data.get("vital_signs_and_measurements", [])
            return ClinicalBackground(
                patient_id="patient_001",
                patient_name="Serhii",
                patient_age="adult",
                active_problems=patient_summary.get("active_problems", []),
                past_medical_history=patient_summary.get("past_medical_history", []),
                current_medications=patient_summary.get("current_medications", []),
                allergies=patient_summary.get("allergies", ""),
                vital_signs_and_measurements=vital_signs,
                laboratory_results=data.get("laboratory_results", []),
                assessment_and_plan=data.get("assessment_and_plan", ""),
                critical_alerts=data.get("critical_alerts", []),
                social_history=data.get("social_history", {}),
                recent_clinical_events=data.get("recent_clinical_events_and_encounters", [])
            )
        except FileNotFoundError:
            print(f"⚠️ File {file_path} not found. Using fallback test data.")
            return PatientDataLoader._get_default_clinical_background()
        except Exception as e:
            print(f"⚠️ Error loading {file_path}: {e}")
            return PatientDataLoader._get_default_clinical_background()

    @staticmethod
    def load_lifestyle_profile(file_path: str = "lifestyle_profile.json") -> LifestyleProfile:
        """Loads lifestyle profile from JSON file"""
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                data = json.load(f)
            return LifestyleProfile(
                patient_name=data.get("patient_name", "Patient"),
                patient_age=data.get("patient_age", "unknown"),
                conditions=data.get("conditions", []),
                primary_goal=data.get("primary_goal", ""),
                exercise_preferences=data.get("exercise_preferences", []),
                exercise_limitations=data.get("exercise_limitations", []),
                dietary_notes=data.get("dietary_notes", []),
                personal_preferences=data.get("personal_preferences", []),
                journey_summary=data.get("journey_summary", ""),
                last_session_summary=data.get("last_session_summary", ""),
                next_check_in=data.get("next_check_in", "not set"),
                progress_metrics=data.get("progress_metrics", {})
            )
        except FileNotFoundError:
            print(f"⚠️ File {file_path} not found. Using fallback test data.")
            return PatientDataLoader._get_default_lifestyle_profile()
        except Exception as e:
            print(f"⚠️ Error loading {file_path}: {e}")
            return PatientDataLoader._get_default_lifestyle_profile()

    @staticmethod
    def _get_default_clinical_background() -> ClinicalBackground:
        """Fallback data for clinical background"""
        return ClinicalBackground(
            patient_id="test_001",
            patient_name="Test patient",
            active_problems=["Chronic heart failure", "Arterial hypertension"],
            current_medications=["Enalapril 10 mg", "Metformin 500 mg"],
            allergies="Penicillin",
            vital_signs_and_measurements=["BP: 140/90", "HR: 72"]
        )

    @staticmethod
    def _get_default_lifestyle_profile() -> LifestyleProfile:
        """Fallback data for lifestyle profile"""
        return LifestyleProfile(
            patient_name="Test patient",
            patient_age="52",
            conditions=["hypertension"],
            primary_goal="Improve overall health",
            exercise_preferences=["walking"],
            exercise_limitations=["avoid high-intensity exertion"],
            dietary_notes=["low-salt diet"],
            personal_preferences=["gradual changes"],
            journey_summary="Start of the lifestyle journey",
            last_session_summary=""
        )
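
# Illustrative shape of clinical_background.json, inferred from the keys read above
# (all values here are hypothetical placeholders):
#
# {
#     "patient_summary": {
#         "active_problems": ["..."],
#         "past_medical_history": ["..."],
#         "current_medications": ["..."],
#         "allergies": "..."
#     },
#     "vital_signs_and_measurements": ["..."],
#     "laboratory_results": ["..."],
#     "assessment_and_plan": "...",
#     "critical_alerts": ["..."],
#     "social_history": {},
#     "recent_clinical_events_and_encounters": ["..."]
# }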
# ===== NEW CLASSIFIERS =====
class EntryClassifier:
    """Classifies the patient's message at the start of the interaction using the new K/V/T format"""
    def __init__(self, api: GeminiAPI):
        self.api = api

    def classify(self, user_message: str, clinical_background: ClinicalBackground) -> Dict:
        """Classifies the message and returns the K/V/T format"""
        system_prompt = SYSTEM_PROMPT_ENTRY_CLASSIFIER
        user_prompt = PROMPT_ENTRY_CLASSIFIER(clinical_background, user_message)
        response = self.api.generate_response(
            system_prompt, user_prompt,
            temperature=0.1,
            call_type="ENTRY_CLASSIFIER",
            agent_name="EntryClassifier"
        )
        try:
            clean_response = response.replace("```json", "").replace("```", "").strip()
            classification = json.loads(clean_response)
            # Validate the K/V/T format
            if not all(key in classification for key in ["K", "V", "T"]):
                raise ValueError("Missing K/V/T keys")
            if classification["V"] not in ["on", "off", "hybrid"]:
                classification["V"] = "off"  # fallback
            return classification
        except Exception:
            return {
                "K": "Lifestyle Mode",
                "V": "off",
                "T": datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")
            }
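
    # Expected K/V/T payload (illustrative, based on the validation and fallback above;
    # the exact semantics of each key are defined in prompts.SYSTEM_PROMPT_ENTRY_CLASSIFIER):
    #   K - classified mode/category label, e.g. "Lifestyle Mode"
    #   V - one of "on", "off", "hybrid"
    #   T - ISO-8601 timestamp, e.g. "2024-01-01T12:00:00Z"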
class TriageExitClassifier:
    """Assesses the patient's readiness for lifestyle mode after medical triage"""
    def __init__(self, api: GeminiAPI):
        self.api = api

    def assess_readiness(self, clinical_background: ClinicalBackground,
                         triage_summary: str, user_message: str) -> Dict:
        """Assesses whether the patient is ready for lifestyle mode"""
        system_prompt = SYSTEM_PROMPT_TRIAGE_EXIT_CLASSIFIER
        user_prompt = PROMPT_TRIAGE_EXIT_CLASSIFIER(clinical_background, triage_summary, user_message)
        response = self.api.generate_response(
            system_prompt, user_prompt,
            temperature=0.1,
            call_type="TRIAGE_EXIT_CLASSIFIER",
            agent_name="TriageExitClassifier"
        )
        try:
            clean_response = response.replace("```json", "").replace("```", "").strip()
            assessment = json.loads(clean_response)
            return assessment
        except Exception:
            return {
                "ready_for_lifestyle": False,
                "reasoning": "Parsing error - staying in medical mode for safety",
                "medical_status": "needs_attention"
            }
# LifestyleExitClassifier removed - functionality moved to MainLifestyleAssistant

# ===== DEPRECATED: Old controller (replaced by Entry Classifier + new logic) =====
class SoftMedicalTriage:
    """Gentle medical triage at the start of the interaction"""
    def __init__(self, api: GeminiAPI):
        self.api = api

    def conduct_triage(self, user_message: str, clinical_background: ClinicalBackground,
                       chat_history: List[ChatMessage] = None) -> str:
        """Conducts gentle medical triage TAKING THE CONVERSATION CONTEXT INTO ACCOUNT"""
        system_prompt = SYSTEM_PROMPT_SOFT_MEDICAL_TRIAGE
        # Add conversation history
        history_text = ""
        if chat_history and len(chat_history) > 1:  # If there are previous messages
            recent_history = chat_history[-4:]  # Last 4 messages
            history_text = "\n".join([f"{msg.role}: {msg.message}" for msg in recent_history[:-1]])  # Exclude the current message
        user_prompt = PROMPT_SOFT_MEDICAL_TRIAGE_WITH_CONTEXT(
            clinical_background, user_message, history_text
        )
        return self.api.generate_response(
            system_prompt, user_prompt,
            temperature=0.3,
            call_type="SOFT_MEDICAL_TRIAGE",
            agent_name="SoftMedicalTriage"
        )


def PROMPT_SOFT_MEDICAL_TRIAGE_WITH_CONTEXT(clinical_background, user_message, history_text):
    context_section = ""
    if history_text.strip():
        context_section = f"""
CONVERSATION HISTORY:
{history_text}
"""
    return f"""PATIENT: {clinical_background.patient_name}
MEDICAL CONTEXT:
- Active problems: {"; ".join(clinical_background.active_problems[:3]) if clinical_background.active_problems else "none"}
- Critical alerts: {"; ".join(clinical_background.critical_alerts) if clinical_background.critical_alerts else "none"}
{context_section}PATIENT'S CURRENT MESSAGE: "{user_message}"
ANALYSIS REQUIRED:
Conduct gentle medical triage considering the conversation context. If this is a continuation of an existing conversation, acknowledge it naturally without re-introducing yourself."""
class MedicalAssistant:
    def __init__(self, api: GeminiAPI):
        self.api = api

    def generate_response(self, user_message: str, chat_history: List[ChatMessage],
                          clinical_background: ClinicalBackground) -> str:
        """Generates a medical response"""
        system_prompt = SYSTEM_PROMPT_MEDICAL_ASSISTANT
        active_problems = "; ".join(clinical_background.active_problems[:5]) if clinical_background.active_problems else "not specified"
        medications = "; ".join(clinical_background.current_medications[:8]) if clinical_background.current_medications else "not specified"
        recent_vitals = "; ".join(clinical_background.vital_signs_and_measurements[-3:]) if clinical_background.vital_signs_and_measurements else "not specified"
        history_text = "\n".join([f"{msg.role}: {msg.message}" for msg in chat_history[-3:]])
        user_prompt = PROMPT_MEDICAL_ASSISTANT(clinical_background, active_problems, medications, recent_vitals, history_text, user_message)
        return self.api.generate_response(
            system_prompt, user_prompt,
            call_type="MEDICAL_ASSISTANT",
            agent_name="MedicalAssistant"
        )
class LifestyleSessionManager:
    """Manages lifestyle session lifecycle and intelligent profile updates with LLM analysis"""
    def __init__(self, api: GeminiAPI):
        self.api = api

    def update_profile_after_session(self, lifestyle_profile: LifestyleProfile,
                                     chat_history: List[ChatMessage],
                                     session_context: str = "",
                                     save_to_disk: bool = True) -> LifestyleProfile:
        """Intelligently updates lifestyle profile using LLM analysis and saves to disk"""
        # Get lifestyle messages from current session
        lifestyle_messages = [msg for msg in chat_history if msg.mode == "lifestyle"]
        if not lifestyle_messages:
            print("⚠️ No lifestyle messages found in session - skipping profile update")
            return lifestyle_profile
        print(f"🔄 Analyzing lifestyle session with {len(lifestyle_messages)} messages...")
        try:
            # Prepare session data for LLM analysis
            session_data = []
            for msg in lifestyle_messages:
                session_data.append({
                    'role': msg.role,
                    'message': msg.message,
                    'timestamp': msg.timestamp
                })
            # Use LLM to analyze session and generate profile updates
            system_prompt = SYSTEM_PROMPT_LIFESTYLE_PROFILE_UPDATER
            user_prompt = PROMPT_LIFESTYLE_PROFILE_UPDATE(lifestyle_profile, session_data, session_context)
            response = self.api.generate_response(
                system_prompt, user_prompt,
                temperature=0.2,
                call_type="LIFESTYLE_PROFILE_UPDATE",
                agent_name="LifestyleProfileUpdater"
            )
            # Parse LLM response
            clean_response = response.replace("```json", "").replace("```", "").strip()
            analysis = json.loads(clean_response)
            # Create updated profile based on LLM analysis
            updated_profile = self._apply_llm_updates(lifestyle_profile, analysis)
            # Save to disk if requested
            if save_to_disk:
                self._save_profile_to_disk(updated_profile)
            print(f"✅ Profile updated and saved for {updated_profile.patient_name}")
            return updated_profile
        except Exception as e:
            print(f"❌ Error in LLM profile update: {e}")
            # Fallback to simple update
            return self._simple_profile_update(lifestyle_profile, lifestyle_messages, session_context)

    def _apply_llm_updates(self, original_profile: LifestyleProfile, analysis: Dict) -> LifestyleProfile:
        """Apply LLM analysis results to create updated profile"""
        # Create copy of original profile
        updated_profile = LifestyleProfile(
            patient_name=original_profile.patient_name,
            patient_age=original_profile.patient_age,
            conditions=original_profile.conditions.copy(),
            primary_goal=original_profile.primary_goal,
            exercise_preferences=original_profile.exercise_preferences.copy(),
            exercise_limitations=original_profile.exercise_limitations.copy(),
            dietary_notes=original_profile.dietary_notes.copy(),
            personal_preferences=original_profile.personal_preferences.copy(),
            journey_summary=original_profile.journey_summary,
            last_session_summary=original_profile.last_session_summary,
            next_check_in=original_profile.next_check_in,
            progress_metrics=original_profile.progress_metrics.copy()
        )
        if not analysis.get("updates_needed", False):
            print("ℹ️ LLM determined no profile updates needed")
            return updated_profile
        # Apply updates from LLM analysis
        updated_fields = analysis.get("updated_fields", {})
        if "exercise_preferences" in updated_fields:
            updated_profile.exercise_preferences = updated_fields["exercise_preferences"]
        if "exercise_limitations" in updated_fields:
            updated_profile.exercise_limitations = updated_fields["exercise_limitations"]
        if "dietary_notes" in updated_fields:
            updated_profile.dietary_notes = updated_fields["dietary_notes"]
        if "personal_preferences" in updated_fields:
            updated_profile.personal_preferences = updated_fields["personal_preferences"]
        if "primary_goal" in updated_fields:
            updated_profile.primary_goal = updated_fields["primary_goal"]
        if "progress_metrics" in updated_fields:
            # Merge new metrics with existing ones
            updated_profile.progress_metrics.update(updated_fields["progress_metrics"])
        if "session_summary" in updated_fields:
            session_date = datetime.now().strftime('%d.%m.%Y')
            updated_profile.last_session_summary = f"[{session_date}] {updated_fields['session_summary']}"
        if "next_check_in" in updated_fields:
            updated_profile.next_check_in = updated_fields["next_check_in"]
            print(f"📅 Next check-in scheduled: {updated_fields['next_check_in']}")
        # Log the rationale if provided
        rationale = analysis.get("next_session_rationale", "")
        if rationale:
            print(f"💭 Rationale: {rationale}")
        # Update journey summary with session insights
        session_date = datetime.now().strftime('%d.%m.%Y')
        insights = analysis.get("session_insights", "Session completed")
        new_entry = f" | {session_date}: {insights[:100]}..."
        # Prevent journey_summary from growing too long
        if len(updated_profile.journey_summary) > 800:
            updated_profile.journey_summary = "..." + updated_profile.journey_summary[-600:]
        updated_profile.journey_summary += new_entry
        print(f"✅ Applied LLM updates: {analysis.get('reasoning', 'Profile updated')}")
        return updated_profile
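
    # Illustrative shape of the LLM "analysis" payload consumed by _apply_llm_updates above
    # (keys inferred from the lookups in that method; all values are hypothetical):
    #
    # {
    #     "updates_needed": true,
    #     "reasoning": "Patient reported a new walking routine",
    #     "session_insights": "Walking 20 minutes daily, tolerating it well",
    #     "next_session_rationale": "Review progress after two weeks",
    #     "updated_fields": {
    #         "exercise_preferences": ["walking"],
    #         "exercise_limitations": ["avoid high-intensity exertion"],
    #         "dietary_notes": ["low-salt diet"],
    #         "personal_preferences": ["gradual changes"],
    #         "primary_goal": "Improve overall health",
    #         "progress_metrics": {"daily_steps": "5000"},
    #         "session_summary": "Discussed walking routine and salt intake",
    #         "next_check_in": "2 weeks"
    #     }
    # }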
    def _simple_profile_update(self, lifestyle_profile: LifestyleProfile,
                               lifestyle_messages: List[ChatMessage],
                               session_context: str) -> LifestyleProfile:
        """Fallback simple profile update without LLM"""
        updated_profile = LifestyleProfile(
            patient_name=lifestyle_profile.patient_name,
            patient_age=lifestyle_profile.patient_age,
            conditions=lifestyle_profile.conditions.copy(),
            primary_goal=lifestyle_profile.primary_goal,
            exercise_preferences=lifestyle_profile.exercise_preferences.copy(),
            exercise_limitations=lifestyle_profile.exercise_limitations.copy(),
            dietary_notes=lifestyle_profile.dietary_notes.copy(),
            personal_preferences=lifestyle_profile.personal_preferences.copy(),
            journey_summary=lifestyle_profile.journey_summary,
            last_session_summary=lifestyle_profile.last_session_summary,
            next_check_in=lifestyle_profile.next_check_in,
            progress_metrics=lifestyle_profile.progress_metrics.copy()
        )
        # Simple session summary
        session_date = datetime.now().strftime('%d.%m.%Y')
        user_messages = [msg.message for msg in lifestyle_messages if msg.role == "user"]
        if user_messages:
            key_topics = []
            for msg in user_messages[:3]:
                if len(msg) > 20:
                    key_topics.append(msg[:60] + "..." if len(msg) > 60 else msg)
            session_summary = f"[{session_date}] Discussed: {'; '.join(key_topics)}"
            updated_profile.last_session_summary = session_summary
        new_entry = f" | {session_date}: {len(lifestyle_messages)} messages"
        if len(updated_profile.journey_summary) > 800:
            updated_profile.journey_summary = "..." + updated_profile.journey_summary[-600:]
        updated_profile.journey_summary += new_entry
        print("✅ Applied simple profile update (LLM fallback)")
        return updated_profile

    def _save_profile_to_disk(self, profile: LifestyleProfile,
                              file_path: str = "lifestyle_profile.json") -> bool:
        """Save updated lifestyle profile to disk"""
        try:
            profile_data = {
                "patient_name": profile.patient_name,
                "patient_age": profile.patient_age,
                "conditions": profile.conditions,
                "primary_goal": profile.primary_goal,
                "exercise_preferences": profile.exercise_preferences,
                "exercise_limitations": profile.exercise_limitations,
                "dietary_notes": profile.dietary_notes,
                "personal_preferences": profile.personal_preferences,
                "journey_summary": profile.journey_summary,
                "last_session_summary": profile.last_session_summary,
                "next_check_in": profile.next_check_in,
                "progress_metrics": profile.progress_metrics
            }
            # Create backup of current file
            import shutil
            if os.path.exists(file_path):
                backup_path = f"{file_path}.backup"
                shutil.copy2(file_path, backup_path)
            # Save updated profile
            with open(file_path, 'w', encoding='utf-8') as f:
                json.dump(profile_data, f, indent=4, ensure_ascii=False)
            print(f"💾 Profile saved to {file_path}")
            return True
        except Exception as e:
            print(f"❌ Error saving profile to disk: {e}")
            return False
class MainLifestyleAssistant:
    """New smart lifestyle assistant with 3 actions: gather_info, lifestyle_dialog, close"""
    def __init__(self, api: GeminiAPI):
        self.api = api
        self.custom_system_prompt = None  # NEW
        self.default_system_prompt = SYSTEM_PROMPT_MAIN_LIFESTYLE  # NEW

    def process_message(self, user_message: str, chat_history: List[ChatMessage],
                        clinical_background: ClinicalBackground, lifestyle_profile: LifestyleProfile,
                        session_length: int) -> Dict:
        """Processes the message and returns an action + response"""
        system_prompt = self.get_current_system_prompt()
        history_text = "\n".join([f"{msg.role}: {msg.message}" for msg in chat_history[-5:]])
        user_prompt = PROMPT_MAIN_LIFESTYLE(
            lifestyle_profile, clinical_background, session_length, history_text, user_message
        )
        response = self.api.generate_response(
            system_prompt, user_prompt,
            temperature=0.2,
            call_type="MAIN_LIFESTYLE",
            agent_name="MainLifestyleAssistant"
        )
        try:
            clean_response = response.replace("```json", "").replace("```", "").strip()
            result = json.loads(clean_response)
            # Validate the action
            valid_actions = ["gather_info", "lifestyle_dialog", "close"]
            if result.get("action") not in valid_actions:
                result["action"] = "lifestyle_dialog"  # fallback
            return result
        except Exception:
            return {
                "message": "Sorry, a technical error occurred. How are you feeling?",
                "action": "gather_info",
                "reasoning": "Parsing error - switching to information gathering"
            }
    def set_custom_system_prompt(self, custom_prompt: str):
        """Set custom system prompt for this session"""
        self.custom_system_prompt = custom_prompt.strip() if custom_prompt and custom_prompt.strip() else None

    def reset_to_default_prompt(self):
        """Reset to default system prompt"""
        self.custom_system_prompt = None

    def get_current_system_prompt(self) -> str:
        """Get current system prompt (custom or default)"""
        if self.custom_system_prompt:
            return self.custom_system_prompt
        return self.default_system_prompt
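
# A minimal usage sketch for the prompt-customization methods described in this commit
# (illustrative only; assumes ai_client and prompts import cleanly, and no provider call
# is made because AIClientManager creates clients lazily):
#
#   assistant = MainLifestyleAssistant(AIClientManager())
#   assistant.get_current_system_prompt()   # -> SYSTEM_PROMPT_MAIN_LIFESTYLE (default)
#   assistant.set_custom_system_prompt("You are a cheerful lifestyle coach. Keep answers short.")
#   assistant.get_current_system_prompt()   # -> the custom prompt, now used by process_message
#   assistant.reset_to_default_prompt()     # -> back to the default prompt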
# ===== DEPRECATED: Old lifestyle assistant (replaced by MainLifestyleAssistant) =====