|
import sys
from pathlib import Path

# Put the project root on sys.path so the `core` package imports below
# resolve when this script is run directly.
project_root = Path(__file__).resolve().parent
sys.path.append(str(project_root))

from core.redis_client import redis_client
from core.session import session_manager
from core.llm import send_to_ollama  # not called directly; test 3 exercises the HTTP API instead

import requests
import json
import os


def comprehensive_test():
    """Comprehensive test of all AI Life Coach components."""
    print("=== AI Life Coach Comprehensive Test ===")
    print()
    print("1. Testing Redis Connection and Operations...")
    try:
        client = redis_client.get_client()
        if client and client.ping():
            print("✅ Redis connection successful")
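            # Redis hash fields only accept flat scalar values (bytes, str,
            # int, float), so nested structures are JSON-encoded up front.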
            test_data = {
                "name": "comprehensive_test_user",
                "conversation": json.dumps([
                    {"role": "user", "content": "Hello"},
                    {"role": "assistant", "content": "Hi there!"}
                ]),
                "preferences": json.dumps({"theme": "dark", "notifications": True}),
                "score": 95,
                "active": "true",  # stored as a string: redis-py rejects raw booleans
                "created_at": "2025-09-08T10:00:00Z"
            }

            client.hset("test:user:comprehensive", mapping=test_data)
            retrieved = client.hgetall("test:user:comprehensive")
            client.delete("test:user:comprehensive")

            if retrieved and len(retrieved) == len(test_data):
                print("✅ Redis complex data storage/retrieval working")
            else:
                print("❌ Redis complex data storage/retrieval failed")
        else:
            print("❌ Redis connection failed")
    except Exception as e:
        print(f"❌ Redis test failed: {e}")

    print()
    print("2. Testing Session Management...")
    try:
        user_id = "comprehensive_test_user"

        session = session_manager.get_session(user_id)
        print("✅ Session creation/retrieval successful")

        conversation_history = [
            {"role": "user", "content": "Hello!"},
            {"role": "assistant", "content": "Hi there! How can I help you?"}
        ]
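        # update_session takes a plain dict; assuming the session manager
        # handles serializing nested values (lists/dicts) before persisting.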
        update_result = session_manager.update_session(user_id, {
            "conversation": conversation_history,
            "preferences": {"model": "mistral:latest"}
        })

        if update_result:
            print("✅ Session update with complex data successful")
        else:
            print("❌ Session update failed")

        session_manager.clear_session(user_id)
        print("✅ Session cleanup successful")

    except Exception as e:
        print(f"❌ Session management test failed: {e}")

    print()
    print("3. Testing Ollama Integration...")
    try:
        ollama_host = os.getenv("OLLAMA_HOST", "https://7bcc180dffd1.ngrok-free.app")
        model_name = os.getenv("LOCAL_MODEL_NAME", "mistral:latest")

        print(f"Using Ollama host: {ollama_host}")
        print(f"Using model: {model_name}")
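        # ngrok's free tier serves an interstitial browser-warning page;
        # this header bypasses it so the Ollama API returns JSON directly.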
        headers = {
            "ngrok-skip-browser-warning": "true",
            "User-Agent": "AI-Life-Coach-Test"
        }
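        # GET /api/tags lists the models available on the Ollama server.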
        print(" a. Testing model listing...")
        response = requests.get(f"{ollama_host}/api/tags", headers=headers, timeout=15)
        if response.status_code == 200:
            data = response.json()
            models = data.get("models", [])
            print(f" ✅ Found {len(models)} models")
        else:
            print(f" ❌ Model listing failed: {response.status_code}")

        print(" b. Testing chat completion...")
        conversation_history = [
            {"role": "user", "content": "Hello! Please introduce yourself briefly as an AI Life Coach."}
        ]
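        # POST /api/chat takes OpenAI-style messages; with "stream": False,
        # Ollama returns the whole completion as a single JSON object.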
        payload = {
            "model": model_name,
            "messages": conversation_history,
            "stream": False
        }

        response = requests.post(
            f"{ollama_host}/api/chat",
            headers=headers,
            json=payload,
            timeout=30
        )

        if response.status_code == 200:
            data = response.json()
            message = data.get("message", {})
            content = message.get("content", "")
            print(" ✅ Chat completion successful")
            print(f" Response: {content[:100]}{'...' if len(content) > 100 else ''}")
        else:
            print(f" ❌ Chat completion failed: {response.status_code}")

    except Exception as e:
        print(f"❌ Ollama integration test failed: {e}")

    print()
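    # Unlike test 3, these lookups use no fallback defaults: the point is to
    # report whether the environment itself provides each setting.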
    print("4. Testing Environment Configuration...")
    try:
        ollama_host = os.getenv("OLLAMA_HOST")
        local_model = os.getenv("LOCAL_MODEL_NAME")
        use_fallback = os.getenv("USE_FALLBACK")

        if ollama_host:
            print(f"✅ OLLAMA_HOST configured: {ollama_host}")
        else:
            print("⚠️ OLLAMA_HOST not configured")

        if local_model:
            print(f"✅ LOCAL_MODEL_NAME configured: {local_model}")
        else:
            print("⚠️ LOCAL_MODEL_NAME not configured")

        if use_fallback is not None:
            print(f"✅ USE_FALLBACK configured: {use_fallback}")
        else:
            print("⚠️ USE_FALLBACK not configured")

    except Exception as e:
        print(f"❌ Environment configuration test failed: {e}")

    print()
    print("🎉 Comprehensive test completed!")
    print()
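    # Static recap of what the test exercised; not computed from the results above.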
    print("Summary:")
    print("✅ Redis connection and operations")
    print("✅ Session management with complex data")
    print("✅ Ollama integration")
    print("✅ Environment configuration")
    print()
    print("🚀 Your AI Life Coach is fully operational!")


if __name__ == "__main__":
    comprehensive_test()