# AI-Life-Coach-Streamlit2 / final_comprehensive_test.py
# Author: rdune71 — "Add ngrok monitoring utility and comprehensive system test" (commit 1664e95)
import sys
from pathlib import Path
# Add project root to path
# NOTE: this sys.path tweak must run BEFORE the `core.*` imports below,
# so the unconventional import order here is intentional — do not regroup.
project_root = Path(__file__).parent
sys.path.append(str(project_root))
from core.redis_client import redis_client
from core.session import session_manager
from core.llm import send_to_ollama  # NOTE(review): imported but never used in this script
import requests
import json
import os
def _test_redis_operations():
    """Test 1: verify Redis connectivity and round-trip storage of a complex hash.

    Prints pass/fail markers; never raises (all errors are caught and reported).
    """
    print("1. Testing Redis Connection and Operations...")
    try:
        client = redis_client.get_client()
        if client and client.ping():
            print("βœ… Redis connection successful")
            # Test data storage with complex types. redis-py only accepts
            # bytes/str/int/float as hash values, so booleans and nested
            # structures must be serialized first — a raw `True` here raises
            # redis.exceptions.DataError and would fail this test every time.
            test_data = {
                "name": "comprehensive_test_user",
                "conversation": json.dumps([
                    {"role": "user", "content": "Hello"},
                    {"role": "assistant", "content": "Hi there!"},
                ]),
                "preferences": json.dumps({"theme": "dark", "notifications": True}),
                "score": 95,
                "active": json.dumps(True),  # was a raw bool — rejected by redis-py
                "created_at": "2025-09-08T10:00:00Z",
            }
            key = "test:user:comprehensive"
            client.hset(key, mapping=test_data)
            retrieved = client.hgetall(key)
            # Clean up before asserting so a failed comparison can't leak the key.
            client.delete(key)
            if retrieved and len(retrieved) == len(test_data):
                print("βœ… Redis complex data storage/retrieval working")
            else:
                print("❌ Redis complex data storage/retrieval failed")
        else:
            print("❌ Redis connection failed")
    except Exception as e:
        print(f"❌ Redis test failed: {e}")
    print()


def _test_session_management():
    """Test 2: exercise session create/update/clear through session_manager."""
    print("2. Testing Session Management...")
    try:
        user_id = "comprehensive_test_user"
        # Create-or-fetch; the returned session object itself is not needed here.
        session_manager.get_session(user_id)
        print("βœ… Session creation/retrieval successful")
        # Update session with complex (nested) data.
        conversation_history = [
            {"role": "user", "content": "Hello!"},
            {"role": "assistant", "content": "Hi there! How can I help you?"},
        ]
        update_result = session_manager.update_session(user_id, {
            "conversation": conversation_history,
            "preferences": {"model": "mistral:latest"},
        })
        if update_result:
            print("βœ… Session update with complex data successful")
        else:
            print("❌ Session update failed")
        # Clean up the test session regardless of the update outcome.
        session_manager.clear_session(user_id)
        print("βœ… Session cleanup successful")
    except Exception as e:
        print(f"❌ Session management test failed: {e}")
    print()


def _test_ollama_integration():
    """Test 3: list available models and run one chat completion against Ollama."""
    print("3. Testing Ollama Integration...")
    try:
        # Host/model come from the environment, with ngrok-tunnel defaults.
        ollama_host = os.getenv("OLLAMA_HOST", "https://7bcc180dffd1.ngrok-free.app")
        model_name = os.getenv("LOCAL_MODEL_NAME", "mistral:latest")
        print(f"Using Ollama host: {ollama_host}")
        print(f"Using model: {model_name}")
        # ngrok serves an HTML interstitial to unknown agents; this header skips it.
        headers = {
            "ngrok-skip-browser-warning": "true",
            "User-Agent": "AI-Life-Coach-Test",
        }
        # 3a: model listing.
        print(" a. Testing model listing...")
        response = requests.get(f"{ollama_host}/api/tags", headers=headers, timeout=15)
        if response.status_code == 200:
            models = response.json().get("models", [])
            print(f" βœ… Found {len(models)} models")
        else:
            print(f" ❌ Model listing failed: {response.status_code}")
        # 3b: one non-streaming chat completion (stream=False returns a single
        # JSON object instead of an NDJSON stream).
        print(" b. Testing chat completion...")
        payload = {
            "model": model_name,
            "messages": [
                {"role": "user", "content": "Hello! Please introduce yourself briefly as an AI Life Coach."}
            ],
            "stream": False,
        }
        response = requests.post(
            f"{ollama_host}/api/chat",
            headers=headers,
            json=payload,
            timeout=30,
        )
        if response.status_code == 200:
            content = response.json().get("message", {}).get("content", "")
            print(" βœ… Chat completion successful")
            # Truncate long replies to keep the console output readable.
            print(f" Response: {content[:100]}{'...' if len(content) > 100 else ''}")
        else:
            print(f" ❌ Chat completion failed: {response.status_code}")
    except Exception as e:
        print(f"❌ Ollama integration test failed: {e}")
    print()


def _test_environment_configuration():
    """Test 4: report which relevant environment variables are set."""
    print("4. Testing Environment Configuration...")
    try:
        ollama_host = os.getenv("OLLAMA_HOST")
        local_model = os.getenv("LOCAL_MODEL_NAME")
        use_fallback = os.getenv("USE_FALLBACK")
        if ollama_host:
            print(f"βœ… OLLAMA_HOST configured: {ollama_host}")
        else:
            print("⚠️ OLLAMA_HOST not configured")
        if local_model:
            print(f"βœ… LOCAL_MODEL_NAME configured: {local_model}")
        else:
            print("⚠️ LOCAL_MODEL_NAME not configured")
        # `is not None` (not truthiness) so an explicitly-set empty string
        # still counts as configured.
        if use_fallback is not None:
            print(f"βœ… USE_FALLBACK configured: {use_fallback}")
        else:
            print("⚠️ USE_FALLBACK not configured")
    except Exception as e:
        print(f"❌ Environment configuration test failed: {e}")
    print()


def comprehensive_test():
    """Comprehensive smoke test of all AI Life Coach components.

    Runs four independent sub-tests (Redis, session management, Ollama
    integration, environment configuration), each of which catches its own
    exceptions and prints pass/fail markers, then prints a summary banner.
    Returns None; intended to be run as a script.
    """
    print("=== AI Life Coach Comprehensive Test ===")
    print()
    _test_redis_operations()
    _test_session_management()
    _test_ollama_integration()
    _test_environment_configuration()
    print("πŸŽ‰ Comprehensive test completed!")
    print()
    print("Summary:")
    print("βœ… Redis connection and operations")
    print("βœ… Session management with complex data")
    print("βœ… Ollama integration")
    print("βœ… Environment configuration")
    print()
    print("πŸš€ Your AI Life Coach is fully operational!")
# Script entry point: run the full test suite only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    comprehensive_test()