import sys
from pathlib import Path

# Add project root to path
project_root = Path(__file__).parent
sys.path.append(str(project_root))

from core.redis_client import redis_client
from core.session import session_manager
from core.llm import send_to_ollama
import requests
import json
import os

def comprehensive_test():
    """Comprehensive test of all AI Life Coach components"""
    print("=== AI Life Coach Comprehensive Test ===")
    print()
    
    # Test 1: Redis Connection and Operations
    print("1. Testing Redis Connection and Operations...")
    try:
        client = redis_client.get_client()
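        # ping() raises a ConnectionError when the server is unreachable,
        # which the except block below reports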
        if client and client.ping():
            print("βœ… Redis connection successful")
            
            # Test data storage with complex types: Redis hashes hold flat string fields,
            # so nested structures are stored as JSON strings
            test_data = {
                "name": "comprehensive_test_user",
                "conversation": json.dumps([
                    {"role": "user", "content": "Hello"},
                    {"role": "assistant", "content": "Hi there!"}
                ]),
                "preferences": json.dumps({"theme": "dark", "notifications": True}),
                "score": 95,
                "active": True,
                "created_at": "2025-09-08T10:00:00Z"
            }
            
            # Save test data
            result = client.hset("test:user:comprehensive", mapping=test_data)
            
            # Retrieve test data
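            # hgetall returns bytes keys/values unless the client was created with
            # decode_responses=True; the length comparison below works either way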
            retrieved = client.hgetall("test:user:comprehensive")
            
            # Clean up
            client.delete("test:user:comprehensive")
            
            if retrieved and len(retrieved) == len(test_data):
                print("βœ… Redis complex data storage/retrieval working")
            else:
                print("❌ Redis complex data storage/retrieval failed")
        else:
            print("❌ Redis connection failed")
    except Exception as e:
        print(f"❌ Redis test failed: {e}")
    
    print()
    
    # Test 2: Session Management
    print("2. Testing Session Management...")
    try:
        user_id = "comprehensive_test_user"
        
        # Create/get session
        session = session_manager.get_session(user_id)
        print("βœ… Session creation/retrieval successful")
        
        # Update session with complex data
        conversation_history = [
            {"role": "user", "content": "Hello!"},
            {"role": "assistant", "content": "Hi there! How can I help you?"}
        ]
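        # NOTE: assumes session_manager serializes nested values (lists/dicts),
        # e.g. as JSON, before persisting them to Redis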
        
        update_result = session_manager.update_session(user_id, {
            "conversation": conversation_history,
            "preferences": {"model": "mistral:latest"}
        })
        
        if update_result:
            print("βœ… Session update with complex data successful")
        else:
            print("❌ Session update failed")
            
        # Clean up
        session_manager.clear_session(user_id)
        print("βœ… Session cleanup successful")
        
    except Exception as e:
        print(f"❌ Session management test failed: {e}")
    
    print()
    
    # Test 3: Ollama Integration
    print("3. Testing Ollama Integration...")
    try:
        # Get Ollama host and model from the environment; the hard-coded default below
        # is a temporary ngrok tunnel URL and should normally be overridden via OLLAMA_HOST
        ollama_host = os.getenv("OLLAMA_HOST", "https://7bcc180dffd1.ngrok-free.app")
        model_name = os.getenv("LOCAL_MODEL_NAME", "mistral:latest")
        
        print(f"Using Ollama host: {ollama_host}")
        print(f"Using model: {model_name}")
        
        # Headers to skip ngrok browser warning
        headers = {
            "ngrok-skip-browser-warning": "true",
            "User-Agent": "AI-Life-Coach-Test"
        }
        
        # Test 1: List models
        print("  a. Testing model listing...")
        response = requests.get(f"{ollama_host}/api/tags", headers=headers, timeout=15)
        if response.status_code == 200:
            data = response.json()
            models = data.get("models", [])
            print(f"  βœ… Found {len(models)} models")
        else:
            print(f"  ❌ Model listing failed: {response.status_code}")
        
        # Test 2: Chat completion
        print("  b. Testing chat completion...")
        conversation_history = [
            {"role": "user", "content": "Hello! Please introduce yourself briefly as an AI Life Coach."}
        ]
        
        payload = {
            "model": model_name,
            "messages": conversation_history,
            "stream": False
        }
        
        response = requests.post(
            f"{ollama_host}/api/chat",
            headers=headers,
            json=payload,
            timeout=30
        )
        
        if response.status_code == 200:
            data = response.json()
            message = data.get("message", {})
            content = message.get("content", "")
            print(f"  βœ… Chat completion successful")
            print(f"  Response: {content[:100]}{'...' if len(content) > 100 else ''}")
        else:
            print(f"  ❌ Chat completion failed: {response.status_code}")
            
    except Exception as e:
        print(f"❌ Ollama integration test failed: {e}")
    
    print()
    
    # Test 4: Environment Configuration
    print("4. Testing Environment Configuration...")
    try:
        ollama_host = os.getenv("OLLAMA_HOST")
        local_model = os.getenv("LOCAL_MODEL_NAME")
        use_fallback = os.getenv("USE_FALLBACK")
        
        if ollama_host:
            print(f"βœ… OLLAMA_HOST configured: {ollama_host}")
        else:
            print("⚠️  OLLAMA_HOST not configured")
            
        if local_model:
            print(f"βœ… LOCAL_MODEL_NAME configured: {local_model}")
        else:
            print("⚠️  LOCAL_MODEL_NAME not configured")
            
        if use_fallback is not None:
            print(f"βœ… USE_FALLBACK configured: {use_fallback}")
        else:
            print("⚠️  USE_FALLBACK not configured")
            
    except Exception as e:
        print(f"❌ Environment configuration test failed: {e}")
    
    print()
    print("πŸŽ‰ Comprehensive test completed!")
    print()
    print("Summary:")
    print("βœ… Redis connection and operations")
    print("βœ… Session management with complex data")
    print("βœ… Ollama integration")
    print("βœ… Environment configuration")
    print()
    print("πŸš€ Your AI Life Coach is fully operational!")

if __name__ == "__main__":
    comprehensive_test()