"""Smoke-test an Ollama server exposed through an ngrok tunnel.

Runs two checks and prints the results:

1. ``GET /api/tags``  -- list the models the server offers.
2. ``POST /api/chat`` -- a minimal non-streaming chat round-trip.

Configuration is taken from the environment (``OLLAMA_HOST``,
``OLLAMA_MODEL``, optionally via a ``.env`` file) with hardcoded
fallbacks matching the current working ngrok tunnel.
"""

import os

import requests
from dotenv import load_dotenv

# Load environment variables (from .env if present) so the host/model
# can be overridden without editing this script.
load_dotenv()

# Fall back to the current working ngrok URL / default model when no
# environment override is provided.
OLLAMA_HOST = os.getenv("OLLAMA_HOST", "https://7bcc180dffd1.ngrok-free.app")
MODEL_NAME = os.getenv("OLLAMA_MODEL", "mistral:latest")  # Default model

# Headers to skip ngrok's browser-warning interstitial page.
headers = {
    "ngrok-skip-browser-warning": "true",
    "User-Agent": "AI-Life-Coach-Test",
}


def test_list_models() -> None:
    """Test 1: query ``/api/tags`` and print the available models.

    On a 404, probes the server root to distinguish "server down" from
    "server up but the tags endpoint is unavailable". Any exception is
    printed rather than raised — this is a best-effort diagnostic.
    """
    print("Test 1: Listing available models...")
    try:
        response = requests.get(
            f"{OLLAMA_HOST}/api/tags", headers=headers, timeout=15
        )
        print(f"Status Code: {response.status_code}")
        if response.status_code == 200:
            models = response.json().get("models", [])
            print(f"Found {len(models)} models:")
            for model in models:
                print(f"  - {model['name']} ({model.get('size', 'Unknown size')})")
        elif response.status_code == 404:
            print("⚠️ Endpoint not found - checking root endpoint...")
            # Try basic connectivity against the server root.
            response2 = requests.get(f"{OLLAMA_HOST}", headers=headers, timeout=15)
            if response2.status_code == 200:
                print("✓ Server is running but /api/tags endpoint not available")
            else:
                print(f"✗ Server returned: {response2.status_code}")
        else:
            print(f"Error: {response.text}")
    except Exception as e:  # deliberate: report any failure, keep going
        print(f"Connection failed: {e}")


def test_chat() -> None:
    """Test 2: send one non-streaming chat message and print the reply."""
    print("Test 2: Simple chat test...")
    payload = {
        "model": MODEL_NAME,
        "messages": [
            {"role": "user", "content": "Hello! Respond with just 'Hi there!'"}
        ],
        # stream=False -> one JSON document instead of a token stream.
        "stream": False,
    }
    try:
        response = requests.post(
            f"{OLLAMA_HOST}/api/chat", headers=headers, json=payload, timeout=30
        )
        print(f"Status Code: {response.status_code}")
        if response.status_code == 200:
            content = response.json().get("message", {}).get("content", "")
            print(f"Response: {content}")
            print("✅ Chat test successful!")
        else:
            print(f"Error: {response.text}")
    except Exception as e:  # deliberate: report any failure, keep going
        print(f"Chat test failed: {e}")


def main() -> None:
    """Run both connectivity tests and report completion."""
    print(f"Testing Ollama connection to: {OLLAMA_HOST}")
    print(f"Using model: {MODEL_NAME}")
    print()
    test_list_models()
    print()
    test_chat()
    print()
    print("Test completed.")


if __name__ == "__main__":
    main()