# test_setup.py
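
# Usage (assumes an Ollama server is reachable at OLLAMA_HOST):
#   python test_setup.py
# OLLAMA_HOST and LOCAL_MODEL_NAME can be set in the environment or a .env file.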
import requests
import os
from dotenv import load_dotenv
# Read OLLAMA_HOST and LOCAL_MODEL_NAME from a local .env file, if present
load_dotenv()
# Test Ollama
ollama_host = os.getenv("OLLAMA_HOST", "http://localhost:11434")
model_name = os.getenv("LOCAL_MODEL_NAME", "mistral:latest")
print(f"Testing Ollama at: {ollama_host}")
try:
    # /api/tags lists the models installed on the Ollama server
    response = requests.get(f"{ollama_host}/api/tags", timeout=10)
    print(f"Ollama Status: {response.status_code}")
    print(f"Models available: {response.json()}")
except Exception as e:
    print(f"Ollama Error: {e}")
# Test model generation
print(f"\nTesting model: {model_name}")
try:
    # /api/generate runs a single non-streaming completion against the model
    response = requests.post(
        f"{ollama_host}/api/generate",
        json={
            "model": model_name,
            "prompt": "Hello, world!",
            "stream": False,
        },
        timeout=120,  # first call can be slow while the model loads into memory
    )
    print(f"Model Test Status: {response.status_code}")
    if response.status_code == 200:
        print("✅ Ollama and model are working correctly!")
    else:
        print(f"❌ Model test failed: {response.text}")
except Exception as e:
    print(f"Model Test Error: {e}")