AI-Life-Coach-Streamlit2 / services / ollama_monitor.py
import requests
import time
from utils.config import config


def check_ollama_status():
    """
    Check whether Ollama is running and which model is loaded.

    Returns:
        dict: {
            "running": True/False,
            "model_loaded": "mistral-7b" or None,
            "ngrok_url": "https://f943b91f0a0c.ngrok-free.app/",
            "local_url": "http://localhost:11434/"
        }
    """
    # Remote Ollama endpoint (ngrok tunnel) comes from the app config
    ngrok_url = config.ollama_host
    local_url = "http://localhost:11434/"  # Always check localhost as a fallback

    def _get_model_from_url(base_url, retries=3, delay=1):
        """Try to fetch model info, retrying with exponential backoff."""
        for attempt in range(retries):
            try:
                # Headers to skip the ngrok browser-warning interstitial
                headers = {
                    "ngrok-skip-browser-warning": "true",
                    "User-Agent": "AI-Life-Coach-App",
                }
                # rstrip avoids a double slash when base_url ends with "/"
                response = requests.get(
                    f"{base_url.rstrip('/')}/api/tags", timeout=10, headers=headers
                )
                if response.status_code == 200:
                    models = response.json().get("models", [])
                    if models:
                        return models[0].get("name")
                elif response.status_code == 404:
                    # Try the bare base URL; the server may be up without /api/tags
                    response2 = requests.get(base_url, timeout=10, headers=headers)
                    if response2.status_code == 200:
                        return "unknown-model"
            except Exception:
                if attempt < retries - 1:  # Don't sleep after the last attempt
                    time.sleep(delay * (2 ** attempt))  # Exponential backoff
                    continue
        return None

    # Try the remote ngrok URL first, then fall back to local
    remote_model = _get_model_from_url(ngrok_url)
    local_model = None
    if not remote_model:  # Only check local if remote failed
        local_model = _get_model_from_url(local_url)

    model_loaded = remote_model or local_model
    running = bool(model_loaded)

    return {
        "running": running,
        "model_loaded": model_loaded,
        "ngrok_url": ngrok_url,
        "local_url": local_url,
    }
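

# Minimal usage sketch, run as a script for a quick health check.
# Assumption: the Streamlit app consumes check_ollama_status() elsewhere
# (e.g. for a status indicator); this guard only aids manual testing.
if __name__ == "__main__":
    status = check_ollama_status()
    if status["running"]:
        print(f"Ollama reachable, model loaded: {status['model_loaded']}")
    else:
        print(
            f"Ollama unreachable at {status['ngrok_url']} "
            f"and {status['local_url']}"
        )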