---
# Docker Compose stack for the x-ai-assistant service.
# Runs a single container exposing a Gradio UI (7860) and the Ollama API (11434),
# with named volumes for model/data persistence and a dual-endpoint healthcheck.
version: "3.8"

services:
  x-ai-assistant:
    build:
      context: .
      dockerfile: Dockerfile
    container_name: x-ai-assistant
    ports:
      # Quoted to avoid YAML number/sexagesimal parsing traps.
      - "7860:7860"    # Gradio interface
      - "11434:11434"  # Ollama API
    volumes:
      - ollama_data:/root/.ollama  # Ollama model store
      - app_logs:/app/logs
      - app_models:/app/models
      - app_data:/app/data
    environment:
      - OLLAMA_HOST=0.0.0.0:11434
      - OLLAMA_ORIGINS=*
      - PYTHONUNBUFFERED=1
      - GRADIO_SERVER_NAME=0.0.0.0
      - GRADIO_SERVER_PORT=7860
    restart: unless-stopped
    healthcheck:
      # CMD-SHELL is required here: exec-form CMD would pass "&&" as a literal
      # argument to curl instead of chaining the two checks in a shell.
      test: ["CMD-SHELL", "curl -f http://localhost:7860 && curl -f http://localhost:11434/api/tags"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 2m  # allow time for model load before health failures count
    deploy:
      # NOTE(review): resource limits under `deploy` are honored by Swarm and by
      # recent `docker compose` versions; verify against the target runtime.
      resources:
        limits:
          memory: 8G
        reservations:
          memory: 4G

volumes:
  ollama_data:
    driver: local
  app_logs:
    driver: local
  app_models:
    driver: local
  app_data:
    driver: local

networks:
  default:
    driver: bridge