# 🤖 GPT Local - Optimized Docker Container

FROM python:3.11-slim

# Metadata
LABEL maintainer="GPT Local Team"
LABEL description="Local GPT chat system with Hugging Face"
LABEL version="1.0"

# Environment variables
ENV PYTHONPATH=/app
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1
ENV HF_HOME=/app/.cache/huggingface
ENV TRANSFORMERS_CACHE=/app/.cache/huggingface
ENV TORCH_HOME=/app/.cache/torch
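# Note: the three cache variables above keep Hugging Face and PyTorch downloads
# under /app/.cache; mounting that directory as a volume is one way to persist
# model weights across containers (a usage suggestion, not required by the image).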

# Install system dependencies
RUN apt-get update && apt-get install -y \
    curl \
    git \
    build-essential \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# Working directory
WORKDIR /app

# Copy requirements files first (to take advantage of Docker layer caching)
COPY requirements.txt requirements-dev.txt ./

# Install Python dependencies
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir -r requirements.txt
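# requirements-dev.txt is copied above but intentionally not installed here;
# presumably the development dependencies are only needed outside this runtime image.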

# Copy source code
COPY . .

# Create required directories
RUN mkdir -p models models_cache logs .cache/huggingface .cache/torch

# Set permissions
RUN chmod +x *.py && \
    chmod +x *.sh 2>/dev/null || true

# Expose port
EXPOSE 7860

# Healthcheck
HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
    CMD curl -f http://localhost:7860/ || exit 1

# Default command (web interface)
CMD ["python3", "main.py"]