# 🤖 GPT Local - Optimized Docker Container

FROM python:3.11-slim

# Metadata
LABEL maintainer="GPT Local Team"
LABEL description="Local GPT chat system with Hugging Face"
LABEL version="1.0"

# Configure environment variables
ENV PYTHONPATH=/app
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1
ENV HF_HOME=/app/.cache/huggingface
ENV TRANSFORMERS_CACHE=/app/.cache/huggingface
ENV TORCH_HOME=/app/.cache/torch
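# Note: newer transformers releases prefer HF_HOME and treat TRANSFORMERS_CACHE as
# deprecated; both are set here so old and new versions resolve the same cache path.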

# Install system dependencies
RUN apt-get update && apt-get install -y \
    curl \
    git \
    build-essential \
    && rm -rf /var/lib/apt/lists/* \
    && apt-get clean

# Create working directory
WORKDIR /app

# Copy requirements files first (to leverage Docker layer caching)
COPY requirements.txt requirements-dev.txt ./

# Install Python dependencies
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir -r requirements.txt

# Copy source code
COPY . .

# Create required directories
RUN mkdir -p models models_cache logs .cache/huggingface .cache/torch

# Set permissions
RUN chmod +x *.py && \
    chmod +x *.sh 2>/dev/null || true

# Expose port
EXPOSE 7860

# Healthcheck
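# Assumes main.py answers a plain HTTP GET / on port 7860; curl -f makes the check
# fail on HTTP error responses, and --start-period gives the app time to load the model.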
HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
    CMD curl -f http://localhost:7860/ || exit 1

# Default command (web interface)
CMD ["python3", "main.py"]