File size: 2,520 Bytes
e9c8776
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
# syntax=docker/dockerfile:1

# Use Python 3.10 as a base image (matches the build environment from the error log)
FROM python:3.10-slim

# Image metadata (LABEL form; the MAINTAINER instruction is deprecated)
LABEL maintainer="Your Name <your.email@example.com>"
LABEL description="Multi-modal server with LLM, Stable Diffusion, Whisper, and model downloading."

# Set environment variables for non-interactive installs and paths
# NOTE(review): DEBIAN_FRONTEND is ideally passed inline to apt-get rather than
# baked into the runtime env; kept here so the apt layer below stays prompt-free.
ENV DEBIAN_FRONTEND=noninteractive \
    # FIX: was "off", which modern pip (>=19.0) parses as boolean FALSE and so
    # RE-ENABLES the wheel cache -- the opposite of the intent. "1" actually
    # disables caching, matching the --no-cache-dir flag used at install time.
    PIP_NO_CACHE_DIR=1 \
    PIP_DISABLE_PIP_VERSION_CHECK=on \
    PYTHONUNBUFFERED=1 \
    # --- Crucial for whisper-cpp-python build (picked up by pip's source build) ---
    CMAKE_ARGS="-DCMAKE_POLICY_VERSION_MINIMUM=3.5" \
    # --- Define where models will be stored inside the container ---
    MODELS_BASE_DIR=/app/models \
    # --- Pass through model repo/file env vars if needed for download flexibility ---
    # (These defaults match the script's defaults)
    LLM_MODEL_REPO="tensorblock/xddd-GGUF" \
    LLM_MODEL_FILE="xddd-Q2_K.gguf" \
    SD_MODEL_REPO="city96/FLUX.1-schnell-gguf" \
    SD_MODEL_FILE="flux1-schnell-Q8_0.gguf" \
    SD_REFINER_MODEL_REPO="gpustack/stable-diffusion-xl-refiner-1.0-GGUF" \
    SD_REFINER_MODEL_FILE="stable-diffusion-xl-refiner-1.0-Q8_0.gguf" \
    WHISPER_MODEL_REPO="ggerganov/whisper.cpp" \
    WHISPER_MODEL_FILE="ggml-base.en.bin" \
    VIDEO_MODEL_REPO="city96/Wan2.1-I2V-14B-720P-gguf" \
    VIDEO_MODEL_FILE="wan2.1-i2v-14b-720p-Q8_0.gguf"

# Build-time system dependencies: the C/C++ toolchain and cmake are needed to
# compile native wheels (e.g. whisper-cpp-python); git/git-lfs for repo fetches.
# Update + install + cleanup happen in a single layer so the apt cache and
# package lists never persist into the image.
RUN set -eux; \
    apt-get update; \
    apt-get install -y --no-install-recommends \
        build-essential \
        cmake \
        git \
        git-lfs; \
    apt-get clean; \
    rm -rf /var/lib/apt/lists/*

# All subsequent relative paths resolve under /app (WORKDIR creates it if missing)
WORKDIR /app

# Copy only the dependency manifest first: the pip layer below is then reused
# from cache whenever app.py changes but requirements.txt does not.
COPY requirements.txt ./

# Install Python dependencies. The CMAKE_ARGS value set in the ENV block is
# picked up here when pip builds whisper-cpp-python from source.
RUN pip install --no-cache-dir -r requirements.txt

# Copy the application code last (changes here do not bust the deps layer)
COPY app.py ./

# Pre-create the per-modality model directories expected by the script's
# *_LOCAL_PATH logic, all rooted at ${MODELS_BASE_DIR}.
RUN set -eu; \
    for sub in llm sd sd_refiner whisper video; do \
        mkdir -p "${MODELS_BASE_DIR}/${sub}"; \
    done

# NOTE(review): a previous comment claimed models are downloaded with
# huggingface_hub during the build, but no such RUN step exists here -- models
# are presumably fetched at runtime by app.py using the *_REPO/*_FILE env vars
# above (TODO confirm against app.py). Keeping the download lazy also keeps the
# image small.

# Run as an unprivileged user. It owns /app so the application can write
# downloaded models under ${MODELS_BASE_DIR} at runtime. Stable numeric
# UID/GID so orchestrators (e.g. Kubernetes runAsNonRoot) can verify it.
RUN groupadd --system --gid 10001 app \
    && useradd --system --uid 10001 --gid app --home /app app \
    && chown -R app:app /app
USER app

# Expose the port the app runs on (documentation only; publish with -p/--publish)
EXPOSE 7860

# Cheap TCP liveness probe against the app port. start-period is generous
# because loading/downloading large models can delay the first listen().
HEALTHCHECK --interval=30s --timeout=5s --start-period=120s --retries=3 \
    CMD python -c "import socket; socket.create_connection(('127.0.0.1', 7860), timeout=3).close()"

# Exec-form CMD: python runs as PID 1 and receives SIGTERM from `docker stop`
CMD ["python", "app.py"]