# syntax=docker/dockerfile:1
# Use Python 3.10 as the base image (matches the build environment from the error log).
# NOTE(review): for fully reproducible builds consider pinning a patch version or
# digest, e.g. python:3.10.14-slim@sha256:... -- TODO confirm version policy.
FROM python:3.10-slim

LABEL maintainer="Your Name <your.email@example.com>" \
      description="Multi-modal server with LLM, Stable Diffusion, Whisper, and model downloading."
# Build/runtime behavior flags.
# - PIP_NO_CACHE_DIR: any set value disables the pip cache; the historical "off"
#   spelling was mis-parsed by pip, so "1" states the intent unambiguously.
# - CMAKE_ARGS: crucial for the whisper-cpp-python build (picked up by pip below).
# - DEBIAN_FRONTEND: needed by the apt-get step; it also persists into the
#   runtime env -- harmless here, but an ARG would avoid that. TODO confirm.
ENV DEBIAN_FRONTEND=noninteractive \
    PIP_NO_CACHE_DIR=1 \
    PIP_DISABLE_PIP_VERSION_CHECK=on \
    PYTHONUNBUFFERED=1 \
    CMAKE_ARGS="-DCMAKE_POLICY_VERSION_MINIMUM=3.5" \
    MODELS_BASE_DIR=/app/models

# Default model repos/files, overridable at `docker run -e ...` time.
# These defaults match the application script's own defaults.
# (Kept in a separate ENV so comments never sit inside a line continuation,
# which the classic builder rejects.)
ENV LLM_MODEL_REPO="tensorblock/xddd-GGUF" \
    LLM_MODEL_FILE="xddd-Q2_K.gguf" \
    SD_MODEL_REPO="city96/FLUX.1-schnell-gguf" \
    SD_MODEL_FILE="flux1-schnell-Q8_0.gguf" \
    SD_REFINER_MODEL_REPO="gpustack/stable-diffusion-xl-refiner-1.0-GGUF" \
    SD_REFINER_MODEL_FILE="stable-diffusion-xl-refiner-1.0-Q8_0.gguf" \
    WHISPER_MODEL_REPO="ggerganov/whisper.cpp" \
    WHISPER_MODEL_FILE="ggml-base.en.bin" \
    VIDEO_MODEL_REPO="city96/Wan2.1-I2V-14B-720P-gguf" \
    VIDEO_MODEL_FILE="wan2.1-i2v-14b-720p-Q8_0.gguf"
# System packages needed to compile native Python extensions
# (whisper-cpp-python and friends): C/C++ toolchain, CMake, git, git-lfs.
# `update` and `install` share one layer so the apt index is never stale,
# and the index is removed in the same layer to keep the image small.
RUN apt-get update \
    && apt-get install -y --no-install-recommends \
        build-essential \
        cmake \
        git \
        git-lfs \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
# /app is created automatically by WORKDIR if missing.
WORKDIR /app

# Copy only the dependency manifest first: the (expensive) pip layer below is
# then cached until requirements.txt itself changes.
COPY requirements.txt .

# Install Python dependencies.
# CMAKE_ARGS (set above) is consumed here while whisper-cpp-python compiles.
RUN pip install --no-cache-dir -r requirements.txt
# Copy the application code after the deps layer so code-only edits
# don't invalidate the pip install cache.
COPY app.py .

# Pre-create the per-model directories matching the script's *_LOCAL_PATH logic.
# Quoted so an unexpected value of MODELS_BASE_DIR can't word-split.
RUN mkdir -p "${MODELS_BASE_DIR}/llm" \
             "${MODELS_BASE_DIR}/sd" \
             "${MODELS_BASE_DIR}/sd_refiner" \
             "${MODELS_BASE_DIR}/whisper" \
             "${MODELS_BASE_DIR}/video"
# NOTE(review): an earlier comment claimed models are downloaded during the
# build, but this Dockerfile contains no download step; models are presumably
# fetched at runtime by app.py into ${MODELS_BASE_DIR} -- confirm, or add a
# build-time RUN here to make the image self-contained.

# Run as a non-root user. /app (including the model directories) is handed to
# that user so runtime model downloads can write there.
RUN useradd --system --create-home --home-dir /home/app app \
    && chown -R app:app /app
USER app

# Documentation only -- does not publish the port; use `-p 7860:7860` at run time.
EXPOSE 7860

# Exec form: python is PID 1 and receives SIGTERM from `docker stop`.
CMD ["python", "app.py"]