# nanoVLM-inference / Dockerfile
# Use a slim Python 3.10 base image
FROM python:3.10-slim
# Set the working directory in the container
WORKDIR /app
# Set the Hugging Face cache directory and the Gradio temp/flagging directories
ENV HF_HOME=/app/.cache/huggingface
ENV GRADIO_TEMP_DIR=/tmp/gradio_tmp
ENV GRADIO_FLAGGING_DIR=/tmp/gradio_flags
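# These are only defaults baked into the image; they can be overridden at
# runtime, e.g. `docker run -e HF_HOME=/data/hf ...`, to persist the model
# cache outside the container.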
# Install git (needed to clone the nanoVLM repo below) and build-essential
# (needed if any Python dependency has to compile native extensions)
RUN apt-get update && apt-get install -y \
git \
build-essential \
&& rm -rf /var/lib/apt/lists/*
# Clone the original nanoVLM repository for its model definition files
# This makes the `models` directory from nanoVLM available under /app/nanoVLM
RUN git clone https://github.com/huggingface/nanoVLM.git /app/nanoVLM
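# For reproducible builds, consider pinning the clone to a known revision
# (the sha below is a placeholder, not a real nanoVLM commit):
# RUN git -C /app/nanoVLM checkout <commit-sha>
# If app.py does not add the clone to sys.path itself (an assumption about
# the app code), the import path can be set here instead:
# ENV PYTHONPATH=/app/nanoVLM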
# Create the cache and temp directories and make them writable
RUN mkdir -p $HF_HOME $GRADIO_TEMP_DIR $GRADIO_FLAGGING_DIR && \
chmod -R 777 $HF_HOME $GRADIO_TEMP_DIR $GRADIO_FLAGGING_DIR
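# The 777 permissions keep these paths writable when the container runs as a
# non-root user, as Hugging Face Spaces does (uid 1000). A tighter alternative,
# if you control the runtime user, is to create one and drop privileges:
# RUN useradd -m -u 1000 user && chown -R user:user /app $GRADIO_TEMP_DIR $GRADIO_FLAGGING_DIR
# USER user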
# Copy the requirements file first so the dependency install layer is cached
# across app code changes
COPY requirements.txt requirements.txt
# Install Python dependencies; --prefer-binary favors prebuilt wheels over
# slower source builds
RUN pip install --no-cache-dir --prefer-binary -r requirements.txt
# Copy the application code into the container
COPY app.py app.py
# Expose the port Gradio will run on
EXPOSE 7860
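# Gradio binds 127.0.0.1 by default, which is unreachable from outside the
# container. If app.py does not already pass server_name="0.0.0.0" to launch()
# (an assumption about the app code), Gradio's environment variables can
# enforce the binding:
# ENV GRADIO_SERVER_NAME=0.0.0.0
# ENV GRADIO_SERVER_PORT=7860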
# Run the Gradio application; -u disables output buffering so logs appear
# immediately
CMD ["python", "-u", "app.py"]
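# To build and try the image locally (the image name is arbitrary):
# docker build -t nanovlm-inference .
# docker run -p 7860:7860 nanovlm-inference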