# nanoVLM-inference / Dockerfile
# Base image; swap in your preferred Python version if needed.
# (The comment must sit on its own line: Docker does not parse inline
# comments after a FROM instruction.)
FROM python:3.9-slim
WORKDIR /app
# Install git
RUN apt-get update && apt-get install -y git && rm -rf /var/lib/apt/lists/*
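# Note: git is only needed to clone the nanoVLM repository below; the apt
# lists are removed in the same layer to keep the image small.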
# Copy requirements and install
COPY requirements.txt requirements.txt
RUN echo "DEBUG: Installing packages from requirements.txt for Gradio app" && \
pip install --no-cache-dir -r requirements.txt && \
echo "DEBUG: Finished installing packages."
# Clone the nanoVLM repository
RUN echo "DEBUG: Cloning huggingface/nanoVLM repository..." && \
git clone https://github.com/huggingface/nanoVLM.git /app/nanoVLM && \
echo "DEBUG: nanoVLM repository cloned to /app/nanoVLM."
# Make the cloned nanoVLM code importable by app.py
# (e.g. `from models.vision_language_model import VisionLanguageModel`).
# The base image defines no PYTHONPATH, so set it directly; appending
# ":${PYTHONPATH}" would leave a trailing colon, which Python reads as an
# extra empty entry (the current directory) on sys.path.
ENV PYTHONPATH="/app/nanoVLM"
ENV HF_HOME=/app/.cache/huggingface
# Create cache directory
RUN mkdir -p $HF_HOME && chmod -R 777 $HF_HOME
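# Note: Hugging Face Spaces run the container as a non-root user (uid 1000),
# so the Hub cache must be writable by that user or model downloads will
# fail with permission errors.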
# Copy your Gradio application
COPY app.py app.py
# Expose the port Gradio runs on
EXPOSE 7860
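# Note: 7860 is Gradio's default port and the port Hugging Face Spaces
# expect. app.py is assumed to launch with server_name="0.0.0.0" (and this
# port) so the server is reachable from outside the container.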
# Command to run the Gradio application
CMD ["python", "-u", "app.py"]