# nanoVLM-inference/Dockerfile
FROM python:3.9-slim
WORKDIR /app
# git is needed below to clone the nanoVLM repo
RUN apt-get update && apt-get install -y git && rm -rf /var/lib/apt/lists/*
COPY requirements.txt requirements.txt
RUN pip install --no-cache-dir -r requirements.txt
# Bump the version string in the echo below to invalidate Docker's layer cache
# and force a fresh clone of nanoVLM on the next build.
RUN echo "Forcing re-clone: v1" && rm -rf /app/nanoVLM && git clone https://github.com/huggingface/nanoVLM.git /app/nanoVLM
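# Sketch of an alternative cache-bust (not part of this build; CACHEBUST is a
# hypothetical build-arg name): reference the arg in the RUN so that passing a
# new value, e.g. `docker build --build-arg CACHEBUST=$(date +%s) .`, forces a
# cache miss for that layer without editing the Dockerfile.
# ARG CACHEBUST=1
# RUN echo "cachebust=$CACHEBUST" && rm -rf /app/nanoVLM && git clone https://github.com/huggingface/nanoVLM.git /app/nanoVLM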
# Make the cloned nanoVLM package importable from app.py
ENV PYTHONPATH="/app/nanoVLM:${PYTHONPATH}"
# The Hugging Face cache must live somewhere writable by the non-root user the
# container runs as at runtime
ENV HF_HOME=/app/.cache/huggingface
RUN mkdir -p $HF_HOME && chmod -R 777 $HF_HOME
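# Optional sketch: pre-download model weights at build time so the first request
# doesn't wait on a download. Assumes the checkpoint name used by app.py
# (lusxvr/nanoVLM-222M here is an assumption) and that huggingface_hub is in
# requirements.txt.
# RUN python -c "from huggingface_hub import snapshot_download; snapshot_download('lusxvr/nanoVLM-222M')"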
# Gradio app that wraps nanoVLM's generate.py logic; changes to app.py should go
# through the nanoVLM library (see the Python sketch after this Dockerfile).
COPY app.py app.py
# COPY static static
# COPY templates templates
EXPOSE 7860
CMD ["python", "-u", "app.py"]
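
The Dockerfile expects an app.py next to it. Below is a minimal sketch of what that file might look like. It assumes the API shapes from nanoVLM's generate.py at the time of writing (VisionLanguageModel.from_pretrained, get_tokenizer, get_image_processor, model.generate) and the lusxvr/nanoVLM-222M checkpoint; all of these are assumptions to verify against the cloned /app/nanoVLM source, not a definitive implementation.

# app.py sketch (hypothetical): a minimal Gradio wrapper around nanoVLM.
import torch
import gradio as gr
# These imports resolve via PYTHONPATH=/app/nanoVLM set in the Dockerfile;
# module/class names follow nanoVLM's generate.py and may differ across versions.
from models.vision_language_model import VisionLanguageModel
from data.processors import get_tokenizer, get_image_processor

device = "cuda" if torch.cuda.is_available() else "cpu"
# Checkpoint name is an assumption; weights land in HF_HOME=/app/.cache/huggingface.
model = VisionLanguageModel.from_pretrained("lusxvr/nanoVLM-222M").to(device).eval()
tokenizer = get_tokenizer(model.cfg.lm_tokenizer)
image_processor = get_image_processor(model.cfg.vit_img_size)

def answer(image, question):
    # Encode the prompt and image, then let nanoVLM generate an answer,
    # mirroring the flow of the repo's generate.py.
    prompt = f"Question: {question} Answer:"
    tokens = tokenizer(prompt, return_tensors="pt")["input_ids"].to(device)
    pixels = image_processor(image.convert("RGB")).unsqueeze(0).to(device)
    with torch.no_grad():
        out = model.generate(tokens, pixels, max_new_tokens=64)
    return tokenizer.batch_decode(out, skip_special_tokens=True)[0]

demo = gr.Interface(
    fn=answer,
    inputs=[gr.Image(type="pil"), gr.Textbox(label="Question")],
    outputs=gr.Textbox(label="Answer"),
)
# Bind to 0.0.0.0:7860 to match the EXPOSE 7860 in the Dockerfile.
demo.launch(server_name="0.0.0.0", server_port=7860)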