FROM python:3.11

# Create and switch to a non-root user so the application does not run as root
RUN useradd -m -u 1000 user
USER user
ENV PATH="/home/user/.local/bin:$PATH"

# Set working directory
WORKDIR /app

# Copy the dependency list first so the pip install layer is cached when only code changes
COPY --chown=user requirements.txt .

# Install dependencies into the non-root user's local site-packages
RUN pip install --no-cache-dir -r requirements.txt

# Copy the remaining project files, owned by the non-root user
COPY --chown=user . .

# Set Hugging Face cache directory to persist model downloads
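# HF_HOME is the top-level cache used by huggingface_hub/transformers and SENTENCE_TRANSFORMERS_HOME
# is the sentence-transformers cache; MEDGEMMA_HOME is assumed to be an application-specific setting.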
ENV HF_HOME="/home/user/.cache/huggingface"
ENV SENTENCE_TRANSFORMERS_HOME="/home/user/.cache/huggingface/sentence-transformers"
ENV MEDGEMMA_HOME="/home/user/.cache/huggingface/sentence-transformers"

# Create cache directories and ensure permissions
RUN mkdir -p /app/model_cache /home/user/.cache/huggingface/sentence-transformers && \
    chown -R user:user /app/model_cache /home/user/.cache/huggingface

# Control preloading to avoid exhausting build disk on HF Spaces
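# PRELOAD_TRANSLATORS and EMBEDDING_HALF are assumed to be flags read by the application's own
# download/warmup code rather than standard library variables.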
ENV PRELOAD_TRANSLATORS="0"
ENV EMBEDDING_HALF="0"

# Pre-load models in separate build-time scripts (translation preload disabled by default)
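# download_model.py and warmup.py are assumed to be provided by the project; running them at build
# time bakes the model weights into the image so they are not re-downloaded at container start.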
RUN python /app/download_model.py && python /app/warmup.py

# Ensure ownership and permissions remain intact
RUN chown -R user:user /app/model_cache

# Expose port
EXPOSE 7860

# Run the application
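# Port 7860 is the default port Hugging Face Spaces expects; a single uvicorn worker keeps only one
# copy of the model in memory.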
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860", "--workers", "1"]