# syntax=docker/dockerfile:1
# Streamlit + Ollama application image.
# Slim Python base keeps the image small; consider pinning a patch version
# (e.g. python:3.11.9-slim) or a digest for fully reproducible builds.
FROM python:3.11-slim

# All application code, caches, and config live under /app.
WORKDIR /app

# System dependencies for building Python wheels and installing Ollama.
# update+install in one layer (avoids stale apt cache), --no-install-recommends
# keeps the layer minimal, and the apt lists are removed in the same layer so
# they never persist in the image.
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
    build-essential \
    ca-certificates \
    curl \
    git \
    gnupg \
    lsb-release \
    software-properties-common \
    wget \
    && rm -rf /var/lib/apt/lists/*
# Install Ollama via its vendor install script.
# NOTE(review): unpinned `curl | sh` executes whatever the remote serves at
# build time — not reproducible and a supply-chain risk. Consider downloading
# a pinned release artifact and verifying its checksum instead. Also note the
# default /bin/sh pipeline would mask a curl failure (no pipefail).
RUN curl -fsSL https://ollama.ai/install.sh | sh
# Copy only the dependency manifest first so the pip layer stays cached until
# requirements.txt itself changes (source edits don't reinstall deps).
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# Copy the rest of the application.
# NOTE(review): ensure a .dockerignore excludes .git, local caches, and env
# files so they don't end up in the image — can't verify from here.
COPY . .
# Runtime environment — related variables grouped into single ENV layers.
# PYTHONPATH: make /app and /app/src importable without installing the package.
# HOME/USER: the container may run under an arbitrary non-root UID, so HOME is
# pointed at the writable /app tree to avoid writes into / or /root.
ENV PYTHONPATH="/app:/app/src:$PYTHONPATH" \
    HOME="/app" \
    USER="appuser"
# Streamlit: keep its config under /app and disable usage-statistics telemetry,
# which otherwise attempts writes outside the writable tree.
ENV STREAMLIT_CONFIG_DIR="/app/.streamlit" \
    STREAMLIT_BROWSER_GATHER_USAGE_STATS="false"
# HuggingFace / sentence-transformers caches must live in a writable location.
ENV HF_HOME="/app/.cache" \
    SENTENCE_TRANSFORMERS_HOME="/app/.cache/sentence-transformers"
# Best-effort build-time downloads so the container starts faster. Both steps
# deliberately swallow failures (`|| true` / `|| echo`) — the application is
# expected to retry the download at runtime if the build-time fetch failed.
RUN python -c "import nltk; nltk.download('punkt', quiet=True)" || true
# Pre-download the embedding model to avoid runtime permission issues.
RUN python -c "from sentence_transformers import SentenceTransformer; model = SentenceTransformer('sentence-transformers/multi-qa-MiniLM-L6-cos-v1'); print('Model downloaded successfully')" || echo "Model download failed, will retry at runtime"
# Writable directories for Streamlit config, model caches, and uploads.
# They are made world-writable because the container may run under an
# arbitrary runtime UID; an empty world-writable .gitconfig keeps git from
# probing unwritable locations. (Everything is relative to WORKDIR /app.)
RUN mkdir -p \
        .streamlit \
        .cache/transformers \
        .cache/sentence-transformers \
        temp_uploads \
    && chmod -R 777 .cache \
    && chmod 777 temp_uploads \
    && touch .gitconfig \
    && chmod 666 .gitconfig
# Health check: probe Streamlit's built-in health endpoint so orchestrators
# can detect a wedged container. start-period gives the model server time to
# come up before failures count against the retry budget.
HEALTHCHECK --interval=30s --timeout=5s --start-period=60s --retries=3 \
  CMD curl --fail http://localhost:8501/_stcore/health || exit 1
# Pre-pull the Ollama model during build (optional - for faster startup)
# RUN ollama serve & sleep 5 && ollama pull llama3.2:3b && pkill ollama
# Document both service ports: 8501 (Streamlit UI) and 11434 (Ollama API).
# EXPOSE is documentation only — it does not publish ports. The previous
# standalone `EXPOSE 8501` was redundant with this line and has been removed.
EXPOSE 8501 11434
# Ollama state lives under /app so any runtime UID can write to it.
# (mkdir -p creates parent directories; the per-directory chmod that used to
# follow was redundant with the recursive chmod of /app below.)
RUN mkdir -p /app/.ollama/models
# Point Ollama at the writable model store and bind its API on all interfaces
# (port 11434) so the co-located Streamlit process can reach it.
ENV OLLAMA_MODELS=/app/.ollama/models \
    OLLAMA_HOST=0.0.0.0:11434
# Make the whole app tree writable for an arbitrary non-root runtime UID.
# NOTE(review): a recursive chmod rewrites every file into this layer, roughly
# doubling the image size (models/caches included). Prefer COPY --chown and
# targeted permissions if image size becomes a problem.
RUN chmod -R 777 /app
# Startup script that supervises both Ollama and Streamlit in one container.
# (Redundant with `COPY . .` above, but kept so a change to startup.py alone
# only invalidates this small layer.)
COPY startup.py /app/startup.py
# Exec-form ENTRYPOINT: python runs as PID 1 and receives SIGTERM directly on
# `docker stop`. A stray trailing "|" in the previous revision corrupted the
# JSON exec form (it would have fallen back to shell form and failed).
ENTRYPOINT ["python", "/app/startup.py"]