File size: 1,283 Bytes
d941729
9c43066
d941729
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9c43066
d941729
9c43066
 
d941729
 
 
 
9c43066
d941729
9c43066
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
# Use the official NVIDIA CUDA runtime image (pinned tag for reproducibility)
FROM nvidia/cuda:12.3.1-runtime-ubuntu22.04

# Set Python to use unbuffered mode so logs appear immediately
ENV PYTHONUNBUFFERED=1

# User-level pip installs land in $HOME/.local/bin; put that on PATH
ENV PATH="/var/www/.local/bin:${PATH}"

# Create a non-root user (fixed UID 1000 so the runtime can verify non-root)
RUN useradd -m -u 1000 -U -s /bin/bash myuser

# Install OS dependencies; combine update+install and clean apt lists in the
# same layer so the stale-cache state never persists in the image
RUN apt-get update && \
    apt-get install -y --no-install-recommends python3-pip python3-dev && \
    rm -rf /var/lib/apt/lists/*

# Set the working directory (WORKDIR creates it if missing) and make it HOME
ENV HOME=/var/www
WORKDIR /var/www

# Pre-create the logs directory and hand the whole tree to the non-root user
# in one layer (a later chown would duplicate the data in a new layer)
RUN mkdir -p /var/www/logs && chown -R myuser:myuser /var/www

# Switch to the non-root user before installing app dependencies
USER myuser

# Copy the dependency manifest first so the pip layer stays cached until
# requirements.txt itself changes
COPY --chown=myuser:myuser requirements.txt /var/www/

# Install Python dependencies (--no-cache-dir keeps the layer small)
RUN pip install --user --no-cache-dir -r requirements.txt
# RUN pip install --user torch==1.12.1+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html
# RUN pip install --user ../whl/semantic_search_multistep_data/whl/torch-1.12.1+cu113-cp310-cp310-linux_x86_64.whl

# Copy the application source after deps, so code edits don't bust the pip cache
COPY --chown=myuser:myuser . /var/www

# Document the service port (EXPOSE does not publish; run with -p 7860:7860)
EXPOSE 7860

# Runtime configuration for the embedding model
ENV MODEL_PATH="BAAI/bge-m3" \
    DEVICE="gpu"

# Launch the FastAPI app. Exec (JSON) form makes uvicorn PID 1 so it receives
# SIGTERM directly from `docker stop` instead of being wrapped in /bin/sh -c.
CMD ["python3", "-m", "uvicorn", "search_api:app", "--host=0.0.0.0", "--port=7860"]