# syntax=docker/dockerfile:1
# Base image: Ubuntu 22.04 LTS — explicitly tagged (never :latest). For fully
# reproducible builds, consider pinning by digest (ubuntu:22.04@sha256:…).
FROM ubuntu:22.04

# Install the build toolchain, BLAS/Boost libraries, and download tools.
# `--no-install-recommends` keeps the layer minimal (DL3015); because of it,
# ca-certificates must be listed explicitly so the HTTPS git clone and model
# download below still verify TLS. update+install share one layer (DL3009)
# and the apt list cache is removed in that same layer so it never persists.
RUN apt-get update && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
    build-essential \
    ca-certificates \
    cmake \
    curl \
    git \
    libboost-filesystem-dev \
    libboost-math-dev \
    libboost-python-dev \
    libboost-system-dev \
    libboost-thread-dev \
    libboost-timer-dev \
    libcurl4-openssl-dev \
    libomp-dev \
    libopenblas-dev \
    libssl-dev \
    pkg-config \
    python3-pip \
    wget \
    zlib1g-dev && \
    rm -rf /var/lib/apt/lists/*

# Build llama.cpp's server binary with OpenBLAS acceleration.
# Absolute -B/-S paths replace the `cd` inside RUN (hadolint DL3003);
# --depth 1 avoids fetching the full history just to build HEAD.
# NOTE(review): the clone is unpinned, so builds are not reproducible —
# pin a release tag (git clone --branch bXXXX) once a known-good one is chosen.
RUN git clone --depth 1 https://github.com/ggerganov/llama.cpp /llama.cpp && \
    cmake -B /llama.cpp/build -S /llama.cpp \
        -DLLAMA_BUILD_SERVER=ON \
        -DLLAMA_BUILD_EXAMPLES=ON \
        -DGGML_BLAS=ON \
        -DGGML_BLAS_VENDOR=OpenBLAS \
        -DCMAKE_BUILD_TYPE=Release && \
    cmake --build /llama.cpp/build --config Release --target llama-server -j "$(nproc)"
    
RUN cd /llama.cpp/build && ./bin/llama-server --list-devices

# Download model
# Fetch the quantized Llama 3.2 3B Instruct weights (Q8_0 GGUF) into /models.
# NOTE(review): the download is not checksum-verified — image contents can
# silently change if the upstream file is replaced. Once the expected SHA-256
# is known, switch to `ADD --checksum=sha256:… <url> /models/model.q8_0.gguf`.
RUN mkdir -p /models && \
    wget -O /models/model.q8_0.gguf https://huggingface.co/hugging-quants/Llama-3.2-3B-Instruct-Q8_0-GGUF/resolve/main/llama-3.2-3b-instruct-q8_0.gguf


RUN pip install fastapi uvicorn openai

# Copy the API app and startup script. `COPY --chmod` sets the execute bit in
# the same layer, avoiding the extra `RUN chmod` layer that would duplicate
# the file's bytes in the image.
COPY app.py /app.py
COPY --chmod=755 start.sh /start.sh

# Documentation only (does not publish ports). Presumably 7860 is the FastAPI
# front end and 8080 is llama-server's default — TODO confirm against start.sh.
# NOTE(review): the container runs as root — consider adding a non-root USER
# (and chowning /models, /app.py, /start.sh) before production use.
EXPOSE 7860 8080

# Exec-form CMD: start.sh runs as PID 1 and receives SIGTERM from
# `docker stop`. start.sh must forward signals / `exec` its children for a
# clean shutdown — TODO confirm, since it launches multiple services.
CMD ["/start.sh"]