File size: 947 Bytes
ad56392
 
6c79855
ad56392
 
6c79855
ad56392
 
 
6c79855
ad56392
f7170a6
48f4e7f
ad56392
6c79855
ad56392
 
6c79855
ad56392
 
6c79855
b19f687
ad56392
48f4e7f
 
f7170a6
 
 
1efaefb
ad56392
6c79855
48f4e7f
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
# Image: Ollama server pre-loaded with a Qwen2.5-1.5B-Instruct (q4_0 GGUF) model
# registered under the name "llama". The base image's default ENTRYPOINT/CMD
# ("ollama serve") is kept, so the container starts the API server on 11434.
# NOTE(review): ":latest" is unpinned and makes builds non-reproducible —
# pin a specific ollama/ollama tag (or digest) once a known-good version is confirmed.
FROM ollama/ollama:latest

# Install curl (needed below to fetch the model and for the HEALTHCHECK probe).
# --no-install-recommends keeps the layer minimal; the apt lists are removed in
# the same layer so the cache never persists in the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
        ca-certificates \
        curl \
    && rm -rf /var/lib/apt/lists/*

# Add a non-root user (UID 1000) and drop privileges for everything that follows.
RUN useradd -m -u 1000 user
USER user

# Set environment variables.
# OLLAMA_HOST=0.0.0.0 makes the server listen on all interfaces so the
# published port is reachable from outside the container.
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH \
    OLLAMA_HOST=0.0.0.0

# Set working directory (created automatically; owned paths under $HOME).
WORKDIR $HOME/app

# Copy the Modelfile into the build context of the image.
COPY --chown=user:user Modelfile $HOME/app/

# Download the GGUF weights referenced by the Modelfile.
# The URL is quoted because "?" is a shell glob character; -f fails on HTTP
# errors and --retry rides out transient network hiccups during the build.
RUN curl -fsSL --retry 3 \
        "https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct-GGUF/resolve/main/qwen2.5-1.5b-instruct-q4_0.gguf?download=true" \
        -o llama.gguf

# Start the Ollama server in the background, wait until its API answers
# (instead of a fixed sleep, which races on slow builders), register the model
# from the Modelfile, then stop the server. The model blobs persist in
# $HOME/.ollama, so the runtime container has "llama" available immediately.
RUN ollama serve & SERVE_PID=$!; \
    for i in $(seq 1 30); do \
        curl -fsS http://127.0.0.1:11434/ >/dev/null 2>&1 && break; \
        sleep 1; \
    done && \
    ollama create llama -f Modelfile && \
    kill $SERVE_PID
# Example query once running:
# curl -X POST https://amansoni7477030-ollama-server.hf.space/api/generate -d '{ "model": "llama", "prompt":"Why is the sky blue?" }'

# Cheap liveness probe so orchestrators can detect a wedged server.
HEALTHCHECK --interval=30s --timeout=5s --start-period=15s --retries=3 \
    CMD curl -fsS http://127.0.0.1:11434/ || exit 1

# Expose port for API access (documentation only; publish with -p 11434:11434).
EXPOSE 11434