File size: 752 Bytes
0879be9
 
36269ba
0879be9
 
36269ba
7aa55b9
0879be9
7cfd849
55ec264
7cfd849
 
21a38b1
7cfd849
f439688
 
 
 
8d20674
 
36269ba
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
#!/bin/bash
# Container/Space entrypoint: start the Ollama server, pre-pull the models
# the app offers, then hand control to the Gradio web application.
set -u  # error on unset variables; -e is intentionally omitted so a failed
        # model pull does not prevent the app from starting (best-effort).

# Start the Ollama server in the background.
ollama serve &

# Wait until the server actually answers instead of a fixed sleep.
# `ollama list` succeeds only once the API is reachable; give up after ~30s
# and proceed anyway (the pulls below will surface any real failure).
for _ in {1..30}; do
  if ollama list >/dev/null 2>&1; then
    break
  fi
  sleep 1
done

# Best-effort pull: log a warning to stderr on failure but keep going,
# so one unavailable model does not block the whole Space.
pull_model() {
  ollama pull "$1" || printf 'warn: failed to pull %s\n' "$1" >&2
}

pull_model hf.co/bartowski/Qwen_Qwen3-4B-Instruct-2507-GGUF:Q4_K_M
#pull_model hf.co/bartowski/Qwen_Qwen3-4B-Thinking-2507-GGUF:Q4_K_M
pull_model smollm2:360m-instruct-q5_K_M
pull_model hf.co/bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M
pull_model gemma3n:e2b-it-q4_K_M #slow on Spaces CPU
pull_model granite3.3:2b

###20250812 ollama fail to run
#pull_model hf.co/bartowski/tencent_Hunyuan-1.8B-Instruct-GGUF:Q4_K_M
#pull_model hf.co/bartowski/tencent_Hunyuan-4B-Instruct-GGUF:Q4_K_M


# Start the Gradio web application.
# It connects to the Ollama server which is already running.
# exec replaces this shell so the app receives container signals (SIGTERM)
# directly and shutdown is clean.
exec python3 app.py