File size: 401 Bytes
861cb4a
 
 
9ee7a62
861cb4a
 
 
 
 
8e250e5
861cb4a
 
 
 
 
 
801de6e
861cb4a
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
import subprocess
import os


# Command line for the inference server.
# NOTE(review): assumes "python" is on PATH and run_inference_server.py
# lives in the current working directory — confirm against the deployment.
command = [
    "python",
    "run_inference_server.py",
    "-m", "./models/ggml-model-i2_s.gguf",
    "--host", "0.0.0.0",
    "--port", "7860",
]


def main() -> None:
    """Launch the inference server as a blocking subprocess.

    Blocks until the server process exits; a launch or runtime failure is
    reported on stdout rather than raised.
    """
    try:
        # check=True raises CalledProcessError on a nonzero exit code.
        # (Removed a leftover debug call that ran "ls" before the server.)
        subprocess.run(command, check=True)
    except subprocess.CalledProcessError as e:
        print(f"Error running inference server: {e}")
    except FileNotFoundError as e:
        # "python" is not on PATH (or the executable path is wrong).
        print(f"Error running inference server: {e}")


if __name__ == "__main__":
    main()