Spaces:
Running
Running
File size: 1,400 Bytes
ca5b08e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 |
import importlib.util
import logging
import subprocess
import sys
# Module-level logger; handlers/levels are expected to be configured by the application.
logger = logging.getLogger(__name__)
def check_poppler_version():
    """Verify that poppler's ``pdftoppm`` binary is installed and runnable.

    Runs ``pdftoppm -h`` and inspects the result. On any failure the
    problem is logged and the process exits with status 1.
    """
    try:
        proc = subprocess.run(
            ["pdftoppm", "-h"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
        )
    except FileNotFoundError:
        # The binary is not on PATH at all.
        logger.error("pdftoppm is not installed.")
        sys.exit(1)
    # pdftoppm writes its usage banner to stderr; a healthy install starts
    # that banner with the program name.
    if proc.returncode == 0 and proc.stderr.startswith("pdftoppm"):
        logger.info("pdftoppm is installed and working.")
    else:
        logger.error("pdftoppm is installed but returned an error.")
        sys.exit(1)
def check_vllm_version():
    """Exit the process if the ``vllm`` package cannot be found.

    vllm must be installed with a separate command (see error message), so
    its presence is verified at startup rather than declared as a hard
    dependency.
    """
    spec = importlib.util.find_spec("vllm")
    if spec is not None:
        return
    logger.error("VLLM needs to be installed with a separate command in order to find all dependencies properly.")
    sys.exit(1)
def check_torch_gpu_available(min_gpu_memory: int = 20 * 1024**3):
    """Ensure torch is importable and GPU 0 has at least ``min_gpu_memory`` bytes.

    Args:
        min_gpu_memory: Minimum required total GPU memory in bytes
            (default 20 GiB).

    Raises:
        ImportError: If torch is not installed.
        AssertionError: If GPU 0 has less than ``min_gpu_memory`` bytes of memory.
        Exception: Re-raised from torch if no CUDA device is available.
    """
    try:
        import torch
    except ImportError:  # was a bare except: don't swallow SystemExit/KeyboardInterrupt
        logger.error("Pytorch must be installed, visit https://pytorch.org/ for installation instructions")
        raise
    try:
        gpu_memory = torch.cuda.get_device_properties(0).total_memory
        # Explicit raise instead of `assert` so the check still runs under `python -O`.
        if gpu_memory < min_gpu_memory:
            raise AssertionError(f"GPU 0 has {gpu_memory} bytes, need at least {min_gpu_memory}")
    except Exception:  # was a bare except: don't swallow SystemExit/KeyboardInterrupt
        logger.error(f"Torch was not able to find a GPU with at least {min_gpu_memory // (1024 ** 3)} GB of RAM.")
        raise
if __name__ == "__main__":
    # Smoke-test the runtime environment when invoked as a script.
    # NOTE(review): check_torch_gpu_available is defined above but not called
    # here — confirm whether the GPU check is meant to run at setup time too.
    check_poppler_version()
    check_vllm_version()
|