runtime error

Exit code: 1. Reason:
  , line 104, in <module>
    dam = DescribeAnythingModel(
  File "/home/user/app/dam/describe_anything_model.py", line 24, in __init__
    tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, None, None, **kwargs)
  File "/home/user/app/dam/model/model_utils.py", line 200, in load_pretrained_model
    model = LlavaLlamaModel(
  File "/home/user/app/dam/model/model_utils.py", line 33, in __init__
    self.init_vlm(config=config, *args, **kwargs)
  File "/home/user/app/dam/model/llava_arch.py", line 70, in init_vlm
    self.llm, self.tokenizer = build_llm_and_tokenizer(llm_cfg, config, *args, **kwargs)
  File "/home/user/app/dam/model/language_model/builder.py", line 77, in build_llm_and_tokenizer
    llm = AutoModelForCausalLM.from_pretrained(
  File "/usr/local/lib/python3.10/site-packages/transformers/models/auto/auto_factory.py", line 604, in from_pretrained
    return model_class.from_pretrained(
  File "/usr/local/lib/python3.10/site-packages/transformers/modeling_utils.py", line 288, in _wrapper
    return func(*args, **kwargs)
  File "/usr/local/lib/python3.10/site-packages/transformers/modeling_utils.py", line 5176, in from_pretrained
    ) = cls._load_pretrained_model(
  File "/usr/local/lib/python3.10/site-packages/transformers/modeling_utils.py", line 5597, in _load_pretrained_model
    caching_allocator_warmup(model_to_load, expanded_device_map, hf_quantizer)
  File "/usr/local/lib/python3.10/site-packages/transformers/modeling_utils.py", line 6215, in caching_allocator_warmup
    index = device.index if device.index is not None else torch_accelerator_module.current_device()
  File "/usr/local/lib/python3.10/site-packages/torch/cuda/__init__.py", line 1071, in current_device
    _lazy_init()
  File "/usr/local/lib/python3.10/site-packages/torch/cuda/__init__.py", line 412, in _lazy_init
    torch._C._cuda_init()
RuntimeError: Found no NVIDIA driver on your system. Please check that you have an NVIDIA GPU and installed a driver from http://www.nvidia.com/Download/index.aspx
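The failing frame is transformers' caching_allocator_warmup, which queries torch.cuda for the current device while the language model's weights are being loaded; on a CPU-only container with no NVIDIA driver, torch._C._cuda_init() raises the RuntimeError above. The fix is either to run the app on GPU hardware or to make sure the loading path never resolves to a CUDA device when none is available. Below is a minimal sketch of the second option, assuming DescribeAnythingModel forwards extra keyword arguments through load_pretrained_model into AutoModelForCausalLM.from_pretrained (as the **kwargs in the traceback suggests); the checkpoint name is a placeholder, not the actual model used here.

```python
import torch
from dam.describe_anything_model import DescribeAnythingModel  # module path taken from the traceback

# Only target CUDA when a driver and device are actually visible;
# otherwise keep all weights on CPU so the warmup never touches torch.cuda.
has_gpu = torch.cuda.is_available()

dam = DescribeAnythingModel(
    model_path="nvidia/DAM-3B",                        # placeholder checkpoint name
    device_map="auto" if has_gpu else "cpu",           # assumed to reach from_pretrained via **kwargs
    torch_dtype=torch.float16 if has_gpu else torch.float32,
)
```

If the keyword arguments are not forwarded that far, the same effect can be had by passing device_map="cpu" directly at the AutoModelForCausalLM.from_pretrained call in dam/model/language_model/builder.py, or by switching the Space to GPU hardware so the driver is actually present.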
