# nanoVLM-inference / requirements.txt
# (provenance: vidhanm, commit 984c158, 871 bytes — header converted to
# comments so pip can parse this file)
# Try to match your local working environment for generate.py
torch==2.7.0
# If 'transformers==4.52.3' and 'tokenizers==0.21.1' are from custom/dev builds,
# you MUST find a way to install those exact versions in Docker, or use the
# closest standard PyPI versions and test generate.py locally with THOSE first.
# For this example, I'm assuming they are pip-installable. If not, adjust.
transformers==4.52.3
tokenizers==0.21.1
huggingface-hub==0.32.0
safetensors==0.5.3
Pillow==10.4.0 # generate.py uses PIL.Image
# For protobuf, if your local 6.31.0 is confirmed, use it. Otherwise, a standard one:
protobuf==4.25.3 # Or your confirmed local 6.31.0 if pip-installable
accelerate # Good to include, though generate.py might not explicitly use it
sentencepiece # Often a dependency for tokenizers
# Gradio IS required (pinned below for the web UI); earlier note saying it was
# not needed applied only to the local generate.py smoke test.
torchvision
gradio==3.50.2