torch>=2.4.0
torchvision>=0.19.0
opencv-python>=4.9.0.80
diffusers>=0.31.0
transformers==4.51.3
# transformers==4.46.3  # was needed by llamallava used by i2v hunyuan before patch
tokenizers>=0.20.3
accelerate>=1.1.1
tqdm
imageio==2.31.1
easydict
ftfy
dashscope
imageio-ffmpeg==0.4.8
# flash_attn
gradio==5.23.0
numpy>=1.23.5,<2
einops
moviepy==1.0.3
mmgp==3.4.8
peft==0.14.0
mutagen
pydantic==2.10.6
decord
onnxruntime-gpu
rembg[gpu]==2.0.65
# rembg==2.0.65
matplotlib
timm
segment-anything
omegaconf
hydra-core
librosa
loguru
sentencepiece
av
torchsde==0.2.6
opencv-contrib-python
safetensors
ffmpeg-python==0.2.0
spaces
devicetorch
psutil
basicsr
facexlib>=0.2.5
gfpgan>=1.3.5
realesrgan
https://github.com/Dao-AILab/flash-attention/releases/download/v2.5.9.post1/flash_attn-2.5.9.post1+cu118torch1.12cxx11abiFALSE-cp310-cp310-linux_x86_64.whl