# requirements.txt
# (Removed file-viewer extraction residue: size banner, commit hashes, and
#  the viewer's line-number gutter, which are not part of this file.)
--extra-index-url https://download.pytorch.org/whl/cu121

# Core dependencies
torch==2.4.0
torchvision==0.19.0
pillow==10.4.0
tqdm==4.67.1
gradio==4.44.0
spaces

# Diffusion and model dependencies
transformers==4.46.3
diffusers
accelerate
xformers==0.0.27.post2

# Quantization and optimization
bitsandbytes
gguf
peft
sentencepiece

# Optional but recommended for faster inference
https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.0.post2/flash_attn-2.7.0.post2+cu12torch2.4cxx11abiFALSE-cp310-cp310-linux_x86_64.whl