Update app.py
app.py CHANGED
@@ -13,6 +13,7 @@ import spaces
 import diffusers
 from diffusers import ZImagePipeline, DiffusionPipeline, AutoencoderTiny, AutoencoderKL
 from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
+from diffusers.models import AutoencoderKL as DiffusersAutoencoderKL
 #from transformers import CLIPModel, CLIPProcessor, CLIPTextModel, CLIPTokenizer, CLIPConfig, T5EncoderModel, T5Tokenizer
 #from diffusers.models.transformers import FluxTransformer2DModel
 import copy
@@ -50,6 +51,8 @@ pipe = diffusers.ZImagePipeline.from_pretrained("dimitribarbot/Z-Image-Turbo-BF1
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
+pipe.vae = DiffusersAutoencoderKL.from_pretrained("kaiyuyue/FLUX.2-dev-vae", torch_dtype=torch.float16).to("cuda")
+
 #pipe.enable_model_cpu_offload()
 
 try: # A temp hack for some version diffusers lora loading problem
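For context, the sketch below shows how the two added lines sit in the surrounding app.py code: the new import aliases diffusers' AutoencoderKL, and the pipeline's VAE is then replaced with the FLUX.2-dev VAE right after the device is chosen. This is a minimal sketch, not the Space's full app.py: the pipeline repo id and its from_pretrained arguments are truncated in the hunk header above, so the id and bfloat16 dtype used here are placeholder assumptions; the VAE repo id, alias, and float16 dtype are taken verbatim from the diff.

import torch
import diffusers
from diffusers.models import AutoencoderKL as DiffusersAutoencoderKL

# Assumption: the pipeline repo id is cut off in the hunk context above;
# this placeholder id and the bfloat16 dtype are illustrative only.
pipe = diffusers.ZImagePipeline.from_pretrained(
    "dimitribarbot/Z-Image-Turbo-BF16",
    torch_dtype=torch.bfloat16,
)

device = "cuda" if torch.cuda.is_available() else "cpu"

# The commit swaps in the FLUX.2-dev VAE in float16 and moves it to CUDA,
# as in the added line of the second hunk.
pipe.vae = DiffusersAutoencoderKL.from_pretrained(
    "kaiyuyue/FLUX.2-dev-vae", torch_dtype=torch.float16
).to("cuda")

As a usage note, the swap only takes effect at decode time, when the pipeline passes the generated latents through pipe.vae to produce the final image; the rest of the pipeline is untouched by this commit.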