Spaces: Running on Zero
Update app.py #3
by linoyts (HF Staff) · opened

Files changed:
- app.py +10 -10
- optimization.py +9 -9
app.py CHANGED
@@ -32,16 +32,16 @@ MAX_DURATION = round(MAX_FRAMES_MODEL/FIXED_FPS,1)
 
 vae = AutoencoderKLWan.from_pretrained("Wan-AI/Wan2.2-T2V-A14B-Diffusers", subfolder="vae", torch_dtype=torch.float32)
 pipe = WanPipeline.from_pretrained(MODEL_ID,
-
-
-
-
-
-
-
-
-
-
+    transformer=WanTransformer3DModel.from_pretrained('linoyts/Wan2.2-T2V-A14B-Diffusers-BF16',
+        subfolder='transformer',
+        torch_dtype=torch.bfloat16,
+        device_map='cuda',
+    ),
+    transformer_2=WanTransformer3DModel.from_pretrained('linoyts/Wan2.2-T2V-A14B-Diffusers-BF16',
+        subfolder='transformer_2',
+        torch_dtype=torch.bfloat16,
+        device_map='cuda',
+    ),
     vae=vae,
     torch_dtype=torch.bfloat16,
 ).to('cuda')
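For context, here is a self-contained sketch of the loading path this change produces (the replaced lines are collapsed in the diff above, so only the new code is visible). The `MODEL_ID` value and the `diffusers` imports are assumptions, not part of the diff; the change itself is that both transformers (the two denoising experts) are now pulled from the pre-converted BF16 checkpoint `linoyts/Wan2.2-T2V-A14B-Diffusers-BF16` and placed on the GPU at load time.

```python
# Minimal sketch of the updated app.py loading path.
# Assumptions: MODEL_ID points at the base Wan2.2 T2V repo; imports come
# from a recent diffusers release that ships the Wan classes.
import torch
from diffusers import AutoencoderKLWan, WanPipeline, WanTransformer3DModel

MODEL_ID = "Wan-AI/Wan2.2-T2V-A14B-Diffusers"  # assumed value, not shown in the diff

# The VAE stays in float32; both transformer experts load directly in
# bfloat16 from the pre-converted BF16 repo and go straight to the GPU.
vae = AutoencoderKLWan.from_pretrained(
    "Wan-AI/Wan2.2-T2V-A14B-Diffusers", subfolder="vae", torch_dtype=torch.float32
)
pipe = WanPipeline.from_pretrained(
    MODEL_ID,
    transformer=WanTransformer3DModel.from_pretrained(
        "linoyts/Wan2.2-T2V-A14B-Diffusers-BF16",
        subfolder="transformer",
        torch_dtype=torch.bfloat16,
        device_map="cuda",
    ),
    transformer_2=WanTransformer3DModel.from_pretrained(
        "linoyts/Wan2.2-T2V-A14B-Diffusers-BF16",
        subfolder="transformer_2",
        torch_dtype=torch.bfloat16,
        device_map="cuda",
    ),
    vae=vae,
    torch_dtype=torch.bfloat16,
).to("cuda")
```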
optimization.py CHANGED
@@ -44,20 +44,20 @@ def optimize_pipeline_(pipeline: Callable[P, Any], *args: P.args, **kwargs: P.kw
     def compile_transformer():
 
         pipeline.load_lora_weights(
-
-
-            adapter_name="
+            "Kijai/WanVideo_comfy",
+            weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
+            adapter_name="lightx2v"
         )
         kwargs_lora = {}
         kwargs_lora["load_into_transformer_2"] = True
         pipeline.load_lora_weights(
-
-
-            adapter_name="
+            "Kijai/WanVideo_comfy",
+            weight_name="Wan22-Lightning/Wan2.2-Lightning_T2V-A14B-4steps-lora_LOW_fp16.safetensors",
+            adapter_name="lightx2v_2", **kwargs_lora
         )
-        pipeline.set_adapters(["
-        pipeline.fuse_lora(adapter_names=["
-        pipeline.fuse_lora(adapter_names=["
+        pipeline.set_adapters(["lightx2v", "lightx2v_2"], adapter_weights=[1., 1.])
+        pipeline.fuse_lora(adapter_names=["lightx2v"], lora_scale=3., components=["transformer"])
+        pipeline.fuse_lora(adapter_names=["lightx2v_2"], lora_scale=1., components=["transformer_2"])
         pipeline.unload_lora_weights()
 
         with capture_component_call(pipeline, 'transformer') as call:
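For reference, a condensed sketch of the LoRA sequence the new code runs inside `compile_transformer`: the Lightx2v distill LoRA is loaded into the default transformer, the Wan2.2-Lightning LOW LoRA is routed into `transformer_2` via `load_into_transformer_2=True`, both are fused at different scales, and the LoRA layers are then unloaded before compilation. The wrapper function name below is hypothetical; everything else mirrors the diff.

```python
# Condensed sketch of the LoRA flow in the updated compile_transformer
# (fuse_lightning_loras is a hypothetical name; `pipeline` is the
# WanPipeline built in app.py).
def fuse_lightning_loras(pipeline):
    # Distill LoRA for the first transformer.
    pipeline.load_lora_weights(
        "Kijai/WanVideo_comfy",
        weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
        adapter_name="lightx2v",
    )
    # Lightning LOW LoRA, routed into transformer_2.
    pipeline.load_lora_weights(
        "Kijai/WanVideo_comfy",
        weight_name="Wan22-Lightning/Wan2.2-Lightning_T2V-A14B-4steps-lora_LOW_fp16.safetensors",
        adapter_name="lightx2v_2",
        load_into_transformer_2=True,
    )
    # Activate both adapters, bake them into the weights at different scales,
    # then drop the LoRA layers so only the fused transformers remain.
    pipeline.set_adapters(["lightx2v", "lightx2v_2"], adapter_weights=[1.0, 1.0])
    pipeline.fuse_lora(adapter_names=["lightx2v"], lora_scale=3.0, components=["transformer"])
    pipeline.fuse_lora(adapter_names=["lightx2v_2"], lora_scale=1.0, components=["transformer_2"])
    pipeline.unload_lora_weights()
```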