import torch
from diffusers import AutoencoderKLWan, WanPipeline, UniPCMultistepScheduler
from diffusers.utils import export_to_video
from diffusers.loaders.lora_conversion_utils import _convert_non_diffusers_lora_to_diffusers  # Keep this if it's the base
import gradio as gr
import tempfile
import os
import spaces
from huggingface_hub import hf_hub_download
import logging  # For better logging
# --- Global Model Loading & LoRA Handling ---
MODEL_ID = "Wan-AI/Wan2.1-T2V-14B-Diffusers"
LORA_REPO_ID = "Kijai/WanVideo_comfy"
LORA_FILENAME = "Wan21_CausVid_14B_T2V_lora_rank32.safetensors"

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# This dictionary will store the manual patches extracted by the converter
MANUAL_PATCHES_STORE = {}
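# Illustrative example of the mapping the converter below performs (hypothetical keys,
# shown only to document the convention; the real keys come from the downloaded checkpoint):
#   "diffusion_model.blocks.0.self_attn.q.lora_down.weight"
#       -> "transformer.blocks.0.attn1.to_q.lora_A.weight"      (PEFT LoRA A)
#   "diffusion_model.blocks.0.self_attn.q.lora_up.weight"
#       -> "transformer.blocks.0.attn1.to_q.lora_B.weight"      (PEFT LoRA B)
#   "diffusion_model.blocks.0.ffn.0.diff_b"
#       -> MANUAL_PATCHES_STORE["transformer.blocks.0.ffn.net.0.proj.bias"]  (manual diff patch)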
def _custom_convert_non_diffusers_wan_lora_to_diffusers(state_dict):
    global MANUAL_PATCHES_STORE
    MANUAL_PATCHES_STORE = {}  # Clear previous patches

    peft_state_dict = {}
    unhandled_keys = []
    original_keys = list(state_dict.keys())

    processed_state_dict = {}
    for k, v in state_dict.items():
        if k.startswith("diffusion_model."):
            processed_state_dict[k[len("diffusion_model."):]] = v
        elif k.startswith("difusion_model."):  # Handle potential typo
            processed_state_dict[k[len("difusion_model."):]] = v
        else:
            unhandled_keys.append(k)  # Will be logged later if not handled by diff/diff_b

    block_indices = set()
    for k_proc in processed_state_dict:
        if k_proc.startswith("blocks."):
            try:
                block_idx_str = k_proc.split("blocks.")[1].split(".")[0]
                if block_idx_str.isdigit():
                    block_indices.add(int(block_idx_str))
            except IndexError:
                pass  # Will be handled as a non-block key or logged

    num_blocks = 0
    if block_indices:
        num_blocks = max(block_indices) + 1

    is_i2v_lora = any("k_img" in k for k in processed_state_dict) and \
                  any("v_img" in k for k in processed_state_dict)

    handled_original_keys = set()
    # --- Handle Block-level LoRAs & Diffs ---
    for i in range(num_blocks):
        # Self-attention (maps to attn1 in WanTransformerBlock)
        for o_lora, c_diffusers in zip(["q", "k", "v", "o"], ["to_q", "to_k", "to_v", "to_out.0"]):
            lora_down_key_proc = f"blocks.{i}.self_attn.{o_lora}.lora_down.weight"
            lora_up_key_proc = f"blocks.{i}.self_attn.{o_lora}.lora_up.weight"
            diff_b_key_proc = f"blocks.{i}.self_attn.{o_lora}.diff_b"
            diff_w_key_proc = f"blocks.{i}.self_attn.{o_lora}.diff"  # Assuming .diff for weight

            if lora_down_key_proc in processed_state_dict and lora_up_key_proc in processed_state_dict:
                peft_state_dict[f"transformer.blocks.{i}.attn1.{c_diffusers}.lora_A.weight"] = processed_state_dict[lora_down_key_proc]
                peft_state_dict[f"transformer.blocks.{i}.attn1.{c_diffusers}.lora_B.weight"] = processed_state_dict[lora_up_key_proc]
                handled_original_keys.add(f"diffusion_model.{lora_down_key_proc}")
                handled_original_keys.add(f"diffusion_model.{lora_up_key_proc}")

            if diff_b_key_proc in processed_state_dict:
                target_bias_key = f"transformer.blocks.{i}.attn1.{c_diffusers}.bias"
                MANUAL_PATCHES_STORE[target_bias_key] = ("diff_b", processed_state_dict[diff_b_key_proc])
                handled_original_keys.add(f"diffusion_model.{diff_b_key_proc}")

            if diff_w_key_proc in processed_state_dict:
                target_weight_key = f"transformer.blocks.{i}.attn1.{c_diffusers}.weight"
                MANUAL_PATCHES_STORE[target_weight_key] = ("diff", processed_state_dict[diff_w_key_proc])
                handled_original_keys.add(f"diffusion_model.{diff_w_key_proc}")

        # Cross-attention (maps to attn2 in WanTransformerBlock)
        for o_lora, c_diffusers in zip(["q", "k", "v", "o"], ["to_q", "to_k", "to_v", "to_out.0"]):
            lora_down_key_proc = f"blocks.{i}.cross_attn.{o_lora}.lora_down.weight"
            lora_up_key_proc = f"blocks.{i}.cross_attn.{o_lora}.lora_up.weight"
            diff_b_key_proc = f"blocks.{i}.cross_attn.{o_lora}.diff_b"
            diff_w_key_proc = f"blocks.{i}.cross_attn.{o_lora}.diff"
            norm_q_diff_key_proc = f"blocks.{i}.cross_attn.norm_q.diff"  # specific norm diff
            norm_k_diff_key_proc = f"blocks.{i}.cross_attn.norm_k.diff"  # specific norm diff

            if lora_down_key_proc in processed_state_dict and lora_up_key_proc in processed_state_dict:
                peft_state_dict[f"transformer.blocks.{i}.attn2.{c_diffusers}.lora_A.weight"] = processed_state_dict[lora_down_key_proc]
                peft_state_dict[f"transformer.blocks.{i}.attn2.{c_diffusers}.lora_B.weight"] = processed_state_dict[lora_up_key_proc]
                handled_original_keys.add(f"diffusion_model.{lora_down_key_proc}")
                handled_original_keys.add(f"diffusion_model.{lora_up_key_proc}")

            if diff_b_key_proc in processed_state_dict:
                target_bias_key = f"transformer.blocks.{i}.attn2.{c_diffusers}.bias"
                MANUAL_PATCHES_STORE[target_bias_key] = ("diff_b", processed_state_dict[diff_b_key_proc])
                handled_original_keys.add(f"diffusion_model.{diff_b_key_proc}")

            if diff_w_key_proc in processed_state_dict:
                target_weight_key = f"transformer.blocks.{i}.attn2.{c_diffusers}.weight"
                MANUAL_PATCHES_STORE[target_weight_key] = ("diff", processed_state_dict[diff_w_key_proc])
                handled_original_keys.add(f"diffusion_model.{diff_w_key_proc}")

            if norm_q_diff_key_proc in processed_state_dict:  # Assuming norm_q on q_proj
                MANUAL_PATCHES_STORE[f"transformer.blocks.{i}.attn2.norm_q.weight"] = ("diff", processed_state_dict[norm_q_diff_key_proc])
                handled_original_keys.add(f"diffusion_model.{norm_q_diff_key_proc}")

            if norm_k_diff_key_proc in processed_state_dict:  # Assuming norm_k on k_proj
                MANUAL_PATCHES_STORE[f"transformer.blocks.{i}.attn2.norm_k.weight"] = ("diff", processed_state_dict[norm_k_diff_key_proc])
                handled_original_keys.add(f"diffusion_model.{norm_k_diff_key_proc}")

        if is_i2v_lora:
            for o_lora, c_diffusers in zip(["k_img", "v_img"], ["add_k_proj", "add_v_proj"]):
                lora_down_key_proc = f"blocks.{i}.cross_attn.{o_lora}.lora_down.weight"
                lora_up_key_proc = f"blocks.{i}.cross_attn.{o_lora}.lora_up.weight"
                diff_b_key_proc = f"blocks.{i}.cross_attn.{o_lora}.diff_b"
                diff_w_key_proc = f"blocks.{i}.cross_attn.{o_lora}.diff"

                if lora_down_key_proc in processed_state_dict and lora_up_key_proc in processed_state_dict:
                    peft_state_dict[f"transformer.blocks.{i}.attn2.{c_diffusers}.lora_A.weight"] = processed_state_dict[lora_down_key_proc]
                    peft_state_dict[f"transformer.blocks.{i}.attn2.{c_diffusers}.lora_B.weight"] = processed_state_dict[lora_up_key_proc]
                    handled_original_keys.add(f"diffusion_model.{lora_down_key_proc}")
                    handled_original_keys.add(f"diffusion_model.{lora_up_key_proc}")

                if diff_b_key_proc in processed_state_dict:
                    target_bias_key = f"transformer.blocks.{i}.attn2.{c_diffusers}.bias"
                    MANUAL_PATCHES_STORE[target_bias_key] = ("diff_b", processed_state_dict[diff_b_key_proc])
                    handled_original_keys.add(f"diffusion_model.{diff_b_key_proc}")

                if diff_w_key_proc in processed_state_dict:
                    target_weight_key = f"transformer.blocks.{i}.attn2.{c_diffusers}.weight"
                    MANUAL_PATCHES_STORE[target_weight_key] = ("diff", processed_state_dict[diff_w_key_proc])
                    handled_original_keys.add(f"diffusion_model.{diff_w_key_proc}")

        # FFN
        for o_lora_suffix, c_diffusers_path in zip([".0", ".2"], ["net.0.proj", "net.2"]):
            lora_down_key_proc = f"blocks.{i}.ffn{o_lora_suffix}.lora_down.weight"
            lora_up_key_proc = f"blocks.{i}.ffn{o_lora_suffix}.lora_up.weight"
            diff_b_key_proc = f"blocks.{i}.ffn{o_lora_suffix}.diff_b"
            diff_w_key_proc = f"blocks.{i}.ffn{o_lora_suffix}.diff"  # Assuming .diff for weight

            if lora_down_key_proc in processed_state_dict and lora_up_key_proc in processed_state_dict:
                peft_state_dict[f"transformer.blocks.{i}.ffn.{c_diffusers_path}.lora_A.weight"] = processed_state_dict[lora_down_key_proc]
                peft_state_dict[f"transformer.blocks.{i}.ffn.{c_diffusers_path}.lora_B.weight"] = processed_state_dict[lora_up_key_proc]
                handled_original_keys.add(f"diffusion_model.{lora_down_key_proc}")
                handled_original_keys.add(f"diffusion_model.{lora_up_key_proc}")

            if diff_b_key_proc in processed_state_dict:
                target_bias_key = f"transformer.blocks.{i}.ffn.{c_diffusers_path}.bias"
                MANUAL_PATCHES_STORE[target_bias_key] = ("diff_b", processed_state_dict[diff_b_key_proc])
                handled_original_keys.add(f"diffusion_model.{diff_b_key_proc}")

            if diff_w_key_proc in processed_state_dict:
                target_weight_key = f"transformer.blocks.{i}.ffn.{c_diffusers_path}.weight"
                MANUAL_PATCHES_STORE[target_weight_key] = ("diff", processed_state_dict[diff_w_key_proc])
                handled_original_keys.add(f"diffusion_model.{diff_w_key_proc}")

        # Block norm3 diffs (assuming norm3 applies to the output of the FFN in the original Wan block structure)
        norm3_diff_key_proc = f"blocks.{i}.norm3.diff"
        norm3_diff_b_key_proc = f"blocks.{i}.norm3.diff_b"
        if norm3_diff_key_proc in processed_state_dict:
            MANUAL_PATCHES_STORE[f"transformer.blocks.{i}.norm3.weight"] = ("diff", processed_state_dict[norm3_diff_key_proc])  # Norms usually have .weight
            handled_original_keys.add(f"diffusion_model.{norm3_diff_key_proc}")
        if norm3_diff_b_key_proc in processed_state_dict:
            MANUAL_PATCHES_STORE[f"transformer.blocks.{i}.norm3.bias"] = ("diff_b", processed_state_dict[norm3_diff_b_key_proc])  # And .bias
            handled_original_keys.add(f"diffusion_model.{norm3_diff_b_key_proc}")
    # --- Handle Top-level LoRAs & Diffs ---
    top_level_mappings = [
        # (lora_base_path_proc, diffusers_base_path, lora_suffixes, diffusers_suffixes)
        ("text_embedding", "transformer.condition_embedder.text_embedder", ["0", "2"], ["linear_1", "linear_2"]),
        ("time_embedding", "transformer.condition_embedder.time_embedder", ["0", "2"], ["linear_1", "linear_2"]),
        ("time_projection", "transformer.condition_embedder.time_proj", ["1"], [""]),  # Wan has .1, Diffusers has no suffix
        ("head", "transformer.proj_out", ["head"], [""]),  # Wan has .head, Diffusers has no suffix
    ]

    for lora_base_proc, diffusers_base, lora_suffixes, diffusers_suffixes in top_level_mappings:
        for l_suffix, d_suffix in zip(lora_suffixes, diffusers_suffixes):
            actual_lora_path_proc = f"{lora_base_proc}.{l_suffix}" if l_suffix else lora_base_proc
            actual_diffusers_path = f"{diffusers_base}.{d_suffix}" if d_suffix else diffusers_base

            lora_down_key_proc = f"{actual_lora_path_proc}.lora_down.weight"
            lora_up_key_proc = f"{actual_lora_path_proc}.lora_up.weight"
            diff_b_key_proc = f"{actual_lora_path_proc}.diff_b"
            diff_w_key_proc = f"{actual_lora_path_proc}.diff"

            if lora_down_key_proc in processed_state_dict and lora_up_key_proc in processed_state_dict:
                peft_state_dict[f"{actual_diffusers_path}.lora_A.weight"] = processed_state_dict[lora_down_key_proc]
                peft_state_dict[f"{actual_diffusers_path}.lora_B.weight"] = processed_state_dict[lora_up_key_proc]
                handled_original_keys.add(f"diffusion_model.{lora_down_key_proc}")
                handled_original_keys.add(f"diffusion_model.{lora_up_key_proc}")

            if diff_b_key_proc in processed_state_dict:
                MANUAL_PATCHES_STORE[f"{actual_diffusers_path}.bias"] = ("diff_b", processed_state_dict[diff_b_key_proc])
                handled_original_keys.add(f"diffusion_model.{diff_b_key_proc}")

            if diff_w_key_proc in processed_state_dict:
                MANUAL_PATCHES_STORE[f"{actual_diffusers_path}.weight"] = ("diff", processed_state_dict[diff_w_key_proc])
                handled_original_keys.add(f"diffusion_model.{diff_w_key_proc}")

    # Patch Embedding
    patch_emb_diff_b_key = "patch_embedding.diff_b"
    if patch_emb_diff_b_key in processed_state_dict:
        MANUAL_PATCHES_STORE["transformer.patch_embedding.bias"] = ("diff_b", processed_state_dict[patch_emb_diff_b_key])
        handled_original_keys.add(f"diffusion_model.{patch_emb_diff_b_key}")

    # A .diff for patch_embedding.weight may also exist in some checkpoints; handle it if present
    patch_emb_diff_w_key = "patch_embedding.diff"
    if patch_emb_diff_w_key in processed_state_dict:
        MANUAL_PATCHES_STORE["transformer.patch_embedding.weight"] = ("diff", processed_state_dict[patch_emb_diff_w_key])
        handled_original_keys.add(f"diffusion_model.{patch_emb_diff_w_key}")
    # Log unhandled keys
    final_unhandled_keys = []
    for k_orig in original_keys:
        # Reconstruct the processed key to check whether it was actually handled by the LoRA A/B or diff/diff_b logic
        k_proc = None
        if k_orig.startswith("diffusion_model."):
            k_proc = k_orig[len("diffusion_model."):]
        elif k_orig.startswith("difusion_model."):
            k_proc = k_orig[len("difusion_model."):]

        if k_orig not in handled_original_keys and (k_proc is None or not any(k_proc.endswith(s) for s in [".lora_down.weight", ".lora_up.weight", ".diff", ".diff_b", ".alpha"])):
            final_unhandled_keys.append(k_orig)

    if final_unhandled_keys:
        logger.warning(
            f"The following keys from the Wan 2.1 LoRA checkpoint were not converted to PEFT LoRA A/B format "
            f"nor identified as manual diff patches: {final_unhandled_keys}."
        )

    if not peft_state_dict and not MANUAL_PATCHES_STORE:
        logger.warning("No valid LoRA A/B weights or manual diff patches found after conversion.")

    return peft_state_dict
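# PEFT's LoRA layers only cover the low-rank A/B pairs; the full-rank .diff / .diff_b
# tensors collected above have no PEFT equivalent, so they are added directly to the base
# weights by `apply_manual_diff_patches` below. An optional offline sanity check (not
# executed in the Space) could look like:
#
#     from safetensors.torch import load_file
#     sd = load_file("Wan21_CausVid_14B_T2V_lora_rank32.safetensors")
#     peft_sd = _custom_convert_non_diffusers_wan_lora_to_diffusers(sd)
#     print(f"{len(peft_sd)} PEFT tensors, {len(MANUAL_PATCHES_STORE)} manual patches")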
def apply_manual_diff_patches(pipe_model_component, patches_store, strength_model=1.0):
    if not patches_store:
        logger.info("No manual diff patches to apply.")
        return

    logger.info(f"Applying {len(patches_store)} manual diff patches...")
    for target_key, (patch_type, diff_tensor) in patches_store.items():
        try:
            module_path, param_name = target_key.rsplit('.', 1)
            module = pipe_model_component.get_submodule(module_path)
            original_param = getattr(module, param_name)

            if original_param.shape != diff_tensor.shape:
                logger.warning(f"Shape mismatch for {target_key}: model {original_param.shape}, LoRA {diff_tensor.shape}. Skipping patch.")
                continue

            with torch.no_grad():
                # Ensure diff_tensor is on the same device and dtype as the original parameter
                diff_tensor_casted = diff_tensor.to(device=original_param.device, dtype=original_param.dtype)
                scaled_diff = diff_tensor_casted * strength_model
                original_param.add_(scaled_diff)
                # logger.info(f"Applied {patch_type} to {target_key} with strength {strength_model}")
        except AttributeError:
            logger.warning(f"Could not find parameter {target_key} in the model component. Skipping patch.")
        except Exception as e:
            logger.error(f"Error applying patch to {target_key}: {e}")

    logger.info("Finished applying manual diff patches.")
# --- Model Loading ---
logger.info(f"Loading VAE for {MODEL_ID}...")
vae = AutoencoderKLWan.from_pretrained(
    MODEL_ID,
    subfolder="vae",
    torch_dtype=torch.float32  # float32 for VAE stability
)

logger.info(f"Loading Pipeline {MODEL_ID}...")
pipe = WanPipeline.from_pretrained(
    MODEL_ID,
    vae=vae,
    torch_dtype=torch.bfloat16  # bfloat16 for the rest of the pipeline
)

flow_shift = 8.0  # 5.0 for 720P, 3.0 for 480P
pipe.scheduler = UniPCMultistepScheduler.from_config(
    pipe.scheduler.config, flow_shift=flow_shift
)

logger.info("Moving pipeline to CUDA...")
pipe.to("cuda")
# --- LoRA Loading ---
logger.info(f"Downloading LoRA {LORA_FILENAME} from {LORA_REPO_ID}...")
causvid_path = hf_hub_download(repo_id=LORA_REPO_ID, filename=LORA_FILENAME)

logger.info("Loading LoRA weights with custom converter...")
# lora_state_dict_raw = WanPipeline.lora_state_dict(causvid_path)  # This might already do some conversion
# Alternative: load the raw state_dict and then convert it ourselves
from safetensors.torch import load_file as load_safetensors
raw_lora_state_dict = load_safetensors(causvid_path)
peft_state_dict = _custom_convert_non_diffusers_wan_lora_to_diffusers(raw_lora_state_dict)

if peft_state_dict:
    pipe.load_lora_weights(
        peft_state_dict,
        adapter_name="causvid_lora"
    )
    logger.info("PEFT LoRA A/B weights loaded.")
else:
    logger.warning("No PEFT-compatible LoRA weights found after conversion.")

lora_strength = 1.0
apply_manual_diff_patches(pipe.transformer, MANUAL_PATCHES_STORE, strength_model=lora_strength)
logger.info("Manual diff_b/diff patches applied.")
# --- Gradio Interface Function ---
@spaces.GPU  # Request a ZeroGPU device for the duration of each call
def generate_video(prompt, negative_prompt, height, width, num_frames, guidance_scale, steps, fps, progress=gr.Progress(track_tqdm=True)):
    logger.info("Starting video generation...")
    logger.info(f" Prompt: {prompt}")
    logger.info(f" Negative Prompt: {negative_prompt if negative_prompt else 'None'}")
    logger.info(f" Height: {height}, Width: {width}")
    logger.info(f" Num Frames: {num_frames}, FPS: {fps}")
    logger.info(f" Guidance Scale: {guidance_scale}")

    height = (int(height) // 8) * 8
    width = (int(width) // 8) * 8
    num_frames = int(num_frames)
    fps = int(fps)
    steps = int(steps)  # num_inference_steps must be an integer

    with torch.inference_mode():
        output_frames_list = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            height=height,
            width=width,
            num_frames=num_frames,
            guidance_scale=float(guidance_scale),
            num_inference_steps=steps
        ).frames[0]

    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
        video_path = tmpfile.name
    export_to_video(output_frames_list, video_path, fps=fps)

    logger.info(f"Video successfully generated and saved to {video_path}")
    return video_path
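# Note: for reproducible outputs, a seed could be exposed in the UI and passed to the pipe
# call as `generator=torch.Generator(device="cuda").manual_seed(seed)` (not wired up here).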
# --- Gradio UI Definition ---
default_prompt = "A cat walks on the grass, realistic"
default_negative_prompt = "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards"

with gr.Blocks() as demo:
    gr.Markdown(f"""
# Text-to-Video with Wan 2.1 (14B) + CausVid LoRA
Powered by `diffusers` and `{MODEL_ID}`.
The model is loaded into memory when the app starts; this may take a few minutes.
Ensure you have a GPU with sufficient VRAM (e.g., ~24GB+ for the default settings).
""")
    with gr.Row():
        with gr.Column(scale=2):
            prompt_input = gr.Textbox(label="Prompt", value=default_prompt, lines=3)
            negative_prompt_input = gr.Textbox(
                label="Negative Prompt (Optional)",
                value=default_negative_prompt,
                lines=3
            )
            with gr.Row():
                height_input = gr.Slider(minimum=256, maximum=768, step=64, value=480, label="Height (multiple of 8)")
                width_input = gr.Slider(minimum=256, maximum=1024, step=64, value=832, label="Width (multiple of 8)")
            with gr.Row():
                num_frames_input = gr.Slider(minimum=16, maximum=100, step=1, value=25, label="Number of Frames")
                fps_input = gr.Slider(minimum=5, maximum=30, step=1, value=15, label="Output FPS")
            steps = gr.Slider(minimum=1, maximum=30, step=1, value=4, label="Steps")
            guidance_scale_input = gr.Slider(minimum=1.0, maximum=20.0, step=0.5, value=1.0, label="Guidance Scale")
            generate_button = gr.Button("Generate Video", variant="primary")
        with gr.Column(scale=3):
            video_output = gr.Video(label="Generated Video")
    generate_button.click(
        fn=generate_video,
        inputs=[
            prompt_input,
            negative_prompt_input,
            height_input,
            width_input,
            num_frames_input,
            guidance_scale_input,
            steps,
            fps_input
        ],
        outputs=video_output
    )

    gr.Examples(
        examples=[
            ["A panda eating bamboo in a lush forest, cinematic lighting", default_negative_prompt, 480, 832, 25, 5.0, 4, 15],
            ["A majestic eagle soaring over snowy mountains", default_negative_prompt, 512, 768, 30, 7.0, 4, 12],
            ["Timelapse of a flower blooming, vibrant colors", "static, ugly", 384, 640, 40, 6.0, 4, 20],
            ["Astronaut walking on the moon, Earth in the background, highly detailed", default_negative_prompt, 480, 832, 20, 5.5, 4, 10],
        ],
        inputs=[prompt_input, negative_prompt_input, height_input, width_input, num_frames_input, guidance_scale_input, steps, fps_input],
        outputs=video_output,
        fn=generate_video,
        cache_examples=False
    )
if __name__ == "__main__":
    demo.queue().launch(share=True, debug=True)