import torch
from diffusers import AutoencoderKLWan, WanVACEPipeline, UniPCMultistepScheduler
from diffusers.utils import export_to_video
import gradio as gr
import tempfile
import spaces
from huggingface_hub import hf_hub_download
import numpy as np
from PIL import Image
import random
MODEL_ID = "Wan-AI/Wan2.1-VACE-14B-diffusers"
# The Wan VAE is kept in float32 for numerical stability while the rest of the
# pipeline runs in bfloat16, per the diffusers Wan examples. Note that
# WanVACEPipeline has no CLIP image encoder component, so none is loaded here.
vae = AutoencoderKLWan.from_pretrained(MODEL_ID, subfolder="vae", torch_dtype=torch.float32)
pipe = WanVACEPipeline.from_pretrained(MODEL_ID, vae=vae, torch_dtype=torch.bfloat16)
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=2.0)
pipe.to("cuda")
pipe.load_lora_weights(
"vrgamedevgirl84/Wan14BT2VFusioniX",
weight_name="FusionX_LoRa/Phantom_Wan_14B_FusionX_LoRA.safetensors",
adapter_name="phantom"
)
pipe.load_lora_weights(
"vrgamedevgirl84/Wan14BT2VFusioniX",
weight_name="OtherLoRa's/DetailEnhancerV1.safetensors", adapter_name="detailer"
)
pipe.set_adapters(["phantom", "detailer"], adapter_weights=[1.0, 0.9])
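# fuse_lora() merges the active adapters (at the strengths set above) into the
# base weights, so sampling pays no per-step LoRA overhead afterwards.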
pipe.fuse_lora()
MOD_VALUE = 32
DEFAULT_H_SLIDER_VALUE = 512
DEFAULT_W_SLIDER_VALUE = 896
NEW_FORMULA_MAX_AREA = 480.0 * 832.0
SLIDER_MIN_H, SLIDER_MAX_H = 128, 896
SLIDER_MIN_W, SLIDER_MAX_W = 128, 896
MAX_SEED = np.iinfo(np.int32).max
FIXED_FPS = 24
MIN_FRAMES_MODEL = 8
MAX_FRAMES_MODEL = 81
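# The requested duration maps to frames as round(seconds * FIXED_FPS), clamped
# to the model's range in generate_video: e.g. 2 s -> 48 frames, and anything
# above ~3.4 s hits the 81-frame cap.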
# Default prompts for different modes
MODE_PROMPTS = {
"Ref2V": "",
"FLF2V": "",
"Random2V": ""
}
default_negative_prompt = "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards, watermark, text, signature"
def _calculate_new_dimensions_wan(pil_image, mod_val, calculation_max_area,
min_slider_h, max_slider_h,
min_slider_w, max_slider_w,
default_h, default_w):
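    """Fit the upload inside calculation_max_area at its own aspect ratio,
    snap both sides down to multiples of mod_val, and clamp to the slider
    range. Example: a 1280x720 upload yields (448, 832)."""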
orig_w, orig_h = pil_image.size
if orig_w <= 0 or orig_h <= 0:
return default_h, default_w
aspect_ratio = orig_h / orig_w
calc_h = round(np.sqrt(calculation_max_area * aspect_ratio))
calc_w = round(np.sqrt(calculation_max_area / aspect_ratio))
calc_h = max(mod_val, (calc_h // mod_val) * mod_val)
calc_w = max(mod_val, (calc_w // mod_val) * mod_val)
new_h = int(np.clip(calc_h, min_slider_h, (max_slider_h // mod_val) * mod_val))
new_w = int(np.clip(calc_w, min_slider_w, (max_slider_w // mod_val) * mod_val))
return new_h, new_w
def handle_gallery_upload_for_dims_wan(gallery_images, current_h_val, current_w_val):
if gallery_images is None or len(gallery_images) == 0:
return gr.update(value=DEFAULT_H_SLIDER_VALUE), gr.update(value=DEFAULT_W_SLIDER_VALUE)
    try:
        # Gradio's Gallery passes (image, caption) tuples when used as an
        # input; unwrap the first entry before measuring it
        first_item = gallery_images[0]
        first_image = first_item[0] if isinstance(first_item, (tuple, list)) else first_item
new_h, new_w = _calculate_new_dimensions_wan(
first_image, MOD_VALUE, NEW_FORMULA_MAX_AREA,
SLIDER_MIN_H, SLIDER_MAX_H, SLIDER_MIN_W, SLIDER_MAX_W,
DEFAULT_H_SLIDER_VALUE, DEFAULT_W_SLIDER_VALUE
)
return gr.update(value=new_h), gr.update(value=new_w)
    except Exception as e:
        gr.Warning(f"Error calculating new dimensions: {e}")
return gr.update(value=DEFAULT_H_SLIDER_VALUE), gr.update(value=DEFAULT_W_SLIDER_VALUE)
def update_prompt_from_mode(mode):
"""Update the prompt based on the selected mode"""
return MODE_PROMPTS.get(mode, "")
def process_images_for_mode(images, mode):
    """Pick the conditioning image for the selected mode."""
    if not images:
        return None
    # Gradio's Gallery passes (image, caption) tuples; unwrap to plain images
    pil_images = [img[0] if isinstance(img, (tuple, list)) else img for img in images]
    if mode == "Ref2V":
        # Use the first image as the reference
        return pil_images[0]
    elif mode == "FLF2V":
        # First-Last Frame: condition on the first frame here; the last frame
        # is picked up in the mode-specific block of generate_video
        return pil_images[0]
    elif mode == "Random2V":
        # Randomly select one image from the gallery
        return random.choice(pil_images)
    return pil_images[0]
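# @spaces.GPU accepts a callable for `duration`: ZeroGPU invokes it with the
# same arguments as the wrapped function and reserves that many GPU seconds,
# so heavier settings request a longer slot.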
def get_duration(gallery_images, mode, prompt, height, width,
negative_prompt, duration_seconds,
guidance_scale, steps,
seed, randomize_seed,
progress):
if steps > 4 and duration_seconds > 2:
return 90
elif steps > 4 or duration_seconds > 2:
return 75
else:
return 60
@spaces.GPU(duration=get_duration)
def generate_video(gallery_images, mode, prompt, height, width,
                   negative_prompt=default_negative_prompt, duration_seconds=2,
                   guidance_scale=1, steps=4,
                   seed=42, randomize_seed=False,
                   progress=gr.Progress(track_tqdm=True)):
"""
Generate a video from gallery images using the selected mode.
Args:
gallery_images (list): List of PIL images from the gallery
mode (str): Processing mode - "Ref2V", "FLF2V", or "Random2V"
prompt (str): Text prompt describing the desired animation
height (int): Target height for the output video
width (int): Target width for the output video
negative_prompt (str): Negative prompt to avoid unwanted elements
duration_seconds (float): Duration of the generated video in seconds
guidance_scale (float): Controls adherence to the prompt
steps (int): Number of inference steps
seed (int): Random seed for reproducible results
randomize_seed (bool): Whether to use a random seed
progress (gr.Progress): Gradio progress tracker
Returns:
tuple: (video_path, current_seed)
"""
if gallery_images is None or len(gallery_images) == 0:
raise gr.Error("Please upload at least one image to the gallery.")
# Process images based on the selected mode
input_image = process_images_for_mode(gallery_images, mode)
if input_image is None:
raise gr.Error("Failed to process images for the selected mode.")
target_h = max(MOD_VALUE, (int(height) // MOD_VALUE) * MOD_VALUE)
target_w = max(MOD_VALUE, (int(width) // MOD_VALUE) * MOD_VALUE)
num_frames = np.clip(int(round(duration_seconds * FIXED_FPS)), MIN_FRAMES_MODEL, MAX_FRAMES_MODEL)
current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
resized_image = input_image.resize((target_w, target_h))
# Mode-specific processing can be added here if needed
    if mode == "FLF2V" and len(gallery_images) >= 2:
        # Proper first/last-frame conditioning would build a video+mask pair
        # for the VACE pipeline; see the sketch below. Left as a stub here.
        pass
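    # A minimal FLF2V sketch (assumes the diffusers VACE convention of a masked
    # video whose first/last frames are fixed and whose middle is inpainted):
    #   first, last = process_images_for_mode(gallery_images[:1], "Ref2V"), process_images_for_mode(gallery_images[-1:], "Ref2V")
    #   gray = Image.new("RGB", (target_w, target_h), (128, 128, 128))
    #   frames = [first.resize((target_w, target_h))] + [gray] * (num_frames - 2) + [last.resize((target_w, target_h))]
    #   mask = [Image.new("L", (target_w, target_h), 0)] + [Image.new("L", (target_w, target_h), 255)] * (num_frames - 2) + [Image.new("L", (target_w, target_h), 0)]
    #   ...then pass video=frames, mask=mask to the pipeline call below.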
    with torch.inference_mode():
        # WanVACEPipeline has no `image` argument; reference-image conditioning
        # goes through `reference_images` (assumption: some diffusers versions
        # may additionally expect a video/mask pair)
        output_frames_list = pipe(
            reference_images=[resized_image], prompt=prompt, negative_prompt=negative_prompt,
            height=target_h, width=target_w, num_frames=num_frames,
            guidance_scale=float(guidance_scale), num_inference_steps=int(steps),
            generator=torch.Generator(device="cuda").manual_seed(current_seed)
        ).frames[0]
with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
video_path = tmpfile.name
export_to_video(output_frames_list, video_path, fps=FIXED_FPS)
return video_path, current_seed
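# Hypothetical headless use (outside a ZeroGPU Space, the @spaces.GPU decorator
# would need to no-op):
#   path, used_seed = generate_video([Image.open("ref.png")], "Ref2V",
#                                    "a cat walking on grass", 512, 896)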
with gr.Blocks() as demo:
gr.Markdown("# Fast 4 steps Wan 2.1 I2V (14B) with CausVid LoRA - Multi-Image Gallery")
gr.Markdown("[CausVid](https://github.com/tianweiy/CausVid) is a distilled version of Wan 2.1 to run faster in just 4-8 steps, [extracted as LoRA by Kijai](https://huggingface.co/Kijai/WanVideo_comfy/blob/main/Wan21_CausVid_14B_T2V_lora_rank32.safetensors) and is compatible with 🧨 diffusers")
with gr.Row():
with gr.Column():
# Gallery component for multiple image upload
gallery_component = gr.Gallery(
label="Upload Images",
show_label=True,
elem_id="gallery",
columns=3,
rows=2,
object_fit="contain",
height="auto",
type="pil",
allow_preview=True
)
# Radio button for mode selection
mode_radio = gr.Radio(
choices=["Ref2V", "FLF2V", "Random2V"],
value="Ref2V",
label="Processing Mode",
info="Ref2V: Reference to Video | FLF2V: First-Last Frame to Video | Random2V: Random Image to Video"
)
prompt_input = gr.Textbox(label="Prompt", value=MODE_PROMPTS["Ref2V"])
duration_seconds_input = gr.Slider(minimum=round(MIN_FRAMES_MODEL/FIXED_FPS,1), maximum=round(MAX_FRAMES_MODEL/FIXED_FPS,1), step=0.1, value=2, label="Duration (seconds)", info=f"Clamped to model's {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS}fps.")
with gr.Accordion("Advanced Settings", open=False):
negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42, interactive=True)
randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True, interactive=True)
with gr.Row():
height_input = gr.Slider(minimum=SLIDER_MIN_H, maximum=SLIDER_MAX_H, step=MOD_VALUE, value=DEFAULT_H_SLIDER_VALUE, label=f"Output Height (multiple of {MOD_VALUE})")
width_input = gr.Slider(minimum=SLIDER_MIN_W, maximum=SLIDER_MAX_W, step=MOD_VALUE, value=DEFAULT_W_SLIDER_VALUE, label=f"Output Width (multiple of {MOD_VALUE})")
steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=6, label="Inference Steps")
guidance_scale_input = gr.Slider(minimum=0.0, maximum=5.0, step=0.5, value=1.0, label="Guidance Scale", visible=False)
generate_button = gr.Button("Generate Video", variant="primary")
with gr.Column():
video_output = gr.Video(label="Generated Video", autoplay=True, interactive=False)
with gr.Accordion("Mode Information", open=True):
gr.Markdown("""
**Processing Modes:**
- **Ref2V**: Uses the first image as reference for video generation
- **FLF2V**: Blends first and last images for interpolation (requires at least 2 images)
- **Random2V**: Randomly selects one image from the gallery for generation
""")
# Update prompt when mode changes
mode_radio.change(
fn=update_prompt_from_mode,
inputs=[mode_radio],
outputs=[prompt_input]
)
# Update dimensions when gallery changes
gallery_component.change(
fn=handle_gallery_upload_for_dims_wan,
inputs=[gallery_component, height_input, width_input],
outputs=[height_input, width_input]
)
ui_inputs = [
gallery_component, mode_radio, prompt_input, height_input, width_input,
negative_prompt_input, duration_seconds_input,
guidance_scale_input, steps_slider, seed_input, randomize_seed_checkbox
]
generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=[video_output, seed_input])
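# queue() serializes GPU jobs; mcp_server=True additionally exposes the app's
# API endpoints as MCP tools (supported in recent Gradio releases).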
if __name__ == "__main__":
demo.queue().launch(mcp_server=True) |