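"""InstaVideo: a Gradio Space for fast Wan 2.2 text-to-video generation on ZeroGPU.

A fork of multimodalart's wan2-1-fast Space. The free public demo caps resolution,
duration, and inference steps; duplicate the Space and set IS_ORIGINAL_SPACE to
anything other than "True" to lift the limits.
"""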
import os

# Work around an NVML-related crash on ZeroGPU by disabling PyTorch's NVML checks.
os.environ["PYTORCH_NO_NVML"] = "1"

# Install a PyTorch nightly wheel built for CUDA 12.6, as required on Spaces GPUs.
# This runs before `import torch` below so the upgraded wheel is the one imported.
os.system('pip install --upgrade --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu126 "torch<2.9"')
import random
import tempfile

import gradio as gr
import numpy as np
import spaces
import torch
from diffusers import AutoencoderKLWan, WanPipeline, UniPCMultistepScheduler
from diffusers.utils import export_to_video
# Model repository on the Hugging Face Hub.
MODEL_ID = "Runware/Wan2.2-T2V-A14B"

# Load the model on CPU; it is moved to CUDA inside the @spaces.GPU function below,
# since ZeroGPU only attaches a GPU for the duration of a request.
vae = AutoencoderKLWan.from_pretrained(MODEL_ID, subfolder="vae", torch_dtype=torch.float32)
pipe = WanPipeline.from_pretrained(MODEL_ID, vae=vae, torch_dtype=torch.bfloat16)
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=8.0)
# Configuration
MOD_VALUE = 32  # output height/width must be multiples of this
DEFAULT_H_SLIDER_VALUE = 768
DEFAULT_W_SLIDER_VALUE = 1344

# The free public demo runs with reduced limits; a duplicated Space can lift them
# by setting the IS_ORIGINAL_SPACE environment variable to anything but "True".
IS_ORIGINAL_SPACE = os.environ.get("IS_ORIGINAL_SPACE", "True") == "True"

LIMITED_MAX_RESOLUTION = 640
LIMITED_MAX_DURATION = 2.0
LIMITED_MAX_STEPS = 4

ORIGINAL_SLIDER_MIN_H, ORIGINAL_SLIDER_MAX_H = 128, 1536
ORIGINAL_SLIDER_MIN_W, ORIGINAL_SLIDER_MAX_W = 128, 1536
ORIGINAL_MAX_DURATION = round(81 / 24, 1)  # 81 frames at 24 fps ≈ 3.4 s
ORIGINAL_MAX_STEPS = 8
if IS_ORIGINAL_SPACE:
    SLIDER_MIN_H, SLIDER_MAX_H = 128, LIMITED_MAX_RESOLUTION
    SLIDER_MIN_W, SLIDER_MAX_W = 128, LIMITED_MAX_RESOLUTION
    MAX_DURATION = LIMITED_MAX_DURATION
    MAX_STEPS = LIMITED_MAX_STEPS
else:
    SLIDER_MIN_H, SLIDER_MAX_H = ORIGINAL_SLIDER_MIN_H, ORIGINAL_SLIDER_MAX_H
    SLIDER_MIN_W, SLIDER_MAX_W = ORIGINAL_SLIDER_MIN_W, ORIGINAL_SLIDER_MAX_W
    MAX_DURATION = ORIGINAL_MAX_DURATION
    MAX_STEPS = ORIGINAL_MAX_STEPS
MAX_SEED = np.iinfo(np.int32).max
FIXED_FPS = 24  # frames per second used to convert the requested duration to a frame count
FIXED_OUTPUT_FPS = 18  # frames per second of the exported video file
MIN_FRAMES_MODEL = 8
MAX_FRAMES_MODEL = 81
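# Example: a 2.0 s request becomes round(2.0 * 24) = 48 frames, which falls inside
# the model's supported 8-81 frame range.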
default_prompt_t2v = "cinematic footage, group of pedestrians dancing in the streets of NYC, high quality breakdance, 4K, tiktok video, intricate details, instagram feel, dynamic camera, smooth dance motion, dimly lit, stylish, beautiful faces, smiling, music video"
default_negative_prompt = "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards, watermark, text, signature"
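# ZeroGPU accepts a callable for @spaces.GPU's `duration` argument, letting the
# reserved GPU time scale with the request. With the defaults (2 s, 4 steps) the
# heuristic below asks for int(2) * 4 * 2.25 + 5 = 23 seconds.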
def get_duration(prompt, height, width, negative_prompt, duration_seconds, guidance_scale, steps, seed, randomize_seed, progress):
    return int(duration_seconds) * int(steps) * 2.25 + 5
@spaces.GPU(duration=get_duration)
def generate_video(prompt, height, width,
                   negative_prompt=default_negative_prompt, duration_seconds=2,
                   guidance_scale=1, steps=4,
                   seed=42, randomize_seed=False,
                   progress=gr.Progress(track_tqdm=True)):
    if not prompt or prompt.strip() == "":
        raise gr.Error("Please enter a text prompt. Try to use long and precise descriptions.")

    # Enforce the public demo's limits server-side, in case a client bypasses the UI.
    if IS_ORIGINAL_SPACE:
        height = min(height, LIMITED_MAX_RESOLUTION)
        width = min(width, LIMITED_MAX_RESOLUTION)
        duration_seconds = min(duration_seconds, LIMITED_MAX_DURATION)
        steps = min(steps, LIMITED_MAX_STEPS)

    # Snap dimensions down to the nearest multiple of MOD_VALUE, and convert the
    # requested duration to a frame count within the model's supported range.
    target_h = max(MOD_VALUE, (int(height) // MOD_VALUE) * MOD_VALUE)
    target_w = max(MOD_VALUE, (int(width) // MOD_VALUE) * MOD_VALUE)
    num_frames = np.clip(int(round(duration_seconds * FIXED_FPS)), MIN_FRAMES_MODEL, MAX_FRAMES_MODEL)
    current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)

    # Move the pipeline to the GPU only here: ZeroGPU attaches a GPU for the
    # duration of this @spaces.GPU-decorated call.
    pipe.to("cuda")

    with torch.inference_mode():
        output_frames_list = pipe(
            prompt=prompt, negative_prompt=negative_prompt,
            height=target_h, width=target_w, num_frames=num_frames,
            guidance_scale=float(guidance_scale), num_inference_steps=int(steps),
            generator=torch.Generator(device="cuda").manual_seed(current_seed),
        ).frames[0]

    # delete=False keeps the file on disk after the handler returns, so Gradio can serve it.
    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
        video_path = tmpfile.name
    export_to_video(output_frames_list, video_path, fps=FIXED_OUTPUT_FPS)
    return video_path, current_seed
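# Hypothetical direct call for local testing, bypassing the UI (the prompt and
# sizes are illustrative, not from the Space):
#   video_path, used_seed = generate_video("a red fox running through snow", 512, 512)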
# Gradio UI
with gr.Blocks(css="body { max-width: 100vw; overflow-x: hidden; }") as demo:
    gr.HTML('<meta name="viewport" content="width=device-width, initial-scale=1">')
    gr.Markdown("# ⚡ InstaVideo")
    gr.Markdown("This Gradio Space is a fork of [wan2-1-fast from multimodalart](https://huggingface.co/spaces/multimodalart/wan2-1-fast), and is powered by the Wan CausVid LoRA [from Kijai](https://huggingface.co/Kijai/WanVideo_comfy/blob/main/Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors).")

    if IS_ORIGINAL_SPACE:
        gr.Markdown("⚠️ **This free public demo limits the resolution to 640px, duration to 2s, and inference steps to 4. For full capabilities please duplicate this space.**")

    with gr.Row():
        with gr.Column():
            prompt_input = gr.Textbox(label="Prompt", value=default_prompt_t2v)
            with gr.Accordion("Advanced Settings", open=False):
                negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
                seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42, interactive=True)
                randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True, interactive=True)
                with gr.Row():
                    height_input = gr.Slider(
                        minimum=SLIDER_MIN_H,
                        maximum=SLIDER_MAX_H,
                        step=MOD_VALUE,
                        value=min(DEFAULT_H_SLIDER_VALUE, SLIDER_MAX_H),
                        label=f"Output Height (multiple of {MOD_VALUE})",
                    )
                    width_input = gr.Slider(
                        minimum=SLIDER_MIN_W,
                        maximum=SLIDER_MAX_W,
                        step=MOD_VALUE,
                        value=min(DEFAULT_W_SLIDER_VALUE, SLIDER_MAX_W),
                        label=f"Output Width (multiple of {MOD_VALUE})",
                    )
                duration_seconds_input = gr.Slider(
                    minimum=round(MIN_FRAMES_MODEL / FIXED_FPS, 1),
                    maximum=MAX_DURATION,
                    step=0.1,
                    value=2,
                    label="Duration (seconds)",
                    info=f"Clamped to the model's {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS} fps.",
                )
                steps_slider = gr.Slider(minimum=1, maximum=MAX_STEPS, step=1, value=4, label="Inference Steps")
                guidance_scale_input = gr.Slider(minimum=0.0, maximum=20.0, step=0.5, value=1.0, label="Guidance Scale", visible=False)
            generate_button = gr.Button("Generate Video", variant="primary")
        with gr.Column():
            video_output = gr.Video(label="Generated Video", autoplay=True, interactive=False)

    ui_inputs = [
        prompt_input, height_input, width_input,
        negative_prompt_input, duration_seconds_input,
        guidance_scale_input, steps_slider, seed_input, randomize_seed_checkbox,
    ]
    generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=[video_output, seed_input])

    example_configs = [
        ["a majestic eagle soaring through mountain peaks, cinematic aerial view", 896, 512],
        ["a serene ocean wave crashing on a sandy beach at sunset", 448, 832],
        ["a field of flowers swaying in the wind, spring morning light", 512, 896],
    ]
    if IS_ORIGINAL_SPACE:
        # Clamp the example resolutions to the public demo's limits.
        example_configs = [
            [example[0], min(example[1], LIMITED_MAX_RESOLUTION), min(example[2], LIMITED_MAX_RESOLUTION)]
            for example in example_configs
        ]

    gr.Examples(
        examples=example_configs,
        inputs=[prompt_input, height_input, width_input],
        outputs=[video_output, seed_input],
        fn=generate_video,
        cache_examples="lazy",  # cache each example's output the first time it is run
    )
if __name__ == "__main__":
    demo.queue().launch()