import spaces
import torch
from diffusers import HunyuanVideoPipeline, HunyuanVideoTransformer3DModel
# from para_attn.first_block_cache.diffusers_adapters import apply_cache_on_pipe
from PIL import Image  # only needed by the commented-out manual frame conversion below
import numpy as np  # only needed by the commented-out manual frame conversion below
import gradio as gr


# device = "cuda" if torch.cuda.is_available() else "cpu"

# model_id = "hunyuanvideo-community/HunyuanVideo"

# FastHunyuan is a step-distilled HunyuanVideo variant that samples in far fewer steps
model_id = "FastVideo/FastHunyuan-diffusers"

transformer = HunyuanVideoTransformer3DModel.from_pretrained(
    model_id, subfolder="transformer", torch_dtype=torch.bfloat16
)
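# Note: loading the transformer in bfloat16 while the rest of the pipeline runs in
# float16 follows the memory-saving pattern from the diffusers HunyuanVideo docs.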


pipe = HunyuanVideoPipeline.from_pretrained(model_id, transformer=transformer, torch_dtype=torch.float16)
pipe.vae.enable_tiling()  # decode latents in tiles to reduce peak VRAM in the VAE
# pipe.load_lora_weights("")
# pipe.to("cuda")
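# If VRAM is tight, model CPU offload is an alternative to moving the whole pipeline
# onto the GPU in generate() below (a sketch; trades throughput for memory):
# pipe.enable_model_cpu_offload()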

@spaces.GPU()
def generate(prompt, width=832, height=832, num_inference_steps=10, lora_id=None, progress=gr.Progress(track_tqdm=True)):
    """Generate a single frame (image) from a text prompt, optionally with a LoRA applied."""
    if lora_id and lora_id.strip() != "":
        pipe.unload_lora_weights()
        pipe.load_lora_weights(lora_id.strip())
    # apply_cache_on_pipe(
    #     pipe,
    #     # residual_diff_threshold=0.2,
    # )
    pipe.to("cuda")
    torch.cuda.empty_cache()
    try:
        output = pipe(
            prompt=prompt,
            # negative_prompt=negative_prompt,
            height=height,
            width=width,
            num_frames=1,  # a single frame turns the video pipeline into an image generator
            num_inference_steps=num_inference_steps,
            # guidance_scale=5.0,
        ).frames[0][0]  # first (and only) frame of the first video
        # image = (output * 255).astype(np.uint8)
        # return Image.fromarray(image)
        return output
    finally:
        # Always clear memory, even if an error occurs
        if lora_id and lora_id.strip() != "":
            pipe.unload_lora_weights()
        torch.cuda.empty_cache()
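
# Minimal local smoke test, bypassing the Gradio UI (hypothetical prompt; assumes
# the pipeline's default PIL output so .save() works on the returned frame):
# if __name__ == "__main__":
#     generate("a corgi astronaut on the moon", width=512, height=512,
#              num_inference_steps=10).save("sample.png")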


iface = gr.Interface(
    fn=generate,
    inputs=[
        gr.Textbox(label="Input prompt"),
    ],
    additional_inputs=[
        # gr.Textbox(label="Negative prompt", value="Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards"),
        gr.Slider(label="Width", minimum=192, maximum=1280, step=16, value=832),
        gr.Slider(label="Height", minimum=192, maximum=1280, step=16, value=832),
        gr.Slider(label="Inference Steps", minimum=1, maximum=80, step=1, value=10),
        gr.Textbox(label="LoRA ID"),
    ],
    outputs=gr.Image(label="Output"),
)


iface.launch(share=True, debug=True)