Spaces:
Running
Running
File size: 1,167 Bytes
1265cca 5849473 1265cca 5849473 1265cca 03840de 1265cca 5849473 1265cca 5849473 1265cca 03840de 5849473 03840de 5849473 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 |
import gradio as gr
import spaces
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
import torch
# Initialize models outside the GPU function.
# Loading happens once at import time, so each @spaces.GPU call only has to
# move the already-loaded weights to the device rather than re-read them.
# OpenPose-conditioned ControlNet for SD 1.5, loaded in half precision.
controlnet = ControlNetModel.from_pretrained(
"lllyasviel/control_v11p_sd15_openpose", torch_dtype=torch.float16
)
# Base Stable Diffusion 1.5 pipeline with the ControlNet attached.
# safety_checker=None disables the built-in NSFW filter for this demo.
pipe = StableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5",
controlnet=controlnet,
torch_dtype=torch.float16,
safety_checker=None
)
# Replace the default scheduler with UniPC, built from the existing
# scheduler's config so all other sampling settings carry over.
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
# Move model to GPU inside the decorated function
@spaces.GPU(duration=60)  # Request GPU for 60 seconds per call
def generate(image, prompt="a person posing"):
    """Run the ControlNet pipeline on *image* guided by *prompt*.

    Args:
        image: PIL image used as the ControlNet conditioning input.
        prompt: Text prompt for the diffusion pipeline.

    Returns:
        The first generated PIL image from the pipeline output.
    """
    # The GPU is only attached for the lifetime of this call, so the
    # pipeline is moved to CUDA here rather than at import time.
    pipe.to("cuda")
    outputs = pipe(prompt=prompt, image=image, num_inference_steps=20)
    return outputs.images[0]
# Gradio UI: image + prompt in, generated image out.
demo = gr.Interface(
    fn=generate,
    inputs=[gr.Image(type="pil"), gr.Textbox(label="Prompt", value="a person posing")],
    outputs="image",
    title="Pose Generator",
    description="Upload an image and enter a prompt to generate a ControlNet-based pose output.",
)

if __name__ == "__main__":
    # Removed the stray trailing "|" (copy/paste artifact) and restored the
    # required indentation under the __main__ guard — as pasted, the file
    # was a SyntaxError.
    demo.launch()