"""ControlNet OpenPose demo for Hugging Face Spaces.

Loads a Stable Diffusion 1.5 pipeline conditioned by the OpenPose
ControlNet and exposes it through a Gradio interface. Model weights are
loaded once at import time (on CPU); on ZeroGPU Spaces a GPU is only
available inside functions decorated with ``@spaces.GPU``, so the move
to CUDA happens per call inside ``generate``.
"""

import gradio as gr
import spaces
import torch
from diffusers import (
    ControlNetModel,
    StableDiffusionControlNetPipeline,
    UniPCMultistepScheduler,
)

# Initialize models outside the GPU function: downloading/loading weights is
# slow and must not consume the per-call GPU time budget.
controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/control_v11p_sd15_openpose", torch_dtype=torch.float16
)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=controlnet,
    torch_dtype=torch.float16,
    # NOTE(review): safety checker deliberately disabled for this demo;
    # confirm this is acceptable for the Space's audience.
    safety_checker=None,
)
# UniPC converges in fewer denoising steps than the pipeline's default scheduler.
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)


@spaces.GPU(duration=60)  # Request GPU for 60 seconds per call
def generate(image, prompt="a person posing", num_inference_steps=20):
    """Generate a ControlNet-conditioned image from *image* and *prompt*.

    Args:
        image: PIL image used as the OpenPose conditioning input.
        prompt: Text prompt guiding the diffusion process.
        num_inference_steps: Number of denoising steps. New optional
            parameter; the default of 20 preserves the original behavior.

    Returns:
        The first generated PIL image.

    Raises:
        gr.Error: If no input image was provided.
    """
    if image is None:
        # Fail with a user-visible message instead of an opaque traceback
        # from deep inside the pipeline.
        raise gr.Error("Please upload an image before generating.")
    # Move model to GPU inside the decorated function (ZeroGPU grants the
    # device only for the duration of this call). Idempotent across calls.
    pipe.to("cuda")
    result = pipe(
        prompt=prompt,
        image=image,
        num_inference_steps=num_inference_steps,
    )
    return result.images[0]


demo = gr.Interface(
    fn=generate,
    inputs=[
        gr.Image(type="pil"),
        gr.Textbox(label="Prompt", value="a person posing"),
    ],
    outputs="image",
    title="Pose Generator",
    description=(
        "Upload an image and enter a prompt to generate a ControlNet-based "
        "pose output."
    ),
)

if __name__ == "__main__":
    demo.launch()