import gradio as gr
import numpy as np
import random
from diffusers import DiffusionPipeline
import torch

# Select the GPU when one is available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

if torch.cuda.is_available():
    # Load SDXL-Turbo in half precision for faster GPU inference.
    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
    # Requires the xformers package; reduces attention memory usage.
    pipe.enable_xformers_memory_efficient_attention()
    pipe = pipe.to(device)
else:
    # CPU fallback: load the model in full precision.
    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
    pipe = pipe.to(device)


def generate_image(prompt, steps, guidance):
    # Run the pipeline and return the first generated image.
    # Cast steps to int in case the slider value arrives as a float.
    image = pipe(prompt=prompt, num_inference_steps=int(steps), guidance_scale=guidance).images[0]
    return image


iface = gr.Interface(
    fn=generate_image,
    inputs=[
        gr.Textbox(lines=2, placeholder="Enter your prompt here...", label="Prompt"),
        gr.Slider(minimum=1, maximum=50, value=10, step=1, label="Inference Steps"),
        gr.Slider(minimum=0.0, maximum=20.0, value=8.0, step=0.5, label="Guidance Scale"),
    ],
    outputs=gr.Image(type="pil", label="Generated Image"),
    title="SDXL-Turbo Image Generator",
    description="Generate images based on text prompts using the SDXL-Turbo model.",
)

iface.launch()
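# Note: passing share=True to launch() would additionally create a temporary public Gradio link.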