import gradio as gr
import torch

from diffusers import DiffusionPipeline

# Load the SDXL base model in half precision from a local cache directory.
base = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
    cache_dir="./local_model_cache",
)

# fp16 weights need a GPU; float16 inference is not fully supported on CPU.
base.to("cuda")

# Load the SDXL refiner, reusing the base model's second text encoder and VAE
# so those weights are not loaded twice.
refiner = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0",
    text_encoder_2=base.text_encoder_2,
    vae=base.vae,
    torch_dtype=torch.float16,
    use_safetensors=True,
    variant="fp16",
    cache_dir="./local_model_cache",
)

refiner.to("cuda")

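# Optional, if GPU memory is tight (assumes the `accelerate` package is
# installed): model CPU offloading moves submodules to the GPU only while
# they run, and would replace the explicit .to("cuda") calls above.
# base.enable_model_cpu_offload()
# refiner.enable_model_cpu_offload()
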
# 40 denoising steps in total: the base model handles the first 80% of the
# noise schedule, and the refiner finishes the remaining 20%.
n_steps = 40
high_noise_frac = 0.8

def create_image(prompt):
    # Stage 1: the base model denoises up to high_noise_frac and hands its
    # latents (not a decoded image) to the refiner.
    image = base(
        prompt=prompt,
        num_inference_steps=n_steps,
        denoising_end=high_noise_frac,
        output_type="latent",
    ).images
    # Stage 2: the refiner resumes from the same point in the noise schedule
    # and decodes the final image.
    image = refiner(
        prompt=prompt,
        num_inference_steps=n_steps,
        denoising_start=high_noise_frac,
        image=image,
    ).images[0]
    return image

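# Quick sanity check without the UI (the prompt here is only an example):
# img = create_image("An astronaut riding a green horse")
# img.save("sdxl_output.png")
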
# Minimal Gradio UI: one text box in, one generated image out.
demo = gr.Interface(
    fn=create_image,
    inputs=["text"],
    outputs=["image"],
)

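# Assumption: the default local launch is fine; pass share=True to
# demo.launch() for a temporary public URL.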
demo.launch()