import gradio as gr
from diffusers import StableDiffusionXLPipeline, DDIMScheduler
import torch
import sa_handler
import math
from diffusers.utils import load_image
import inversion
import numpy as np
import spaces
# init models
scheduler = DDIMScheduler(
    beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
    clip_sample=False, set_alpha_to_one=False)
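# These flags (no sample clipping, final-step alpha taken from the schedule)
# are the standard DDIM configuration for the inversion performed below.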
pipeline = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16,
    variant="fp16", use_safetensors=True,
    scheduler=scheduler,
).to("cuda")
pipeline.enable_model_cpu_offload()
pipeline.enable_vae_slicing()
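# Memory savers: offload idle sub-models to CPU and decode the VAE in slices,
# keeping peak VRAM low on the shared ZeroGPU hardware.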
@spaces.GPU(duration=60)
def run(ref_path, ref_style, ref_prompt, prompt1, progress=gr.Progress(track_tqdm=True)):
"""Generate an image in the style of a reference image using the StyleAligned method.
This function performs DDIM inversion on a reference image to capture its latent representation
and then applies the StyleAligned diffusion technique to transfer the style to a new prompt.
Args:
ref_path: File path to the reference image.
ref_style: Textual description of the reference style (e.g., 'medieval painting').
ref_prompt: Description of the content in the reference image.
prompt1: The prompt describing the new image to be generated with the same style.
progress: Internal Gradio progress tracker (automatically handled).
Returns:
A list of generated images with the style of the reference image applied to the new prompt.
"""
    # DDIM inversion
    src_style = ref_style
    src_prompt = f"{ref_prompt}, {src_style}."
    image_path = ref_path
    num_inference_steps = 50
    x0 = np.array(load_image(image_path).resize((1024, 1024)))
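    # SDXL base works at 1024x1024, so the reference is resized before inversion
    # (the latents below are 128x128 = 1024 / 8).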
    try:
        zts = inversion.ddim_inversion(pipeline, x0, src_prompt, num_inference_steps, 2)
    except Exception:
        # Retry once; the first call can fail transiently (e.g., while the GPU
        # is still being attached on ZeroGPU).
        zts = inversion.ddim_inversion(pipeline, x0, src_prompt, num_inference_steps, 2)
    # mediapy.show_image(x0, title="input reference image", height=256)
    # run StyleAligned
    prompts = [
        src_prompt,
        prompt1,
    ]
    # Some parameters you can adjust to control fidelity to the reference:
    shared_score_shift = np.log(2)  # higher value induces higher fidelity; set 0 for no shift
    shared_score_scale = 1.0  # higher value induces higher fidelity; set 1 for no rescale
    # For very famous images, consider suppressing attention to the reference.
    # Here is an example configuration:
    # shared_score_shift = np.log(1)
    # shared_score_scale = 0.5
    for i in range(1, len(prompts)):
        prompts[i] = f'{prompts[i]}, {src_style}.'
    handler = sa_handler.Handler(pipeline)
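    # StyleAligned shares self-attention across the batch: AdaIN on queries and
    # keys aligns each image's attention statistics with the reference, while the
    # score shift/scale rebalance how strongly targets attend to reference tokens.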
    sa_args = sa_handler.StyleAlignedArgs(
        share_group_norm=True, share_layer_norm=True, share_attention=True,
        adain_queries=True, adain_keys=True, adain_values=False,
        shared_score_shift=shared_score_shift, shared_score_scale=shared_score_scale,
    )
    handler.register(sa_args)
    zT, inversion_callback = inversion.make_inversion_callback(zts, offset=5)
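    # zT seeds the reference branch; during sampling the callback re-injects the
    # stored inversion latents into batch element 0 so the reference is
    # (approximately) re-generated (offset skips the first steps of the trajectory).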
    generator = torch.Generator(device='cuda')
    generator.manual_seed(10)
    latents = torch.randn(len(prompts), 4, 128, 128, device='cuda',
                          generator=generator, dtype=pipeline.unet.dtype)
    latents[0] = zT
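    # With element 0 pinned to the inverted latent, the shared-attention layers
    # let the second prompt borrow the reference image's style during denoising.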
    images_a = pipeline(prompts, latents=latents,
                        callback_on_step_end=inversion_callback,
                        num_inference_steps=num_inference_steps,
                        guidance_scale=10.0).images
    handler.remove()
    # mediapy.show_images(images_a, titles=[p[:-(len(src_style) + 3)] for p in prompts])
    return images_a
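
# A minimal local smoke test (a sketch, commented out; assumes the bundled
# example image and a local GPU -- not part of the Space's UI flow):
# if __name__ == "__main__":
#     imgs = run("./example_image/medieval-bed.jpeg", "medieval painting",
#                "Man laying on bed", "A man working on a laptop")
#     imgs[1].save("styled_output.png")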
css = """
#col-container{
margin: 0 auto;
max-width: 820px;
}
"""
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.HTML("""
        <h2 style="text-align: center;">Google's StyleAligned Transfer</h2>
        """)
        with gr.Row():
            with gr.Column():
                with gr.Group():
                    ref_path = gr.Image(type="filepath")
                    ref_style = gr.Textbox(label="Reference style")
                    ref_prompt = gr.Textbox(label="Reference prompt")
            with gr.Column():
                with gr.Group():
                    results = gr.Gallery()
                prompt1 = gr.Textbox(label="Prompt1")
                run_button = gr.Button("Submit")
        gr.Examples(
            examples=[
                [
                    "./example_image/medieval-bed.jpeg",
                    "medieval painting",
                    "Man laying on bed",
                    "A man working on a laptop",
                ]
            ],
            fn=run,
            inputs=[ref_path, ref_style, ref_prompt, prompt1],
            outputs=[results],
            cache_examples=False,
        )
        run_button.click(
            fn=run,
            inputs=[ref_path, ref_style, ref_prompt, prompt1],
            outputs=[results],
        )
demo.queue().launch(ssr_mode=False, mcp_server=True)