import numpy as np
import torch
from PIL.Image import Image
from torch import Generator

from diffusers import DDIMScheduler, StableDiffusionXLPipeline
from onediffx import compile_pipe, save_pipe, load_pipe

from pipelines.models import TextToImageRequest
from loss import SchedulerWrapper
from utils import register_parallel_pipeline_orig, register_faster_orig_forward, register_time


def callback_dynamic_cfg(pipe, step_index, timestep, callback_kwargs):
    # Once ~78% of the denoising steps have run, keep only the conditional half
    # of each batched tensor and disable classifier-free guidance, so the UNet
    # processes a single branch for the remaining steps.
    if step_index == int(pipe.num_timesteps * 0.78):
        callback_kwargs['prompt_embeds'] = callback_kwargs['prompt_embeds'].chunk(2)[-1]
        callback_kwargs['add_text_embeds'] = callback_kwargs['add_text_embeds'].chunk(2)[-1]
        callback_kwargs['add_time_ids'] = callback_kwargs['add_time_ids'].chunk(2)[-1]
        pipe._guidance_scale = 0.0

    return callback_kwargs
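
# Note: `callback_dynamic_cfg` is defined but never passed to the pipeline in this
# module. With recent diffusers releases it could be wired in roughly like this
# (a sketch, not exercised here; the tensor-input names mirror the keys read above):
#
#   pipeline(
#       prompt=request.prompt,
#       callback_on_step_end=callback_dynamic_cfg,
#       callback_on_step_end_tensor_inputs=['prompt_embeds', 'add_text_embeds', 'add_time_ids'],
#   )
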
def load_pipeline(pipeline=None) -> StableDiffusionXLPipeline:
    if not pipeline:
        pipeline = StableDiffusionXLPipeline.from_pretrained(
            "stablediffusionapi/newdream-sdxl-20",
            torch_dtype=torch.float16,
        ).to("cuda")

    # Replace the default scheduler with a wrapped DDIMScheduler, register the
    # custom UNet hooks, and compile the pipeline with OneDiff.
    pipeline.scheduler = SchedulerWrapper(DDIMScheduler.from_config(pipeline.scheduler.config))
    register_parallel_pipeline_orig(pipeline)
    register_faster_orig_forward(pipeline.unet)
    register_time(pipeline.unet, 0)
    pipeline = compile_pipe(pipeline)

    # Reset the on-disk call counter that `infer` reads and increments.
    np.save('count_array.npy', [0])

    # Warm-up pass (also triggers compilation of the OneDiff graph); afterwards
    # the scheduler wrapper prepares its loss data from the recorded run.
    for _ in range(1):
        pipeline(
            prompt="telestereography, unstrengthen, preadministrator, copatroness, hyperpersonal, paramountness, paranoid, guaniferous",
            output_type="pil",
            num_inference_steps=20,
        )
    pipeline.scheduler.prepare_loss()

    return pipeline
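
# `save_pipe` / `load_pipe` are imported but unused here. If startup compilation
# time matters, the compiled graph could be cached to disk along these lines
# (a sketch; the exact onediffx signatures should be checked against the
# installed version, and the cache directory name is arbitrary):
#
#   save_pipe(pipeline, dir='cached_pipe')   # after compile_pipe() and warm-up
#   load_pipe(pipeline, dir='cached_pipe')   # on a later startup, after compile_pipe()
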
def infer(request: TextToImageRequest, pipeline: StableDiffusionXLPipeline) -> Image:
    if request.seed is None:
        generator = None
    else:
        generator = Generator(pipeline.device).manual_seed(request.seed)

    # Read the persistent call counter, hand it to the UNet timing hook, and
    # advance it for the next request.
    step = np.load('count_array.npy')[0]
    register_time(pipeline.unet, step)
    print("step is:", step)
    np.save('count_array.npy', [step + 1])

    return pipeline(
        prompt=request.prompt,
        negative_prompt=request.negative_prompt,
        width=request.width,
        height=request.height,
        generator=generator,
        num_inference_steps=20,
        eta=1.0,
        guidance_scale=5.0,
        guidance_rescale=0.0,
    ).images[0]
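
# Minimal local smoke test (a sketch): it assumes TextToImageRequest accepts the
# fields that `infer` reads above; the field names are inferred, not confirmed
# against pipelines.models.
if __name__ == "__main__":
    pipe = load_pipeline()
    sample_request = TextToImageRequest(
        prompt="a watercolor painting of a lighthouse at dawn",
        negative_prompt=None,
        width=1024,
        height=1024,
        seed=0,
    )
    infer(sample_request, pipe).save("sample.png")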