import torch
from PIL.Image import Image
from torch import Generator
from diffusers import DDIMScheduler, StableDiffusionXLPipeline

from pipelines.models import TextToImageRequest
from loss import SchedulerWrapper, hook_pipe, get_instance
from onediffx import compile_pipe, save_pipe, load_pipe
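

# Dynamic classifier-free guidance: once roughly 78% of the denoising steps have
# run, drop the unconditional half of each CFG-batched tensor and lower the
# guidance scale, so the remaining UNet calls run on a single (conditional) batch.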
def callback_dynamic_cfg(pipe, step_index, timestep, callback_kwargs):
    if step_index == int(pipe.num_timesteps * 0.78):
        callback_kwargs['prompt_embeds'] = callback_kwargs['prompt_embeds'].chunk(2)[-1]
        callback_kwargs['add_text_embeds'] = callback_kwargs['add_text_embeds'].chunk(2)[-1]
        callback_kwargs['add_time_ids'] = callback_kwargs['add_time_ids'].chunk(2)[-1]
        pipe._guidance_scale = 0.1
    return callback_kwargs
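

# Builds the serving pipeline: loads newdream-sdxl-20 in fp16, swaps in a DDIM
# scheduler wrapped by SchedulerWrapper, compiles the pipeline with onediffx,
# restores a previously compiled pipe from the local cache directory, and runs
# a few warm-up generations before the pipeline is returned.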
def load_pipeline(pipeline=None) -> StableDiffusionXLPipeline:
    device = "cpu"
    if not pipeline:
        device = "cuda"
        pipeline = StableDiffusionXLPipeline.from_pretrained(
            "stablediffusionapi/newdream-sdxl-20",
            torch_dtype=torch.float16,
        ).to(device)

    pipeline.scheduler = SchedulerWrapper(DDIMScheduler.from_config(pipeline.scheduler.config))
    pipeline = compile_pipe(pipeline)
    load_pipe(pipeline, dir="/home/sandbox/.cache/huggingface/hub/models--RobertML--cached-pipe-02/snapshots/58d70deae87034cce351b780b48841f9746d4ad7")

    # instance = get_instance(device)
    # mul = torch.nn.Parameter(torch.tensor(0.3038, requires_grad=False, device=device))
    # sub = torch.nn.Parameter(torch.tensor(-0.3141, requires_grad=False, device=device))
    # scaling_factor = torch.nn.Parameter(torch.tensor(0.5439, requires_grad=False, device=device))
    # mul = torch.nn.Parameter(torch.tensor(0.2940097749233246, requires_grad=False, device=device))
    # sub = torch.nn.Parameter(torch.tensor(-0.31909096240997314, requires_grad=False, device=device))
    # scaling_factor = torch.nn.Parameter(torch.tensor(0.554410457611084, requires_grad=False, device=device))
    # mul = torch.nn.Parameter(torch.tensor(1.2, requires_grad=False, device=device))
    # sub = torch.nn.Parameter(torch.tensor(0.75, requires_grad=False, device=device))
    # scaling_factor = torch.nn.Parameter(torch.tensor(pipeline.vae.config.scaling_factor, requires_grad=False, device=device))
    # hook_pipe(pipeline, instance, mul, sub, scaling_factor)

    # Warm-up runs: one generation before pipeline.scheduler.prepare_loss(),
    # then two more, so the compiled pipeline is exercised before serving requests.
    for _ in range(1):
        deepcache_output = pipeline(
            prompt="telestereography, unstrengthen, preadministrator, copatroness, hyperpersonal, paramountness, paranoid, guaniferous",
            output_type="pil",
            num_inference_steps=20,
        )
    pipeline.scheduler.prepare_loss()
    for _ in range(2):
        pipeline(
            prompt="telestereography, unstrengthen, preadministrator, copatroness, hyperpersonal, paramountness, paranoid, guaniferous",
            output_type="pil",
            num_inference_steps=20,
        )

    return pipeline
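

# Generates one image per request: 13 DDIM steps with DeepCache parameters
# (cache_interval / cache_layer_id / cache_block_id) and the dynamic CFG
# callback defined above.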
def infer(request: TextToImageRequest, pipeline: StableDiffusionXLPipeline) -> Image:
    if request.seed is None:
        generator = None
    else:
        generator = Generator(pipeline.device).manual_seed(request.seed)

    return pipeline(
        prompt=request.prompt,
        negative_prompt=request.negative_prompt,
        width=request.width,
        height=request.height,
        generator=generator,
        num_inference_steps=13,
        cache_interval=1,
        cache_layer_id=1,
        cache_block_id=0,
        eta=1.0,
        guidance_scale=5.0,
        guidance_rescale=0.0,
        callback_on_step_end=callback_dynamic_cfg,
        callback_on_step_end_tensor_inputs=['prompt_embeds', 'add_text_embeds', 'add_time_ids'],
    ).images[0]
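

# Minimal local smoke test (illustrative sketch only, not part of the serving
# code). It assumes TextToImageRequest can be constructed directly with the
# prompt, negative_prompt, width, height and seed fields that infer() reads;
# if the model requires different construction, adapt accordingly.
if __name__ == "__main__":
    pipeline = load_pipeline()
    request = TextToImageRequest(
        prompt="a watercolor painting of a lighthouse at dawn",
        negative_prompt=None,
        width=1024,
        height=1024,
        seed=42,
    )
    image = infer(request, pipeline)
    image.save("sample.png")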