# MagicQuillV2/train/train_kontext_edge.py
import argparse
import copy
import logging
import math
import os
import shutil
from contextlib import nullcontext
from pathlib import Path
import re
from safetensors.torch import save_file
from PIL import Image
import numpy as np
import torch
import torch.utils.checkpoint
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import DistributedDataParallelKwargs, ProjectConfiguration, set_seed
import diffusers
from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler
from diffusers.optimization import get_scheduler
from diffusers.training_utils import (
cast_training_params,
compute_density_for_timestep_sampling,
compute_loss_weighting_for_sd3,
)
from diffusers.utils.torch_utils import is_compiled_module
from diffusers.utils import (
check_min_version,
is_wandb_available,
)
from src.prompt_helper import *
from src.lora_helper import *
from src.jsonl_datasets_kontext_edge import make_train_dataset_inpaint_mask, collate_fn
from src.pipeline_flux_kontext_control import (
FluxKontextControlPipeline,
resize_position_encoding,
prepare_latent_subject_ids,
PREFERRED_KONTEXT_RESOLUTIONS
)
from src.transformer_flux import FluxTransformer2DModel
from diffusers.models.attention_processor import FluxAttnProcessor2_0
from src.layers import MultiDoubleStreamBlockLoraProcessor, MultiSingleStreamBlockLoraProcessor
from tqdm.auto import tqdm
if is_wandb_available():
import wandb
# Will error if the minimal version of diffusers is not installed. Remove at your own risk.
check_min_version("0.31.0.dev0")
logger = get_logger(__name__)
def log_validation(
pipeline,
args,
accelerator,
pipeline_args,
step,
torch_dtype,
is_final_validation=False,
):
    logger.info(
        "Running validation: strict per-case evaluation over paired (image, spatial image, prompt) inputs."
    )
pipeline = pipeline.to(accelerator.device)
pipeline.set_progress_bar_config(disable=True)
    generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed is not None else None
autocast_ctx = nullcontext()
# Build per-case evaluation: require equal lengths for image, spatial image, and prompt
if args.validation_images is None or args.validation_images == ['None']:
raise ValueError("validation_images must be provided and non-empty")
if args.validation_prompt is None:
raise ValueError("validation_prompt must be provided and non-empty")
control_dict_root = dict(pipeline_args.get("control_dict", {})) if pipeline_args is not None else {}
spatial_ls = control_dict_root.get("spatial_images", []) or []
val_imgs = args.validation_images
prompts = args.validation_prompt
if not (len(val_imgs) == len(prompts) == len(spatial_ls)):
raise ValueError(
f"Length mismatch: validation_images={len(val_imgs)}, validation_prompt={len(prompts)}, spatial_images={len(spatial_ls)}"
)
results = []
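    # Pick the PREFERRED_KONTEXT_RESOLUTIONS entry whose aspect ratio is closest to the input image's, then resize to it (bicubic).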
def _resize_to_preferred(img: Image.Image) -> Image.Image:
w, h = img.size
aspect_ratio = w / h if h != 0 else 1.0
_, target_w, target_h = min(
(abs(aspect_ratio - (pref_w / pref_h)), pref_w, pref_h)
for (pref_h, pref_w) in PREFERRED_KONTEXT_RESOLUTIONS
)
return img.resize((target_w, target_h), Image.BICUBIC)
# Strict per-case loop
num_cases = len(prompts)
logger.info(f"Paired validation: {num_cases} (image, spatial, prompt) cases")
with autocast_ctx:
for idx in range(num_cases):
resized_img = None
# If validation image path is a non-empty string, load and resize; otherwise, skip passing image
if isinstance(val_imgs[idx], str) and val_imgs[idx] != "":
try:
base_img = Image.open(val_imgs[idx]).convert("RGB")
resized_img = _resize_to_preferred(base_img)
except Exception as e:
raise ValueError(f"Failed to load/resize validation image idx={idx}: {e}")
case_args = dict(pipeline_args) if pipeline_args is not None else {}
case_args.pop("height", None)
case_args.pop("width", None)
if resized_img is not None:
tw, th = resized_img.size
case_args["height"] = th
case_args["width"] = tw
else:
# When no image is provided, default to 1024x1024
case_args["height"] = 1024
case_args["width"] = 1024
# Bind single spatial control image per case; pass it directly (no masking)
case_control = dict(case_args.get("control_dict", {}))
spatial_case = spatial_ls[idx]
# Load spatial image if it's a path; else assume it's already an image
try:
spatial_img = Image.open(spatial_case).convert("RGB") if isinstance(spatial_case, str) else spatial_case
except Exception:
spatial_img = spatial_case
case_control["spatial_images"] = [spatial_img]
case_control["subject_images"] = []
case_args["control_dict"] = case_control
# Override prompt per case
case_args["prompt"] = prompts[idx]
if resized_img is not None:
img = pipeline(image=resized_img, **case_args, generator=generator).images[0]
else:
img = pipeline(**case_args, generator=generator).images[0]
results.append(img)
# Log results (resize to 1024x1024 for logging only)
resized_for_log = [img.resize((1024, 1024), Image.BICUBIC) for img in results]
for tracker in accelerator.trackers:
phase_name = "test" if is_final_validation else "validation"
if tracker.name == "tensorboard":
np_images = np.stack([np.asarray(img) for img in resized_for_log])
tracker.writer.add_images(phase_name, np_images, step, dataformats="NHWC")
if tracker.name == "wandb":
tracker.log({
phase_name: [wandb.Image(image, caption=f"{i}: {prompts[i] if i < len(prompts) else ''}") for i, image in enumerate(resized_for_log)]
})
del pipeline
if torch.cuda.is_available():
torch.cuda.empty_cache()
return results
def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder"):
text_encoder_config = transformers.PretrainedConfig.from_pretrained(
pretrained_model_name_or_path, subfolder=subfolder, revision=revision
)
model_class = text_encoder_config.architectures[0]
if model_class == "CLIPTextModel":
from transformers import CLIPTextModel
return CLIPTextModel
elif model_class == "T5EncoderModel":
from transformers import T5EncoderModel
return T5EncoderModel
else:
raise ValueError(f"{model_class} is not supported.")
def parse_args(input_args=None):
parser = argparse.ArgumentParser(description="Training script for Flux Kontext with EasyControl.")
parser.add_argument("--lora_num", type=int, default=1, help="number of the lora.")
parser.add_argument("--cond_size", type=int, default=512, help="size of the condition data.")
parser.add_argument("--mode", type=str, default=None, help="Controller mode; kept for compatibility.")
parser.add_argument("--train_data_dir", type=str, default="", help="Path to JSONL dataset.")
parser.add_argument("--pretrained_model_name_or_path", type=str, default="", required=False, help="Base model path")
parser.add_argument("--pretrained_lora_path", type=str, default=None, required=False, help="LoRA checkpoint to initialize from")
parser.add_argument("--revision", type=str, default=None, required=False, help="Revision of pretrained model")
parser.add_argument("--variant", type=str, default=None, help="Variant of the model files")
parser.add_argument("--repeats", type=int, default=1, help="How many times to repeat the training data.")
parser.add_argument("--max_sequence_length", type=int, default=128, help="Max sequence length for T5")
parser.add_argument("--kontext", type=str, default="disable")
parser.add_argument("--validation_prompt", type=str, nargs="+", default=None)
parser.add_argument("--validation_images", type=str, nargs="+", default=None, help="List of valiadation images")
parser.add_argument("--subject_test_images", type=str, nargs="+", default=None, help="List of subject test images")
parser.add_argument("--spatial_test_images", type=str, nargs="+", default=None, help="List of spatial test images")
parser.add_argument("--num_validation_images", type=int, default=4)
parser.add_argument("--validation_steps", type=int, default=20)
parser.add_argument("--ranks", type=int, nargs="+", default=[128], help="LoRA ranks")
parser.add_argument("--network_alphas", type=int, nargs="+", default=[128], help="LoRA network alphas")
parser.add_argument("--output_dir", type=str, default="/tiamat-NAS/zhangyuxuan/projects2/Easy_Control_0120/single_models/subject_model", help="Output directory")
parser.add_argument("--seed", type=int, default=None)
parser.add_argument("--train_batch_size", type=int, default=1)
parser.add_argument("--num_train_epochs", type=int, default=50)
parser.add_argument("--max_train_steps", type=int, default=None)
parser.add_argument("--checkpointing_steps", type=int, default=1000)
parser.add_argument("--checkpoints_total_limit", type=int, default=None)
parser.add_argument("--resume_from_checkpoint", type=str, default=None)
parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
parser.add_argument("--gradient_checkpointing", action="store_true")
parser.add_argument("--learning_rate", type=float, default=1e-4)
parser.add_argument("--guidance_scale", type=float, default=1.0, help="Flux Kontext is guidance distilled")
parser.add_argument("--scale_lr", action="store_true", default=False)
parser.add_argument("--lr_scheduler", type=str, default="constant")
parser.add_argument("--lr_warmup_steps", type=int, default=500)
parser.add_argument("--lr_num_cycles", type=int, default=1)
parser.add_argument("--lr_power", type=float, default=1.0)
parser.add_argument("--dataloader_num_workers", type=int, default=1)
parser.add_argument("--weighting_scheme", type=str, default="none", choices=["sigma_sqrt", "logit_normal", "mode", "cosmap", "none"])
parser.add_argument("--logit_mean", type=float, default=0.0)
parser.add_argument("--logit_std", type=float, default=1.0)
parser.add_argument("--mode_scale", type=float, default=1.29)
parser.add_argument("--optimizer", type=str, default="AdamW")
parser.add_argument("--use_8bit_adam", action="store_true")
parser.add_argument("--adam_beta1", type=float, default=0.9)
parser.add_argument("--adam_beta2", type=float, default=0.999)
parser.add_argument("--prodigy_beta3", type=float, default=None)
parser.add_argument("--prodigy_decouple", type=bool, default=True)
parser.add_argument("--adam_weight_decay", type=float, default=1e-04)
parser.add_argument("--adam_weight_decay_text_encoder", type=float, default=1e-03)
parser.add_argument("--adam_epsilon", type=float, default=1e-08)
parser.add_argument("--prodigy_use_bias_correction", type=bool, default=True)
parser.add_argument("--prodigy_safeguard_warmup", type=bool, default=True)
parser.add_argument("--max_grad_norm", type=float, default=1.0)
parser.add_argument("--logging_dir", type=str, default="logs")
parser.add_argument("--cache_latents", action="store_true", default=False)
parser.add_argument("--report_to", type=str, default="tensorboard")
parser.add_argument("--mixed_precision", type=str, default="bf16", choices=["no", "fp16", "bf16"])
parser.add_argument("--upcast_before_saving", action="store_true", default=False)
if input_args is not None:
args = parser.parse_args(input_args)
else:
args = parser.parse_args()
return args
def main(args):
if torch.backends.mps.is_available() and args.mixed_precision == "bf16":
raise ValueError("Mixed precision training with bfloat16 is not supported on MPS. Please use fp16 or fp32 instead.")
    if args.output_dir is not None:
        os.makedirs(args.output_dir, exist_ok=True)
        os.makedirs(os.path.join(args.output_dir, args.logging_dir), exist_ok=True)
    logging_dir = Path(args.output_dir, args.logging_dir)
accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
accelerator = Accelerator(
gradient_accumulation_steps=args.gradient_accumulation_steps,
mixed_precision=args.mixed_precision,
log_with=args.report_to,
project_config=accelerator_project_config,
kwargs_handlers=[kwargs],
)
if torch.backends.mps.is_available():
accelerator.native_amp = False
if args.report_to == "wandb":
if not is_wandb_available():
raise ImportError("Install wandb for logging during training.")
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
transformers.utils.logging.set_verbosity_warning()
diffusers.utils.logging.set_verbosity_info()
else:
transformers.utils.logging.set_verbosity_error()
diffusers.utils.logging.set_verbosity_error()
if args.seed is not None:
set_seed(args.seed)
if accelerator.is_main_process and args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
# Tokenizers
tokenizer_one = transformers.CLIPTokenizer.from_pretrained(
args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision
)
tokenizer_two = transformers.T5TokenizerFast.from_pretrained(
args.pretrained_model_name_or_path, subfolder="tokenizer_2", revision=args.revision
)
# Text encoders
text_encoder_cls_one = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision, subfolder="text_encoder")
text_encoder_cls_two = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision, subfolder="text_encoder_2")
# Scheduler and models
noise_scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
noise_scheduler_copy = copy.deepcopy(noise_scheduler)
text_encoder_one, text_encoder_two = load_text_encoders(args, text_encoder_cls_one, text_encoder_cls_two)
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision, variant=args.variant)
transformer = FluxTransformer2DModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="transformer", revision=args.revision, variant=args.variant)
    # Enable gradients for now; everything except the LoRA adapters is frozen once the attention processors are installed below.
    transformer.requires_grad_(True)
vae.requires_grad_(False)
text_encoder_one.requires_grad_(False)
text_encoder_two.requires_grad_(False)
weight_dtype = torch.float32
if accelerator.mixed_precision == "fp16":
weight_dtype = torch.float16
elif accelerator.mixed_precision == "bf16":
weight_dtype = torch.bfloat16
if torch.backends.mps.is_available() and weight_dtype == torch.bfloat16:
raise ValueError("Mixed precision training with bfloat16 is not supported on MPS. Please use fp16 or fp32 instead.")
vae.to(accelerator.device, dtype=weight_dtype)
transformer.to(accelerator.device, dtype=weight_dtype)
text_encoder_one.to(accelerator.device, dtype=weight_dtype)
text_encoder_two.to(accelerator.device, dtype=weight_dtype)
if args.gradient_checkpointing:
transformer.enable_gradient_checkpointing()
# Setup LoRA attention processors
if args.pretrained_lora_path is not None:
lora_path = args.pretrained_lora_path
checkpoint = load_checkpoint(lora_path)
lora_attn_procs = {}
double_blocks_idx = list(range(19))
single_blocks_idx = list(range(38))
number = 1
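        # Only the first LoRA slot (index 0) is initialized from the checkpoint below; additional slots keep their fresh initialization.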
for name, attn_processor in transformer.attn_processors.items():
match = re.search(r'\.(\d+)\.', name)
if match:
layer_index = int(match.group(1))
if name.startswith("transformer_blocks") and layer_index in double_blocks_idx:
lora_state_dicts = {}
for key, value in checkpoint.items():
if re.search(r'\.(\d+)\.', key):
checkpoint_layer_index = int(re.search(r'\.(\d+)\.', key).group(1))
if checkpoint_layer_index == layer_index and key.startswith("transformer_blocks"):
lora_state_dicts[key] = value
lora_attn_procs[name] = MultiDoubleStreamBlockLoraProcessor(
dim=3072, ranks=args.ranks, network_alphas=args.network_alphas, lora_weights=[1 for _ in range(args.lora_num)], device=accelerator.device, dtype=weight_dtype, cond_width=args.cond_size, cond_height=args.cond_size, n_loras=args.lora_num
)
for n in range(number):
lora_attn_procs[name].q_loras[n].down.weight.data = lora_state_dicts.get(f'{name}.q_loras.{n}.down.weight', None)
lora_attn_procs[name].q_loras[n].up.weight.data = lora_state_dicts.get(f'{name}.q_loras.{n}.up.weight', None)
lora_attn_procs[name].k_loras[n].down.weight.data = lora_state_dicts.get(f'{name}.k_loras.{n}.down.weight', None)
lora_attn_procs[name].k_loras[n].up.weight.data = lora_state_dicts.get(f'{name}.k_loras.{n}.up.weight', None)
lora_attn_procs[name].v_loras[n].down.weight.data = lora_state_dicts.get(f'{name}.v_loras.{n}.down.weight', None)
lora_attn_procs[name].v_loras[n].up.weight.data = lora_state_dicts.get(f'{name}.v_loras.{n}.up.weight', None)
lora_attn_procs[name].proj_loras[n].down.weight.data = lora_state_dicts.get(f'{name}.proj_loras.{n}.down.weight', None)
lora_attn_procs[name].proj_loras[n].up.weight.data = lora_state_dicts.get(f'{name}.proj_loras.{n}.up.weight', None)
elif name.startswith("single_transformer_blocks") and layer_index in single_blocks_idx:
lora_state_dicts = {}
for key, value in checkpoint.items():
if re.search(r'\.(\d+)\.', key):
checkpoint_layer_index = int(re.search(r'\.(\d+)\.', key).group(1))
if checkpoint_layer_index == layer_index and key.startswith("single_transformer_blocks"):
lora_state_dicts[key] = value
lora_attn_procs[name] = MultiSingleStreamBlockLoraProcessor(
dim=3072, ranks=args.ranks, network_alphas=args.network_alphas, lora_weights=[1 for _ in range(args.lora_num)], device=accelerator.device, dtype=weight_dtype, cond_width=args.cond_size, cond_height=args.cond_size, n_loras=args.lora_num
)
for n in range(number):
lora_attn_procs[name].q_loras[n].down.weight.data = lora_state_dicts.get(f'{name}.q_loras.{n}.down.weight', None)
lora_attn_procs[name].q_loras[n].up.weight.data = lora_state_dicts.get(f'{name}.q_loras.{n}.up.weight', None)
lora_attn_procs[name].k_loras[n].down.weight.data = lora_state_dicts.get(f'{name}.k_loras.{n}.down.weight', None)
lora_attn_procs[name].k_loras[n].up.weight.data = lora_state_dicts.get(f'{name}.k_loras.{n}.up.weight', None)
lora_attn_procs[name].v_loras[n].down.weight.data = lora_state_dicts.get(f'{name}.v_loras.{n}.down.weight', None)
lora_attn_procs[name].v_loras[n].up.weight.data = lora_state_dicts.get(f'{name}.v_loras.{n}.up.weight', None)
else:
lora_attn_procs[name] = FluxAttnProcessor2_0()
else:
lora_attn_procs = {}
double_blocks_idx = list(range(19))
single_blocks_idx = list(range(38))
for name, attn_processor in transformer.attn_processors.items():
match = re.search(r'\.(\d+)\.', name)
if match:
layer_index = int(match.group(1))
if name.startswith("transformer_blocks") and layer_index in double_blocks_idx:
lora_attn_procs[name] = MultiDoubleStreamBlockLoraProcessor(
dim=3072, ranks=args.ranks, network_alphas=args.network_alphas, lora_weights=[1 for _ in range(args.lora_num)], device=accelerator.device, dtype=weight_dtype, cond_width=args.cond_size, cond_height=args.cond_size, n_loras=args.lora_num
)
elif name.startswith("single_transformer_blocks") and layer_index in single_blocks_idx:
lora_attn_procs[name] = MultiSingleStreamBlockLoraProcessor(
dim=3072, ranks=args.ranks, network_alphas=args.network_alphas, lora_weights=[1 for _ in range(args.lora_num)], device=accelerator.device, dtype=weight_dtype, cond_width=args.cond_size, cond_height=args.cond_size, n_loras=args.lora_num
)
else:
lora_attn_procs[name] = attn_processor
transformer.set_attn_processor(lora_attn_procs)
transformer.train()
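    # Freeze every parameter whose name lacks '_lora'; only the injected LoRA weights remain trainable.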
for n, param in transformer.named_parameters():
if '_lora' not in n:
param.requires_grad = False
print(sum([p.numel() for p in transformer.parameters() if p.requires_grad]) / 1000000, 'M parameters')
def unwrap_model(model):
model = accelerator.unwrap_model(model)
model = model._orig_mod if is_compiled_module(model) else model
return model
if args.resume_from_checkpoint:
path = args.resume_from_checkpoint
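        # Only the global step is parsed from the checkpoint directory name here; LoRA weights are restored separately via --pretrained_lora_path.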
global_step = int(path.split("-")[-1])
initial_global_step = global_step
else:
initial_global_step = 0
global_step = 0
first_epoch = 0
if args.scale_lr:
args.learning_rate = (
args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
)
if args.mixed_precision == "fp16":
models = [transformer]
cast_training_params(models, dtype=torch.float32)
params_to_optimize = [p for p in transformer.parameters() if p.requires_grad]
transformer_parameters_with_lr = {"params": params_to_optimize, "lr": args.learning_rate}
    print(sum([p.numel() for p in transformer.parameters() if p.requires_grad]) / 1000000, 'M parameters')
optimizer_class = torch.optim.AdamW
optimizer = optimizer_class(
[transformer_parameters_with_lr],
betas=(args.adam_beta1, args.adam_beta2),
weight_decay=args.adam_weight_decay,
eps=args.adam_epsilon,
)
tokenizers = [tokenizer_one, tokenizer_two]
text_encoders = [text_encoder_one, text_encoder_two]
train_dataset = make_train_dataset_inpaint_mask(args, tokenizers, accelerator)
train_dataloader = torch.utils.data.DataLoader(
train_dataset,
batch_size=args.train_batch_size,
shuffle=True,
collate_fn=collate_fn,
num_workers=args.dataloader_num_workers,
)
vae_config_shift_factor = vae.config.shift_factor
vae_config_scaling_factor = vae.config.scaling_factor
overrode_max_train_steps = False
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.resume_from_checkpoint:
first_epoch = global_step // num_update_steps_per_epoch
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
overrode_max_train_steps = True
lr_scheduler = get_scheduler(
args.lr_scheduler,
optimizer=optimizer,
num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
num_training_steps=args.max_train_steps * accelerator.num_processes,
num_cycles=args.lr_num_cycles,
power=args.lr_power,
)
transformer, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
transformer, optimizer, train_dataloader, lr_scheduler
)
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if overrode_max_train_steps:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
# Sanitize config for TensorBoard hparams (only allow int/float/bool/str/tensor). Others are stringified if possible; otherwise dropped
def _sanitize_hparams(config_dict):
sanitized = {}
for key, value in dict(config_dict).items():
try:
if value is None:
continue
# numpy scalar types
if isinstance(value, (np.integer,)):
sanitized[key] = int(value)
elif isinstance(value, (np.floating,)):
sanitized[key] = float(value)
elif isinstance(value, (int, float, bool, str)):
sanitized[key] = value
elif isinstance(value, Path):
sanitized[key] = str(value)
elif isinstance(value, (list, tuple)):
# stringify simple sequences; skip if fails
sanitized[key] = str(value)
else:
# best-effort stringify
sanitized[key] = str(value)
except Exception:
# skip unconvertible entries
continue
return sanitized
if accelerator.is_main_process:
tracker_name = "Easy_Control_Kontext"
accelerator.init_trackers(tracker_name, config=_sanitize_hparams(vars(args)))
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num batches each epoch = {len(train_dataloader)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
progress_bar = tqdm(
range(0, args.max_train_steps),
initial=initial_global_step,
desc="Steps",
disable=not accelerator.is_local_main_process,
)
def get_sigmas(timesteps, n_dim=4, dtype=torch.float32):
sigmas = noise_scheduler_copy.sigmas.to(device=accelerator.device, dtype=dtype)
schedule_timesteps = noise_scheduler_copy.timesteps.to(accelerator.device)
timesteps = timesteps.to(accelerator.device)
step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps]
sigma = sigmas[step_indices].flatten()
while len(sigma.shape) < n_dim:
sigma = sigma.unsqueeze(-1)
return sigma
# Kontext specifics
vae_scale_factor = 8 # Kontext uses 8x VAE factor; pack/unpack uses additional 2x in methods
# Match pipeline's prepare_latents cond resolution: 2 * (cond_size // (vae_scale_factor * 2))
height_cond = 2 * (args.cond_size // (vae_scale_factor * 2))
width_cond = 2 * (args.cond_size // (vae_scale_factor * 2))
offset = 64
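    # Added below to the subject tokens' row position ids so they sit apart from the main image grid.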
for epoch in range(first_epoch, args.num_train_epochs):
transformer.train()
for step, batch in enumerate(train_dataloader):
models_to_accumulate = [transformer]
with accelerator.accumulate(models_to_accumulate):
tokens = [batch["text_ids_1"], batch["text_ids_2"]]
prompt_embeds, pooled_prompt_embeds, text_ids = encode_token_ids(text_encoders, tokens, accelerator)
prompt_embeds = prompt_embeds.to(dtype=vae.dtype, device=accelerator.device)
pooled_prompt_embeds = pooled_prompt_embeds.to(dtype=vae.dtype, device=accelerator.device)
text_ids = text_ids.to(dtype=vae.dtype, device=accelerator.device)
pixel_values = batch["pixel_values"].to(dtype=vae.dtype)
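                # Latent grid dimensions, rounded down to even numbers so the 2x2 packing below divides evenly.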
height_ = 2 * (int(pixel_values.shape[-2]) // (vae_scale_factor * 2))
width_ = 2 * (int(pixel_values.shape[-1]) // (vae_scale_factor * 2))
model_input = vae.encode(pixel_values).latent_dist.sample()
model_input = (model_input - vae_config_shift_factor) * vae_config_scaling_factor
model_input = model_input.to(dtype=weight_dtype)
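                # Position ids (RoPE coordinates) for the main latent grid and for the cond_size control grid.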
latent_image_ids, cond_latent_image_ids = resize_position_encoding(
model_input.shape[0], height_, width_, height_cond, width_cond, accelerator.device, weight_dtype
)
noise = torch.randn_like(model_input)
bsz = model_input.shape[0]
u = compute_density_for_timestep_sampling(
weighting_scheme=args.weighting_scheme,
batch_size=bsz,
logit_mean=args.logit_mean,
logit_std=args.logit_std,
mode_scale=args.mode_scale,
)
indices = (u * noise_scheduler_copy.config.num_train_timesteps).long()
timesteps = noise_scheduler_copy.timesteps[indices].to(device=model_input.device)
sigmas = get_sigmas(timesteps, n_dim=model_input.ndim, dtype=model_input.dtype)
noisy_model_input = (1.0 - sigmas) * model_input + sigmas * noise
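                # _pack_latents folds each 2x2 latent patch into the channel dimension, giving a token sequence of length (H/2) * (W/2).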
packed_noisy_model_input = FluxKontextControlPipeline._pack_latents(
noisy_model_input,
batch_size=model_input.shape[0],
num_channels_latents=model_input.shape[1],
height=model_input.shape[2],
width=model_input.shape[3],
)
latent_image_ids_to_concat = [latent_image_ids]
packed_cond_model_input_to_concat = []
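                # Kontext mode: encode the source (pre-edit) image with the VAE and append its packed latents as extra context tokens.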
if args.kontext == "enable":
source_pixel_values = batch["source_pixel_values"].to(dtype=vae.dtype)
source_image_latents = vae.encode(source_pixel_values).latent_dist.sample()
source_image_latents = (source_image_latents - vae_config_shift_factor) * vae_config_scaling_factor
image_latent_h, image_latent_w = source_image_latents.shape[2:]
packed_image_latents = FluxKontextControlPipeline._pack_latents(
source_image_latents,
batch_size=source_image_latents.shape[0],
num_channels_latents=source_image_latents.shape[1],
height=image_latent_h,
width=image_latent_w,
)
source_image_ids = FluxKontextControlPipeline._prepare_latent_image_ids(
batch_size=source_image_latents.shape[0],
height=image_latent_h // 2,
width=image_latent_w // 2,
device=accelerator.device,
dtype=weight_dtype,
)
source_image_ids[..., 0] = 1 # Mark as condition
latent_image_ids_to_concat.append(source_image_ids)
subject_pixel_values = batch.get("subject_pixel_values")
if subject_pixel_values is not None:
subject_pixel_values = subject_pixel_values.to(dtype=vae.dtype)
subject_input = vae.encode(subject_pixel_values).latent_dist.sample()
subject_input = (subject_input - vae_config_shift_factor) * vae_config_scaling_factor
subject_input = subject_input.to(dtype=weight_dtype)
sub_number = subject_pixel_values.shape[-2] // args.cond_size
latent_subject_ids = prepare_latent_subject_ids(height_cond // 2, width_cond // 2, accelerator.device, weight_dtype)
latent_subject_ids[..., 0] = 2
latent_subject_ids[:, 1] += offset
sub_latent_image_ids = torch.cat([latent_subject_ids for _ in range(sub_number)], dim=0)
latent_image_ids_to_concat.append(sub_latent_image_ids)
packed_subject_model_input = FluxKontextControlPipeline._pack_latents(
subject_input,
batch_size=subject_input.shape[0],
num_channels_latents=subject_input.shape[1],
height=subject_input.shape[2],
width=subject_input.shape[3],
)
packed_cond_model_input_to_concat.append(packed_subject_model_input)
cond_pixel_values = batch.get("cond_pixel_values")
if cond_pixel_values is not None:
cond_pixel_values = cond_pixel_values.to(dtype=vae.dtype)
cond_input = vae.encode(cond_pixel_values).latent_dist.sample()
cond_input = (cond_input - vae_config_shift_factor) * vae_config_scaling_factor
cond_input = cond_input.to(dtype=weight_dtype)
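                    # Spatial control images arrive as a vertical strip of cond_size-tall tiles; infer how many from the strip height.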
cond_number = cond_pixel_values.shape[-2] // args.cond_size
cond_latent_image_ids[..., 0] = 2
cond_latent_image_ids_rep = torch.cat([cond_latent_image_ids for _ in range(cond_number)], dim=0)
latent_image_ids_to_concat.append(cond_latent_image_ids_rep)
packed_cond_model_input = FluxKontextControlPipeline._pack_latents(
cond_input,
batch_size=cond_input.shape[0],
num_channels_latents=cond_input.shape[1],
height=cond_input.shape[2],
width=cond_input.shape[3],
)
packed_cond_model_input_to_concat.append(packed_cond_model_input)
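                # Gather position ids for every stream and concatenate all control latents along the token dimension (assumes at least one subject or spatial control image is present).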
latent_image_ids = torch.cat(latent_image_ids_to_concat, dim=0)
cond_packed_noisy_model_input = torch.cat(packed_cond_model_input_to_concat, dim=1)
if accelerator.unwrap_model(transformer).config.guidance_embeds:
guidance = torch.tensor([args.guidance_scale], device=accelerator.device)
guidance = guidance.expand(model_input.shape[0])
else:
guidance = None
                latent_model_input = packed_noisy_model_input
if args.kontext == "enable":
latent_model_input = torch.cat([latent_model_input, packed_image_latents], dim=1)
model_pred = transformer(
hidden_states=latent_model_input,
cond_hidden_states=cond_packed_noisy_model_input,
timestep=timesteps / 1000,
guidance=guidance,
pooled_projections=pooled_prompt_embeds,
encoder_hidden_states=prompt_embeds,
txt_ids=text_ids,
img_ids=latent_image_ids,
return_dict=False,
)[0]
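                # Keep only the predictions for the noisy target tokens; Kontext and control tokens are discarded.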
model_pred = model_pred[:, : packed_noisy_model_input.size(1)]
model_pred = FluxKontextControlPipeline._unpack_latents(
model_pred,
height=int(pixel_values.shape[-2]),
width=int(pixel_values.shape[-1]),
vae_scale_factor=vae_scale_factor,
)
weighting = compute_loss_weighting_for_sd3(weighting_scheme=args.weighting_scheme, sigmas=sigmas)
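                # Flow-matching target: the velocity from data to noise (noise - clean latents).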
target = noise - model_input
loss = torch.mean((weighting.float() * (model_pred.float() - target.float()) ** 2).reshape(target.shape[0], -1), 1)
loss = loss.mean()
accelerator.backward(loss)
if accelerator.sync_gradients:
params_to_clip = (transformer.parameters())
accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
if accelerator.sync_gradients:
progress_bar.update(1)
global_step += 1
if accelerator.is_main_process:
if global_step % args.checkpointing_steps == 0:
if args.checkpoints_total_limit is not None:
checkpoints = os.listdir(args.output_dir)
checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
if len(checkpoints) >= args.checkpoints_total_limit:
num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
removing_checkpoints = checkpoints[0:num_to_remove]
logger.info(f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints")
logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
for removing_checkpoint in removing_checkpoints:
removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
shutil.rmtree(removing_checkpoint)
save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
os.makedirs(save_path, exist_ok=True)
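                        # Checkpoint only the LoRA weights (parameter names containing '_lora') as a safetensors file.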
unwrapped_model_state = accelerator.unwrap_model(transformer).state_dict()
lora_state_dict = {k: unwrapped_model_state[k] for k in unwrapped_model_state.keys() if '_lora' in k}
save_file(lora_state_dict, os.path.join(save_path, "lora.safetensors"))
logger.info(f"Saved state to {save_path}")
logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
progress_bar.set_postfix(**logs)
accelerator.log(logs, step=global_step)
if accelerator.is_main_process:
if args.validation_prompt is not None and global_step % args.validation_steps == 0:
pipeline = FluxKontextControlPipeline.from_pretrained(
args.pretrained_model_name_or_path,
vae=vae,
text_encoder=accelerator.unwrap_model(text_encoder_one),
text_encoder_2=accelerator.unwrap_model(text_encoder_two),
transformer=accelerator.unwrap_model(transformer),
revision=args.revision,
variant=args.variant,
torch_dtype=weight_dtype,
)
if args.subject_test_images is not None and len(args.subject_test_images) != 0 and args.subject_test_images != ['None']:
subject_paths = args.subject_test_images
subject_ls = [Image.open(image_path).convert("RGB") for image_path in subject_paths]
else:
subject_ls = []
if args.spatial_test_images is not None and len(args.spatial_test_images) != 0 and args.spatial_test_images != ['None']:
spatial_paths = args.spatial_test_images
spatial_ls = [Image.open(image_path).convert("RGB") for image_path in spatial_paths]
else:
spatial_ls = []
pipeline_args = {
"prompt": args.validation_prompt,
"cond_size": args.cond_size,
"guidance_scale": 3.5,
"num_inference_steps": 20,
"max_sequence_length": 128,
"control_dict": {"spatial_images": spatial_ls, "subject_images": subject_ls},
}
images = log_validation(
pipeline=pipeline,
args=args,
accelerator=accelerator,
pipeline_args=pipeline_args,
step=global_step,
torch_dtype=weight_dtype,
)
save_path = os.path.join(args.output_dir, "validation")
os.makedirs(save_path, exist_ok=True)
save_folder = os.path.join(save_path, f"checkpoint-{global_step}")
os.makedirs(save_folder, exist_ok=True)
for idx, img in enumerate(images):
img.save(os.path.join(save_folder, f"{idx}.jpg"))
del pipeline
accelerator.wait_for_everyone()
accelerator.end_training()
if __name__ == "__main__":
args = parse_args()
main(args)