import argparse
import copy
import logging
import math
import os
import re
import shutil
import time
from contextlib import nullcontext
from pathlib import Path

import numpy as np
import torch
import torch.utils.checkpoint
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import DistributedDataParallelKwargs, ProjectConfiguration, set_seed
from PIL import Image
from safetensors.torch import save_file

import diffusers
from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, FluxKontextPipeline, FluxPipeline
from diffusers.models import FluxTransformer2DModel
from diffusers.optimization import get_scheduler
from diffusers.training_utils import (
    cast_training_params,
    compute_density_for_timestep_sampling,
    compute_loss_weighting_for_sd3,
)
from diffusers.utils import check_min_version, convert_state_dict_to_diffusers, is_wandb_available
from diffusers.utils.torch_utils import is_compiled_module
from peft import LoraConfig
from peft.utils import get_peft_model_state_dict
from tqdm.auto import tqdm

from src.prompt_helper import *
from src.lora_helper import *
from src.jsonl_datasets_kontext_interactive_lora import (
    make_interactive_dataset_subjects,
    make_placement_dataset_subjects,
    make_pexels_dataset_subjects,
    make_mixed_dataset,
    collate_fn,
)

if is_wandb_available():
    import wandb

# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
check_min_version("0.31.0.dev0")

logger = get_logger(__name__)

PREFERRED_KONTEXT_RESOLUTIONS = [
    (672, 1568),
    (688, 1504),
    (720, 1456),
    (752, 1392),
    (832, 1248),
    (880, 1184),
    (944, 1104),
    (1024, 1024),
    (1104, 944),
    (1184, 880),
    (1248, 832),
    (1392, 752),
    (1456, 720),
    (1504, 688),
    (1568, 672),
]
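
# Illustration only (hypothetical helper, not used by the training loop): how an input size is
# snapped to the closest aspect-ratio bucket. It mirrors `_resize_to_preferred` inside
# `log_validation` below and interprets each tuple as (height, width), matching the unpacking
# used there. All buckets hold roughly one megapixel.
def _closest_kontext_resolution(width: int, height: int) -> tuple:
    aspect_ratio = width / height if height != 0 else 1.0
    _, target_w, target_h = min(
        (abs(aspect_ratio - (pref_w / pref_h)), pref_w, pref_h)
        for (pref_h, pref_w) in PREFERRED_KONTEXT_RESOLUTIONS
    )
    # e.g. _closest_kontext_resolution(1920, 1080) -> (1392, 752)
    return target_w, target_h
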
def log_validation(
    pipeline,
    args,
    accelerator,
    pipeline_args,
    step,
    torch_dtype,
    is_final_validation=False,
):
    logger.info("Running validation... Paired evaluation for image and prompt.")
    pipeline = pipeline.to(device=accelerator.device, dtype=torch_dtype)
    pipeline.set_progress_bar_config(disable=True)
    generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed is not None else None

    # Match compute dtype for validation to avoid dtype mismatches (e.g., VAE bf16 vs float latents)
    if torch_dtype in (torch.float16, torch.bfloat16):
        device_type = "cuda" if torch.cuda.is_available() else "cpu"
        autocast_ctx = torch.autocast(device_type=device_type, dtype=torch_dtype)
    else:
        autocast_ctx = nullcontext()

    # Build per-case evaluation
    if args.validation_images is None or args.validation_images == ["None"]:
        raise ValueError("validation_images must be provided and non-empty")
    if args.validation_prompt is None:
        raise ValueError("validation_prompt must be provided and non-empty")
    val_imgs = args.validation_images
    prompts = args.validation_prompt

    # Prepend the instruction to each prompt (same as the dataset/test requirement)
    instruction = "Fill in the white region naturally and adapt the foreground into the background. Fix the perspective of the foreground object if necessary."
    try:
        prompts = [
            f"{instruction} {p}".strip() if isinstance(p, str) and len(p.strip()) > 0 else instruction
            for p in prompts
        ]
    except Exception:
        # Fallback: keep the original prompts if anything unexpected happens
        pass
    if len(val_imgs) != len(prompts):
        raise ValueError(f"Length mismatch: validation_images={len(val_imgs)}, validation_prompt={len(prompts)}")
    results = []

    def _resize_to_preferred(img: Image.Image) -> Image.Image:
        w, h = img.size
        aspect_ratio = w / h if h != 0 else 1.0
        _, target_w, target_h = min(
            (abs(aspect_ratio - (pref_w / pref_h)), pref_w, pref_h)
            for (pref_h, pref_w) in PREFERRED_KONTEXT_RESOLUTIONS
        )
        return img.resize((target_w, target_h), Image.BICUBIC)

    # Distributed per-rank assignment: each process handles its own slice of cases
    num_cases = len(prompts)
    logger.info(f"Paired validation (distributed): {num_cases} cases across {accelerator.num_processes} ranks")
    # Indices assigned to this rank
    rank = accelerator.process_index
    world_size = accelerator.num_processes
    local_indices = list(range(rank, num_cases, world_size))
    local_images = []
    with autocast_ctx:
        for idx in local_indices:
            try:
                base_img = Image.open(val_imgs[idx]).convert("RGB")
                resized_img = _resize_to_preferred(base_img)
            except Exception as e:
                raise ValueError(f"Failed to load/resize validation image idx={idx}: {e}")
            case_args = dict(pipeline_args) if pipeline_args is not None else {}
            case_args.pop("height", None)
            case_args.pop("width", None)
            if resized_img is not None:
                tw, th = resized_img.size
                case_args["height"] = th
                case_args["width"] = tw
            case_args["prompt"] = prompts[idx]
            img = pipeline(image=resized_img, **case_args, generator=generator).images[0]
            local_images.append(img)

    # Gather all per-rank images (padded to an equal count) onto the main process
    fixed_size = (1024, 1024)
    max_local = int(math.ceil(num_cases / world_size)) if world_size > 0 else len(local_images)
    # Build per-rank batch tensors
    imgs_rank = []
    idx_rank = []
    has_rank = []
    for j in range(max_local):
        if j < len(local_images):
            resized = local_images[j].resize(fixed_size, Image.BICUBIC)
            img_np = np.asarray(resized).astype(np.uint8)
            imgs_rank.append(torch.from_numpy(img_np))
            idx_rank.append(local_indices[j])
            has_rank.append(1)
        else:
            imgs_rank.append(torch.from_numpy(np.zeros((fixed_size[1], fixed_size[0], 3), dtype=np.uint8)))
            idx_rank.append(-1)
            has_rank.append(0)
    imgs_rank_tensor = torch.stack([t.to(device=accelerator.device) for t in imgs_rank], dim=0)  # [max_local, H, W, C]
    idx_rank_tensor = torch.tensor(idx_rank, device=accelerator.device, dtype=torch.long)  # [max_local]
    has_rank_tensor = torch.tensor(has_rank, device=accelerator.device, dtype=torch.int)  # [max_local]
    gathered_has = accelerator.gather(has_rank_tensor)  # [world * max_local]
    gathered_idx = accelerator.gather(idx_rank_tensor)  # [world * max_local]
    gathered_imgs = accelerator.gather(imgs_rank_tensor)  # [world * max_local, H, W, C]

    if accelerator.is_main_process:
        world = int(world_size)
        slots = int(max_local)
        try:
            gathered_has = gathered_has.view(world, slots)
            gathered_idx = gathered_idx.view(world, slots)
            gathered_imgs = gathered_imgs.view(world, slots, fixed_size[1], fixed_size[0], 3)
        except Exception:
            # Fallback: treat the tensors as flat if the reshape fails
            gathered_has = gathered_has.view(-1, 1)
            gathered_idx = gathered_idx.view(-1, 1)
            gathered_imgs = gathered_imgs.view(-1, 1, fixed_size[1], fixed_size[0], 3)
            world = int(gathered_has.shape[0])
            slots = 1
        collected = []
        for i in range(world):
            for j in range(slots):
                if int(gathered_has[i, j].item()) == 1:
                    idx = int(gathered_idx[i, j].item())
                    arr = gathered_imgs[i, j].cpu().numpy()
                    pil_img = Image.fromarray(arr.astype(np.uint8))
                    # Resize back to the original validation image size
                    try:
                        orig = Image.open(val_imgs[idx]).convert("RGB")
                        pil_img = pil_img.resize(orig.size, Image.BICUBIC)
                    except Exception:
                        pass
                    collected.append((idx, pil_img))
        # Restore case order so saved filenames and logged captions line up with their prompts
        results = [img for _, img in sorted(collected, key=lambda pair: pair[0])]

    # Log results (resize to 1024x1024 for saving or external trackers). Skip TensorBoard per request.
    resized_for_log = [img.resize((1024, 1024), Image.BICUBIC) for img in results]
    for tracker in accelerator.trackers:
        phase_name = "test" if is_final_validation else "validation"
        if tracker.name == "tensorboard":
            continue
        if tracker.name == "wandb":
            tracker.log(
                {
                    phase_name: [
                        wandb.Image(image, caption=f"{i}: {prompts[i] if i < len(prompts) else ''}")
                        for i, image in enumerate(resized_for_log)
                    ]
                }
            )

    del pipeline
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    return results
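
# NOTE on the cross-rank gather in `log_validation` above: `accelerator.gather` (an all_gather
# under the hood) requires every rank to contribute tensors of the same shape, so each rank pads
# its image batch to `max_local` slots and sends a parallel `has_rank` mask; the main process then
# drops the padded slots and keeps only real results. The 1024x1024 resize before gathering is just
# a fixed transport shape - images are resized back to their original sizes afterwards.
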
def save_with_retry(img: Image.Image, path: str, max_retries: int = 3) -> bool:
    """Save a PIL image with simple retry and exponential backoff to mitigate transient I/O errors."""
    last_err = None
    for attempt in range(max_retries):
        try:
            os.makedirs(os.path.dirname(path), exist_ok=True)
            img.save(path)
            return True
        except OSError as e:
            last_err = e
            # Exponential backoff: 1.0, 1.5, 2.25 seconds ...
            time.sleep(1.5 ** attempt)
    logger.warning(f"Failed to save {path} after {max_retries} retries: {last_err}")
    return False


def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder"):
    text_encoder_config = transformers.PretrainedConfig.from_pretrained(
        pretrained_model_name_or_path, subfolder=subfolder, revision=revision
    )
    model_class = text_encoder_config.architectures[0]
    if model_class == "CLIPTextModel":
        from transformers import CLIPTextModel
        return CLIPTextModel
    elif model_class == "T5EncoderModel":
        from transformers import T5EncoderModel
        return T5EncoderModel
    else:
        raise ValueError(f"{model_class} is not supported.")


def parse_args(input_args=None):
    parser = argparse.ArgumentParser(description="Training script for Flux Kontext with EasyControl.")
    parser.add_argument("--mode", type=str, default=None, help="Controller mode; kept for compatibility.")

    # Dataset arguments
    parser.add_argument("--dataset_mode", type=str, default="mixed", choices=["interactive", "placement", "pexels", "mixed"],
                        help="Dataset mode: interactive, placement, pexels, or mixed")
    parser.add_argument("--train_data_jsonl", type=str, default="/robby/share/Editing/lzc/HOI_v1/final_metadata.jsonl",
                        help="Path to interactive dataset JSONL")
    parser.add_argument("--placement_data_jsonl", type=str, default="/robby/share/Editing/lzc/subject_placement/metadata_relight.jsonl",
                        help="Path to placement dataset JSONL")
    parser.add_argument("--pexels_data_jsonl", type=str, default=None, help="Path to pexels dataset JSONL")
    parser.add_argument("--interactive_base_dir", type=str, default="/robby/share/Editing/lzc/HOI_v1",
                        help="Base directory for interactive dataset")
    parser.add_argument("--placement_base_dir", type=str, default="/robby/share/Editing/lzc/subject_placement",
                        help="Base directory for placement dataset")
    parser.add_argument("--pexels_base_dir", type=str, default=None, help="Base directory for pexels dataset")
    parser.add_argument("--pexels_relight_base_dir", type=str, default=None, help="Base directory for pexels relighted images")
    parser.add_argument("--seg_base_dir", type=str, default=None, help="Directory containing segmentation maps for pexels dataset")
    parser.add_argument("--interactive_weight", type=float, default=1.0, help="Sampling weight for interactive dataset (default: 1.0)")
    parser.add_argument("--placement_weight", type=float, default=1.0, help="Sampling weight for placement dataset (default: 1.0)")
    parser.add_argument("--pexels_weight", type=float, default=0.1, help="Sampling weight for pexels dataset (default: 0.1)")

    # Model arguments
    parser.add_argument("--pretrained_model_name_or_path", type=str, default="", required=False, help="Base model path")
    parser.add_argument("--pretrained_lora_path", type=str, default=None, required=False, help="LoRA checkpoint to initialize from")
    parser.add_argument("--revision", type=str, default=None, required=False, help="Revision of pretrained model")
    parser.add_argument("--variant", type=str, default=None, help="Variant of the model files")
    parser.add_argument("--repeats", type=int, default=1, help="How many times to repeat the training data.")
    parser.add_argument("--max_sequence_length", type=int, default=128, help="Max sequence length for T5")
    parser.add_argument("--kontext", type=str, default="enable")

    # Validation arguments
    parser.add_argument("--validation_prompt", type=str, nargs="+", default=None)
    parser.add_argument("--validation_images", type=str, nargs="+", default=None, help="List of validation images")
    parser.add_argument("--num_validation_images", type=int, default=4)
    parser.add_argument("--validation_steps", type=int, default=20)

    # Training arguments
    parser.add_argument("--ranks", type=int, nargs="+", default=[32], help="LoRA ranks")
    parser.add_argument("--output_dir", type=str, default="", help="Output directory")
    parser.add_argument("--seed", type=int, default=None)
    parser.add_argument("--train_batch_size", type=int, default=1)
    parser.add_argument("--num_train_epochs", type=int, default=50)
    parser.add_argument("--max_train_steps", type=int, default=None)
    parser.add_argument("--checkpointing_steps", type=int, default=1000)
    parser.add_argument("--checkpoints_total_limit", type=int, default=None)
    parser.add_argument("--resume_from_checkpoint", type=str, default=None)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--gradient_checkpointing", action="store_true")
    parser.add_argument("--learning_rate", type=float, default=1e-4)
    parser.add_argument("--guidance_scale", type=float, default=1.0, help="Flux Kontext is guidance distilled")
    parser.add_argument("--scale_lr", action="store_true", default=False)
    parser.add_argument("--lr_scheduler", type=str, default="constant")
    parser.add_argument("--lr_warmup_steps", type=int, default=500)
    parser.add_argument("--lr_num_cycles", type=int, default=1)
    parser.add_argument("--lr_power", type=float, default=1.0)
    parser.add_argument("--dataloader_num_workers", type=int, default=8)
    parser.add_argument("--weighting_scheme", type=str, default="none", choices=["sigma_sqrt", "logit_normal", "mode", "cosmap", "none"])
    parser.add_argument("--logit_mean", type=float, default=0.0)
    parser.add_argument("--logit_std", type=float, default=1.0)
    parser.add_argument("--mode_scale", type=float, default=1.29)

    # Optimizer arguments
    parser.add_argument("--optimizer", type=str, default="AdamW")
    parser.add_argument("--use_8bit_adam", action="store_true")
    parser.add_argument("--adam_beta1", type=float, default=0.9)
    parser.add_argument("--adam_beta2", type=float, default=0.999)
    parser.add_argument("--prodigy_beta3", type=float, default=None)
    parser.add_argument("--prodigy_decouple", type=bool, default=True)
    parser.add_argument("--adam_weight_decay", type=float, default=1e-04)
    parser.add_argument("--adam_weight_decay_text_encoder", type=float, default=1e-03)
    parser.add_argument("--adam_epsilon", type=float, default=1e-08)
    parser.add_argument("--prodigy_use_bias_correction", type=bool, default=True)
    parser.add_argument("--prodigy_safeguard_warmup", type=bool, default=True)
    parser.add_argument("--max_grad_norm", type=float, default=1.0)

    # Logging / precision
    parser.add_argument("--logging_dir", type=str, default="logs")
    parser.add_argument("--cache_latents", action="store_true", default=False)
    parser.add_argument("--report_to", type=str, default="tensorboard")
    parser.add_argument("--mixed_precision", type=str, default="bf16", choices=["no", "fp16", "bf16"])
    parser.add_argument("--upcast_before_saving", action="store_true", default=False)

    # Blending options for dataset pixel_values
    parser.add_argument("--blend_pixel_values", action="store_true", help="Blend target/source into pixel_values using mask")
    parser.add_argument("--blend_kernel", type=int, default=21, help="Gaussian blur kernel size (must be odd)")
    parser.add_argument("--blend_sigma", type=float, default=10.0, help="Gaussian blur sigma")

    if input_args is not None:
        args = parser.parse_args(input_args)
    else:
        args = parser.parse_args()
    return args
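
# Example invocation (illustrative only; script name, paths, and hyperparameters are placeholders):
#
#   accelerate launch train_kontext_lora.py \
#       --pretrained_model_name_or_path /path/to/FLUX.1-Kontext-dev \
#       --dataset_mode mixed \
#       --train_data_jsonl /path/to/interactive.jsonl \
#       --placement_data_jsonl /path/to/placement.jsonl \
#       --output_dir ./outputs/kontext_lora \
#       --ranks 32 --learning_rate 1e-4 --train_batch_size 1 \
#       --gradient_accumulation_steps 4 --mixed_precision bf16 \
#       --validation_prompt "Place the mug on the desk." \
#       --validation_images /path/to/val_0.png
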
def main(args):
    if torch.backends.mps.is_available() and args.mixed_precision == "bf16":
        raise ValueError("Mixed precision training with bfloat16 is not supported on MPS. Please use fp16 or fp32 instead.")

    if args.output_dir is not None:
        os.makedirs(args.output_dir, exist_ok=True)
        os.makedirs(os.path.join(args.output_dir, args.logging_dir), exist_ok=True)
    logging_dir = Path(args.output_dir, args.logging_dir)

    accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
    kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
    accelerator = Accelerator(
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        mixed_precision=args.mixed_precision,
        log_with=args.report_to,
        project_config=accelerator_project_config,
        kwargs_handlers=[kwargs],
    )
    if torch.backends.mps.is_available():
        accelerator.native_amp = False

    if args.report_to == "wandb":
        if not is_wandb_available():
            raise ImportError("Install wandb for logging during training.")

    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state, main_process_only=False)
    if accelerator.is_local_main_process:
        transformers.utils.logging.set_verbosity_warning()
        diffusers.utils.logging.set_verbosity_info()
    else:
        transformers.utils.logging.set_verbosity_error()
        diffusers.utils.logging.set_verbosity_error()

    if args.seed is not None:
        set_seed(args.seed)

    if accelerator.is_main_process and args.output_dir is not None:
        os.makedirs(args.output_dir, exist_ok=True)

    # Tokenizers
    tokenizer_one = transformers.CLIPTokenizer.from_pretrained(
        args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision
    )
    tokenizer_two = transformers.T5TokenizerFast.from_pretrained(
        args.pretrained_model_name_or_path, subfolder="tokenizer_2", revision=args.revision
    )

    # Text encoders
    text_encoder_cls_one = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision, subfolder="text_encoder")
    text_encoder_cls_two = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision, subfolder="text_encoder_2")

    # Scheduler and models
    noise_scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
    noise_scheduler_copy = copy.deepcopy(noise_scheduler)
    text_encoder_one, text_encoder_two = load_text_encoders(args, text_encoder_cls_one, text_encoder_cls_two)
    vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision, variant=args.variant)
    transformer = FluxTransformer2DModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="transformer", revision=args.revision, variant=args.variant)

    # Train only LoRA adapters: freeze the base transformer, text encoders, and VAE
    transformer.requires_grad_(False)
    vae.requires_grad_(False)
    text_encoder_one.requires_grad_(False)
    text_encoder_two.requires_grad_(False)

    weight_dtype = torch.float32
    if accelerator.mixed_precision == "fp16":
        weight_dtype = torch.float16
    elif accelerator.mixed_precision == "bf16":
        weight_dtype = torch.bfloat16

    if torch.backends.mps.is_available() and weight_dtype == torch.bfloat16:
        raise ValueError("Mixed precision training with bfloat16 is not supported on MPS. Please use fp16 or fp32 instead.")

    vae.to(accelerator.device, dtype=weight_dtype)
    transformer.to(accelerator.device, dtype=weight_dtype)
    text_encoder_one.to(accelerator.device, dtype=weight_dtype)
    text_encoder_two.to(accelerator.device, dtype=weight_dtype)

    if args.gradient_checkpointing:
        transformer.enable_gradient_checkpointing()

    # Setup standard PEFT LoRA on FluxTransformer2DModel
    target_modules = [
        "attn.to_k",
        "attn.to_q",
        "attn.to_v",
        "attn.to_out.0",
        "attn.add_k_proj",
        "attn.add_q_proj",
        "attn.add_v_proj",
        "attn.to_add_out",
        "ff.net.0.proj",
        "ff.net.2",
        "ff_context.net.0.proj",
        "ff_context.net.2",
        # Additional targets 1: layers specific to the single-stream blocks (single_transformer_blocks).
        # Their attention projections (to_q, to_k, to_v) are already covered by the generic names above;
        # the entries below add their MLP and output projections.
        "proj_mlp",
        "proj_out",  # also matches each single-stream block's output projection and the model's final output projection
        # Additional targets 2: all normalization (Norm) projection layers.
        # These layers modulate feature distributions and matter for style learning.
        # "linear" matches every module whose name ends with ".linear"
        # (norm1.linear, norm1_context.linear, norm.linear, norm_out.linear).
        "linear",
    ]
    lora_rank = int(args.ranks[0]) if isinstance(args.ranks, list) and len(args.ranks) > 0 else 256
    lora_config = LoraConfig(
        r=lora_rank,
        lora_alpha=lora_rank,
        init_lora_weights="gaussian",
        target_modules=target_modules,
    )
    transformer.add_adapter(lora_config)
    transformer.train()
    print(sum([p.numel() for p in transformer.parameters() if p.requires_grad]) / 1000000, "M parameters")

    def unwrap_model(model):
        model = accelerator.unwrap_model(model)
        model = model._orig_mod if is_compiled_module(model) else model
        return model

    if args.resume_from_checkpoint:
        # Only the global step counter is restored from the checkpoint folder name;
        # model/optimizer state is not reloaded here.
        path = args.resume_from_checkpoint
        global_step = int(path.split("-")[-1])
        initial_global_step = global_step
    else:
        initial_global_step = 0
        global_step = 0
    first_epoch = 0

    if args.scale_lr:
        args.learning_rate = (
            args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
        )

    if args.mixed_precision == "fp16":
        models = [transformer]
        cast_training_params(models, dtype=torch.float32)

    params_to_optimize = [p for p in transformer.parameters() if p.requires_grad]
    transformer_parameters_with_lr = {"params": params_to_optimize, "lr": args.learning_rate}
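
    # Note: with lora_alpha == r the LoRA scaling factor (alpha / r) is 1.0, so the adapter update
    # is applied without extra rescaling. Because the base transformer was frozen above,
    # `params_to_optimize` contains only the injected LoRA A/B matrices, which keeps optimizer
    # state small relative to full fine-tuning.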
    optimizer_class = torch.optim.AdamW
    optimizer = optimizer_class(
        [transformer_parameters_with_lr],
        betas=(args.adam_beta1, args.adam_beta2),
        weight_decay=args.adam_weight_decay,
        eps=args.adam_epsilon,
    )

    tokenizers = [tokenizer_one, tokenizer_two]
    text_encoders = [text_encoder_one, text_encoder_two]

    # Create dataset based on mode
    if args.dataset_mode == "mixed":
        # Mixed mode: combine all available datasets
        train_dataset = make_mixed_dataset(
            args,
            tokenizers,
            interactive_jsonl_path=args.train_data_jsonl,
            placement_jsonl_path=args.placement_data_jsonl,
            pexels_jsonl_path=args.pexels_data_jsonl,
            interactive_base_dir=args.interactive_base_dir,
            placement_base_dir=args.placement_base_dir,
            pexels_base_dir=args.pexels_base_dir,
            interactive_weight=args.interactive_weight,
            placement_weight=args.placement_weight,
            pexels_weight=args.pexels_weight,
            accelerator=accelerator,
        )
        weights_str = []
        if args.train_data_jsonl:
            weights_str.append(f"Interactive: {args.interactive_weight:.2f}")
        if args.placement_data_jsonl:
            weights_str.append(f"Placement: {args.placement_weight:.2f}")
        if args.pexels_data_jsonl:
            weights_str.append(f"Pexels: {args.pexels_weight:.2f}")
        logger.info(f"Mixed dataset created with weights - {', '.join(weights_str)}")
    elif args.dataset_mode == "pexels":
        if not args.pexels_data_jsonl:
            raise ValueError("pexels_data_jsonl must be provided for pexels mode")
        train_dataset = make_pexels_dataset_subjects(args, tokenizers, accelerator)
    elif args.dataset_mode == "placement":
        if not args.placement_data_jsonl:
            raise ValueError("placement_data_jsonl must be provided for placement mode")
        train_dataset = make_placement_dataset_subjects(args, tokenizers, accelerator)
    else:
        # interactive mode
        train_dataset = make_interactive_dataset_subjects(args, tokenizers, accelerator)

    train_dataloader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.train_batch_size,
        shuffle=True,
        collate_fn=collate_fn,
        num_workers=args.dataloader_num_workers,
    )

    vae_config_shift_factor = vae.config.shift_factor
    vae_config_scaling_factor = vae.config.scaling_factor

    overrode_max_train_steps = False
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if args.resume_from_checkpoint:
        first_epoch = global_step // num_update_steps_per_epoch
    if args.max_train_steps is None:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
        overrode_max_train_steps = True

    lr_scheduler = get_scheduler(
        args.lr_scheduler,
        optimizer=optimizer,
        num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
        num_training_steps=args.max_train_steps * accelerator.num_processes,
        num_cycles=args.lr_num_cycles,
        power=args.lr_power,
    )

    transformer, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
        transformer, optimizer, train_dataloader, lr_scheduler
    )
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if overrode_max_train_steps:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
    args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
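
    # The warmup and total step counts passed to `get_scheduler` above are scaled by
    # `accelerator.num_processes` because the accelerate-prepared scheduler advances the
    # underlying schedule once per process on every `lr_scheduler.step()` call; without the
    # scaling the schedule would finish early in multi-GPU runs.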
    # Sanitize config for TensorBoard hparams (only allow int/float/bool/str/tensor).
    # Others are stringified if possible; otherwise dropped.
    def _sanitize_hparams(config_dict):
        sanitized = {}
        for key, value in dict(config_dict).items():
            try:
                if value is None:
                    continue
                # numpy scalar types
                if isinstance(value, (np.integer,)):
                    sanitized[key] = int(value)
                elif isinstance(value, (np.floating,)):
                    sanitized[key] = float(value)
                elif isinstance(value, (int, float, bool, str)):
                    sanitized[key] = value
                elif isinstance(value, Path):
                    sanitized[key] = str(value)
                elif isinstance(value, (list, tuple)):
                    # stringify simple sequences; skip if this fails
                    sanitized[key] = str(value)
                else:
                    # best-effort stringify
                    sanitized[key] = str(value)
            except Exception:
                # skip unconvertible entries
                continue
        return sanitized

    if accelerator.is_main_process:
        tracker_name = "Easy_Control_Kontext"
        accelerator.init_trackers(tracker_name, config=_sanitize_hparams(vars(args)))

    total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
    logger.info("***** Running training *****")
    logger.info(f"  Num examples = {len(train_dataset)}")
    logger.info(f"  Num batches each epoch = {len(train_dataloader)}")
    logger.info(f"  Num Epochs = {args.num_train_epochs}")
    logger.info(f"  Instantaneous batch size per device = {args.train_batch_size}")
    logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
    logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
    logger.info(f"  Total optimization steps = {args.max_train_steps}")

    progress_bar = tqdm(
        range(0, args.max_train_steps),
        initial=initial_global_step,
        desc="Steps",
        disable=not accelerator.is_local_main_process,
    )

    def get_sigmas(timesteps, n_dim=4, dtype=torch.float32):
        sigmas = noise_scheduler_copy.sigmas.to(device=accelerator.device, dtype=dtype)
        schedule_timesteps = noise_scheduler_copy.timesteps.to(accelerator.device)
        timesteps = timesteps.to(accelerator.device)
        step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < n_dim:
            sigma = sigma.unsqueeze(-1)
        return sigma

    # Kontext specifics
    vae_scale_factor = 8  # Kontext uses an 8x VAE factor; pack/unpack apply an additional 2x internally

    for epoch in range(first_epoch, args.num_train_epochs):
        transformer.train()
        for step, batch in enumerate(train_dataloader):
            models_to_accumulate = [transformer]
            with accelerator.accumulate(models_to_accumulate):
                tokens = [batch["text_ids_1"], batch["text_ids_2"]]
                prompt_embeds, pooled_prompt_embeds, text_ids = encode_token_ids(text_encoders, tokens, accelerator)
                prompt_embeds = prompt_embeds.to(dtype=vae.dtype, device=accelerator.device)
                pooled_prompt_embeds = pooled_prompt_embeds.to(dtype=vae.dtype, device=accelerator.device)
                text_ids = text_ids.to(dtype=vae.dtype, device=accelerator.device)

                pixel_values = batch["pixel_values"].to(dtype=vae.dtype)
                height_ = 2 * (int(pixel_values.shape[-2]) // (vae_scale_factor * 2))
                width_ = 2 * (int(pixel_values.shape[-1]) // (vae_scale_factor * 2))

                model_input = vae.encode(pixel_values).latent_dist.sample()
                model_input = (model_input - vae_config_shift_factor) * vae_config_scaling_factor
                model_input = model_input.to(dtype=weight_dtype)

                # Prepare latent ids for the transformer (positional encodings)
                latent_image_ids = FluxKontextPipeline._prepare_latent_image_ids(
                    batch_size=model_input.shape[0],
                    height=model_input.shape[2] // 2,
                    width=model_input.shape[3] // 2,
                    device=accelerator.device,
                    dtype=weight_dtype,
                )

                noise = torch.randn_like(model_input)
                bsz = model_input.shape[0]
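                # Flow-matching setup: sample a timestep from the configured density, form
                #   x_t = (1 - sigma_t) * x_0 + sigma_t * noise,
                # and train the transformer to predict the velocity target (noise - x_0);
                # the SD3-style `weighting_scheme` controls the per-sigma loss weighting below.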
                u = compute_density_for_timestep_sampling(
                    weighting_scheme=args.weighting_scheme,
                    batch_size=bsz,
                    logit_mean=args.logit_mean,
                    logit_std=args.logit_std,
                    mode_scale=args.mode_scale,
                )
                indices = (u * noise_scheduler_copy.config.num_train_timesteps).long()
                timesteps = noise_scheduler_copy.timesteps[indices].to(device=model_input.device)
                sigmas = get_sigmas(timesteps, n_dim=model_input.ndim, dtype=model_input.dtype)
                noisy_model_input = (1.0 - sigmas) * model_input + sigmas * noise

                packed_noisy_model_input = FluxKontextPipeline._pack_latents(
                    noisy_model_input,
                    batch_size=model_input.shape[0],
                    num_channels_latents=model_input.shape[1],
                    height=model_input.shape[2],
                    width=model_input.shape[3],
                )

                if accelerator.unwrap_model(transformer).config.guidance_embeds:
                    guidance = torch.tensor([args.guidance_scale], device=accelerator.device)
                    guidance = guidance.expand(model_input.shape[0])
                else:
                    guidance = None

                # If kontext editing is enabled, append source image latents to the sequence
                latent_model_input = packed_noisy_model_input
                if args.kontext == "enable":
                    source_pixel_values = batch["source_pixel_values"].to(dtype=vae.dtype)
                    source_image_latents = vae.encode(source_pixel_values).latent_dist.sample()
                    source_image_latents = (source_image_latents - vae_config_shift_factor) * vae_config_scaling_factor
                    image_latent_h, image_latent_w = source_image_latents.shape[2:]
                    packed_image_latents = FluxKontextPipeline._pack_latents(
                        source_image_latents,
                        batch_size=source_image_latents.shape[0],
                        num_channels_latents=source_image_latents.shape[1],
                        height=image_latent_h,
                        width=image_latent_w,
                    )
                    source_image_ids = FluxKontextPipeline._prepare_latent_image_ids(
                        batch_size=source_image_latents.shape[0],
                        height=image_latent_h // 2,
                        width=image_latent_w // 2,
                        device=accelerator.device,
                        dtype=weight_dtype,
                    )
                    source_image_ids[..., 0] = 1  # mark source-image tokens with a distinct id
                    latent_model_input = torch.cat([latent_model_input, packed_image_latents], dim=1)
                    latent_image_ids = torch.cat([latent_image_ids, source_image_ids], dim=0)

                # Forward transformer with packed latents and ids
                model_pred = transformer(
                    hidden_states=latent_model_input,
                    timestep=timesteps / 1000,
                    guidance=guidance,
                    pooled_projections=pooled_prompt_embeds,
                    encoder_hidden_states=prompt_embeds,
                    txt_ids=text_ids,
                    img_ids=latent_image_ids,
                    return_dict=False,
                )[0]
                # Keep only the prediction for the target (noisy) tokens; drop the appended source-image tokens
                model_pred = model_pred[:, : packed_noisy_model_input.size(1)]
                model_pred = FluxKontextPipeline._unpack_latents(
                    model_pred,
                    height=int(pixel_values.shape[-2]),
                    width=int(pixel_values.shape[-1]),
                    vae_scale_factor=vae_scale_factor,
                )

                weighting = compute_loss_weighting_for_sd3(weighting_scheme=args.weighting_scheme, sigmas=sigmas)
                target = noise - model_input
                loss = torch.mean(
                    (weighting.float() * (model_pred.float() - target.float()) ** 2).reshape(target.shape[0], -1),
                    1,
                )
                loss = loss.mean()

                accelerator.backward(loss)
                if accelerator.sync_gradients:
                    params_to_clip = transformer.parameters()
                    accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            if accelerator.sync_gradients:
                progress_bar.update(1)
                global_step += 1

                if accelerator.is_main_process:
                    if global_step % args.checkpointing_steps == 0:
                        if args.checkpoints_total_limit is not None:
                            checkpoints = os.listdir(args.output_dir)
                            checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
                            checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
                            if len(checkpoints) >= args.checkpoints_total_limit:
                                num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
                                removing_checkpoints = checkpoints[0:num_to_remove]
                                logger.info(f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints")
                                logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
                                for removing_checkpoint in removing_checkpoints:
                                    removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
                                    shutil.rmtree(removing_checkpoint)

                        save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
                        os.makedirs(save_path, exist_ok=True)
                        unwrapped = accelerator.unwrap_model(transformer)
                        peft_state = get_peft_model_state_dict(unwrapped)
                        # Convert PEFT state dict to diffusers LoRA format for the transformer
                        diffusers_lora = convert_state_dict_to_diffusers(peft_state)
                        save_file(diffusers_lora, os.path.join(save_path, "pytorch_lora_weights.safetensors"))
                        logger.info(f"Saved state to {save_path}")

            logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
            progress_bar.set_postfix(**logs)
            accelerator.log(logs, step=global_step)

            if args.validation_prompt is not None and global_step % args.validation_steps == 0:
                # Create the pipeline on every rank to run validation in parallel
                pipeline = FluxKontextPipeline.from_pretrained(
                    args.pretrained_model_name_or_path,
                    vae=vae,
                    text_encoder=accelerator.unwrap_model(text_encoder_one),
                    text_encoder_2=accelerator.unwrap_model(text_encoder_two),
                    transformer=accelerator.unwrap_model(transformer),
                    revision=args.revision,
                    variant=args.variant,
                    torch_dtype=weight_dtype,
                )
                pipeline_args = {
                    "prompt": args.validation_prompt,
                    "guidance_scale": 3.5,
                    "num_inference_steps": 20,
                    "max_sequence_length": 128,
                }
                images = log_validation(
                    pipeline=pipeline,
                    args=args,
                    accelerator=accelerator,
                    pipeline_args=pipeline_args,
                    step=global_step,
                    torch_dtype=weight_dtype,
                )
                # Only the main process saves/logs
                if accelerator.is_main_process:
                    save_path = os.path.join(args.output_dir, "validation")
                    os.makedirs(save_path, exist_ok=True)
                    save_folder = os.path.join(save_path, f"checkpoint-{global_step}")
                    os.makedirs(save_folder, exist_ok=True)
                    for idx, img in enumerate(images):
                        out_path = os.path.join(save_folder, f"{idx}.jpg")
                        save_with_retry(img, out_path)
                del pipeline

    accelerator.wait_for_everyone()
    accelerator.end_training()


if __name__ == "__main__":
    args = parse_args()
    main(args)
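
# Inference sketch (illustrative, not executed by this script): the checkpoints saved above contain
# `pytorch_lora_weights.safetensors` in the diffusers LoRA format, so they can be loaded with the
# standard diffusers LoRA loader. Paths and the checkpoint step below are placeholders.
#
#   from diffusers import FluxKontextPipeline
#   import torch
#
#   pipe = FluxKontextPipeline.from_pretrained("/path/to/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16)
#   pipe.load_lora_weights("/path/to/output_dir/checkpoint-1000", weight_name="pytorch_lora_weights.safetensors")
#   pipe.to("cuda")
#   image = pipe(image=source_image, prompt="Fill in the white region naturally ...").images[0]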