import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List, Optional, Tuple, Dict, Any, Union
import numpy as np
from collections import deque
import math
from PIL import Image
import torchvision.transforms as transforms


class TemporalConsistencyBuffer:
    """Enhanced temporal buffer for flexible batch generation"""

    def __init__(self, buffer_size: int = 8, feature_dim: int = 512):
        self.buffer_size = buffer_size
        self.feature_dim = feature_dim
        self.frame_features = deque(maxlen=buffer_size)
        self.frame_latents = deque(maxlen=buffer_size)
        self.frame_images = deque(maxlen=buffer_size)  # Store actual frames for I2V
        self.motion_vectors = deque(maxlen=buffer_size - 1)
        self.temporal_weights = deque(maxlen=buffer_size)  # Importance weights

    def add_frames(self, features: torch.Tensor, latents: torch.Tensor,
                   images: Optional[torch.Tensor] = None, batch_size: int = 1):
        """Add a batch of frames to the temporal buffer"""
        for i in range(batch_size):
            frame_feat = features[i:i + 1] if features.dim() > 3 else features
            frame_lat = latents[i:i + 1] if latents.dim() > 3 else latents
            frame_img = images[i:i + 1] if images is not None and images.dim() > 3 else images

            # Calculate a motion vector if we already have previous frames
            if len(self.frame_features) > 0:
                motion = frame_feat - self.frame_features[-1]
                self.motion_vectors.append(motion)

            self.frame_features.append(frame_feat)
            self.frame_latents.append(frame_lat)
            if frame_img is not None:
                self.frame_images.append(frame_img)

            # Per-frame importance weight (decays as the buffer fills)
            weight = 1.0 / (len(self.frame_features) + 1)
            self.temporal_weights.append(weight)

    def get_reference_frame(self) -> Optional[torch.Tensor]:
        """Get the most recent frame as the reference for I2V"""
        if len(self.frame_images) > 0:
            return self.frame_images[-1]
        elif len(self.frame_latents) > 0:
            return self.frame_latents[-1]
        return None

    def get_temporal_context(self, num_context_frames: int = 4) -> Dict[str, Any]:
        """Get weighted temporal context for the next frame batch"""
        if len(self.frame_features) == 0:
            return {"has_context": False}

        # Get the most recent frames, up to num_context_frames
        context_size = min(num_context_frames, len(self.frame_features))
        recent_features = list(self.frame_features)[-context_size:]
        recent_latents = list(self.frame_latents)[-context_size:]
        recent_weights = list(self.temporal_weights)[-context_size:]

        # Stack along the frame (batch) dimension
        stacked_features = torch.cat(recent_features, dim=0)  # [T, C, H, W]
        stacked_latents = torch.cat(recent_latents, dim=0)
        weights = torch.tensor(recent_weights, device=stacked_features.device)

        # Predict motion for the next frames
        predicted_motions = []
        if len(self.motion_vectors) >= 2:
            # Multi-step motion prediction from the last few motion vectors
            recent_motions = list(self.motion_vectors)[-3:]  # Last 3 motions
            for _ in range(3):  # Predict up to 3 steps ahead
                if len(recent_motions) >= 2:
                    # Weighted motion extrapolation
                    motion_pred = recent_motions[-1] * 1.5 - recent_motions[-2] * 0.5
                    if len(recent_motions) >= 3:
                        motion_pred += recent_motions[-3] * 0.1
                else:
                    motion_pred = recent_motions[-1] if recent_motions else None
                predicted_motions.append(motion_pred)

        return {
            "has_context": True,
            "frame_features": stacked_features,
            "frame_latents": stacked_latents,
            "temporal_weights": weights,
            "predicted_motions": predicted_motions,
            "sequence_length": len(self.frame_features),
            "reference_frame": self.get_reference_frame()
        }
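

# Shape conventions for the buffer (illustrative sketch only; the channel/spatial sizes
# below are example values, not requirements, and this snippet is not executed anywhere
# in this module):
#
#     buffer = TemporalConsistencyBuffer(buffer_size=8, feature_dim=512)
#     buffer.add_frames(features=torch.randn(2, 512, 64, 64),  # [N, feature_dim, h, w]
#                       latents=torch.randn(2, 4, 64, 64),     # [N, latent_C, h, w]
#                       batch_size=2)
#     context = buffer.get_temporal_context(num_context_frames=4)
#     # context["has_context"] -> True, context["frame_features"] -> [T, 512, 64, 64]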


class FlexibleTemporalAttention(nn.Module):
    """Flexible attention that handles variable batch sizes"""

    def __init__(self, dim: int, num_heads: int = 8, max_frames: int = 16):
        super().__init__()
        self.dim = dim
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.scale = self.head_dim ** -0.5
        self.max_frames = max_frames

        self.qkv = nn.Linear(dim, dim * 3, bias=False)
        self.proj = nn.Linear(dim, dim)

        # Learnable temporal positional embeddings
        self.temporal_pos_embed = nn.Parameter(torch.randn(1, max_frames, dim) * 0.02)
        self.frame_type_embed = nn.Parameter(torch.randn(3, dim) * 0.02)  # past, current, future

        # Cross-frame interaction
        self.cross_frame_norm = nn.LayerNorm(dim)
        self.cross_frame_mlp = nn.Sequential(
            nn.Linear(dim, dim * 2),
            nn.GELU(),
            nn.Linear(dim * 2, dim)
        )

    def forward(self, current_frames: torch.Tensor, temporal_context: Dict[str, Any],
                num_current_frames: int = 1):
        """
        current_frames: [B*N, H*W, C] where N is the number of frames being generated
        temporal_context: dict with past frame information
        """
        B_times_N, HW, C = current_frames.shape
        B = B_times_N // num_current_frames

        if not temporal_context.get("has_context", False):
            return current_frames

        # Reshape current frames to [B, N, H*W, C]
        current = current_frames.view(B, num_current_frames, HW, C)

        # Get temporal context
        past_features = temporal_context["frame_features"]  # [T, C, H, W]
        T, _, H, W = past_features.shape
        past_features = past_features.view(T, C, H * W).permute(0, 2, 1)  # [T, H*W, C]
        past_features = past_features.unsqueeze(0).expand(B, -1, -1, -1)  # [B, T, H*W, C]

        # Combine all frames (past + current)
        all_frames = torch.cat([past_features, current], dim=1)  # [B, T+N, H*W, C]
        total_frames = T + num_current_frames

        # Temporal positional embeddings
        pos_embed = self.temporal_pos_embed[:, :total_frames]  # [1, T+N, C]

        # Frame type embeddings (past=0, current=1)
        frame_type_ids = torch.cat([
            torch.zeros(T, device=current_frames.device),                  # past frames
            torch.ones(num_current_frames, device=current_frames.device)   # current frames
        ]).long()
        type_embed = self.frame_type_embed[frame_type_ids]  # [T+N, C]

        # Apply embeddings (broadcast over the spatial dimension)
        all_frames = all_frames + pos_embed.unsqueeze(2) + type_embed.unsqueeze(0).unsqueeze(2)

        # Flatten for attention
        all_frames_flat = all_frames.view(B, total_frames * HW, C)

        # Multi-head attention
        qkv = self.qkv(all_frames_flat).reshape(B, -1, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]

        attn = (q @ k.transpose(-2, -1)) * self.scale

        # Frame-level causal mask: a frame can attend to past frames and to all of its own
        # spatial positions, but never to future frames
        frame_ids = torch.arange(total_frames, device=current_frames.device).repeat_interleave(HW)
        mask = frame_ids.unsqueeze(0) > frame_ids.unsqueeze(1)  # True where the key frame lies in the future
        attn = attn.masked_fill(mask.unsqueeze(0).unsqueeze(0), float('-inf'))
        attn = attn.softmax(dim=-1)

        out = (attn @ v).transpose(1, 2).reshape(B, total_frames * HW, C)

        # Extract only the current frame features
        current_start = T * HW
        enhanced_current = out[:, current_start:]  # [B, N*H*W, C]
        enhanced_current = self.proj(enhanced_current)

        # Cross-frame interaction within the current batch
        if num_current_frames > 1:
            enhanced_current = enhanced_current.view(B, num_current_frames, HW, C)
            refined_frames = []
            for i in range(num_current_frames):
                frame_i = enhanced_current[:, i]  # [B, H*W, C]
                # Interact with the other frames in the current batch
                other_frames = torch.cat([
                    enhanced_current[:, :i],
                    enhanced_current[:, i + 1:]
                ], dim=1)
                cross_context = other_frames.mean(dim=1)  # [B, H*W, C]
                frame_i_norm = self.cross_frame_norm(frame_i + cross_context)
                refined_frames.append(frame_i + self.cross_frame_mlp(frame_i_norm))
            enhanced_current = torch.stack(refined_frames, dim=1)
            enhanced_current = enhanced_current.view(B * num_current_frames, HW, C)

        return enhanced_current


class FlexibleI2VDiffuser(nn.Module):
    """Flexible I2V diffusion model that generates N frames at a time"""

    def __init__(
        self,
        base_diffusion_model,
        feature_dim: int = 512,
        temporal_buffer_size: int = 8,
        num_attention_heads: int = 8,
        max_batch_frames: int = 3
    ):
        super().__init__()
        self.base_model = base_diffusion_model
        self.feature_dim = feature_dim
        self.temporal_buffer_size = temporal_buffer_size
        self.max_batch_frames = max_batch_frames

        # Enhanced feature extraction for I2V
        self.image_encoder = nn.Sequential(
            nn.Conv2d(3, feature_dim // 4, 7, padding=3),
            nn.GroupNorm(8, feature_dim // 4),
            nn.SiLU(),
            nn.Conv2d(feature_dim // 4, feature_dim // 2, 3, padding=1, stride=2),
            nn.GroupNorm(8, feature_dim // 2),
            nn.SiLU(),
            nn.Conv2d(feature_dim // 2, feature_dim, 3, padding=1, stride=2),
            nn.GroupNorm(8, feature_dim),
            nn.SiLU()
        )
        self.latent_encoder = nn.Conv2d(
            base_diffusion_model.in_channels, feature_dim, 3, padding=1
        )

        # Flexible temporal attention
        self.temporal_attention = FlexibleTemporalAttention(
            feature_dim, num_attention_heads, max_batch_frames * 4
        )

        # I2V specific components
        self.reference_adapter = nn.Sequential(
            nn.Conv2d(feature_dim * 2, feature_dim, 1),
            nn.GroupNorm(8, feature_dim),
            nn.SiLU()
        )
        self.motion_conditioner = nn.Sequential(
            nn.Linear(feature_dim, feature_dim * 2),
            nn.GELU(),
            nn.Linear(feature_dim * 2, feature_dim)
        )

        # Multi-frame consistency
        self.frame_consistency_net = nn.Sequential(
            nn.Conv3d(feature_dim, feature_dim, (3, 3, 3), padding=(1, 1, 1)),
            nn.GroupNorm(8, feature_dim),
            nn.SiLU(),
            nn.Conv3d(feature_dim, feature_dim, (1, 3, 3), padding=(0, 1, 1))
        )

        # Project conditioning features back to the latent channel count so they can be
        # blended residually with the latents; zero-initialized so conditioning starts as a no-op
        self.feature_to_latent = nn.Conv2d(feature_dim, base_diffusion_model.in_channels, 1)
        nn.init.zeros_(self.feature_to_latent.weight)
        nn.init.zeros_(self.feature_to_latent.bias)

        # Initialize temporal buffer
        self.temporal_buffer = TemporalConsistencyBuffer(temporal_buffer_size, feature_dim)

    def encode_reference_image(self, image: torch.Tensor) -> torch.Tensor:
        """Encode a reference image for I2V conditioning"""
        if image.shape[1] == 3:  # RGB image
            return self.image_encoder(image)
        else:  # Already an encoded latent
            return self.latent_encoder(image)

    def apply_i2v_conditioning(
        self,
        current_latents: torch.Tensor,  # [B*N, C, H, W]
        temporal_context: Dict[str, Any],
        num_frames: int = 1
    ) -> torch.Tensor:
        """Apply I2V conditioning with a flexible frame count"""
        B_times_N, C, H, W = current_latents.shape
        B = B_times_N // num_frames

        if not temporal_context.get("has_context", False):
            return current_latents

        # Extract features from the current latents
        current_features = self.latent_encoder(current_latents)  # [B*N, F, H, W]

        # Apply temporal attention
        current_flat = current_features.flatten(2).transpose(1, 2)  # [B*N, H*W, F]
        enhanced_features = self.temporal_attention(current_flat, temporal_context, num_frames)
        enhanced_features = enhanced_features.transpose(1, 2).reshape(B_times_N, -1, H, W)

        # Reference frame conditioning for I2V
        if temporal_context.get("reference_frame") is not None:
            ref_frame = temporal_context["reference_frame"]
            ref_features = self.encode_reference_image(ref_frame)
            # Match the latent resolution (image features may come out at a different scale)
            if ref_features.shape[-2:] != (H, W):
                ref_features = F.interpolate(ref_features, size=(H, W), mode="bilinear", align_corners=False)
            # Broadcast the single reference frame to every current frame
            ref_features = ref_features.expand(B_times_N, -1, -1, -1)
            # Combine with current features
            combined_features = torch.cat([enhanced_features, ref_features], dim=1)
            conditioned_features = self.reference_adapter(combined_features)
        else:
            conditioned_features = enhanced_features

        # Multi-frame consistency for batch generation
        if num_frames > 1:
            # Reshape for 3D convolution: [B, C, T, H, W]
            batch_features = conditioned_features.view(B, num_frames, -1, H, W)
            batch_features = batch_features.permute(0, 2, 1, 3, 4)

            # Apply 3D consistency
            consistent_features = self.frame_consistency_net(batch_features)
            consistent_features = consistent_features.permute(0, 2, 1, 3, 4)  # [B, T, C, H, W]
            conditioned_features = consistent_features.reshape(B_times_N, -1, H, W)

        # Motion conditioning
        if temporal_context.get("predicted_motions"):
            motions = temporal_context["predicted_motions"][:num_frames]
            for i, motion in enumerate(motions):
                if motion is not None:
                    # Frame i occupies index b * num_frames + i for each sample b in the batch
                    frame_idx = torch.arange(B, device=current_latents.device) * num_frames + i
                    motion_flat = motion.flatten(2).transpose(1, 2).mean(dim=1)  # [1, F]
                    motion_cond = self.motion_conditioner(motion_flat)            # [1, F]
                    motion_cond = motion_cond.unsqueeze(-1).unsqueeze(-1)          # [1, F, 1, 1]
                    conditioned_features[frame_idx] += motion_cond

        # Blend with the original latents (features are projected back to latent channels)
        alpha = 0.4  # I2V conditioning strength
        enhanced_latents = current_latents + alpha * self.feature_to_latent(conditioned_features)

        return enhanced_latents

    def forward(
        self,
        noisy_latents: torch.Tensor,  # [B*N, C, H, W]
        timestep: torch.Tensor,
        text_embeddings: torch.Tensor,
        num_frames: int = 1,
        use_temporal_consistency: bool = True
    ) -> torch.Tensor:
        """Forward pass with a flexible frame count"""
        if use_temporal_consistency:
            # Get temporal context and apply I2V conditioning
            temporal_context = self.temporal_buffer.get_temporal_context()
            enhanced_latents = self.apply_i2v_conditioning(
                noisy_latents, temporal_context, num_frames
            )
        else:
            enhanced_latents = noisy_latents

        # Expand text embeddings for multiple frames
        if text_embeddings.shape[0] != enhanced_latents.shape[0]:
            text_embeddings = text_embeddings.repeat(num_frames, 1, 1)

        # Run the base diffusion model
        noise_pred = self.base_model(enhanced_latents, timestep, text_embeddings)
        return noise_pred

    def update_temporal_buffer(self, latents: torch.Tensor, images: Optional[torch.Tensor] = None,
                               num_frames: int = 1):
        """Update the temporal buffer with generated frames"""
        with torch.no_grad():
            features = self.latent_encoder(latents)
        self.temporal_buffer.add_frames(features, latents, images, num_frames)


class FlexibleI2VGenerator:
    """High-level generator with configurable frame batch sizes"""

    def __init__(
        self,
        diffusion_model: FlexibleI2VDiffuser,
        scheduler,
        vae,  # For encoding/decoding images
        device: str = "cuda"
    ):
        self.model = diffusion_model
        self.scheduler = scheduler
        self.vae = vae
        self.device = device

        # Image preprocessing
        self.image_transform = transforms.Compose([
            transforms.Resize((512, 512)),
            transforms.ToTensor(),
            transforms.Normalize([0.5], [0.5])
        ])

    def encode_image(self, image: Union[Image.Image, torch.Tensor]) -> torch.Tensor:
        """Encode a PIL image or tensor to latent space"""
        if isinstance(image, Image.Image):
            image = self.image_transform(image).unsqueeze(0).to(self.device)
        with torch.no_grad():
            latent = self.vae.encode(image).latent_dist.sample()
            latent = latent * self.vae.config.scaling_factor
        return latent

    def decode_latents(self, latents: torch.Tensor) -> torch.Tensor:
        """Decode latents to images in [0, 1]"""
        with torch.no_grad():
            latents = latents / self.vae.config.scaling_factor
            images = self.vae.decode(latents).sample
            images = (images + 1.0) / 2.0
            images = torch.clamp(images, 0.0, 1.0)
        return images

    @torch.no_grad()
    def generate_i2v_sequence(
        self,
        reference_image: Union[Image.Image, torch.Tensor],
        prompt: str,
        text_encoder,
        tokenizer,
        num_frames: int = 16,
        frames_per_batch: int = 2,  # This is the key parameter!
        num_inference_steps: int = 20,
        guidance_scale: float = 7.5,
        generator: Optional[torch.Generator] = None,
        callback=None
    ) -> List[torch.Tensor]:
        """Generate an I2V sequence with a configurable batch size"""
        print(f"🎬 Generating {num_frames} frames in batches of {frames_per_batch}")

        # Encode the reference image
        ref_latent = self.encode_image(reference_image)
        ref_image_tensor = reference_image if isinstance(reference_image, torch.Tensor) else \
            self.image_transform(reference_image).unsqueeze(0).to(self.device)

        # Encode the text prompt
        text_inputs = tokenizer(
            prompt,
            padding="max_length",
            max_length=tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt"
        )
        text_embeddings = text_encoder(text_inputs.input_ids.to(self.device))[0]

        # Prepare unconditional embeddings
        uncond_tokens = [""]
        uncond_inputs = tokenizer(
            uncond_tokens,
            padding="max_length",
            max_length=tokenizer.model_max_length,
            return_tensors="pt"
        )
        uncond_embeddings = text_encoder(uncond_inputs.input_ids.to(self.device))[0]

        # Reset the temporal buffer and add the reference frame
        self.model.temporal_buffer = TemporalConsistencyBuffer(
            self.model.temporal_buffer_size, self.model.feature_dim
        )
        self.model.update_temporal_buffer(ref_latent, ref_image_tensor, 1)

        generated_frames = [ref_latent]
        latent_shape = ref_latent.shape

        # Generate in flexible batches
        frames_generated = 1  # Start with the reference frame
        while frames_generated < num_frames:
            # Calculate the current batch size
            remaining_frames = num_frames - frames_generated
            current_batch_size = min(frames_per_batch, remaining_frames)

            print(f"🎯 Generating frames {frames_generated + 1}-{frames_generated + current_batch_size}")

            # Initialize noise for the current batch
            batch_latents = torch.randn(
                (current_batch_size, *latent_shape[1:]),
                generator=generator,
                device=self.device,
                dtype=text_embeddings.dtype
            )

            # Prepare embeddings for the batch (unconditional first, then conditional)
            batch_text_embeddings = torch.cat([
                uncond_embeddings.repeat(current_batch_size, 1, 1),
                text_embeddings.repeat(current_batch_size, 1, 1)
            ])

            # Set scheduler timesteps
            self.scheduler.set_timesteps(num_inference_steps, device=self.device)
            timesteps = self.scheduler.timesteps

            # Denoising loop for the current batch
            for i, t in enumerate(timesteps):
                # Expand for classifier-free guidance
                latent_model_input = torch.cat([batch_latents] * 2)
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # Predict noise with temporal consistency
                noise_pred = self.model(
                    latent_model_input,
                    t,
                    batch_text_embeddings,
                    num_frames=current_batch_size,
                    use_temporal_consistency=True
                )

                # Classifier-free guidance
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # Scheduler step
                batch_latents = self.scheduler.step(noise_pred, t, batch_latents).prev_sample

                if callback:
                    callback(i, t, batch_latents)

            # Update the temporal buffer with the generated batch
            # (rescale decoded images to [-1, 1] so they match the normalized reference image)
            batch_images = self.decode_latents(batch_latents) * 2.0 - 1.0
            self.model.update_temporal_buffer(batch_latents, batch_images, current_batch_size)

            # Add to results
            for j in range(current_batch_size):
                generated_frames.append(batch_latents[j:j + 1])

            frames_generated += current_batch_size
            print(f"✅ Generated {current_batch_size} frames")

        return generated_frames

    def generate_with_stepping_strategy(
        self,
        reference_image: Union[Image.Image, torch.Tensor],
        prompt: str,
        text_encoder,
        tokenizer,
        total_frames: int = 24,
        stepping_pattern: List[int] = [1, 2, 3, 2, 1],  # Variable batch sizes
        **kwargs
    ) -> List[torch.Tensor]:
        """Generate with a dynamic stepping pattern"""
        all_frames = []
        frames_generated = 0
        step_idx = 0

        while frames_generated < total_frames:
            # Get the current step size
            current_step = stepping_pattern[step_idx % len(stepping_pattern)]
            remaining = total_frames - frames_generated
            actual_step = min(current_step, remaining)

            print(f"📊 Step {step_idx + 1}: Generating {actual_step} frames")

            # Generate a batch
            if frames_generated == 0:
                # First generation includes the reference frame
                frames = self.generate_i2v_sequence(
                    reference_image=reference_image,
                    prompt=prompt,
                    text_encoder=text_encoder,
                    tokenizer=tokenizer,
                    num_frames=actual_step + 1,  # +1 for the reference
                    frames_per_batch=actual_step,
                    **kwargs
                )
                all_frames.extend(frames)
                frames_generated += len(frames)
            else:
                # Continue from the last generated frame
                last_frame_latent = all_frames[-1]
                # Rescale the decoded image to [-1, 1] before re-encoding it as the new reference
                last_frame_image = self.decode_latents(last_frame_latent) * 2.0 - 1.0

                frames = self.generate_i2v_sequence(
                    reference_image=last_frame_image,
                    prompt=prompt,
                    text_encoder=text_encoder,
                    tokenizer=tokenizer,
                    num_frames=actual_step + 1,
                    frames_per_batch=actual_step,
                    **kwargs
                )
                all_frames.extend(frames[1:])  # Skip the reference (duplicate)
                frames_generated += len(frames) - 1

            step_idx += 1

        return all_frames[:total_frames]  # Ensure an exact frame count


# Example usage and integration points
class TemporalMiddleware:
    """Middleware layer for external AI control"""

    def __init__(self):
        self.prompt_scheduler = None
        self.controlnet_adapter = None
        self.audio_sync = None

    def intercept_temporal_state(self, temporal_context: Dict, frame_idx: int) -> Dict:
        """Hook for external manipulation of the temporal state"""
        # Prompt traveling
        if self.prompt_scheduler:
            new_prompt = self.prompt_scheduler.get_prompt_at_frame(frame_idx)
            temporal_context["dynamic_prompt"] = new_prompt

        # ControlNet injection
        if self.controlnet_adapter:
            control_inputs = self.controlnet_adapter.get_control_at_frame(frame_idx)
            temporal_context["control_inputs"] = control_inputs

        # Audio synchronization
        if self.audio_sync:
            audio_features = self.audio_sync.get_features_at_frame(frame_idx)
            temporal_context["audio_conditioning"] = audio_features

        return temporal_context
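

# A minimal sketch of one adapter that TemporalMiddleware can drive. This class is
# illustrative only (it is not referenced elsewhere in this module); it shows the
# `get_prompt_at_frame` interface that `intercept_temporal_state` expects from
# `prompt_scheduler`. The ControlNet and audio adapters follow the same per-frame
# lookup pattern (`get_control_at_frame`, `get_features_at_frame`).
class KeyframePromptScheduler:
    """Illustrative prompt scheduler: returns the prompt of the nearest earlier keyframe"""

    def __init__(self, keyframe_prompts: Dict[int, str]):
        # e.g. {0: "a cat sitting", 8: "a cat walking", 16: "a cat running"}
        self.keyframe_prompts = dict(sorted(keyframe_prompts.items()))

    def get_prompt_at_frame(self, frame_idx: int) -> str:
        prompt = next(iter(self.keyframe_prompts.values()))
        for start_frame, keyframe_prompt in self.keyframe_prompts.items():
            if frame_idx >= start_frame:
                prompt = keyframe_prompt
        return prompt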


def example_usage():
    """Example of flexible I2V generation with middleware"""
    # Load your models (pseudo-code)
    # base_model, scheduler, vae, text_encoder, tokenizer = load_models()

    # Create the flexible I2V model
    # i2v_model = FlexibleI2VDiffuser(
    #     base_diffusion_model=base_model,
    #     feature_dim=512,
    #     temporal_buffer_size=8,
    #     max_batch_frames=3
    # )

    # Create the generator
    # generator = FlexibleI2VGenerator(
    #     diffusion_model=i2v_model,
    #     scheduler=scheduler,
    #     vae=vae,
    #     device="cuda"
    # )

    # Load a reference image
    # reference_image = Image.open("reference.jpg")

    # Strategy 1: Fixed batch size (The core innovation!)
    # frames_fixed = generator.generate_i2v_sequence(
    #     reference_image=reference_image,
    #     prompt="A cat walking in a garden",
    #     text_encoder=text_encoder,
    #     tokenizer=tokenizer,
    #     num_frames=16,
    #     frames_per_batch=2,  # Generate 2 frames at a time
    #     num_inference_steps=20
    # )

    # Strategy 2: Variable stepping pattern (For dynamic control)
    # frames_variable = generator.generate_with_stepping_strategy(
    #     reference_image=reference_image,
    #     prompt="A cat walking in a garden",
    #     text_encoder=text_encoder,
    #     tokenizer=tokenizer,
    #     total_frames=24,
    #     stepping_pattern=[1, 2, 3, 2, 1],  # Start slow, ramp up, slow down
    #     num_inference_steps=20
    # )

    print("🎉 Flexible Batch I2V Generator with Temporal Consistency - IMPLEMENTED!")
    print("🚀 Ready for infinite frame generation with external AI control!")


if __name__ == "__main__":
    example_usage()