# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
import logging

from mmgp import offload
import torch
import torch.cuda.amp as amp
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange

__all__ = [
    'WanVAE',
]

CACHE_T = 2


class CausalConv3d(nn.Conv3d):
    """
    Causal 3d convolution.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._padding = (self.padding[2], self.padding[2], self.padding[1],
                         self.padding[1], 2 * self.padding[0], 0)
        self.padding = (0, 0, 0)

    def forward(self, x, cache_x=None):
        padding = list(self._padding)
        if cache_x is not None and self._padding[4] > 0:
            cache_x = cache_x.to(x.device)
            x = torch.cat([cache_x, x], dim=2)
            padding[4] -= cache_x.shape[2]
            cache_x = None
        x = F.pad(x, padding)
        try:
            out = super().forward(x)
            return out
        except RuntimeError as e:
            if "miopenStatus" in str(e):
                print("⚠️ MIOpen fallback: MIOpen (AMD) cannot handle a convolution over an area this large, "
                      "so this decode will run on the CPU, which is very slow. Consider using tiled VAE decoding.")
                x_cpu = x.float().cpu()
                weight_cpu = self.weight.float().cpu()
                bias_cpu = self.bias.float().cpu() if self.bias is not None else None
                print(f"[Fallback] x shape: {x_cpu.shape}, weight shape: {weight_cpu.shape}")
                out = F.conv3d(x_cpu, weight_cpu, bias_cpu,
                               self.stride, (0, 0, 0),  # avoid double padding: x is already padded above
                               self.dilation, self.groups)
                out = out.to(x.device)
                if out.dtype != x.dtype:
                    out = out.to(x.dtype)
                return out
            raise
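

# A minimal usage sketch (added for illustration, not part of the original module):
# CausalConv3d pads only on the "past" side of the time axis, so each output frame
# depends only on the current and earlier input frames and the temporal length is preserved.
def _causal_conv3d_example():
    conv = CausalConv3d(4, 4, 3, padding=1)  # 3x3x3 kernel with causal temporal padding
    x = torch.randn(1, 4, 5, 16, 16)         # [B, C, T, H, W]
    y = conv(x)
    assert y.shape == x.shape                # T, H and W are all unchanged
    return y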


class RMS_norm(nn.Module):

    def __init__(self, dim, channel_first=True, images=True, bias=False):
        super().__init__()
        broadcastable_dims = (1, 1, 1) if not images else (1, 1)
        shape = (dim, *broadcastable_dims) if channel_first else (dim,)

        self.channel_first = channel_first
        self.scale = dim**0.5
        self.gamma = nn.Parameter(torch.ones(shape))
        self.bias = nn.Parameter(torch.zeros(shape)) if bias else 0.

    def forward(self, x):
        dtype = x.dtype
        x = F.normalize(
            x, dim=(1 if self.channel_first else
                    -1)) * self.scale * self.gamma + self.bias
        x = x.to(dtype)
        return x
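

# Note (added, illustrative): the forward pass above is an RMS norm over the channel axis:
#   y = x / ||x||_2 * sqrt(dim) * gamma + bias  ==  x / rms(x) * gamma + bias,
# since ||x||_2 / sqrt(dim) is the root-mean-square of the channel vector at each position.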


class Upsample(nn.Upsample):

    def forward(self, x):
        """
        Fix bfloat16 support for nearest neighbor interpolation.
        """
        return super().forward(x.float()).type_as(x)


class Resample(nn.Module):

    def __init__(self, dim, mode):
        assert mode in ('none', 'upsample2d', 'upsample3d', 'downsample2d',
                        'downsample3d')
        super().__init__()
        self.dim = dim
        self.mode = mode

        # layers
        if mode == 'upsample2d':
            self.resample = nn.Sequential(
                Upsample(scale_factor=(2., 2.), mode='nearest-exact'),
                nn.Conv2d(dim, dim // 2, 3, padding=1))
        elif mode == 'upsample3d':
            self.resample = nn.Sequential(
                Upsample(scale_factor=(2., 2.), mode='nearest-exact'),
                nn.Conv2d(dim, dim // 2, 3, padding=1))
            self.time_conv = CausalConv3d(
                dim, dim * 2, (3, 1, 1), padding=(1, 0, 0))
        elif mode == 'downsample2d':
            self.resample = nn.Sequential(
                nn.ZeroPad2d((0, 1, 0, 1)),
                nn.Conv2d(dim, dim, 3, stride=(2, 2)))
        elif mode == 'downsample3d':
            self.resample = nn.Sequential(
                nn.ZeroPad2d((0, 1, 0, 1)),
                nn.Conv2d(dim, dim, 3, stride=(2, 2)))
            self.time_conv = CausalConv3d(
                dim, dim, (3, 1, 1), stride=(2, 1, 1), padding=(0, 0, 0))
        else:
            self.resample = nn.Identity()

    def forward(self, x, feat_cache=None, feat_idx=[0]):
        b, c, t, h, w = x.size()
        if self.mode == 'upsample3d':
            if feat_cache is not None:
                idx = feat_idx[0]
                if feat_cache[idx] is None:
                    feat_cache[idx] = 'Rep'
                    feat_idx[0] += 1
                else:
                    clone = True
                    cache_x = x[:, :, -CACHE_T:, :, :]  # .clone()
                    if cache_x.shape[2] < 2 and feat_cache[idx] is not None and feat_cache[idx] != 'Rep':
                        # cache the last frame of the previous chunk as well
                        clone = False
                        cache_x = torch.cat([
                            feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device),
                            cache_x
                        ], dim=2)
                    if cache_x.shape[2] < 2 and feat_cache[idx] is not None and feat_cache[idx] == 'Rep':
                        clone = False
                        cache_x = torch.cat([
                            torch.zeros_like(cache_x).to(cache_x.device),
                            cache_x
                        ], dim=2)
                    if clone:
                        cache_x = cache_x.clone()
                    if feat_cache[idx] == 'Rep':
                        x = self.time_conv(x)
                    else:
                        x = self.time_conv(x, feat_cache[idx])
                    feat_cache[idx] = cache_x
                    feat_idx[0] += 1

                    x = x.reshape(b, 2, c, t, h, w)
                    x = torch.stack((x[:, 0, :, :, :, :], x[:, 1, :, :, :, :]),
                                    3)
                    x = x.reshape(b, c, t * 2, h, w)
        t = x.shape[2]
        x = rearrange(x, 'b c t h w -> (b t) c h w')
        x = self.resample(x)
        x = rearrange(x, '(b t) c h w -> b c t h w', t=t)

        if self.mode == 'downsample3d':
            if feat_cache is not None:
                idx = feat_idx[0]
                if feat_cache[idx] is None:
                    feat_cache[idx] = x  # .to("cpu")  # x.clone()
                    feat_idx[0] += 1
                else:
                    cache_x = x[:, :, -1:, :, :].clone()
                    # if cache_x.shape[2] < 2 and feat_cache[idx] is not None and feat_cache[idx] != 'Rep':
                    #     # cache the last frame of the previous chunk as well
                    #     cache_x = torch.cat([feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2)

                    x = self.time_conv(
                        torch.cat([feat_cache[idx][:, :, -1:, :, :], x], 2))
                    feat_cache[idx] = cache_x  # .to("cpu")
                    feat_idx[0] += 1
        return x

    def init_weight(self, conv):
        conv_weight = conv.weight
        nn.init.zeros_(conv_weight)
        c1, c2, t, h, w = conv_weight.size()
        one_matrix = torch.eye(c1, c2)
        init_matrix = one_matrix
        nn.init.zeros_(conv_weight)
        # conv_weight.data[:, :, -1, 1, 1] = init_matrix * 0.5
        conv_weight.data[:, :, 1, 0, 0] = init_matrix  # * 0.5
        conv.weight.data.copy_(conv_weight)
        nn.init.zeros_(conv.bias.data)

    def init_weight2(self, conv):
        conv_weight = conv.weight.data
        nn.init.zeros_(conv_weight)
        c1, c2, t, h, w = conv_weight.size()
        init_matrix = torch.eye(c1 // 2, c2)
        # init_matrix = repeat(init_matrix, 'o ... -> (o 2) ...').permute(1, 0, 2).contiguous().reshape(c1, c2)
        conv_weight[:c1 // 2, :, -1, 0, 0] = init_matrix
        conv_weight[c1 // 2:, :, -1, 0, 0] = init_matrix
        conv.weight.data.copy_(conv_weight)
        nn.init.zeros_(conv.bias.data)


class ResidualBlock(nn.Module):

    def __init__(self, in_dim, out_dim, dropout=0.0):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim

        # layers
        self.residual = nn.Sequential(
            RMS_norm(in_dim, images=False), nn.SiLU(),
            CausalConv3d(in_dim, out_dim, 3, padding=1),
            RMS_norm(out_dim, images=False), nn.SiLU(), nn.Dropout(dropout),
            CausalConv3d(out_dim, out_dim, 3, padding=1))
        self.shortcut = CausalConv3d(in_dim, out_dim, 1) \
            if in_dim != out_dim else nn.Identity()

    def forward(self, x, feat_cache=None, feat_idx=[0]):
        h = self.shortcut(x)
        dtype = x.dtype
        for layer in self.residual:
            if isinstance(layer, CausalConv3d) and feat_cache is not None:
                idx = feat_idx[0]
                cache_x = x[:, :, -CACHE_T:, :, :].clone()
                if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
                    # cache the last frame of the previous chunk as well
                    cache_x = torch.cat([
                        feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device),
                        cache_x
                    ], dim=2)
                x = layer(x, feat_cache[idx]).to(dtype)
                feat_cache[idx] = cache_x  # .to("cpu")
                feat_idx[0] += 1
            else:
                x = layer(x).to(dtype)
        return x + h


class AttentionBlock(nn.Module):
    """
    Single-head self-attention over the spatial positions of each frame.
    """

    def __init__(self, dim):
        super().__init__()
        self.dim = dim

        # layers
        self.norm = RMS_norm(dim)
        self.to_qkv = nn.Conv2d(dim, dim * 3, 1)
        self.proj = nn.Conv2d(dim, dim, 1)

        # zero out the last layer params
        nn.init.zeros_(self.proj.weight)

    def forward(self, x):
        identity = x
        b, c, t, h, w = x.size()
        x = rearrange(x, 'b c t h w -> (b t) c h w')
        x = self.norm(x)

        # compute query, key, value
        q, k, v = self.to_qkv(x).reshape(b * t, 1, c * 3,
                                         -1).permute(0, 1, 3,
                                                     2).contiguous().chunk(
                                                         3, dim=-1)

        # apply attention
        x = F.scaled_dot_product_attention(q, k, v)
        x = x.squeeze(1).permute(0, 2, 1).reshape(b * t, c, h, w)

        # output
        x = self.proj(x)
        x = rearrange(x, '(b t) c h w-> b c t h w', t=t)
        return x + identity
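

# A usage sketch (added for illustration, not part of the original module): the block
# attends over the h*w spatial positions of every frame independently; the temporal axis
# is folded into the batch, so the output shape always matches the input shape.
def _attention_block_example():
    attn = AttentionBlock(8)
    x = torch.randn(1, 8, 3, 16, 16)  # [B, C, T, H, W]
    y = attn(x)
    assert y.shape == x.shape
    return y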


class Encoder3d(nn.Module):

    def __init__(self,
                 dim=128,
                 z_dim=4,
                 dim_mult=[1, 2, 4, 4],
                 num_res_blocks=2,
                 attn_scales=[],
                 temperal_downsample=[True, True, False],
                 dropout=0.0):
        super().__init__()
        self.dim = dim
        self.z_dim = z_dim
        self.dim_mult = dim_mult
        self.num_res_blocks = num_res_blocks
        self.attn_scales = attn_scales
        self.temperal_downsample = temperal_downsample

        # dimensions
        dims = [dim * u for u in [1] + dim_mult]
        scale = 1.0

        # init block
        self.conv1 = CausalConv3d(3, dims[0], 3, padding=1)

        # downsample blocks
        downsamples = []
        for i, (in_dim, out_dim) in enumerate(zip(dims[:-1], dims[1:])):
            # residual (+attention) blocks
            for _ in range(num_res_blocks):
                downsamples.append(ResidualBlock(in_dim, out_dim, dropout))
                if scale in attn_scales:
                    downsamples.append(AttentionBlock(out_dim))
                in_dim = out_dim

            # downsample block
            if i != len(dim_mult) - 1:
                mode = 'downsample3d' if temperal_downsample[i] else 'downsample2d'
                downsamples.append(Resample(out_dim, mode=mode))
                scale /= 2.0
        self.downsamples = nn.Sequential(*downsamples)

        # middle blocks
        self.middle = nn.Sequential(
            ResidualBlock(out_dim, out_dim, dropout), AttentionBlock(out_dim),
            ResidualBlock(out_dim, out_dim, dropout))

        # output blocks
        self.head = nn.Sequential(
            RMS_norm(out_dim, images=False), nn.SiLU(),
            CausalConv3d(out_dim, z_dim, 3, padding=1))

    def forward(self, x, feat_cache=None, feat_idx=[0]):
        dtype = x.dtype
        if feat_cache is not None:
            idx = feat_idx[0]
            cache_x = x[:, :, -CACHE_T:, :, :].clone()
            if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
                # cache the last frame of the previous chunk as well
                cache_x = torch.cat([
                    feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device),
                    cache_x
                ], dim=2)
            x = self.conv1(x, feat_cache[idx]).to(dtype)
            feat_cache[idx] = cache_x
            del cache_x
            feat_idx[0] += 1
        else:
            x = self.conv1(x)

        ## downsamples
        for layer in self.downsamples:
            if feat_cache is not None:
                x = layer(x, feat_cache, feat_idx)
            else:
                x = layer(x)

        ## middle
        for layer in self.middle:
            if isinstance(layer, ResidualBlock) and feat_cache is not None:
                x = layer(x, feat_cache, feat_idx)
            else:
                x = layer(x)

        ## head
        for layer in self.head:
            if isinstance(layer, CausalConv3d) and feat_cache is not None:
                idx = feat_idx[0]
                cache_x = x[:, :, -CACHE_T:, :, :].clone()
                if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
                    # cache the last frame of the previous chunk as well
                    cache_x = torch.cat([
                        feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device),
                        cache_x
                    ], dim=2)
                x = layer(x, feat_cache[idx])
                feat_cache[idx] = cache_x
                del cache_x
                feat_idx[0] += 1
            else:
                x = layer(x)
        return x


class Decoder3d(nn.Module):

    def __init__(self,
                 dim=128,
                 z_dim=4,
                 dim_mult=[1, 2, 4, 4],
                 num_res_blocks=2,
                 attn_scales=[],
                 temperal_upsample=[False, True, True],
                 dropout=0.0):
        super().__init__()
        self.dim = dim
        self.z_dim = z_dim
        self.dim_mult = dim_mult
        self.num_res_blocks = num_res_blocks
        self.attn_scales = attn_scales
        self.temperal_upsample = temperal_upsample

        # dimensions
        dims = [dim * u for u in [dim_mult[-1]] + dim_mult[::-1]]
        scale = 1.0 / 2**(len(dim_mult) - 2)

        # init block
        self.conv1 = CausalConv3d(z_dim, dims[0], 3, padding=1)

        # middle blocks
        self.middle = nn.Sequential(
            ResidualBlock(dims[0], dims[0], dropout), AttentionBlock(dims[0]),
            ResidualBlock(dims[0], dims[0], dropout))

        # upsample blocks
        upsamples = []
        for i, (in_dim, out_dim) in enumerate(zip(dims[:-1], dims[1:])):
            # residual (+attention) blocks
            if i == 1 or i == 2 or i == 3:
                in_dim = in_dim // 2
            for _ in range(num_res_blocks + 1):
                upsamples.append(ResidualBlock(in_dim, out_dim, dropout))
                if scale in attn_scales:
                    upsamples.append(AttentionBlock(out_dim))
                in_dim = out_dim

            # upsample block
            if i != len(dim_mult) - 1:
                mode = 'upsample3d' if temperal_upsample[i] else 'upsample2d'
                upsamples.append(Resample(out_dim, mode=mode))
                scale *= 2.0
        self.upsamples = nn.Sequential(*upsamples)

        # output blocks
        self.head = nn.Sequential(
            RMS_norm(out_dim, images=False), nn.SiLU(),
            CausalConv3d(out_dim, 3, 3, padding=1))

    def forward(self, x, feat_cache=None, feat_idx=[0]):
        ## conv1
        if feat_cache is not None:
            idx = feat_idx[0]
            cache_x = x[:, :, -CACHE_T:, :, :].clone()
            if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
                # cache the last frame of the previous chunk as well
                cache_x = torch.cat([
                    feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device),
                    cache_x
                ], dim=2)
            x = self.conv1(x, feat_cache[idx])
            feat_cache[idx] = cache_x  # .to("cpu")
            del cache_x
            feat_idx[0] += 1
        else:
            x = self.conv1(x)
        cache_x = None

        ## middle
        for layer in self.middle:
            if isinstance(layer, ResidualBlock) and feat_cache is not None:
                x = layer(x, feat_cache, feat_idx)
            else:
                x = layer(x)

        ## upsamples
        for layer in self.upsamples:
            if feat_cache is not None:
                x = layer(x, feat_cache, feat_idx)
            else:
                x = layer(x)

        ## head
        for layer in self.head:
            if isinstance(layer, CausalConv3d) and feat_cache is not None:
                idx = feat_idx[0]
                cache_x = x[:, :, -CACHE_T:, :, :].clone()
                if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
                    # cache the last frame of the previous chunk as well
                    cache_x = torch.cat([
                        feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device),
                        cache_x
                    ], dim=2)
                x = layer(x, feat_cache[idx])
                feat_cache[idx] = cache_x  # .to("cpu")
                del cache_x
                feat_idx[0] += 1
            else:
                x = layer(x)
        return x


def count_conv3d(model):
    count = 0
    for m in model.modules():
        if isinstance(m, CausalConv3d):
            count += 1
    return count


class WanVAE_(nn.Module):

    def __init__(self,
                 dim=128,
                 z_dim=4,
                 dim_mult=[1, 2, 4, 4],
                 num_res_blocks=2,
                 attn_scales=[],
                 temperal_downsample=[True, True, False],
                 dropout=0.0):
        super().__init__()
        self.dim = dim
        self.z_dim = z_dim
        self.dim_mult = dim_mult
        self.num_res_blocks = num_res_blocks
        self.attn_scales = attn_scales
        self.temperal_downsample = temperal_downsample
        self.temperal_upsample = temperal_downsample[::-1]

        # modules
        self.encoder = Encoder3d(dim, z_dim * 2, dim_mult, num_res_blocks,
                                 attn_scales, self.temperal_downsample, dropout)
        self.conv1 = CausalConv3d(z_dim * 2, z_dim * 2, 1)
        self.conv2 = CausalConv3d(z_dim, z_dim, 1)
        self.decoder = Decoder3d(dim, z_dim, dim_mult, num_res_blocks,
                                 attn_scales, self.temperal_upsample, dropout)

    def forward(self, x):
        mu, log_var = self.encode(x)
        z = self.reparameterize(mu, log_var)
        x_recon = self.decode(z)
        return x_recon, mu, log_var

    def encode(self, x, scale=None, any_end_frame=False):
        self.clear_cache()
        ## cache
        t = x.shape[2]
        if any_end_frame:
            iter_ = 2 + (t - 2) // 4
        else:
            iter_ = 1 + (t - 1) // 4

        ## split the input along the time axis into chunks of 1, 4, 4, 4, ... frames
        out_list = []
        for i in range(iter_):
            self._enc_conv_idx = [0]
            if i == 0:
                out_list.append(self.encoder(
                    x[:, :, :1, :, :],
                    feat_cache=self._enc_feat_map,
                    feat_idx=self._enc_conv_idx))
            elif any_end_frame and i == iter_ - 1:
                out_list.append(self.encoder(
                    x[:, :, -1:, :, :],
                    feat_cache=None,
                    feat_idx=self._enc_conv_idx))
            else:
                out_list.append(self.encoder(
                    x[:, :, 1 + 4 * (i - 1):1 + 4 * i, :, :],
                    feat_cache=self._enc_feat_map,
                    feat_idx=self._enc_conv_idx))
        self.clear_cache()

        out = torch.cat(out_list, 2)
        out_list = None
        mu, log_var = self.conv1(out).chunk(2, dim=1)
        if scale is not None:
            if isinstance(scale[0], torch.Tensor):
                mu = (mu - scale[0].view(1, self.z_dim, 1, 1, 1)) * scale[1].view(
                    1, self.z_dim, 1, 1, 1)
            else:
                mu = (mu - scale[0]) * scale[1]
        return mu
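
    # Added note (illustrative): with two temporal downsamples (4x overall), a video of
    # 1 + 4*k frames encodes to 1 + k latent frames, e.g. 81 frames -> 21 latent frames.
    # The per-conv feature cache (self._enc_feat_map) carries the causal temporal context
    # from one chunk to the next, so chunked encoding matches encoding the whole clip at once.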

    def decode(self, z, scale=None, any_end_frame=False):
        self.clear_cache()
        # z: [b, c, t, h, w]
        if scale is not None:
            if isinstance(scale[0], torch.Tensor):
                z = z / scale[1].view(1, self.z_dim, 1, 1, 1) + scale[0].view(
                    1, self.z_dim, 1, 1, 1)
            else:
                z = z / scale[1] + scale[0]

        iter_ = z.shape[2]
        x = self.conv2(z)
        out_list = []
        for i in range(iter_):
            self._conv_idx = [0]
            if i == 0:
                out_list.append(self.decoder(
                    x[:, :, i:i + 1, :, :],
                    feat_cache=self._feat_map,
                    feat_idx=self._conv_idx))
            elif any_end_frame and i == iter_ - 1:
                out_list.append(self.decoder(
                    x[:, :, -1:, :, :],
                    feat_cache=None,
                    feat_idx=self._conv_idx))
            else:
                out_list.append(self.decoder(
                    x[:, :, i:i + 1, :, :],
                    feat_cache=self._feat_map,
                    feat_idx=self._conv_idx))
        self.clear_cache()

        out = torch.cat(out_list, 2)
        return out

    def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        blend_extent = min(a.shape[-2], b.shape[-2], blend_extent)
        for y in range(blend_extent):
            b[:, :, :, y, :] = (a[:, :, :, -blend_extent + y, :] * (1 - y / blend_extent)
                                + b[:, :, :, y, :] * (y / blend_extent))
        return b

    def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        blend_extent = min(a.shape[-1], b.shape[-1], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, :, x] = (a[:, :, :, :, -blend_extent + x] * (1 - x / blend_extent)
                                + b[:, :, :, :, x] * (x / blend_extent))
        return b

    def spatial_tiled_decode(self, z, scale, tile_size, any_end_frame=False):
        tile_sample_min_size = tile_size
        tile_latent_min_size = int(tile_sample_min_size / 8)
        tile_overlap_factor = 0.25

        # z: [b, c, t, h, w]
        if isinstance(scale[0], torch.Tensor):
            z = z / scale[1].view(1, self.z_dim, 1, 1, 1) + scale[0].view(
                1, self.z_dim, 1, 1, 1)
        else:
            z = z / scale[1] + scale[0]

        overlap_size = int(tile_latent_min_size * (1 - tile_overlap_factor))  # latent-space stride between tiles
        blend_extent = int(tile_sample_min_size * tile_overlap_factor)        # pixel-space blend width
        row_limit = tile_sample_min_size - blend_extent

        # Split z into overlapping tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[-2], overlap_size):
            row = []
            for j in range(0, z.shape[-1], overlap_size):
                tile = z[:, :, :, i: i + tile_latent_min_size, j: j + tile_latent_min_size]
                decoded = self.decode(tile, any_end_frame=any_end_frame)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the tile above and the tile to the left into the
                # current tile, then add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=-1))

        return torch.cat(result_rows, dim=-2)
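
    # Added worked example (illustrative): with tile_size=256 the latent tile is 32x32,
    # tiles are taken every overlap_size=24 latent pixels (75% stride), each decoded tile
    # is 256x256 pixels, neighbouring tiles are blended over blend_extent=64 pixels, and
    # each tile is cropped to row_limit=192 pixels before the rows are stitched together.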

    def spatial_tiled_encode(self, x, scale, tile_size, any_end_frame=False):
        tile_sample_min_size = tile_size
        tile_latent_min_size = int(tile_sample_min_size / 8)
        tile_overlap_factor = 0.25

        overlap_size = int(tile_sample_min_size * (1 - tile_overlap_factor))  # pixel-space stride between tiles
        blend_extent = int(tile_latent_min_size * tile_overlap_factor)        # latent-space blend width
        row_limit = tile_latent_min_size - blend_extent

        # Split video into tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[-2], overlap_size):
            row = []
            for j in range(0, x.shape[-1], overlap_size):
                tile = x[:, :, :, i: i + tile_sample_min_size, j: j + tile_sample_min_size]
                tile = self.encode(tile, any_end_frame=any_end_frame)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the tile above and the tile to the left into the
                # current tile, then add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=-1))

        mu = torch.cat(result_rows, dim=-2)
        if isinstance(scale[0], torch.Tensor):
            mu = (mu - scale[0].view(1, self.z_dim, 1, 1, 1)) * scale[1].view(
                1, self.z_dim, 1, 1, 1)
        else:
            mu = (mu - scale[0]) * scale[1]
        return mu

    def reparameterize(self, mu, log_var):
        std = torch.exp(0.5 * log_var)
        eps = torch.randn_like(std)
        return eps * std + mu

    def sample(self, imgs, deterministic=False):
        mu, log_var = self.encode(imgs)
        if deterministic:
            return mu
        std = torch.exp(0.5 * log_var.clamp(-30.0, 20.0))
        return mu + std * torch.randn_like(std)

    def clear_cache(self):
        self._conv_num = count_conv3d(self.decoder)
        self._conv_idx = [0]
        self._feat_map = [None] * self._conv_num
        # cache encode
        self._enc_conv_num = count_conv3d(self.encoder)
        self._enc_conv_idx = [0]
        self._enc_feat_map = [None] * self._enc_conv_num


def _video_vae(pretrained_path=None, z_dim=None, device='cpu', **kwargs):
    """
    Autoencoder3d adapted from Stable Diffusion 1.x, 2.x and XL.
    """
    # params
    cfg = dict(
        dim=96,
        z_dim=z_dim,
        dim_mult=[1, 2, 4, 4],
        num_res_blocks=2,
        attn_scales=[],
        temperal_downsample=[False, True, True],
        dropout=0.0)
    cfg.update(**kwargs)

    # init model
    with torch.device('meta'):
        model = WanVAE_(**cfg)

    from mmgp import offload

    # load checkpoint
    logging.info(f'loading {pretrained_path}')
    # model.load_state_dict(
    #     torch.load(pretrained_path, map_location=device), assign=True)
    # offload.load_model_data(model, pretrained_path.replace(".pth", "_bf16.safetensors"), writable_tensors=False)
    offload.load_model_data(model, pretrained_path.replace(".pth", ".safetensors"), writable_tensors=False)

    return model


class WanVAE:

    def __init__(self,
                 z_dim=16,
                 vae_pth='cache/vae_step_411000.pth',
                 dtype=torch.float,
                 device="cuda"):
        self.dtype = dtype
        self.device = device

        mean = [
            -0.7571, -0.7089, -0.9113, 0.1075, -0.1745, 0.9653, -0.1517, 1.5508,
            0.4134, -0.0715, 0.5517, -0.3632, -0.1922, -0.9497, 0.2503, -0.2921
        ]
        std = [
            2.8184, 1.4541, 2.3275, 2.6558, 1.2196, 1.7708, 2.6052, 2.0743,
            3.2687, 2.1526, 2.8652, 1.5579, 1.6382, 1.1253, 2.8251, 1.9160
        ]
        self.mean = torch.tensor(mean, dtype=dtype, device=device)
        self.std = torch.tensor(std, dtype=dtype, device=device)
        self.scale = [self.mean, 1.0 / self.std]

        # init model
        self.model = _video_vae(
            pretrained_path=vae_pth,
            z_dim=z_dim,
        ).to(dtype).eval()  # .requires_grad_(False).to(device)
        self.model._model_dtype = dtype

    def get_VAE_tile_size(vae_config, device_mem_capacity, mixed_precision):
        # VAE tiling: pick a tile size automatically from the reported VRAM budget
        # (values compared in MB), or honour an explicit vae_config choice.
        if vae_config == 0:
            if mixed_precision:
                device_mem_capacity = device_mem_capacity / 2
            if device_mem_capacity >= 24000:
                use_vae_config = 1
            elif device_mem_capacity >= 8000:
                use_vae_config = 2
            else:
                use_vae_config = 3
        else:
            use_vae_config = vae_config

        if use_vae_config == 1:
            VAE_tile_size = 0
        elif use_vae_config == 2:
            VAE_tile_size = 256
        else:
            VAE_tile_size = 128

        return VAE_tile_size
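
    # Added worked example (illustrative): with vae_config == 0, a GPU reported as
    # 16000 MB with mixed_precision enabled is treated as 8000 MB, which selects
    # use_vae_config == 2 and therefore 256-pixel tiles; a budget of 24000 MB or more
    # (after any halving) disables tiling (tile size 0), and anything below 8000 MB
    # falls back to 128-pixel tiles.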

    def encode(self, videos, tile_size=256, any_end_frame=False):
        """
        videos: A list of videos each with shape [C, T, H, W].
        """
        original_dtype = videos[0].dtype
        if tile_size > 0:
            return [
                self.model.spatial_tiled_encode(u.to(self.dtype).unsqueeze(0), self.scale, tile_size,
                                                any_end_frame=any_end_frame).float().squeeze(0)
                for u in videos
            ]
        else:
            return [
                self.model.encode(u.to(self.dtype).unsqueeze(0), self.scale,
                                  any_end_frame=any_end_frame).float().squeeze(0)
                for u in videos
            ]

    def decode(self, zs, tile_size, any_end_frame=False):
        if tile_size > 0:
            return [
                self.model.spatial_tiled_decode(u.to(self.dtype).unsqueeze(0), self.scale, tile_size,
                                                any_end_frame=any_end_frame).clamp_(-1, 1).float().squeeze(0)
                for u in zs
            ]
        else:
            return [
                self.model.decode(u.to(self.dtype).unsqueeze(0), self.scale,
                                  any_end_frame=any_end_frame).clamp_(-1, 1).float().squeeze(0)
                for u in zs
            ]
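

# A minimal end-to-end usage sketch (added for illustration; it assumes a Wan VAE
# checkpoint is available locally — the default path below is resolved to
# 'cache/vae_step_411000.safetensors' by _video_vae and may need to be adjusted).
if __name__ == "__main__":
    vae = WanVAE(z_dim=16, vae_pth='cache/vae_step_411000.pth',
                 dtype=torch.float, device="cpu")
    video = torch.rand(3, 1, 64, 64) * 2 - 1    # [C, T, H, W] in [-1, 1], a single frame
    latents = vae.encode([video], tile_size=0)  # -> list of [16, 1, 8, 8] latents
    frames = vae.decode(latents, tile_size=0)   # -> list of [3, 1, 64, 64] tensors in [-1, 1]
    print(latents[0].shape, frames[0].shape)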