import torch
import torch.nn as nn
import torch.nn.functional as F
from model.warplayer import warp
# from train_log.refine import *

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
    return nn.Sequential(
        nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                  padding=padding, dilation=dilation, bias=True),
        nn.LeakyReLU(0.2, True)
    )


def conv_bn(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
    return nn.Sequential(
        nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                  padding=padding, dilation=dilation, bias=False),
        nn.BatchNorm2d(out_planes),
        nn.LeakyReLU(0.2, True)
    )


class ResConv(nn.Module):
    """Residual 3x3 convolution with a learned per-channel scale (beta)."""

    def __init__(self, c, dilation=1):
        super(ResConv, self).__init__()
        self.conv = nn.Conv2d(c, c, 3, 1, dilation, dilation=dilation, groups=1)
        self.beta = nn.Parameter(torch.ones((1, c, 1, 1)), requires_grad=True)
        self.relu = nn.LeakyReLU(0.2, True)

    def forward(self, x):
        return self.relu(self.conv(x) * self.beta + x)


class IFBlock(nn.Module):
    """Predicts a bidirectional flow field (4 channels) and a fusion mask logit
    (1 channel) at a given pyramid scale."""

    def __init__(self, in_planes, c=64):
        super(IFBlock, self).__init__()
        self.conv0 = nn.Sequential(
            conv(in_planes, c//2, 3, 2, 1),
            conv(c//2, c, 3, 2, 1),
        )
        self.convblock = nn.Sequential(
            ResConv(c),
            ResConv(c),
            ResConv(c),
            ResConv(c),
            ResConv(c),
            ResConv(c),
            ResConv(c),
            ResConv(c),
        )
        self.lastconv = nn.Sequential(
            nn.ConvTranspose2d(c, 4*6, 4, 2, 1),
            nn.PixelShuffle(2)
        )

    def forward(self, x, flow=None, scale=1):
        # Work at 1/scale resolution; incoming flow values are rescaled to match.
        x = F.interpolate(x, scale_factor=1. / scale, mode="bilinear", align_corners=False)
        if flow is not None:
            flow = F.interpolate(flow, scale_factor=1. / scale, mode="bilinear",
                                 align_corners=False) * 1. / scale
            x = torch.cat((x, flow), 1)
        feat = self.conv0(x)
        feat = self.convblock(feat)
        tmp = self.lastconv(feat)
        # Upsample back to the input resolution and undo the flow rescaling.
        tmp = F.interpolate(tmp, scale_factor=scale, mode="bilinear", align_corners=False)
        flow = tmp[:, :4] * scale
        mask = tmp[:, 4:5]
        return flow, mask


class IFNet(nn.Module):
    def __init__(self):
        super(IFNet, self).__init__()
        # Coarse-to-fine cascade of flow estimators.
        self.block0 = IFBlock(7+8, c=192)
        self.block1 = IFBlock(8+4+8, c=128)
        self.block2 = IFBlock(8+4+8, c=96)
        self.block3 = IFBlock(8+4+8, c=64)
        # Lightweight per-frame feature encoder (3 -> 4 channels).
        self.encode = nn.Sequential(
            nn.Conv2d(3, 16, 3, 2, 1),
            nn.ConvTranspose2d(16, 4, 4, 2, 1)
        )
        # self.contextnet = Contextnet()
        # self.unet = Unet()

    def forward(self, x, timestep=0.5, scale_list=[8, 4, 2, 1], training=False, fastmode=True, ensemble=False):
        if ensemble:
            print('ensemble is removed')
        if training == False:
            channel = x.shape[1] // 2
            img0 = x[:, :channel]
            img1 = x[:, channel:]
        if not torch.is_tensor(timestep):
            timestep = (x[:, :1].clone() * 0 + 1) * timestep
        else:
            timestep = timestep.repeat(1, 1, img0.shape[2], img0.shape[3])
        f0 = self.encode(img0[:, :3])
        f1 = self.encode(img1[:, :3])
        flow_list = []
        merged = []
        mask_list = []
        warped_img0 = img0
        warped_img1 = img1
        flow = None
        mask = None
        loss_cons = 0
        block = [self.block0, self.block1, self.block2, self.block3]
        for i in range(4):
            if flow is None:
                # Coarsest level: estimate flow directly from the frames and features.
                flow, mask = block[i](torch.cat((img0[:, :3], img1[:, :3], f0, f1, timestep), 1),
                                      None, scale=scale_list[i])
            else:
                # Finer levels: predict a residual flow update from the warped inputs.
                fd, mask = block[i](torch.cat((warped_img0[:, :3], warped_img1[:, :3],
                                               warp(f0, flow[:, :2]), warp(f1, flow[:, 2:4]),
                                               timestep, mask), 1),
                                    flow, scale=scale_list[i])
                flow = flow + fd
            mask_list.append(mask)
            flow_list.append(flow)
            warped_img0 = warp(img0, flow[:, :2])
            warped_img1 = warp(img1, flow[:, 2:4])
            merged.append((warped_img0, warped_img1))
        # Blend the two warped frames with the sigmoid fusion mask.
        mask = torch.sigmoid(mask)
        merged[3] = (warped_img0 * mask + warped_img1 * (1 - mask))
        if not fastmode:
            print('contextnet is removed')
            '''
            c0 = self.contextnet(img0, flow[:, :2])
            c1 = self.contextnet(img1, flow[:, 2:4])
            tmp = self.unet(img0, img1, warped_img0, warped_img1, mask, flow, c0, c1)
            res = tmp[:, :3] * 2 - 1
            merged[3] = torch.clamp(merged[3] + res, 0, 1)
            '''
        return flow_list, mask_list[3], merged
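

# ---------------------------------------------------------------------------
# Minimal usage sketch (an illustrative assumption, not part of the original
# module): drives IFNet with two random RGB frames in [0, 1]. Spatial
# dimensions should be divisible by 32 so the coarsest scale (8) still divides
# evenly after conv0's 4x downsampling; real pipelines pad frames to meet this.
# With untrained weights this only checks shapes and wiring; load a pretrained
# checkpoint for actual frame interpolation.
if __name__ == "__main__":
    net = IFNet().to(device).eval()
    img0 = torch.rand(1, 3, 256, 448, device=device)  # frame at t=0
    img1 = torch.rand(1, 3, 256, 448, device=device)  # frame at t=1
    with torch.no_grad():
        flow_list, mask, merged = net(torch.cat((img0, img1), 1), timestep=0.5)
    # merged[3] is the blended frame at the requested timestep.
    print(merged[3].shape)  # expected: torch.Size([1, 3, 256, 448])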