# copy from gaussian-opacity-fields
# copy from 2DGS
import math

import torch
import numpy as np


def depths_to_points(view, depthmap):
    # c2w = (view.world_view_transform.T).inverse()
    # we train in camera coordinates, so camera-to-world is the identity
    c2w = torch.eye(4).float().cuda()
    W, H = view.image_width, view.image_height
    # focal lengths from the horizontal / vertical fields of view
    fx = W / (2 * math.tan(view.FoVx / 2.))
    fy = H / (2 * math.tan(view.FoVy / 2.))
    # pinhole intrinsics with the principal point at the image center
    intrins = torch.tensor(
        [[fx, 0., W / 2.],
         [0., fy, H / 2.],
         [0., 0., 1.0]]
    ).float().cuda()
    # pixel-center coordinates (u + 0.5, v + 0.5) for every pixel
    grid_x, grid_y = torch.meshgrid(torch.arange(W) + 0.5, torch.arange(H) + 0.5, indexing='xy')
    points = torch.stack([grid_x, grid_y, torch.ones_like(grid_x)], dim=-1).reshape(-1, 3).float().cuda()
    # back-project pixel rays through the inverse intrinsics and scale by the per-pixel depth
    rays_d = points @ intrins.inverse().T @ c2w[:3, :3].T
    rays_o = c2w[:3, 3]
    points = depthmap.reshape(-1, 1) * rays_d + rays_o
    return points
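
# Reference note (not from the original file): with c2w fixed to the identity,
# the back-projection above reduces to
#     X(u, v) = depth(u, v) * K^{-1} [u + 0.5, v + 0.5, 1]^T,
# i.e. the returned points are expressed directly in the camera frame.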

def depth_to_normal(view, depth):
    """
    view: view camera
    depth: depthmap
    """
    points = depths_to_points(view, depth).reshape(*depth.shape[1:], 3)
    output = torch.zeros_like(points)
    # finite differences of the 3D points over neighbouring rows (dx) and columns (dy)
    dx = points[2:, 1:-1] - points[:-2, 1:-1]
    dy = points[1:-1, 2:] - points[1:-1, :-2]
    # the cross product of the two tangent directions gives the surface normal
    normal_map = torch.nn.functional.normalize(torch.cross(dx, dy, dim=-1), dim=-1)
    output[1:-1, 1:-1, :] = normal_map
    return output, points
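
# --- Minimal usage sketch (not part of the original file) ---
# Assumes a CUDA device and a lightweight stand-in camera exposing only the
# attributes read above (image_width, image_height, FoVx, FoVy); a real
# pipeline would pass its own camera object and a rendered depth map instead.
if __name__ == "__main__":
    class _DummyView:
        image_width = 64
        image_height = 48
        FoVx = math.radians(60.0)   # horizontal field of view in radians
        FoVy = math.radians(45.0)   # vertical field of view in radians

    view = _DummyView()
    # fake depth map shaped (1, H, W), matching the depth.shape[1:] usage above
    depth = torch.full((1, view.image_height, view.image_width), 2.0, device="cuda")

    normal, points = depth_to_normal(view, depth)
    print(normal.shape, points.shape)  # both (H, W, 3)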