# ----------------------------------------------------------------------------
# -                   TanksAndTemples Website Toolbox                        -
# -                    http://www.tanksandtemples.org                        -
# ----------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2017
# Arno Knapitsch <arno.knapitsch@gmail.com>
# Jaesik Park <syncle@gmail.com>
# Qian-Yi Zhou <Qianyi.Zhou@gmail.com>
# Vladlen Koltun <vkoltun@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ----------------------------------------------------------------------------
#
# This python script evaluates reconstructions against data from
# www.tanksandtemples.org. The dataset has a different license; please refer to
# https://tanksandtemples.org/license/
#
# This script requires the Open3D python binding.
# Please follow the instructions in setup.py before running this script.
import numpy as np
import open3d as o3d
import os
import argparse
import torch

from config import scenes_tau_dict
from registration import (
    trajectory_alignment,
    registration_vol_ds,
    registration_unif,
    read_trajectory,
)
from help_func import auto_orient_and_center_poses
from trajectory_io import CameraPose
from evaluation import EvaluateHisto
from util import make_dir
from plot import plot_graph


def run_evaluation(dataset_dir, traj_path, ply_path, out_dir, view_crop):
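    """Evaluate a reconstruction against a Tanks and Temples style ground truth.

    Argument summary (inferred from the code below):
        dataset_dir: scene directory holding the ground-truth files; its name
            must be a key of scenes_tau_dict.
        traj_path: estimated camera trajectory (.log, .npy, or .json).
        ply_path: reconstructed point cloud (.ply).
        out_dir: directory where evaluation output is written.
        view_crop: whether to visualize the cropped cloud after alignment
            (forced off in the __main__ block below).
    """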
    scene = os.path.basename(os.path.normpath(dataset_dir))
    if scene not in scenes_tau_dict:
        print(dataset_dir, scene)
        raise Exception("invalid dataset-dir, not in scenes_tau_dict")
    print("")
    print("===========================")
    print("Evaluating %s" % scene)
    print("===========================")
    dTau = scenes_tau_dict[scene]
    # Put the crop file, the GT point cloud, the COLMAP SfM log file and the
    # alignment file for the scene inside dataset_dir (the directory itself is
    # named after the scene).
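    # For example, for a hypothetical scene named "Barn" the expected layout is:
    #   <dataset_dir>/Barn_COLMAP_SfM.log          reference COLMAP trajectory
    #   <dataset_dir>/Barn_trans.txt               alignment for the GT point cloud
    #   <dataset_dir>/Barn.ply                     ground-truth point cloud
    #   <dataset_dir>/Barn.json                    crop volume (w.r.t. the GT cloud)
    #   <dataset_dir>/Barn_mapping_reference.txt   optional image-index mapping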
    colmap_ref_logfile = os.path.join(dataset_dir, scene + "_COLMAP_SfM.log")
    # The alignment is defined for the ground-truth point cloud, so we can use
    # it directly.
    alignment = os.path.join(dataset_dir, scene + "_trans.txt")
    gt_filen = os.path.join(dataset_dir, scene + ".ply")
    # The crop file is also defined w.r.t. the ground-truth point cloud, so we
    # can use it directly; otherwise we would have to crop the estimated point
    # cloud ourselves.
    cropfile = os.path.join(dataset_dir, scene + ".json")
    # The mapping file is optional.
    map_file = os.path.join(dataset_dir, scene + "_mapping_reference.txt")
    if not os.path.isfile(map_file):
        map_file = None
    # Note: map_file is unconditionally reset to None here, so the mapping file
    # is currently never used even if it exists.
    map_file = None
    make_dir(out_dir)

    # Load the reconstruction and the corresponding ground truth.
    print(ply_path)
    pcd = o3d.io.read_point_cloud(ply_path)
    print(gt_filen)
    gt_pcd = o3d.io.read_point_cloud(gt_filen)
    gt_trans = np.loadtxt(alignment)
    print(traj_path)
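    # The estimated trajectory may be given in one of three formats handled
    # below: a .npy array of camera pose matrices, an instant-ngp/sdfstudio
    # style transforms .json, or a trajectory .log readable by read_trajectory().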
    traj_to_register = []
    if traj_path.endswith('.npy'):
        ld = np.load(traj_path)
        for i in range(len(ld)):
            traj_to_register.append(CameraPose(meta=None, mat=ld[i]))
    elif traj_path.endswith('.json'):  # instant-ngp or sdfstudio format
        import json
        with open(traj_path, encoding='UTF-8') as f:
            meta = json.load(f)
        poses_dict = {}
        for i, frame in enumerate(meta['frames']):
            filepath = frame['file_path']
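            # NOTE: the slice below assumes file_path entries that look like
            # "images/frame_00001.png" (a 1-based frame index in characters
            # 13-17); adjust it if your naming scheme differs.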
            new_i = int(filepath[13:18]) - 1
            poses_dict[new_i] = np.array(frame['transform_matrix'])
        poses = []
        for i in range(len(poses_dict)):
            poses.append(poses_dict[i])
        poses = torch.from_numpy(np.array(poses).astype(np.float32))
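        # Re-orient, center, and rescale the poses (max |translation| -> 1),
        # mirroring the usual sdfstudio/nerfstudio pose preprocessing, so that
        # they presumably match the coordinate frame used during reconstruction.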
        poses, _ = auto_orient_and_center_poses(poses, method='up', center_poses=True)
        scale_factor = 1.0 / float(torch.max(torch.abs(poses[:, :3, 3])))
        poses[:, :3, 3] *= scale_factor
        poses = poses.numpy()
        for i in range(len(poses)):
            traj_to_register.append(CameraPose(meta=None, mat=poses[i]))
    else:
        traj_to_register = read_trajectory(traj_path)
    print(colmap_ref_logfile)
    gt_traj_col = read_trajectory(colmap_ref_logfile)
    trajectory_transform = trajectory_alignment(map_file, traj_to_register,
                                                gt_traj_col, gt_trans, scene)
    inv_transform = np.linalg.inv(trajectory_transform)
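    # Map the ground-truth point cloud into the coordinate frame of the
    # estimated trajectory by applying the inverse alignment; the min/max
    # printed below are the axis-aligned bounds of the transformed GT cloud.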
    points = np.asarray(gt_pcd.points)
    points = points @ inv_transform[:3, :3].T + inv_transform[:3, 3:].T
    print(points.min(axis=0), points.max(axis=0))
    print(np.concatenate([points.min(axis=0), points.max(axis=0)]).reshape(-1).tolist())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--dataset-dir",
        type=str,
        required=True,
        help="path to a dataset/scene directory containing X.json, X.ply, ...",
    )
    parser.add_argument(
        "--traj-path",
        type=str,
        required=True,
        help="path to the trajectory file. See `convert_to_logfile.py` to create this file.",
    )
    parser.add_argument(
        "--ply-path",
        type=str,
        required=True,
        help="path to the reconstruction .ply file",
    )
    parser.add_argument(
        "--out-dir",
        type=str,
        default="",
        help="output directory; by default an `evaluation` directory is created next to the ply file",
    )
    parser.add_argument(
        "--view-crop",
        type=int,
        default=0,
        help="whether to view the cropped point cloud after alignment",
    )
    args = parser.parse_args()
    # Viewing the cropped point cloud is currently disabled regardless of the
    # flag value.
    args.view_crop = False  # (args.view_crop > 0)
    if args.out_dir.strip() == "":
        args.out_dir = os.path.join(os.path.dirname(args.ply_path),
                                    "evaluation")
    run_evaluation(
        dataset_dir=args.dataset_dir,
        traj_path=args.traj_path,
        ply_path=args.ply_path,
        out_dir=args.out_dir,
        view_crop=args.view_crop,
    )
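#
# Example invocation (hypothetical paths and script name):
#   python run_evaluation.py --dataset-dir /path/to/Barn \
#       --traj-path /path/to/Barn_traj.log --ply-path /path/to/recon.ply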