import os import cv2 import torch import numpy as np import gradio as gr import sys import shutil from datetime import datetime import glob import gc import time import spaces from pi3.utils.geometry import se3_inverse, homogenize_points, depth_edge from pi3.models.pi3 import Pi3 from pi3.utils.basic import load_images_as_tensor, write_ply import trimesh import matplotlib from scipy.spatial.transform import Rotation """ Gradio utils """ def predictions_to_glb( predictions, conf_thres=50.0, filter_by_frames="all", show_cam=True, ) -> trimesh.Scene: """ Converts VGGT predictions to a 3D scene represented as a GLB file. Args: predictions (dict): Dictionary containing model predictions with keys: - world_points: 3D point coordinates (S, H, W, 3) - world_points_conf: Confidence scores (S, H, W) - images: Input images (S, H, W, 3) - extrinsic: Camera extrinsic matrices (S, 3, 4) conf_thres (float): Percentage of low-confidence points to filter out (default: 50.0) filter_by_frames (str): Frame filter specification (default: "all") show_cam (bool): Include camera visualization (default: True) Returns: trimesh.Scene: Processed 3D scene containing point cloud and cameras Raises: ValueError: If input predictions structure is invalid """ if not isinstance(predictions, dict): raise ValueError("predictions must be a dictionary") if conf_thres is None: conf_thres = 10 print("Building GLB scene") selected_frame_idx = None if filter_by_frames != "all" and filter_by_frames != "All": try: # Extract the index part before the colon selected_frame_idx = int(filter_by_frames.split(":")[0]) except (ValueError, IndexError): pass pred_world_points = predictions["points"] pred_world_points_conf = predictions.get("conf", np.ones_like(pred_world_points[..., 0])) # Get images from predictions images = predictions["images"] # Use extrinsic matrices instead of pred_extrinsic_list camera_poses = predictions["camera_poses"] if selected_frame_idx is not None: pred_world_points = pred_world_points[selected_frame_idx][None] pred_world_points_conf = pred_world_points_conf[selected_frame_idx][None] images = images[selected_frame_idx][None] camera_poses = camera_poses[selected_frame_idx][None] vertices_3d = pred_world_points.reshape(-1, 3) # Handle different image formats - check if images need transposing if images.ndim == 4 and images.shape[1] == 3: # NCHW format colors_rgb = np.transpose(images, (0, 2, 3, 1)) else: # Assume already in NHWC format colors_rgb = images colors_rgb = (colors_rgb.reshape(-1, 3) * 255).astype(np.uint8) conf = pred_world_points_conf.reshape(-1) # Convert percentage threshold to actual confidence value if conf_thres == 0.0: conf_threshold = 0.0 else: # conf_threshold = np.percentile(conf, conf_thres) conf_threshold = conf_thres / 100 conf_mask = (conf >= conf_threshold) & (conf > 1e-5) vertices_3d = vertices_3d[conf_mask] colors_rgb = colors_rgb[conf_mask] if vertices_3d is None or np.asarray(vertices_3d).size == 0: vertices_3d = np.array([[1, 0, 0]]) colors_rgb = np.array([[255, 255, 255]]) scene_scale = 1 else: # Calculate the 5th and 95th percentiles along each axis lower_percentile = np.percentile(vertices_3d, 5, axis=0) upper_percentile = np.percentile(vertices_3d, 95, axis=0) # Calculate the diagonal length of the percentile bounding box scene_scale = np.linalg.norm(upper_percentile - lower_percentile) colormap = matplotlib.colormaps.get_cmap("gist_rainbow") # Initialize a 3D scene scene_3d = trimesh.Scene() scene_3d_no_cam = trimesh.Scene() # Add point cloud data to the scene point_cloud_data = trimesh.PointCloud(vertices=vertices_3d, colors=colors_rgb) scene_3d.add_geometry(point_cloud_data) # Prepare 4x4 matrices for camera extrinsics num_cameras = len(camera_poses) if show_cam: # Add camera models to the scene for i in range(num_cameras): camera_to_world = camera_poses[i] rgba_color = colormap(i / num_cameras) current_color = tuple(int(255 * x) for x in rgba_color[:3]) # integrate_camera_into_scene(scene_3d, camera_to_world, current_color, scene_scale) integrate_camera_into_scene(scene_3d, camera_to_world, current_color, 1.) # fixed camera size # Rotate scene for better visualize align_rotation = np.eye(4) align_rotation[:3, :3] = Rotation.from_euler("y", 100, degrees=True).as_matrix() # plane rotate align_rotation[:3, :3] = align_rotation[:3, :3] @ Rotation.from_euler("x", 155, degrees=True).as_matrix() # roll scene_3d.apply_transform(align_rotation) print("GLB Scene built") return scene_3d, [vertices_3d, colors_rgb] def integrate_camera_into_scene(scene: trimesh.Scene, transform: np.ndarray, face_colors: tuple, scene_scale: float): """ Integrates a fake camera mesh into the 3D scene. Args: scene (trimesh.Scene): The 3D scene to add the camera model. transform (np.ndarray): Transformation matrix for camera positioning. face_colors (tuple): Color of the camera face. scene_scale (float): Scale of the scene. """ cam_width = scene_scale * 0.05 cam_height = scene_scale * 0.1 # Create cone shape for camera rot_45_degree = np.eye(4) rot_45_degree[:3, :3] = Rotation.from_euler("z", 45, degrees=True).as_matrix() rot_45_degree[2, 3] = -cam_height opengl_transform = get_opengl_conversion_matrix() # Combine transformations complete_transform = transform @ opengl_transform @ rot_45_degree camera_cone_shape = trimesh.creation.cone(cam_width, cam_height, sections=4) # Generate mesh for the camera slight_rotation = np.eye(4) slight_rotation[:3, :3] = Rotation.from_euler("z", 2, degrees=True).as_matrix() vertices_combined = np.concatenate( [ camera_cone_shape.vertices, 0.95 * camera_cone_shape.vertices, transform_points(slight_rotation, camera_cone_shape.vertices), ] ) vertices_transformed = transform_points(complete_transform, vertices_combined) mesh_faces = compute_camera_faces(camera_cone_shape) # Add the camera mesh to the scene camera_mesh = trimesh.Trimesh(vertices=vertices_transformed, faces=mesh_faces) camera_mesh.visual.face_colors[:, :3] = face_colors scene.add_geometry(camera_mesh) def get_opengl_conversion_matrix() -> np.ndarray: """ Constructs and returns the OpenGL conversion matrix. Returns: numpy.ndarray: A 4x4 OpenGL conversion matrix. """ # Create an identity matrix matrix = np.identity(4) # Flip the y and z axes matrix[1, 1] = -1 matrix[2, 2] = -1 return matrix def transform_points(transformation: np.ndarray, points: np.ndarray, dim: int = None) -> np.ndarray: """ Applies a 4x4 transformation to a set of points. Args: transformation (np.ndarray): Transformation matrix. points (np.ndarray): Points to be transformed. dim (int, optional): Dimension for reshaping the result. Returns: np.ndarray: Transformed points. """ points = np.asarray(points) initial_shape = points.shape[:-1] dim = dim or points.shape[-1] # Apply transformation transformation = transformation.swapaxes(-1, -2) # Transpose the transformation matrix points = points @ transformation[..., :-1, :] + transformation[..., -1:, :] # Reshape the result result = points[..., :dim].reshape(*initial_shape, dim) return result def compute_camera_faces(cone_shape: trimesh.Trimesh) -> np.ndarray: """ Computes the faces for the camera mesh. Args: cone_shape (trimesh.Trimesh): The shape of the camera cone. Returns: np.ndarray: Array of faces for the camera mesh. """ # Create pseudo cameras faces_list = [] num_vertices_cone = len(cone_shape.vertices) for face in cone_shape.faces: if 0 in face: continue v1, v2, v3 = face v1_offset, v2_offset, v3_offset = face + num_vertices_cone v1_offset_2, v2_offset_2, v3_offset_2 = face + 2 * num_vertices_cone faces_list.extend( [ (v1, v2, v2_offset), (v1, v1_offset, v3), (v3_offset, v2, v3), (v1, v2, v2_offset_2), (v1, v1_offset_2, v3), (v3_offset_2, v2, v3), ] ) faces_list += [(v3, v2, v1) for v1, v2, v3 in faces_list] return np.array(faces_list) # ------------------------------------------------------------------------- # 1) Core model inference # ------------------------------------------------------------------------- @spaces.GPU(duration=120) def run_model(target_dir, model) -> dict: print(f"Processing images from {target_dir}") # Device check device = "cuda" if torch.cuda.is_available() else "cpu" if not torch.cuda.is_available(): raise ValueError("CUDA is not available. Check your environment.") # Move model to device model = model.to(device) model.eval() # Load and preprocess images image_names = glob.glob(os.path.join(target_dir, "images", "*")) image_names = sorted(image_names) print(f"Found {len(image_names)} images") if len(image_names) == 0: raise ValueError("No images found. Check your upload.") # interval = 10 if target_dir.endswith('.mp4') else 1 interval = 1 imgs = load_images_as_tensor(os.path.join(target_dir, "images"), interval=interval).to(device) # (N, 3, H, W) # 3. Infer print("Running model inference...") dtype = torch.bfloat16 with torch.no_grad(): with torch.amp.autocast('cuda', dtype=dtype): predictions = model(imgs[None]) # Add batch dimension predictions['images'] = imgs[None].permute(0, 1, 3, 4, 2) predictions['conf'] = torch.sigmoid(predictions['conf']) edge = depth_edge(predictions['local_points'][..., 2], rtol=0.03) predictions['conf'][edge] = 0.0 del predictions['local_points'] # # transform to first camera coordinate # predictions['points'] = torch.einsum('bij, bnhwj -> bnhwi', se3_inverse(predictions['camera_poses'][:, 0]), homogenize_points(predictions['points']))[..., :3] # predictions['camera_poses'] = torch.einsum('bij, bnjk -> bnik', se3_inverse(predictions['camera_poses'][:, 0]), predictions['camera_poses']) # Convert tensors to numpy for key in predictions.keys(): if isinstance(predictions[key], torch.Tensor): predictions[key] = predictions[key].cpu().numpy().squeeze(0) # remove batch dimension # Clean up torch.cuda.empty_cache() return predictions # ------------------------------------------------------------------------- # 2) Handle uploaded video/images --> produce target_dir + images # ------------------------------------------------------------------------- def handle_uploads(input_video, input_images, interval=-1): """ Create a new 'target_dir' + 'images' subfolder, and place user-uploaded images or extracted frames from video into it. Return (target_dir, image_paths). """ start_time = time.time() gc.collect() torch.cuda.empty_cache() # Create a unique folder name timestamp = datetime.now().strftime("%Y%m%d_%H%M%S_%f") target_dir = f"input_images_{timestamp}" target_dir_images = os.path.join(target_dir, "images") # Clean up if somehow that folder already exists if os.path.exists(target_dir): shutil.rmtree(target_dir) os.makedirs(target_dir, exist_ok=True) os.makedirs(target_dir_images, exist_ok=True) image_paths = [] # --- Handle images --- if input_images is not None: if interval is not None and interval > 0: input_images = input_images[::interval] for file_data in input_images: if isinstance(file_data, dict) and "name" in file_data: file_path = file_data["name"] else: file_path = file_data dst_path = os.path.join(target_dir_images, os.path.basename(file_path)) shutil.copy(file_path, dst_path) image_paths.append(dst_path) # --- Handle video --- if input_video is not None: if isinstance(input_video, dict) and "name" in input_video: video_path = input_video["name"] else: video_path = input_video vs = cv2.VideoCapture(video_path) fps = vs.get(cv2.CAP_PROP_FPS) if interval is not None and interval > 0: frame_interval = interval else: frame_interval = int(fps * 1) # 1 frame/sec count = 0 video_frame_num = 0 while True: gotit, frame = vs.read() if not gotit: break count += 1 if count % frame_interval == 0: image_path = os.path.join(target_dir_images, f"{video_frame_num:06}.png") cv2.imwrite(image_path, frame) image_paths.append(image_path) video_frame_num += 1 # Sort final images for gallery image_paths = sorted(image_paths) end_time = time.time() print(f"Files copied to {target_dir_images}; took {end_time - start_time:.3f} seconds") return target_dir, image_paths # ------------------------------------------------------------------------- # 3) Update gallery on upload # ------------------------------------------------------------------------- def update_gallery_on_upload(input_video, input_images, interval=-1): """ Whenever user uploads or changes files, immediately handle them and show in the gallery. Return (target_dir, image_paths). If nothing is uploaded, returns "None" and empty list. """ if not input_video and not input_images: return None, None, None, None target_dir, image_paths = handle_uploads(input_video, input_images, interval=interval) return None, target_dir, image_paths, "Upload complete. Click 'Reconstruct' to begin 3D processing." # ------------------------------------------------------------------------- # 4) Reconstruction: uses the target_dir plus any viz parameters # ------------------------------------------------------------------------- @spaces.GPU(duration=120) def gradio_demo( target_dir, conf_thres=3.0, frame_filter="All", show_cam=True, ): """ Perform reconstruction using the already-created target_dir/images. """ if not os.path.isdir(target_dir) or target_dir == "None": return None, "No valid target directory found. Please upload first.", None, None start_time = time.time() gc.collect() torch.cuda.empty_cache() # Prepare frame_filter dropdown target_dir_images = os.path.join(target_dir, "images") all_files = sorted(os.listdir(target_dir_images)) if os.path.isdir(target_dir_images) else [] all_files = [f"{i}: {filename}" for i, filename in enumerate(all_files)] frame_filter_choices = ["All"] + all_files print("Running run_model...") with torch.no_grad(): predictions = run_model(target_dir, model) # Save predictions prediction_save_path = os.path.join(target_dir, "predictions.npz") np.savez(prediction_save_path, **predictions) # Handle None frame_filter if frame_filter is None: frame_filter = "All" # Build a GLB file name glbfile = os.path.join( target_dir, f"glbscene_{conf_thres}_{frame_filter.replace('.', '_').replace(':', '').replace(' ', '_')}_cam{show_cam}.glb", ) # Convert predictions to GLB glbscene, pcd = predictions_to_glb( predictions, conf_thres=conf_thres, filter_by_frames=frame_filter, show_cam=show_cam, ) glbscene.export(file_obj=glbfile) # we save a .ply file too plyfile = glbfile.replace('.glb', '.ply') write_ply(pcd[0], pcd[1]/255, path=plyfile) print(f'Saved .ply file to {plyfile}') # Cleanup del predictions gc.collect() torch.cuda.empty_cache() end_time = time.time() print(f"Total time: {end_time - start_time:.2f} seconds (including IO)") log_msg = f"Reconstruction Success ({len(all_files)} frames). Waiting for visualization." return glbfile, plyfile, log_msg, gr.Dropdown(choices=frame_filter_choices, value=frame_filter, interactive=True) # ------------------------------------------------------------------------- # 5) Helper functions for UI resets + re-visualization # ------------------------------------------------------------------------- def clear_fields(): """ Clears the 3D viewer, the stored target_dir, and empties the gallery. """ return None def update_log(): """ Display a quick log message while waiting. """ return "Loading and Reconstructing..." def update_visualization( target_dir, conf_thres, frame_filter, show_cam, is_example ): """ Reload saved predictions from npz, create (or reuse) the GLB for new parameters, and return it for the 3D viewer. If is_example == "True", skip. """ # If it's an example click, skip as requested if is_example == "True": return None, "No reconstruction available. Please click the Reconstruct button first." if not target_dir or target_dir == "None" or not os.path.isdir(target_dir): return None, "No reconstruction available. Please click the Reconstruct button first." predictions_path = os.path.join(target_dir, "predictions.npz") if not os.path.exists(predictions_path): return None, f"No reconstruction available at {predictions_path}. Please run 'Reconstruct' first." key_list = [ "images", "points", "conf", "camera_poses", ] loaded = np.load(predictions_path) predictions = {key: np.array(loaded[key]) for key in key_list} glbfile = os.path.join( target_dir, f"glbscene_{conf_thres}_{frame_filter.replace('.', '_').replace(':', '').replace(' ', '_')}_cam{show_cam}.glb", ) if not os.path.exists(glbfile): glbscene, pcd = predictions_to_glb( predictions, conf_thres=conf_thres, filter_by_frames=frame_filter, show_cam=show_cam, ) glbscene.export(file_obj=glbfile) # we save a .ply file too plyfile = glbfile.replace('.glb', '.ply') write_ply(pcd[0], pcd[1]/255, path=plyfile) print(f'Saved .ply file to {plyfile}') return glbfile, plyfile, "Updating Visualization" # ------------------------------------------------------------------------- # Example images # ------------------------------------------------------------------------- house = "examples/gradio_examples/house.mp4" man_walking_long = "examples/gradio_examples/man_walking_long.mp4" parkour = "examples/gradio_examples/parkour.mp4" valley = "examples/gradio_examples/valley.mp4" cartoon_horse = "examples/cartoon_horse.mp4" parkour_long = "examples/parkour_long.mp4" skating = "examples/skating.mp4" skiing = "examples/skiing.mp4" # ------------------------------------------------------------------------- # 6) Build Gradio UI # ------------------------------------------------------------------------- if __name__ == '__main__': device = "cuda" if torch.cuda.is_available() else "cpu" print("Initializing and loading Pi3 model...") model = Pi3.from_pretrained("yyfz233/Pi3") # model.load_state_dict(torch.load('ckpts/pi3.pt', weights_only=False, map_location=device)) model.eval() model = model.to(device) theme = gr.themes.Ocean() theme.set( checkbox_label_background_fill_selected="*button_primary_background_fill", checkbox_label_text_color_selected="*button_primary_text_color", ) with gr.Blocks( theme=theme, css=""" /* --- Google 字体导入 (科技感字体) --- */ @import url('https://fonts.googleapis.com/css2?family=Orbitron:wght@400;700;900&family=Rajdhani:wght@400;500;700&display=swap'); /* --- 动画关键帧 --- */ /* 背景动态星云效果 */ @keyframes gradient-animation { 0% { background-position: 0% 50%; } 50% { background-position: 100% 50%; } 100% { background-position: 0% 50%; } } /* 标题和状态文字的霓虹灯光效 */ @keyframes text-glow { 0%, 100% { text-shadow: 0 0 10px #0ea5e9, 0 0 20px #0ea5e9, 0 0 30px #4f46e5, 0 0 40px #4f46e5; } 50% { text-shadow: 0 0 5px #0ea5e9, 0 0 10px #0ea5e9, 0 0 15px #4f46e5, 0 0 20px #4f46e5; } } /* 卡片边框呼吸光晕 */ @keyframes border-glow { 0% { border-color: rgba(79, 70, 229, 0.5); box-shadow: 0 0 15px rgba(79, 70, 229, 0.3); } 50% { border-color: rgba(14, 165, 233, 0.8); box-shadow: 0 0 25px rgba(14, 165, 233, 0.5); } 100% { border-color: rgba(79, 70, 229, 0.5); box-shadow: 0 0 15px rgba(79, 70, 229, 0.3); } } /* --- 全局样式:宇宙黑暗主题 --- */ .gradio-container { font-family: 'Rajdhani', sans-serif; background: linear-gradient(-45deg, #020617, #111827, #082f49, #4f46e5); background-size: 400% 400%; animation: gradient-animation 20s ease infinite; color: #9ca3af; } /* --- 全局文字颜色修复 (解决Light Mode问题) --- */ /* 1. 修复全局、标签和输入框内的文字颜色 */ .gradio-container, .gr-label label, .gr-input, input, textarea, .gr-check-radio label { color: #d1d5db !important; /* 设置一个柔和的浅灰色 */ } /* 2. 修复 Examples 表头 (这是您问题的核心) */ thead th { color: white !important; background-color: #1f2937 !important; /* 同时给表头一个背景色,视觉效果更好 */ } /* 3. 修复 Examples 表格内容文字 */ tbody td { color: #d1d5db !important; } /* --- 状态信息 & 输出标题样式 (custom-log) ✨ --- */ .custom-log * { font-family: 'Orbitron', sans-serif; font-size: 24px !important; font-weight: 700 !important; text-align: center !important; color: transparent !important; background-image: linear-gradient(120deg, #93c5fd, #6ee7b7, #fde047); background-size: 300% 300%; -webkit-background-clip: text; background-clip: text; animation: gradient-animation 8s ease-in-out infinite, text-glow 3s ease-in-out infinite; padding: 10px 0; } /* --- UI 卡片/分组样式 (玻璃拟态) 💎 --- */ .gr-block.gr-group { background-color: rgba(17, 24, 39, 0.6); backdrop-filter: blur(10px); -webkit-backdrop-filter: blur(10px); border: 1px solid rgba(55, 65, 81, 0.5); border-radius: 16px; box-shadow: 0 8px 32px 0 rgba(0, 0, 0, 0.37); transition: all 0.3s ease; /* 应用边框呼吸光晕动画 */ animation: border-glow 5s infinite alternate; } .gr-block.gr-group:hover { box-shadow: 0 0 25px rgba(14, 165, 233, 0.4); border-color: rgba(14, 165, 233, 0.6); } /* --- 酷炫按钮样式 🚀 --- */ .gr-button { background: linear-gradient(to right, #4f46e5, #7c3aed, #0ea5e9) !important; background-size: 200% auto !important; color: white !important; font-weight: bold !important; border: none !important; border-radius: 10px !important; box-shadow: 0 4px 15px 0 rgba(79, 70, 229, 0.5) !important; transition: all 0.4s ease-in-out !important; font-family: 'Orbitron', sans-serif !important; text-transform: uppercase; letter-spacing: 1px; } .gr-button:hover { background-position: right center !important; box-shadow: 0 4px 20px 0 rgba(14, 165, 233, 0.6) !important; transform: translateY(-3px) scale(1.02); } .gr-button.primary { /* 主按钮增加呼吸光晕动画 */ animation: border-glow 3s infinite alternate; } """, ) as demo: # Instead of gr.State, we use a hidden Textbox: is_example = gr.Textbox(label="is_example", visible=False, value="None") num_images = gr.Textbox(label="num_images", visible=False, value="None") target_dir_output = gr.Textbox(label="Target Dir", visible=False, value="None") gr.HTML( """
🐙 GitHub Repository | 🚀 Project Page
Transform your videos or image collections into detailed 3D models. The π³ model processes your visual data to generate a rich 3D point cloud and calculate the corresponding camera perspectives.
A Quick Note on Performance: The core processing by π³ is incredibly fast, typically finishing in under a second. However, rendering the final 3D point cloud can take longer, depending on the complexity of the scene and the capabilities of the rendering engine.