prune.py
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact george.drettakis@inria.fr
#
import gc

import torch
import numpy as np
from collections import defaultdict
from icecream import ic

from gaussian_renderer import count_render
from scene.dataset_readers import CameraInfo
from utils.camera_utils import loadCam

# Extra imports needed only by the commented-out pruning variants below
# (inferred from their use of cp.*, faiss.*, HDBSCAN, and farthest_point_sampler):
# from cuml.cluster import HDBSCAN
# import cupy as cp
# import faiss
# from dgl.geometry import farthest_point_sampler

# def HDBSCAN_prune(gaussians, score_list, prune_percent):
#     # Cluster the Gaussian centers with HDBSCAN, then prune the lowest-scoring
#     # prune_percent of points inside each cluster.
#     # Ensure the tensors are on the GPU and detached from the graph
#     s, d = gaussians.get_xyz.shape
#     X_gpu = cp.asarray(gaussians.get_xyz.detach().cuda())
#     scores_gpu = cp.asarray(score_list.detach().cuda())
#     hdbscan = HDBSCAN(min_cluster_size=100)
#     cluster_labels = hdbscan.fit_predict(X_gpu)
#     points_by_centroid = {}
#     ic("cluster_labels")
#     ic(cluster_labels.shape)
#     ic(cluster_labels)
#     for i, label in enumerate(cluster_labels):
#         if label not in points_by_centroid:
#             points_by_centroid[label] = []
#         points_by_centroid[label].append(i)
#     points_to_prune = []
#     for centroid_idx, point_indices in points_by_centroid.items():
#         # Skip noise points with label -1
#         if centroid_idx == -1:
#             continue
#         num_to_prune = int(cp.ceil(prune_percent * len(point_indices)))
#         if num_to_prune <= 3:
#             continue
#         point_indices_cp = cp.array(point_indices)
#         distances = scores_gpu[point_indices_cp].squeeze()
#         indices_to_prune = point_indices_cp[cp.argsort(distances)[:num_to_prune]]
#         points_to_prune.extend(indices_to_prune)
#     points_to_prune = np.array(points_to_prune)
#     mask = np.zeros(s, dtype=bool)
#     mask[points_to_prune] = True
#     # mask is now True at the indices of the points to be pruned
#     return mask

# def uniform_prune(gaussians, k, score_list, prune_percent, sample="k_mean"):
#     # Partition the Gaussian centers into k spatial groups (k-means or
#     # farthest-point sampling), then prune the lowest-scoring prune_percent
#     # of points inside each group.
#     D, I = None, None
#     s, d = gaussians.get_xyz.shape
#     if sample == "k_mean":
#         ic("k_mean")
#         n_iter = 200
#         verbose = False
#         kmeans = faiss.Kmeans(d, k=k, niter=n_iter, verbose=verbose, gpu=True)
#         kmeans.train(gaussians.get_xyz.detach().cpu().numpy())
#         # The cluster centroids can be accessed as kmeans.centroids
#         centroids = kmeans.centroids
#         D, I = kmeans.index.search(gaussians.get_xyz.detach().cpu().numpy(), 1)
#     else:
#         point_idx = farthest_point_sampler(torch.unsqueeze(gaussians.get_xyz, 0), k)
#         centroids = gaussians.get_xyz[point_idx, :]
#         centroids = centroids.squeeze(0)
#         index = faiss.IndexFlatL2(d)
#         index.add(centroids.detach().cpu().numpy())
#         D, I = index.search(gaussians.get_xyz.detach().cpu().numpy(), 1)
#     points_to_prune = []
#     points_by_centroid = defaultdict(list)
#     for point_idx, centroid_idx in enumerate(I.flatten()):
#         points_by_centroid[centroid_idx.item()].append(point_idx)
#     for centroid_idx in points_by_centroid:
#         points_by_centroid[centroid_idx] = np.array(points_by_centroid[centroid_idx])
#     for centroid_idx, point_indices in points_by_centroid.items():
#         # Find the number of points to prune in this group
#         num_to_prune = int(np.ceil(prune_percent * len(point_indices)))
#         if num_to_prune <= 3:
#             continue
#         distances = score_list[point_indices].squeeze().cpu().detach().numpy()
#         indices_to_prune = point_indices[np.argsort(distances)[:num_to_prune]]
#         points_to_prune.extend(indices_to_prune)
#     # Convert the list to an array and build a boolean prune mask
#     points_to_prune = np.array(points_to_prune)
#     mask = np.zeros(s, dtype=bool)
#     mask[points_to_prune] = True
#     return mask
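
# If re-enabled, both variants return a NumPy boolean mask over all Gaussians.
# A hypothetical call (prune_points is assumed to be the GaussianModel method
# that drops masked points; it is not defined in this file):
#
#     mask = uniform_prune(gaussians, k=1000, score_list=imp_list, prune_percent=0.2)
#     gaussians.prune_points(torch.from_numpy(mask).cuda())
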
def calculate_v_imp_score(gaussians, imp_list, v_pow):
    """
    :param gaussians: A data structure containing Gaussian components with a get_scaling method.
    :param imp_list: The importance score of each Gaussian component.
    :param v_pow: The power to which the volume ratios are raised.
    :return: A tensor of volume-adjusted importance scores (v_list) used for pruning.
    """
    # Approximate the volume of each Gaussian by the product of its three scale factors
    volume = torch.prod(gaussians.get_scaling, dim=1)
    # Normalize by the volume at the 90th position of the descending sort,
    # i.e., the volume that roughly 90% of the Gaussians exceed
    index = int(len(volume) * 0.9)
    sorted_volume, _ = torch.sort(volume, descending=True)
    kth_percent_largest = sorted_volume[index]
    # Weight each importance score by its (powered) volume ratio
    v_list = torch.pow(volume / kth_percent_largest, v_pow)
    v_list = v_list * imp_list
    return v_list
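
# A minimal sanity-check sketch of the scoring behavior, using a stand-in object
# with a get_scaling attribute (the stub and sizes below are illustrative only):
#
#     class _Stub:
#         get_scaling = torch.rand(1000, 3)  # 1000 Gaussians, 3 scale factors each
#
#     v = calculate_v_imp_score(_Stub(), torch.rand(1000), v_pow=0.1)
#     assert v.shape == (1000,)  # one volume-adjusted score per Gaussian
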
def prune_list(gaussians, scene, pipe, background, dataset=None):
    """Accumulate per-Gaussian hit counts and importance scores over all training views."""
    viewpoint_stack = scene.getTrainCameras().copy()
    # Initialize the accumulators from the first camera
    viewpoint_cam = viewpoint_stack.pop()
    # Raw CameraInfo entries must be loaded into full cameras before rendering
    if isinstance(viewpoint_cam, CameraInfo):
        viewpoint_cam = loadCam(dataset, viewpoint_cam.uid, viewpoint_cam, 1)
    render_pkg = count_render(viewpoint_cam, gaussians, pipe, background)
    gaussian_list, imp_list = (
        render_pkg["gaussians_count"],
        render_pkg["important_score"],
    )
    # Accumulate the counts and scores from the remaining cameras
    for _ in range(len(viewpoint_stack)):
        viewpoint_cam = viewpoint_stack.pop()
        if isinstance(viewpoint_cam, CameraInfo):
            viewpoint_cam = loadCam(dataset, viewpoint_cam.uid, viewpoint_cam, 1)
        render_pkg = count_render(viewpoint_cam, gaussians, pipe, background)
        gaussians_count, important_score = (
            render_pkg["gaussians_count"].detach(),
            render_pkg["important_score"].detach(),
        )
        gaussian_list += gaussians_count
        imp_list += important_score
        gc.collect()
    return gaussian_list, imp_list
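
# End-to-end sketch of how these helpers are typically combined (the v_pow and
# percentile values, and the prune_points call, are illustrative assumptions):
#
#     gaussian_list, imp_list = prune_list(gaussians, scene, pipe, background, dataset)
#     v_list = calculate_v_imp_score(gaussians, imp_list, v_pow=0.1)
#     threshold = torch.quantile(v_list, 0.2)  # drop the lowest-scoring 20%
#     gaussians.prune_points(v_list <= threshold)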