reduced-3dgs 1.8.15.tar.gz → 1.8.17.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/PKG-INFO +1 -1
- reduced_3dgs-1.8.17/reduced_3dgs/pruning/__init__.py +2 -0
- reduced_3dgs-1.8.17/reduced_3dgs/pruning/combinations.py +51 -0
- reduced_3dgs-1.8.17/reduced_3dgs/pruning/importance/__init__.py +1 -0
- reduced_3dgs-1.8.17/reduced_3dgs/pruning/importance/trainer.py +141 -0
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/reduced_3dgs/pruning/trainer.py +16 -76
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/reduced_3dgs/train.py +3 -0
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/reduced_3dgs.egg-info/PKG-INFO +1 -1
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/reduced_3dgs.egg-info/SOURCES.txt +9 -1
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/setup.py +20 -3
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/submodules/diff-gaussian-rasterization/reduced_3dgs/sh_culling.cu +20 -1
- reduced_3dgs-1.8.17/submodules/gaussian-importance/cuda_rasterizer/backward.cu +657 -0
- reduced_3dgs-1.8.17/submodules/gaussian-importance/cuda_rasterizer/forward.cu +614 -0
- reduced_3dgs-1.8.17/submodules/gaussian-importance/cuda_rasterizer/rasterizer_impl.cu +584 -0
- reduced_3dgs-1.8.17/submodules/gaussian-importance/diff_gaussian_rasterization/__init__.py +347 -0
- reduced_3dgs-1.8.17/submodules/gaussian-importance/ext.cpp +20 -0
- reduced_3dgs-1.8.17/submodules/gaussian-importance/rasterize_points.cu +319 -0
- reduced_3dgs-1.8.15/reduced_3dgs/pruning/__init__.py +0 -2
- reduced_3dgs-1.8.15/reduced_3dgs/pruning/combinations.py +0 -20
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/LICENSE.md +0 -0
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/README.md +0 -0
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/reduced_3dgs/__init__.py +0 -0
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/reduced_3dgs/combinations.py +0 -0
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/reduced_3dgs/quantization/__init__.py +0 -0
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/reduced_3dgs/quantization/abc.py +0 -0
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/reduced_3dgs/quantization/exclude_zeros.py +0 -0
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/reduced_3dgs/quantization/quantizer.py +0 -0
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/reduced_3dgs/quantization/wrapper.py +0 -0
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/reduced_3dgs/quantize.py +0 -0
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/reduced_3dgs/shculling/__init__.py +0 -0
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/reduced_3dgs/shculling/gaussian_model.py +0 -0
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/reduced_3dgs/shculling/trainer.py +0 -0
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/reduced_3dgs.egg-info/dependency_links.txt +0 -0
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/reduced_3dgs.egg-info/requires.txt +0 -0
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/reduced_3dgs.egg-info/top_level.txt +0 -0
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/setup.cfg +0 -0
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/submodules/diff-gaussian-rasterization/cuda_rasterizer/backward.cu +0 -0
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/submodules/diff-gaussian-rasterization/cuda_rasterizer/forward.cu +0 -0
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/submodules/diff-gaussian-rasterization/cuda_rasterizer/rasterizer_impl.cu +0 -0
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/submodules/diff-gaussian-rasterization/diff_gaussian_rasterization/__init__.py +0 -0
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/submodules/diff-gaussian-rasterization/ext.cpp +0 -0
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/submodules/diff-gaussian-rasterization/rasterize_points.cu +0 -0
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/submodules/diff-gaussian-rasterization/reduced_3dgs/kmeans.cu +0 -0
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/submodules/diff-gaussian-rasterization/reduced_3dgs/redundancy_score.cu +0 -0
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/submodules/diff-gaussian-rasterization/reduced_3dgs.cu +0 -0
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/submodules/simple-knn/ext.cpp +0 -0
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/submodules/simple-knn/simple_knn.cu +0 -0
- {reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/submodules/simple-knn/spatial.cu +0 -0
reduced_3dgs-1.8.17/reduced_3dgs/pruning/combinations.py (new file)

@@ -0,0 +1,51 @@
+
+from typing import List
+from gaussian_splatting import Camera, GaussianModel
+from gaussian_splatting.dataset import TrainableCameraDataset
+from gaussian_splatting.trainer import DepthTrainerWrapper, NoopDensifier, DensificationTrainerWrapper
+from .trainer import BasePruner, BasePruningTrainer
+
+
+def BasePrunerInDensifyTrainer(
+        model: GaussianModel,
+        scene_extent: float,
+        dataset: List[Camera],
+        prune_from_iter=1000,
+        prune_until_iter=15000,
+        prune_interval: int = 100,
+        box_size=1.,
+        lambda_mercy=1.,
+        mercy_minimum=3,
+        mercy_type='redundancy_opacity',
+        *args, **kwargs):
+    return DensificationTrainerWrapper(
+        lambda model, scene_extent: BasePruner(
+            NoopDensifier(model),
+            dataset,
+            prune_from_iter=prune_from_iter,
+            prune_until_iter=prune_until_iter,
+            prune_interval=prune_interval,
+            box_size=box_size,
+            lambda_mercy=lambda_mercy,
+            mercy_minimum=mercy_minimum,
+            mercy_type=mercy_type,
+        ),
+        model,
+        scene_extent,
+        *args, **kwargs
+    )
+
+
+# Depth trainer
+
+
+def DepthPruningTrainer(model: GaussianModel, scene_extent: float, dataset: TrainableCameraDataset, *args, **kwargs):
+    return DepthTrainerWrapper(BasePruningTrainer, model, scene_extent, *args, dataset=dataset, **kwargs)
+
+
+def DepthPrunerInDensifyTrainer(model: GaussianModel, scene_extent: float, dataset: TrainableCameraDataset, *args, **kwargs):
+    return DepthTrainerWrapper(BasePrunerInDensifyTrainer, model, scene_extent, *args, dataset=dataset, **kwargs)
+
+
+PruningTrainer = DepthPruningTrainer
+PrunerInDensifyTrainer = DepthPrunerInDensifyTrainer
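Note: a minimal usage sketch of the new factory functions above, assuming `model`, `scene_extent`, and `dataset` have already been loaded by the caller (their construction is outside this diff):

```python
from gaussian_splatting import GaussianModel
from gaussian_splatting.dataset import TrainableCameraDataset
from reduced_3dgs.pruning.combinations import PrunerInDensifyTrainer  # alias of DepthPrunerInDensifyTrainer


def build_pruning_trainer(model: GaussianModel, scene_extent: float, dataset: TrainableCameraDataset):
    # Pruning options are forwarded as keywords into BasePruner, which now wraps
    # a NoopDensifier internally; the defaults shown are the ones in the diff.
    return PrunerInDensifyTrainer(
        model, scene_extent, dataset,
        prune_from_iter=1000,
        prune_until_iter=15000,
        prune_interval=100,
        mercy_type='redundancy_opacity',
    )
```

Any densification-related keyword arguments are simply passed through `*args, **kwargs` to `DensificationTrainerWrapper`.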
reduced_3dgs-1.8.17/reduced_3dgs/pruning/importance/__init__.py (new file)

@@ -0,0 +1 @@
+from .trainer import ImportancePruningTrainerWrapper, BaseImportancePruningTrainer, ImportancePruningTrainer
reduced_3dgs-1.8.17/reduced_3dgs/pruning/importance/trainer.py (new file)

@@ -0,0 +1,141 @@
+import math
+import torch
+
+from gaussian_splatting import Camera, GaussianModel
+from gaussian_splatting.trainer import AbstractTrainer, TrainerWrapper, BaseTrainer, Trainer
+from gaussian_splatting.dataset import CameraDataset
+from .diff_gaussian_rasterization import GaussianRasterizationSettings, GaussianRasterizer
+
+
+def count_render(self: GaussianModel, viewpoint_camera: Camera):
+    """
+    Render the scene.
+
+    Background tensor (bg_color) must be on GPU!
+    """
+    # Create zero tensor. We will use it to make pytorch return gradients of the 2D (screen-space) means
+    screenspace_points = torch.zeros_like(self.get_xyz, dtype=self.get_xyz.dtype, requires_grad=True, device=self._xyz.device) + 0
+    try:
+        screenspace_points.retain_grad()
+    except:
+        pass
+
+    # Set up rasterization configuration
+    tanfovx = math.tan(viewpoint_camera.FoVx * 0.5)
+    tanfovy = math.tan(viewpoint_camera.FoVy * 0.5)
+
+    raster_settings = GaussianRasterizationSettings(
+        image_height=int(viewpoint_camera.image_height),
+        image_width=int(viewpoint_camera.image_width),
+        tanfovx=tanfovx,
+        tanfovy=tanfovy,
+        bg=viewpoint_camera.bg_color.to(self._xyz.device),
+        scale_modifier=self.scale_modifier,
+        viewmatrix=viewpoint_camera.world_view_transform,
+        projmatrix=viewpoint_camera.full_proj_transform,
+        sh_degree=self.active_sh_degree,
+        campos=viewpoint_camera.camera_center,
+        prefiltered=False,
+        debug=self.debug,
+        f_count=True,
+    )
+
+    rasterizer = GaussianRasterizer(raster_settings=raster_settings)
+    means3D = self.get_xyz
+    means2D = screenspace_points
+    opacity = self.get_opacity
+
+    scales = self.get_scaling
+    rotations = self.get_rotation
+
+    shs = self.get_features
+
+    # Rasterize visible Gaussians to image, obtain their radii (on screen).
+    gaussians_count, opacity_important_score, T_alpha_important_score, rendered_image, radii = rasterizer(
+        means3D=means3D,
+        means2D=means2D,
+        shs=shs,
+        colors_precomp=None,
+        opacities=opacity,
+        scales=scales,
+        rotations=rotations,
+        cov3D_precomp=None)
+
+    # Those Gaussians that were frustum culled or had a radius of 0 were not visible.
+    # They will be excluded from value updates used in the splitting criteria.
+    return {
+        "render": rendered_image,
+        "viewspace_points": screenspace_points,
+        "visibility_filter": radii > 0,
+        "radii": radii,
+        "gaussians_count": gaussians_count,
+        "opacity_important_score": opacity_important_score,
+        "T_alpha_important_score": T_alpha_important_score
+    }
+
+
+class ImportancePruner(TrainerWrapper):
+    def __init__(
+            self, base_trainer: AbstractTrainer,
+            dataset: CameraDataset,
+            importance_prune_at_steps=[15000],
+    ):
+        super().__init__(base_trainer)
+        self.dataset = dataset
+        self.importance_prune_at_steps = importance_prune_at_steps
+
+    def optim_step(self):
+        ret = super().optim_step()
+        if self.curr_step in self.importance_prune_at_steps:
+            gaussian_count = torch.zeros(self.model.get_xyz.shape[0], device=self.model.get_xyz.device, dtype=torch.int)
+            opacity_important_score = torch.zeros(self.model.get_xyz.shape[0], device=self.model.get_xyz.device, dtype=torch.float)
+            T_alpha_important_score = torch.zeros(self.model.get_xyz.shape[0], device=self.model.get_xyz.device, dtype=torch.float)
+            for camera in self.dataset:
+                out = count_render(self.model, camera)
+                gaussian_count += out["gaussians_count"]
+                opacity_important_score += out["opacity_important_score"]
+                T_alpha_important_score += out["T_alpha_important_score"]
+            pass
+        return ret
+
+
+def ImportancePruningTrainerWrapper(
+        base_trainer_constructor,
+        model: GaussianModel,
+        scene_extent: float,
+        dataset: CameraDataset,
+        importance_prune_at_steps=[15000],
+        *args, **kwargs):
+    return ImportancePruner(
+        base_trainer_constructor(model, scene_extent, dataset, *args, **kwargs),
+        dataset,
+        importance_prune_at_steps=importance_prune_at_steps,
+    )
+
+
+def BaseImportancePruningTrainer(
+        model: GaussianModel,
+        scene_extent: float,
+        dataset: CameraDataset,
+        importance_prune_at_steps=[15000],
+        *args, **kwargs):
+    return ImportancePruningTrainerWrapper(
+        lambda model, scene_extent, dataset, *args, **kwargs: BaseTrainer(model, scene_extent, *args, **kwargs),
+        model, scene_extent, dataset,
+        importance_prune_at_steps=importance_prune_at_steps,
+        *args, **kwargs,
+    )
+
+
+def ImportancePruningTrainer(
+        model: GaussianModel,
+        scene_extent: float,
+        dataset: CameraDataset,
+        importance_prune_at_steps=[15000],
+        *args, **kwargs):
+    return ImportancePruningTrainerWrapper(
+        lambda model, scene_extent, dataset, *args, **kwargs: Trainer(model, scene_extent, *args, **kwargs),
+        model, scene_extent, dataset,
+        importance_prune_at_steps=importance_prune_at_steps,
+        *args, **kwargs,
+    )
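Note: `count_render` above returns, per Gaussian, a hit count plus two accumulated importance scores, and `ImportancePruner.optim_step` sums them over every camera in the dataset at the configured steps. The sketch below shows one way such totals could be turned into a prune mask; the normalisation and quantile threshold are illustrative assumptions, not behaviour taken from this diff (as shown, the accumulated tensors are not consumed after the loop, so the actual pruning decision presumably lives elsewhere or in a later revision).

```python
import torch


def importance_prune_mask(opacity_score: torch.Tensor,
                          gaussian_count: torch.Tensor,
                          quantile: float = 0.1) -> torch.Tensor:
    # Hypothetical helper (not in the package): normalise the accumulated
    # opacity importance by how often each Gaussian was actually rasterised,
    # then mark the least important `quantile` fraction for removal.
    mean_score = opacity_score / gaussian_count.clamp_min(1).to(opacity_score.dtype)
    threshold = torch.quantile(mean_score, quantile)
    return mean_score <= threshold  # True = candidate for pruning
```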
{reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/reduced_3dgs/pruning/trainer.py

@@ -1,7 +1,7 @@
 from typing import List
 import torch
 from gaussian_splatting import GaussianModel, Camera
-from gaussian_splatting.trainer import AbstractDensifier,
+from gaussian_splatting.trainer import AbstractDensifier, DensifierWrapper, DensificationTrainer, NoopDensifier
 from reduced_3dgs.diff_gaussian_rasterization._C import sphere_ellipsoid_intersection, allocate_minimum_redundancy_value, find_minimum_projected_pixel_size
 from reduced_3dgs.simple_knn._C import distIndex2
 
@@ -79,9 +79,10 @@ def mercy_gaussians(
     return mask
 
 
-class BasePruner(AbstractDensifier):
+class BasePruner(DensifierWrapper):
     def __init__(
-            self,
+            self, base_densifier: AbstractDensifier,
+            dataset: List[Camera],
             prune_from_iter=1000,
             prune_until_iter=15000,
             prune_interval: int = 100,
@@ -89,7 +90,7 @@ class BasePruner(AbstractDensifier):
             lambda_mercy=1.,
             mercy_minimum=3,
             mercy_type='redundancy_opacity'):
-
+        super().__init__(base_densifier)
         self.dataset = dataset
         self.prune_from_iter = prune_from_iter
         self.prune_until_iter = prune_until_iter
@@ -99,14 +100,12 @@
         self.mercy_minimum = mercy_minimum
         self.mercy_type = mercy_type
 
-
-
-        return self._model
-
-    def densify_and_prune(self, loss, out, camera, step: int) -> DensificationInstruct:
+    def densify_and_prune(self, loss, out, camera, step: int):
+        ret = super().densify_and_prune(loss, out, camera, step)
         if self.prune_from_iter <= step <= self.prune_until_iter and step % self.prune_interval == 0:
-
-
+            remove_mask = mercy_gaussians(self.model, self.dataset, self.box_size, self.lambda_mercy, self.mercy_minimum, self.mercy_type)
+            ret = ret._replace(remove_mask=remove_mask if ret.remove_mask is None else torch.logical_or(remove_mask, ret.remove_mask))
+        return ret
 
 
 def BasePruningTrainer(
@@ -124,73 +123,14 @@ def BasePruningTrainer(
     return DensificationTrainer(
         model, scene_extent,
         BasePruner(
-            model,
-
-            box_size, lambda_mercy, mercy_minimum, mercy_type
-        ), *args, **kwargs
-    )
-
-
-class PrunerInDensify(Densifier):
-    def __init__(
-            self, model: GaussianModel, scene_extent, dataset: List[Camera],
-            box_size=1.,
-            lambda_mercy=1.,
-            mercy_minimum=3,
-            mercy_type='redundancy_opacity',
-            *args, **kwargs):
-        super().__init__(model, scene_extent, *args, **kwargs)
-        self.dataset = dataset
-        self.box_size = box_size
-        self.lambda_mercy = lambda_mercy
-        self.mercy_minimum = mercy_minimum
-        self.mercy_type = mercy_type
-
-    def prune(self) -> torch.Tensor:
-        return torch.logical_or(mercy_gaussians(self.model, self.dataset, self.box_size, self.lambda_mercy, self.mercy_minimum, self.mercy_type), super().prune())
-
-
-def BasePrunerInDensifyTrainer(
-        model: GaussianModel,
-        scene_extent: float,
-
-        dataset: List[Camera],
-        box_size=1.,
-        lambda_mercy=1.,
-        mercy_minimum=3,
-        mercy_type='redundancy_opacity',
-
-        densify_from_iter=500,
-        densify_until_iter=15000,
-        densify_interval=100,
-        densify_grad_threshold=0.0002,
-        densify_opacity_threshold=0.005,
-        densify_percent_dense=0.01,
-        densify_percent_too_big=0.8,
-
-        prune_from_iter=1000,
-        prune_until_iter=15000,
-        prune_interval=100,
-        prune_screensize_threshold=20,
-        prune_percent_too_big=1,
-
-        *args, **kwargs):
-    return DensificationTrainer(
-        model, scene_extent,
-        PrunerInDensify(
-            model, scene_extent, dataset,
-            box_size, lambda_mercy, mercy_minimum, mercy_type,
-            densify_from_iter=densify_from_iter,
-            densify_until_iter=densify_until_iter,
-            densify_interval=densify_interval,
-            densify_grad_threshold=densify_grad_threshold,
-            densify_opacity_threshold=densify_opacity_threshold,
-            densify_percent_dense=densify_percent_dense,
-            densify_percent_too_big=densify_percent_too_big,
+            NoopDensifier(model),
+            dataset,
             prune_from_iter=prune_from_iter,
             prune_until_iter=prune_until_iter,
             prune_interval=prune_interval,
-
-
+            box_size=box_size,
+            lambda_mercy=lambda_mercy,
+            mercy_minimum=mercy_minimum,
+            mercy_type=mercy_type,
         ), *args, **kwargs
     )
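Note: the refactor replaces the standalone `PrunerInDensify` densifier with a wrapper chain: `BasePruner` is now a `DensifierWrapper` around an arbitrary base densifier (a `NoopDensifier` when only pruning is wanted) and merges its own removal mask into whatever instruction the wrapped densifier returned. A self-contained sketch of that mask-merging idiom, using a namedtuple stand-in for `DensificationInstruct` (an assumption; the real type lives in gaussian_splatting):

```python
from collections import namedtuple
import torch

# Stand-in for gaussian_splatting's DensificationInstruct (assumed shape).
DensificationInstruct = namedtuple("DensificationInstruct", ["remove_mask"], defaults=[None])


def merge_remove_mask(ret: DensificationInstruct, remove_mask: torch.Tensor) -> DensificationInstruct:
    # Same idiom as BasePruner.densify_and_prune: if the inner densifier already
    # asked to remove some Gaussians, OR the two masks together; otherwise just
    # install the pruner's mask.
    return ret._replace(
        remove_mask=remove_mask if ret.remove_mask is None
        else torch.logical_or(remove_mask, ret.remove_mask))


# Usage: combining a pruner mask with an inner densifier's mask.
inner = DensificationInstruct(remove_mask=torch.tensor([True, False, False]))
merged = merge_remove_mask(inner, torch.tensor([False, False, True]))
# merged.remove_mask -> tensor([True, False, True])
```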
{reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/reduced_3dgs/train.py

@@ -14,6 +14,7 @@ from gaussian_splatting.trainer.extensions import ScaleRegularizeTrainerWrapper
 from reduced_3dgs.quantization import AbstractQuantizer, VectorQuantizeTrainerWrapper
 from reduced_3dgs.shculling import VariableSHGaussianModel, SHCullingTrainer
 from reduced_3dgs.pruning import PruningTrainer
+from reduced_3dgs.pruning.importance import ImportancePruningTrainerWrapper
 from reduced_3dgs.combinations import OpacityResetPrunerInDensifyTrainer, SHCullingDensifyTrainer, SHCullingPruneTrainer, SHCullingPruningDensifyTrainer
 from reduced_3dgs.combinations import CameraTrainableVariableSHGaussianModel, CameraSHCullingTrainer, CameraPruningTrainer
 from reduced_3dgs.combinations import CameraOpacityResetPrunerInDensifyTrainer, CameraSHCullingDensifyTrainer, CameraSHCullingPruneTrainer, CameraSHCullingPruningDensifyTrainer
@@ -27,6 +28,7 @@ basemodes = {
     "prune-shculling": SHCullingPruneTrainer,
     "densify-prune-shculling": SHCullingPruningDensifyTrainer,
 }
+basemodes = {k: lambda *args, **kwargs: ImportancePruningTrainerWrapper(v, *args, **kwargs) for k, v in basemodes.items()}
 cameramodes = {
     "camera-shculling": CameraSHCullingTrainer,
     "camera-pruning": CameraPruningTrainer,
@@ -35,6 +37,7 @@ cameramodes = {
     "camera-prune-shculling": CameraSHCullingPruneTrainer,
     "camera-densify-prune-shculling": CameraSHCullingPruningDensifyTrainer,
 }
+cameramodes = {k: lambda *args, **kwargs: ImportancePruningTrainerWrapper(v, *args, **kwargs) for k, v in cameramodes.items()}
 
 
 def prepare_quantizer(
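Note: in the two added comprehensions, the lambdas close over the loop variable `v` rather than its value, so every mode ends up wrapping the constructor from the last entry of the original dict (Python closures are late-binding). If per-entry binding is intended, the usual fix is a default argument; a minimal illustration with placeholder values:

```python
# Minimal illustration of the late-binding pitfall and the default-argument fix.
modes = {"a": "TrainerA", "b": "TrainerB"}

late = {k: lambda: v for k, v in modes.items()}
print(late["a"](), late["b"]())    # TrainerB TrainerB  <- both lambdas see the last v

bound = {k: lambda v=v: v for k, v in modes.items()}
print(bound["a"](), bound["b"]())  # TrainerA TrainerB  <- value captured per entry
```

Whether this matters in practice depends on how `basemodes` and `cameramodes` are consumed downstream, which is outside this diff.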
{reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/reduced_3dgs.egg-info/SOURCES.txt

@@ -13,6 +13,8 @@ reduced_3dgs.egg-info/top_level.txt
 reduced_3dgs/pruning/__init__.py
 reduced_3dgs/pruning/combinations.py
 reduced_3dgs/pruning/trainer.py
+reduced_3dgs/pruning/importance/__init__.py
+reduced_3dgs/pruning/importance/trainer.py
 reduced_3dgs/quantization/__init__.py
 reduced_3dgs/quantization/abc.py
 reduced_3dgs/quantization/exclude_zeros.py
@@ -30,7 +32,13 @@ submodules/diff-gaussian-rasterization/cuda_rasterizer/rasterizer_impl.cu
 submodules/diff-gaussian-rasterization/reduced_3dgs/kmeans.cu
 submodules/diff-gaussian-rasterization/reduced_3dgs/redundancy_score.cu
 submodules/diff-gaussian-rasterization/reduced_3dgs/sh_culling.cu
+submodules/gaussian-importance/ext.cpp
+submodules/gaussian-importance/rasterize_points.cu
+submodules/gaussian-importance/cuda_rasterizer/backward.cu
+submodules/gaussian-importance/cuda_rasterizer/forward.cu
+submodules/gaussian-importance/cuda_rasterizer/rasterizer_impl.cu
 submodules/simple-knn/ext.cpp
 submodules/simple-knn/simple_knn.cu
 submodules/simple-knn/spatial.cu
-submodules/diff-gaussian-rasterization/diff_gaussian_rasterization/__init__.py
+submodules/diff-gaussian-rasterization/diff_gaussian_rasterization/__init__.py
+submodules/gaussian-importance/diff_gaussian_rasterization/__init__.py
{reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/setup.py

@@ -40,6 +40,17 @@ rasterizor_packages = {
     'reduced_3dgs.simple_knn': 'submodules/simple-knn/simple_knn',
 }
 
+importance_root = "submodules/gaussian-importance"
+importance_sources = [
+    "cuda_rasterizer/rasterizer_impl.cu",
+    "cuda_rasterizer/forward.cu",
+    "cuda_rasterizer/backward.cu",
+    "rasterize_points.cu",
+    "ext.cpp"]
+importance_packages = {
+    'reduced_3dgs.pruning.importance.diff_gaussian_rasterization': 'submodules/gaussian-importance/diff_gaussian_rasterization',
+}
+
 cxx_compiler_flags = []
 nvcc_compiler_flags = []
 
@@ -49,7 +60,7 @@ if os.name == 'nt':
 
 setup(
     name="reduced_3dgs",
-    version='1.8.15',
+    version='1.8.17',
     author='yindaheng98',
     author_email='yindaheng98@gmail.com',
     url='https://github.com/yindaheng98/reduced-3dgs',
@@ -59,10 +70,11 @@ setup(
     classifiers=[
         "Programming Language :: Python :: 3",
     ],
-    packages=packages + list(rasterizor_packages.keys()),
+    packages=packages + list(rasterizor_packages.keys()) + list(importance_packages.keys()),
     package_dir={
         'reduced_3dgs': 'reduced_3dgs',
-        **rasterizor_packages
+        **rasterizor_packages,
+        **importance_packages,
     },
     ext_modules=[
         CUDAExtension(
@@ -70,6 +82,11 @@ setup(
             sources=[os.path.join(rasterizor_root, source) for source in rasterizor_sources],
             extra_compile_args={"nvcc": nvcc_compiler_flags + ["-I" + os.path.join(os.path.abspath(rasterizor_root), "third_party/glm/")]}
         ),
+        CUDAExtension(
+            name="reduced_3dgs.pruning.importance.diff_gaussian_rasterization._C",
+            sources=[os.path.join(importance_root, source) for source in importance_sources],
+            extra_compile_args={"nvcc": nvcc_compiler_flags + ["-I" + os.path.join(os.path.abspath(importance_root), "third_party/glm/")]}
+        ),
         CUDAExtension(
             name="reduced_3dgs.simple_knn._C",
             sources=[os.path.join(simpleknn_root, source) for source in simpleknn_sources],
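Note: the new `CUDAExtension` builds the gaussian-importance rasterizer as `reduced_3dgs.pruning.importance.diff_gaussian_rasterization._C`, and the added `package_dir` entry maps the submodule's Python wrapper into that same package, which is what makes the relative import in `importance/trainer.py` resolve. A sketch of the resulting import surface after the 1.8.17 wheel is built and installed (module and symbol names as they appear in this diff):

```python
# Assumes the package has been built with its CUDA extensions available.
from reduced_3dgs.pruning.importance.diff_gaussian_rasterization import (
    GaussianRasterizationSettings,  # carries the extra f_count flag used by count_render
    GaussianRasterizer,
)
from reduced_3dgs.pruning.importance import ImportancePruningTrainer
```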
{reduced_3dgs-1.8.15 → reduced_3dgs-1.8.17}/submodules/diff-gaussian-rasterization/reduced_3dgs/sh_culling.cu

@@ -1,8 +1,27 @@
 #include "sh_culling.h"
-#include "../cuda_rasterizer/auxiliary.h"
 #include <cooperative_groups.h>
 namespace cg = cooperative_groups;
 
+// Spherical harmonics coefficients
+__device__ const float SH_C0 = 0.28209479177387814f;
+__device__ const float SH_C1 = 0.4886025119029199f;
+__device__ const float SH_C2[] = {
+	1.0925484305920792f,
+	-1.0925484305920792f,
+	0.31539156525252005f,
+	-1.0925484305920792f,
+	0.5462742152960396f
+};
+__device__ const float SH_C3[] = {
+	-0.5900435899266435f,
+	2.890611442640554f,
+	-0.4570457994644658f,
+	0.3731763325901154f,
+	-0.4570457994644658f,
+	1.445305721320277f,
+	-0.5900435899266435f
+};
+
 __device__ void computeColorFromSH(const int idx, const int *degs, int max_coeffs, const glm::vec3 *means, glm::vec3 campos, const float *shs, glm::vec3 *out_colours)
 {
 	// The implementation is loosely based on code for
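Note: with the `../cuda_rasterizer/auxiliary.h` include dropped, `sh_culling.cu` now carries its own copies of the real spherical-harmonics band constants (bands 0 to 3). These match the standard closed forms; a quick check in Python (the closed-form expressions are standard identities, not taken from this diff):

```python
import math

# Closed forms for the SH constants defined in sh_culling.cu (bands 0-2 shown).
SH_C0 = 0.5 * math.sqrt(1.0 / math.pi)   # Y_0^0 factor
SH_C1 = 0.5 * math.sqrt(3.0 / math.pi)   # band-1 factor
SH_C2 = [
    0.5 * math.sqrt(15.0 / math.pi),     #  1.0925484305920792
    -0.5 * math.sqrt(15.0 / math.pi),    # -1.0925484305920792
    0.25 * math.sqrt(5.0 / math.pi),     #  0.31539156525252005
    -0.5 * math.sqrt(15.0 / math.pi),    # -1.0925484305920792
    0.25 * math.sqrt(15.0 / math.pi),    #  0.5462742152960396
]

assert abs(SH_C0 - 0.28209479177387814) < 1e-12
assert abs(SH_C1 - 0.4886025119029199) < 1e-12
```

Since the values are identical to the constants the upstream rasterizer already defines, this looks like a self-containment change rather than a numerical one.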