reduced-3dgs 1.8.16.tar.gz → 1.8.18.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of reduced-3dgs might be problematic.
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/PKG-INFO +1 -1
- reduced_3dgs-1.8.18/reduced_3dgs/importance/__init__.py +2 -0
- reduced_3dgs-1.8.18/reduced_3dgs/importance/combinations.py +42 -0
- reduced_3dgs-1.8.18/reduced_3dgs/importance/trainer.py +147 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/reduced_3dgs/pruning/combinations.py +1 -1
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/reduced_3dgs/pruning/trainer.py +3 -8
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/reduced_3dgs.egg-info/PKG-INFO +1 -1
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/reduced_3dgs.egg-info/SOURCES.txt +3 -2
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/setup.py +3 -3
- reduced_3dgs-1.8.16/reduced_3dgs/pruning/importance/__init__.py +0 -1
- reduced_3dgs-1.8.16/reduced_3dgs/pruning/importance/trainer.py +0 -141
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/LICENSE.md +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/README.md +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/reduced_3dgs/__init__.py +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/reduced_3dgs/combinations.py +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/reduced_3dgs/pruning/__init__.py +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/reduced_3dgs/quantization/__init__.py +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/reduced_3dgs/quantization/abc.py +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/reduced_3dgs/quantization/exclude_zeros.py +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/reduced_3dgs/quantization/quantizer.py +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/reduced_3dgs/quantization/wrapper.py +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/reduced_3dgs/quantize.py +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/reduced_3dgs/shculling/__init__.py +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/reduced_3dgs/shculling/gaussian_model.py +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/reduced_3dgs/shculling/trainer.py +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/reduced_3dgs/train.py +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/reduced_3dgs.egg-info/dependency_links.txt +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/reduced_3dgs.egg-info/requires.txt +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/reduced_3dgs.egg-info/top_level.txt +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/setup.cfg +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/submodules/diff-gaussian-rasterization/cuda_rasterizer/backward.cu +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/submodules/diff-gaussian-rasterization/cuda_rasterizer/forward.cu +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/submodules/diff-gaussian-rasterization/cuda_rasterizer/rasterizer_impl.cu +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/submodules/diff-gaussian-rasterization/diff_gaussian_rasterization/__init__.py +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/submodules/diff-gaussian-rasterization/ext.cpp +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/submodules/diff-gaussian-rasterization/rasterize_points.cu +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/submodules/diff-gaussian-rasterization/reduced_3dgs/kmeans.cu +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/submodules/diff-gaussian-rasterization/reduced_3dgs/redundancy_score.cu +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/submodules/diff-gaussian-rasterization/reduced_3dgs/sh_culling.cu +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/submodules/diff-gaussian-rasterization/reduced_3dgs.cu +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/submodules/gaussian-importance/cuda_rasterizer/backward.cu +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/submodules/gaussian-importance/cuda_rasterizer/forward.cu +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/submodules/gaussian-importance/cuda_rasterizer/rasterizer_impl.cu +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/submodules/gaussian-importance/diff_gaussian_rasterization/__init__.py +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/submodules/gaussian-importance/ext.cpp +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/submodules/gaussian-importance/rasterize_points.cu +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/submodules/simple-knn/ext.cpp +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/submodules/simple-knn/simple_knn.cu +0 -0
- {reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/submodules/simple-knn/spatial.cu +0 -0

reduced_3dgs-1.8.18/reduced_3dgs/importance/combinations.py +42 -0

@@ -0,0 +1,42 @@
+from typing import List
+from gaussian_splatting import Camera, GaussianModel
+from gaussian_splatting.dataset import TrainableCameraDataset
+from gaussian_splatting.trainer import DepthTrainerWrapper, NoopDensifier, DensificationTrainerWrapper
+from .trainer import ImportancePruner, BaseImportancePruningTrainer
+
+
+def BaseImportancePrunerInDensifyTrainer(
+        model: GaussianModel,
+        scene_extent: float,
+        dataset: List[Camera],
+        importance_prune_from_iter=1000,
+        importance_prune_until_iter=15000,
+        importance_prune_interval=100,
+        *args, **kwargs):
+    return DensificationTrainerWrapper(
+        lambda model, scene_extent: ImportancePruner(
+            NoopDensifier(model),
+            dataset,
+            importance_prune_from_iter=importance_prune_from_iter,
+            importance_prune_until_iter=importance_prune_until_iter,
+            importance_prune_interval=importance_prune_interval,
+        ),
+        model,
+        scene_extent,
+        *args, **kwargs
+    )
+
+
+# Depth trainer
+
+
+def DepthImportancePruningTrainer(model: GaussianModel, scene_extent: float, dataset: TrainableCameraDataset, *args, **kwargs):
+    return DepthTrainerWrapper(BaseImportancePruningTrainer, model, scene_extent, *args, dataset=dataset, **kwargs)
+
+
+def DepthImportancePrunerInDensifyTrainer(model: GaussianModel, scene_extent: float, dataset: TrainableCameraDataset, *args, **kwargs):
+    return DepthTrainerWrapper(BaseImportancePrunerInDensifyTrainer, model, scene_extent, *args, dataset=dataset, **kwargs)
+
+
+ImportancePruningTrainer = DepthImportancePruningTrainer
+ImportancePrunerInDensifyTrainer = DepthImportancePrunerInDensifyTrainer
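
The 42-line module above (per the file list, reduced_3dgs/importance/combinations.py) wires the new importance pruner into the package's trainer-factory pattern and aliases the depth-aware variants as the defaults. A minimal construction sketch follows; it assumes a GaussianModel, a scene extent, and a TrainableCameraDataset have already been built with the upstream gaussian_splatting package, and the keyword arguments shown are simply the defaults from this diff.

```python
# Illustrative sketch only: the constructor signature matches this diff, but how the
# model/dataset are built and how the returned trainer is driven are assumptions
# about the surrounding gaussian_splatting API, not something documented here.
from reduced_3dgs.importance.combinations import ImportancePruningTrainer

trainer = ImportancePruningTrainer(
    model, scene_extent, dataset,        # assumed to exist already
    importance_prune_from_iter=1000,
    importance_prune_until_iter=15000,
    importance_prune_interval=100,
)
# The returned trainer is then stepped by whatever training loop the host project uses.
```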

reduced_3dgs-1.8.18/reduced_3dgs/importance/trainer.py +147 -0

@@ -0,0 +1,147 @@
+import math
+from typing import Callable, List
+import torch
+
+from gaussian_splatting import Camera, GaussianModel
+from gaussian_splatting.trainer import AbstractDensifier, DensifierWrapper, DensificationTrainer, NoopDensifier
+from gaussian_splatting.dataset import CameraDataset
+from .diff_gaussian_rasterization import GaussianRasterizationSettings, GaussianRasterizer
+
+
+def count_render(self: GaussianModel, viewpoint_camera: Camera):
+    """
+    Render the scene.
+
+    Background tensor (bg_color) must be on GPU!
+    """
+    # Create zero tensor. We will use it to make pytorch return gradients of the 2D (screen-space) means
+    screenspace_points = torch.zeros_like(self.get_xyz, dtype=self.get_xyz.dtype, requires_grad=True, device=self._xyz.device) + 0
+    try:
+        screenspace_points.retain_grad()
+    except:
+        pass
+
+    # Set up rasterization configuration
+    tanfovx = math.tan(viewpoint_camera.FoVx * 0.5)
+    tanfovy = math.tan(viewpoint_camera.FoVy * 0.5)
+
+    raster_settings = GaussianRasterizationSettings(
+        image_height=int(viewpoint_camera.image_height),
+        image_width=int(viewpoint_camera.image_width),
+        tanfovx=tanfovx,
+        tanfovy=tanfovy,
+        bg=viewpoint_camera.bg_color.to(self._xyz.device),
+        scale_modifier=self.scale_modifier,
+        viewmatrix=viewpoint_camera.world_view_transform,
+        projmatrix=viewpoint_camera.full_proj_transform,
+        sh_degree=self.active_sh_degree,
+        campos=viewpoint_camera.camera_center,
+        prefiltered=False,
+        debug=self.debug,
+        f_count=True,
+    )
+
+    rasterizer = GaussianRasterizer(raster_settings=raster_settings)
+    means3D = self.get_xyz
+    means2D = screenspace_points
+    opacity = self.get_opacity
+
+    scales = self.get_scaling
+    rotations = self.get_rotation
+
+    shs = self.get_features
+
+    # Rasterize visible Gaussians to image, obtain their radii (on screen).
+    gaussians_count, opacity_important_score, T_alpha_important_score, rendered_image, radii = rasterizer(
+        means3D=means3D,
+        means2D=means2D,
+        shs=shs,
+        colors_precomp=None,
+        opacities=opacity,
+        scales=scales,
+        rotations=rotations,
+        cov3D_precomp=None)
+
+    # Those Gaussians that were frustum culled or had a radius of 0 were not visible.
+    # They will be excluded from value updates used in the splitting criteria.
+    return {
+        "render": rendered_image,
+        "viewspace_points": screenspace_points,
+        "visibility_filter": radii > 0,
+        "radii": radii,
+        "gaussians_count": gaussians_count,
+        "opacity_important_score": opacity_important_score,
+        "T_alpha_important_score": T_alpha_important_score
+    }
+
+
+def prune_gaussians(model: GaussianModel, dataset: CameraDataset):
+    gaussian_count = torch.zeros(model.get_xyz.shape[0], device=model.get_xyz.device, dtype=torch.int)
+    opacity_important_score = torch.zeros(model.get_xyz.shape[0], device=model.get_xyz.device, dtype=torch.float)
+    T_alpha_important_score = torch.zeros(model.get_xyz.shape[0], device=model.get_xyz.device, dtype=torch.float)
+    for camera in dataset:
+        out = count_render(model, camera)
+        gaussian_count += out["gaussians_count"]
+        opacity_important_score += out["opacity_important_score"]
+        T_alpha_important_score += out["T_alpha_important_score"]
+    return None
+
+
+class ImportancePruner(DensifierWrapper):
+    def __init__(
+            self, base_densifier: AbstractDensifier,
+            dataset: CameraDataset,
+            importance_prune_from_iter=1000,
+            importance_prune_until_iter=15000,
+            importance_prune_interval: int = 100,
+    ):
+        super().__init__(base_densifier)
+        self.dataset = dataset
+        self.importance_prune_from_iter = importance_prune_from_iter
+        self.importance_prune_until_iter = importance_prune_until_iter
+        self.importance_prune_interval = importance_prune_interval
+
+    def densify_and_prune(self, loss, out, camera, step: int):
+        ret = super().densify_and_prune(loss, out, camera, step)
+        if self.importance_prune_from_iter <= step <= self.importance_prune_until_iter and step % self.importance_prune_interval == 0:
+            remove_mask = prune_gaussians(self.model, self.dataset)
+            ret = ret._replace(remove_mask=remove_mask if ret.remove_mask is None else torch.logical_or(ret.remove_mask, remove_mask))
+        return ret
+
+
+def ImportancePrunerWrapper(
+        base_densifier_constructor: Callable[..., AbstractDensifier],
+        model: GaussianModel,
+        scene_extent: float,
+        dataset: List[Camera],
+        importance_prune_from_iter=1000,
+        importance_prune_until_iter=15000,
+        importance_prune_interval: int = 100,
+        *args, **kwargs):
+    return ImportancePruner(
+        base_densifier_constructor(model, scene_extent, *args, **kwargs),
+        dataset,
+        importance_prune_from_iter=importance_prune_from_iter,
+        importance_prune_until_iter=importance_prune_until_iter,
+        importance_prune_interval=importance_prune_interval,
+    )
+
+
+def BaseImportancePruningTrainer(
+        model: GaussianModel,
+        scene_extent: float,
+        dataset: List[Camera],
+        importance_prune_from_iter=1000,
+        importance_prune_until_iter=15000,
+        importance_prune_interval: int = 100,
+        *args, **kwargs):
+    return DensificationTrainer(
+        model, scene_extent,
+        ImportancePruner(
+            NoopDensifier(model),
+            dataset,
+            importance_prune_from_iter=importance_prune_from_iter,
+            importance_prune_until_iter=importance_prune_until_iter,
+            importance_prune_interval=importance_prune_interval,
+        ), *args, **kwargs
+    )
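
In this new module, count_render re-renders a view with f_count=True so the rasterizer also returns a per-Gaussian count and two importance scores (opacity_important_score, T_alpha_important_score), and prune_gaussians sums them over the whole dataset. Note that in this release prune_gaussians still returns None, so ImportancePruner.densify_and_prune does not yet contribute a remove mask. The sketch below shows one way such scores could be thresholded into a boolean mask; the quantile cut-off and the choice of score are assumptions, not part of the package.

```python
import torch

def scores_to_remove_mask(T_alpha_important_score: torch.Tensor,
                          quantile: float = 0.01) -> torch.Tensor:
    """Illustrative only: flag the lowest-importance Gaussians for removal."""
    # Gaussians whose accumulated importance falls below the chosen quantile
    # are marked True, i.e. candidates for pruning.
    threshold = torch.quantile(T_alpha_important_score.float(), quantile)
    return T_alpha_important_score < threshold
```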

{reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/reduced_3dgs/pruning/combinations.py +1 -1

@@ -21,7 +21,7 @@ def BasePrunerInDensifyTrainer(
     return DensificationTrainerWrapper(
         lambda model, scene_extent: BasePruner(
             NoopDensifier(model),
-
+            dataset,
             prune_from_iter=prune_from_iter,
             prune_until_iter=prune_until_iter,
             prune_interval=prune_interval,

{reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/reduced_3dgs/pruning/trainer.py +3 -8

@@ -82,7 +82,7 @@ def mercy_gaussians(
 class BasePruner(DensifierWrapper):
     def __init__(
             self, base_densifier: AbstractDensifier,
-
+            dataset: List[Camera],
             prune_from_iter=1000,
             prune_until_iter=15000,
             prune_interval: int = 100,
@@ -91,7 +91,6 @@ class BasePruner(DensifierWrapper):
             mercy_minimum=3,
             mercy_type='redundancy_opacity'):
         super().__init__(base_densifier)
-        self._model = model
         self.dataset = dataset
         self.prune_from_iter = prune_from_iter
         self.prune_until_iter = prune_until_iter
@@ -101,10 +100,6 @@ class BasePruner(DensifierWrapper):
         self.mercy_minimum = mercy_minimum
         self.mercy_type = mercy_type

-    @property
-    def model(self) -> GaussianModel:
-        return self._model
-
     def densify_and_prune(self, loss, out, camera, step: int):
         ret = super().densify_and_prune(loss, out, camera, step)
         if self.prune_from_iter <= step <= self.prune_until_iter and step % self.prune_interval == 0:
@@ -128,8 +123,8 @@ def BasePruningTrainer(
     return DensificationTrainer(
         model, scene_extent,
         BasePruner(
-            NoopDensifier(),
-
+            NoopDensifier(model),
+            dataset,
             prune_from_iter=prune_from_iter,
             prune_until_iter=prune_until_iter,
             prune_interval=prune_interval,
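
Across these hunks BasePruner stops keeping its own `_model` reference and `model` property, presumably relying on the accessor already available through DensifierWrapper, and instead takes the camera `dataset` explicitly; the call sites above are updated to match. A construction sketch mirroring those call sites, where `model` and `dataset` are assumed to be an existing GaussianModel and camera collection:

```python
from gaussian_splatting.trainer import NoopDensifier
from reduced_3dgs.pruning.trainer import BasePruner

# Mirrors the updated call sites in this diff; the remaining BasePruner keyword
# arguments (mercy_minimum, mercy_type, ...) keep their defaults.
pruner = BasePruner(
    NoopDensifier(model),   # the wrapped densifier now carries the model reference
    dataset,                # cameras are passed explicitly as of this release
    prune_from_iter=1000,
    prune_until_iter=15000,
    prune_interval=100,
)
```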

{reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/reduced_3dgs.egg-info/SOURCES.txt +3 -2

@@ -10,11 +10,12 @@ reduced_3dgs.egg-info/SOURCES.txt
 reduced_3dgs.egg-info/dependency_links.txt
 reduced_3dgs.egg-info/requires.txt
 reduced_3dgs.egg-info/top_level.txt
+reduced_3dgs/importance/__init__.py
+reduced_3dgs/importance/combinations.py
+reduced_3dgs/importance/trainer.py
 reduced_3dgs/pruning/__init__.py
 reduced_3dgs/pruning/combinations.py
 reduced_3dgs/pruning/trainer.py
-reduced_3dgs/pruning/importance/__init__.py
-reduced_3dgs/pruning/importance/trainer.py
 reduced_3dgs/quantization/__init__.py
 reduced_3dgs/quantization/abc.py
 reduced_3dgs/quantization/exclude_zeros.py

{reduced_3dgs-1.8.16 → reduced_3dgs-1.8.18}/setup.py +3 -3

@@ -48,7 +48,7 @@ importance_sources = [
     "rasterize_points.cu",
     "ext.cpp"]
 importance_packages = {
-    'reduced_3dgs.
+    'reduced_3dgs.importance.diff_gaussian_rasterization': 'submodules/gaussian-importance/diff_gaussian_rasterization',
 }

 cxx_compiler_flags = []
@@ -60,7 +60,7 @@ if os.name == 'nt':

 setup(
     name="reduced_3dgs",
-    version='1.8.
+    version='1.8.18',
     author='yindaheng98',
     author_email='yindaheng98@gmail.com',
     url='https://github.com/yindaheng98/reduced-3dgs',
@@ -83,7 +83,7 @@ setup(
             extra_compile_args={"nvcc": nvcc_compiler_flags + ["-I" + os.path.join(os.path.abspath(rasterizor_root), "third_party/glm/")]}
         ),
         CUDAExtension(
-            name="reduced_3dgs.
+            name="reduced_3dgs.importance.diff_gaussian_rasterization._C",
             sources=[os.path.join(importance_root, source) for source in importance_sources],
             extra_compile_args={"nvcc": nvcc_compiler_flags + ["-I" + os.path.join(os.path.abspath(importance_root), "third_party/glm/")]}
         ),
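
These setup.py hunks bump the version to 1.8.18 and register the f_count-enabled rasterizer under the new reduced_3dgs.importance subpackage, which matches the relative import in the new trainer module. An illustrative import of the compiled extension under that layout; the symbol names are the ones the trainer module itself imports:

```python
# Illustrative: the package path comes from the importance_packages mapping and the
# CUDAExtension name declared above; usable only once the CUDA extension is built.
from reduced_3dgs.importance.diff_gaussian_rasterization import (
    GaussianRasterizationSettings,
    GaussianRasterizer,
)
```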

reduced_3dgs-1.8.16/reduced_3dgs/pruning/importance/__init__.py +0 -1

@@ -1 +0,0 @@
-from .trainer import ImportancePruningTrainerWrapper, BaseImportancePruningTrainer, ImportancePruningTrainer

reduced_3dgs-1.8.16/reduced_3dgs/pruning/importance/trainer.py +0 -141

@@ -1,141 +0,0 @@
-import math
-import torch
-
-from gaussian_splatting import Camera, GaussianModel
-from gaussian_splatting.trainer import AbstractTrainer, TrainerWrapper, BaseTrainer, Trainer
-from gaussian_splatting.dataset import CameraDataset
-from .diff_gaussian_rasterization import GaussianRasterizationSettings, GaussianRasterizer
-
-
-def count_render(self: GaussianModel, viewpoint_camera: Camera):
-    """
-    Render the scene.
-
-    Background tensor (bg_color) must be on GPU!
-    """
-    # Create zero tensor. We will use it to make pytorch return gradients of the 2D (screen-space) means
-    screenspace_points = torch.zeros_like(self.get_xyz, dtype=self.get_xyz.dtype, requires_grad=True, device=self._xyz.device) + 0
-    try:
-        screenspace_points.retain_grad()
-    except:
-        pass
-
-    # Set up rasterization configuration
-    tanfovx = math.tan(viewpoint_camera.FoVx * 0.5)
-    tanfovy = math.tan(viewpoint_camera.FoVy * 0.5)
-
-    raster_settings = GaussianRasterizationSettings(
-        image_height=int(viewpoint_camera.image_height),
-        image_width=int(viewpoint_camera.image_width),
-        tanfovx=tanfovx,
-        tanfovy=tanfovy,
-        bg=viewpoint_camera.bg_color.to(self._xyz.device),
-        scale_modifier=self.scale_modifier,
-        viewmatrix=viewpoint_camera.world_view_transform,
-        projmatrix=viewpoint_camera.full_proj_transform,
-        sh_degree=self.active_sh_degree,
-        campos=viewpoint_camera.camera_center,
-        prefiltered=False,
-        debug=self.debug,
-        f_count=True,
-    )
-
-    rasterizer = GaussianRasterizer(raster_settings=raster_settings)
-    means3D = self.get_xyz
-    means2D = screenspace_points
-    opacity = self.get_opacity
-
-    scales = self.get_scaling
-    rotations = self.get_rotation
-
-    shs = self.get_features
-
-    # Rasterize visible Gaussians to image, obtain their radii (on screen).
-    gaussians_count, opacity_important_score, T_alpha_important_score, rendered_image, radii = rasterizer(
-        means3D=means3D,
-        means2D=means2D,
-        shs=shs,
-        colors_precomp=None,
-        opacities=opacity,
-        scales=scales,
-        rotations=rotations,
-        cov3D_precomp=None)
-
-    # Those Gaussians that were frustum culled or had a radius of 0 were not visible.
-    # They will be excluded from value updates used in the splitting criteria.
-    return {
-        "render": rendered_image,
-        "viewspace_points": screenspace_points,
-        "visibility_filter": radii > 0,
-        "radii": radii,
-        "gaussians_count": gaussians_count,
-        "opacity_important_score": opacity_important_score,
-        "T_alpha_important_score": T_alpha_important_score
-    }
-
-
-class ImportancePruner(TrainerWrapper):
-    def __init__(
-            self, base_trainer: AbstractTrainer,
-            dataset: CameraDataset,
-            importance_prune_at_steps=[15000],
-    ):
-        super().__init__(base_trainer)
-        self.dataset = dataset
-        self.importance_prune_at_steps = importance_prune_at_steps
-
-    def optim_step(self):
-        ret = super().optim_step()
-        if self.curr_step in self.importance_prune_at_steps:
-            gaussian_count = torch.zeros(self.model.get_xyz.shape[0], device=self.model.get_xyz.device, dtype=torch.int)
-            opacity_important_score = torch.zeros(self.model.get_xyz.shape[0], device=self.model.get_xyz.device, dtype=torch.float)
-            T_alpha_important_score = torch.zeros(self.model.get_xyz.shape[0], device=self.model.get_xyz.device, dtype=torch.float)
-            for camera in self.dataset:
-                out = count_render(self.model, camera)
-                gaussian_count += out["gaussians_count"]
-                opacity_important_score += out["opacity_important_score"]
-                T_alpha_important_score += out["T_alpha_important_score"]
-            pass
-        return ret
-
-
-def ImportancePruningTrainerWrapper(
-        base_trainer_constructor,
-        model: GaussianModel,
-        scene_extent: float,
-        dataset: CameraDataset,
-        importance_prune_at_steps=[15000],
-        *args, **kwargs):
-    return ImportancePruner(
-        base_trainer_constructor(model, scene_extent, dataset, *args, **kwargs),
-        dataset,
-        importance_prune_at_steps=importance_prune_at_steps,
-    )
-
-
-def BaseImportancePruningTrainer(
-        model: GaussianModel,
-        scene_extent: float,
-        dataset: CameraDataset,
-        importance_prune_at_steps=[15000],
-        *args, **kwargs):
-    return ImportancePruningTrainerWrapper(
-        lambda model, scene_extent, dataset, *args, **kwargs: BaseTrainer(model, scene_extent, *args, **kwargs),
-        model, scene_extent, dataset,
-        importance_prune_at_steps=importance_prune_at_steps,
-        *args, **kwargs,
-    )
-
-
-def ImportancePruningTrainer(
-        model: GaussianModel,
-        scene_extent: float,
-        dataset: CameraDataset,
-        importance_prune_at_steps=[15000],
-        *args, **kwargs):
-    return ImportancePruningTrainerWrapper(
-        lambda model, scene_extent, dataset, *args, **kwargs: Trainer(model, scene_extent, *args, **kwargs),
-        model, scene_extent, dataset,
-        importance_prune_at_steps=importance_prune_at_steps,
-        *args, **kwargs,
-    )
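
The deleted implementation wrapped the whole trainer and recomputed the importance scores only at the fixed steps in importance_prune_at_steps (and, as the trailing `pass` shows, never turned them into a prune); its replacement in reduced_3dgs/importance/trainer.py hooks densify_and_prune and runs on a from/until/interval schedule instead. A small standalone helper reproducing that new schedule check, for comparison:

```python
def should_importance_prune(step: int,
                            importance_prune_from_iter: int = 1000,
                            importance_prune_until_iter: int = 15000,
                            importance_prune_interval: int = 100) -> bool:
    """Mirrors the condition in the new ImportancePruner.densify_and_prune."""
    return (importance_prune_from_iter <= step <= importance_prune_until_iter
            and step % importance_prune_interval == 0)
```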