reduced-3dgs 1.9.3.tar.gz → 1.9.4.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of reduced-3dgs might be problematic.

Files changed (47)
  1. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/PKG-INFO +1 -1
  2. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/reduced_3dgs/combinations.py +46 -6
  3. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/reduced_3dgs/importance/combinations.py +3 -1
  4. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/reduced_3dgs/importance/trainer.py +20 -4
  5. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/reduced_3dgs.egg-info/PKG-INFO +1 -1
  6. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/setup.py +1 -1
  7. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/LICENSE.md +0 -0
  8. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/README.md +0 -0
  9. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/reduced_3dgs/__init__.py +0 -0
  10. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/reduced_3dgs/importance/__init__.py +0 -0
  11. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/reduced_3dgs/pruning/__init__.py +0 -0
  12. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/reduced_3dgs/pruning/combinations.py +0 -0
  13. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/reduced_3dgs/pruning/trainer.py +0 -0
  14. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/reduced_3dgs/quantization/__init__.py +0 -0
  15. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/reduced_3dgs/quantization/abc.py +0 -0
  16. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/reduced_3dgs/quantization/exclude_zeros.py +0 -0
  17. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/reduced_3dgs/quantization/quantizer.py +0 -0
  18. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/reduced_3dgs/quantization/wrapper.py +0 -0
  19. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/reduced_3dgs/quantize.py +0 -0
  20. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/reduced_3dgs/shculling/__init__.py +0 -0
  21. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/reduced_3dgs/shculling/gaussian_model.py +0 -0
  22. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/reduced_3dgs/shculling/trainer.py +0 -0
  23. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/reduced_3dgs/train.py +0 -0
  24. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/reduced_3dgs.egg-info/SOURCES.txt +0 -0
  25. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/reduced_3dgs.egg-info/dependency_links.txt +0 -0
  26. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/reduced_3dgs.egg-info/requires.txt +0 -0
  27. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/reduced_3dgs.egg-info/top_level.txt +0 -0
  28. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/setup.cfg +0 -0
  29. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/submodules/diff-gaussian-rasterization/cuda_rasterizer/backward.cu +0 -0
  30. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/submodules/diff-gaussian-rasterization/cuda_rasterizer/forward.cu +0 -0
  31. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/submodules/diff-gaussian-rasterization/cuda_rasterizer/rasterizer_impl.cu +0 -0
  32. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/submodules/diff-gaussian-rasterization/diff_gaussian_rasterization/__init__.py +0 -0
  33. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/submodules/diff-gaussian-rasterization/ext.cpp +0 -0
  34. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/submodules/diff-gaussian-rasterization/rasterize_points.cu +0 -0
  35. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/submodules/diff-gaussian-rasterization/reduced_3dgs/kmeans.cu +0 -0
  36. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/submodules/diff-gaussian-rasterization/reduced_3dgs/redundancy_score.cu +0 -0
  37. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/submodules/diff-gaussian-rasterization/reduced_3dgs/sh_culling.cu +0 -0
  38. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/submodules/diff-gaussian-rasterization/reduced_3dgs.cu +0 -0
  39. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/submodules/gaussian-importance/cuda_rasterizer/backward.cu +0 -0
  40. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/submodules/gaussian-importance/cuda_rasterizer/forward.cu +0 -0
  41. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/submodules/gaussian-importance/cuda_rasterizer/rasterizer_impl.cu +0 -0
  42. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/submodules/gaussian-importance/diff_gaussian_rasterization/__init__.py +0 -0
  43. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/submodules/gaussian-importance/ext.cpp +0 -0
  44. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/submodules/gaussian-importance/rasterize_points.cu +0 -0
  45. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/submodules/simple-knn/ext.cpp +0 -0
  46. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/submodules/simple-knn/simple_knn.cu +0 -0
  47. {reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/submodules/simple-knn/spatial.cu +0 -0
{reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: reduced_3dgs
-Version: 1.9.3
+Version: 1.9.4
 Summary: Refactored code for the paper "Reducing the Memory Footprint of 3D Gaussian Splatting"
 Home-page: https://github.com/yindaheng98/reduced-3dgs
 Author: yindaheng98
{reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/reduced_3dgs/combinations.py
@@ -17,9 +17,19 @@ def BaseFullPruningTrainer(
         scene_extent: float,
         dataset: List[Camera],
         *args,
-        importance_prune_from_iter=1000,
-        importance_prune_until_iter=15000,
-        importance_prune_interval: int = 100,
+        importance_prune_from_iter=15000,
+        importance_prune_until_iter=20000,
+        importance_prune_interval: int = 1000,
+        importance_score_resize=None,
+        importance_prune_type="comprehensive",
+        importance_prune_percent=0.1,
+        importance_prune_thr_important_score=None,
+        importance_prune_thr_v_important_score=3.0,
+        importance_prune_thr_max_v_important_score=None,
+        importance_prune_thr_count=1,
+        importance_prune_thr_T_alpha=1.0,
+        importance_prune_thr_T_alpha_avg=0.001,
+        importance_v_pow=0.1,
         **kwargs):
     return PruningTrainerWrapper(
         lambda model, scene_extent, dataset: ImportancePruner(
@@ -28,6 +38,16 @@ def BaseFullPruningTrainer(
             importance_prune_from_iter=importance_prune_from_iter,
             importance_prune_until_iter=importance_prune_until_iter,
             importance_prune_interval=importance_prune_interval,
+            importance_score_resize=importance_score_resize,
+            importance_prune_type=importance_prune_type,
+            importance_prune_percent=importance_prune_percent,
+            importance_prune_thr_important_score=importance_prune_thr_important_score,
+            importance_prune_thr_v_important_score=importance_prune_thr_v_important_score,
+            importance_prune_thr_max_v_important_score=importance_prune_thr_max_v_important_score,
+            importance_prune_thr_count=importance_prune_thr_count,
+            importance_prune_thr_T_alpha=importance_prune_thr_T_alpha,
+            importance_prune_thr_T_alpha_avg=importance_prune_thr_T_alpha_avg,
+            importance_v_pow=importance_v_pow,
         ),
         model, scene_extent, dataset,
         *args, **kwargs
@@ -39,9 +59,19 @@ def BaseFullPrunerInDensifyTrainer(
         scene_extent: float,
         dataset: List[Camera],
         *args,
-        importance_prune_from_iter=1000,
-        importance_prune_until_iter=15000,
-        importance_prune_interval: int = 100,
+        importance_prune_from_iter=15000,
+        importance_prune_until_iter=20000,
+        importance_prune_interval: int = 1000,
+        importance_score_resize=None,
+        importance_prune_type="comprehensive",
+        importance_prune_percent=0.1,
+        importance_prune_thr_important_score=None,
+        importance_prune_thr_v_important_score=3.0,
+        importance_prune_thr_max_v_important_score=None,
+        importance_prune_thr_count=1,
+        importance_prune_thr_T_alpha=1.0,
+        importance_prune_thr_T_alpha_avg=0.001,
+        importance_v_pow=0.1,
         **kwargs):
     return PrunerInDensifyTrainerWrapper(
         lambda model, scene_extent, dataset: ImportancePruner(
@@ -50,6 +80,16 @@ def BaseFullPrunerInDensifyTrainer(
             importance_prune_from_iter=importance_prune_from_iter,
             importance_prune_until_iter=importance_prune_until_iter,
             importance_prune_interval=importance_prune_interval,
+            importance_score_resize=importance_score_resize,
+            importance_prune_type=importance_prune_type,
+            importance_prune_percent=importance_prune_percent,
+            importance_prune_thr_important_score=importance_prune_thr_important_score,
+            importance_prune_thr_v_important_score=importance_prune_thr_v_important_score,
+            importance_prune_thr_max_v_important_score=importance_prune_thr_max_v_important_score,
+            importance_prune_thr_count=importance_prune_thr_count,
+            importance_prune_thr_T_alpha=importance_prune_thr_T_alpha,
+            importance_prune_thr_T_alpha_avg=importance_prune_thr_T_alpha_avg,
+            importance_v_pow=importance_v_pow,
         ),
         model, scene_extent, dataset,
         *args, **kwargs
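In 1.9.4 the combined trainers in reduced_3dgs/combinations.py expose the importance-pruning options directly and switch to a later, sparser default schedule (every 1000 iterations between 15000 and 20000, instead of every 100 between 1000 and 15000). Below is a hedged usage sketch, not the package's documented API: it assumes the first positional argument is the GaussianModel (that line sits just above this hunk) and that BaseFullPruningTrainer is importable from reduced_3dgs.combinations as the file path suggests; model, scene_extent and dataset are placeholders for an existing gaussian_splatting setup.

# Hedged usage sketch: passing the newly exposed importance-pruning
# keywords through BaseFullPruningTrainer (assumed import path; `model`,
# `scene_extent`, `dataset` are placeholders, not defined here).
from reduced_3dgs.combinations import BaseFullPruningTrainer

trainer = BaseFullPruningTrainer(
    model, scene_extent, dataset,        # GaussianModel, float, List[Camera]
    importance_prune_from_iter=15000,    # new default schedule in 1.9.4
    importance_prune_until_iter=20000,
    importance_prune_interval=1000,
    importance_score_resize=1600,        # new in 1.9.4; 1600 is a hypothetical value
    importance_prune_thr_T_alpha=1.0,    # default raised from 0.1
)

BaseFullPrunerInDensifyTrainer gains the same keywords, as the second pair of hunks above shows.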
{reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/reduced_3dgs/importance/combinations.py
@@ -13,13 +13,14 @@ def BaseImportancePrunerInDensifyTrainer(
         importance_prune_from_iter=15000,
         importance_prune_until_iter=20000,
         importance_prune_interval: int = 1000,
+        importance_score_resize=None,
         importance_prune_type="comprehensive",
         importance_prune_percent=0.1,
         importance_prune_thr_important_score=None,
         importance_prune_thr_v_important_score=3.0,
         importance_prune_thr_max_v_important_score=None,
         importance_prune_thr_count=1,
-        importance_prune_thr_T_alpha=0.1,
+        importance_prune_thr_T_alpha=1.0,
         importance_prune_thr_T_alpha_avg=0.001,
         importance_v_pow=0.1,
         **kwargs):
@@ -30,6 +31,7 @@ def BaseImportancePrunerInDensifyTrainer(
             importance_prune_from_iter=importance_prune_from_iter,
             importance_prune_until_iter=importance_prune_until_iter,
             importance_prune_interval=importance_prune_interval,
+            importance_score_resize=importance_score_resize,
             importance_prune_type=importance_prune_type,
             importance_prune_percent=importance_prune_percent,
             importance_prune_thr_important_score=importance_prune_thr_important_score,
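Because ImportancePruner only acts when from_iter <= step <= until_iter and step % interval == 0 (see the trainer.py hunks below), these defaults make importance pruning a late, infrequent step: six invocations at iterations 15000 through 20000, versus 141 under the old combinations.py defaults of every 100 steps from 1000 to 15000. A small self-contained check:

# Self-contained sketch of when the importance pruner fires, mirroring the
# step check used by ImportancePruner (shown in trainer.py below).
def prune_steps(from_iter, until_iter, interval):
    return [step for step in range(from_iter, until_iter + 1)
            if step % interval == 0]

print(prune_steps(15000, 20000, 1000))     # [15000, 16000, 17000, 18000, 19000, 20000]
print(len(prune_steps(1000, 15000, 100)))  # 141 — the old combinations.py default schedule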
{reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/reduced_3dgs/importance/trainer.py
@@ -1,8 +1,9 @@
 import math
-from typing import Callable, List
+from typing import List
 import torch
 
 from gaussian_splatting import Camera, GaussianModel
+from gaussian_splatting.camera import build_camera
 from gaussian_splatting.trainer import AbstractDensifier, DensifierWrapper, DensificationTrainer, NoopDensifier
 from gaussian_splatting.dataset import CameraDataset
 from .diff_gaussian_rasterization import GaussianRasterizationSettings, GaussianRasterizer
@@ -75,11 +76,20 @@ def count_render(self: GaussianModel, viewpoint_camera: Camera):
     }
 
 
-def prune_list(model: GaussianModel, dataset: CameraDataset):
+def prune_list(model: GaussianModel, dataset: CameraDataset, resize=None):
     gaussian_count = torch.zeros(model.get_xyz.shape[0], device=model.get_xyz.device, dtype=torch.int)
     opacity_important_score = torch.zeros(model.get_xyz.shape[0], device=model.get_xyz.device, dtype=torch.float)
     T_alpha_important_score = torch.zeros(model.get_xyz.shape[0], device=model.get_xyz.device, dtype=torch.float)
     for camera in dataset:
+        if resize is not None:
+            height, width = camera.image_height, camera.image_width
+            scale = resize / max(height, width)
+            height, width = int(height * scale), int(width * scale)
+            camera = build_camera(
+                image_height=height, image_width=width,
+                FoVx=camera.FoVx, FoVy=camera.FoVy,
+                R=camera.R, T=camera.T,
+                device=camera.R.device)
         out = count_render(model, camera)
         gaussian_count += out["gaussians_count"]
         opacity_important_score += out["opacity_important_score"]
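The new resize argument lets prune_list gather its importance statistics at a reduced resolution: when set, each camera is rebuilt via build_camera with its longer image side scaled down to resize pixels, preserving the aspect ratio, before count_render runs. A self-contained sketch of that arithmetic, using a hypothetical 1920×1080 camera and resize=1600:

# Self-contained sketch of the resize arithmetic used in prune_list:
# the longer side is scaled down to `resize` pixels, aspect ratio kept.
def scored_resolution(height, width, resize):
    scale = resize / max(height, width)
    return int(height * scale), int(width * scale)

print(scored_resolution(1080, 1920, 1600))  # (900, 1600) — hypothetical camera size and resize value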
@@ -118,6 +128,7 @@ def score2mask(percent, import_score: list, threshold=None):
 
 def prune_gaussians(
         gaussians: GaussianModel, dataset: CameraDataset,
+        resize=None,
         prune_type="comprehensive",
         prune_percent=0.1,
         prune_thr_important_score=None,
@@ -127,7 +138,7 @@ def prune_gaussians(
         prune_thr_T_alpha=None,
         prune_thr_T_alpha_avg=None,
         v_pow=0.1):
-    gaussian_list, opacity_imp_list, T_alpha_imp_list = prune_list(gaussians, dataset)
+    gaussian_list, opacity_imp_list, T_alpha_imp_list = prune_list(gaussians, dataset, resize)
     match prune_type:
         case "important_score":
             mask = score2mask(prune_percent, opacity_imp_list, prune_thr_important_score)
@@ -176,6 +187,7 @@ class ImportancePruner(DensifierWrapper):
             importance_prune_from_iter=15000,
             importance_prune_until_iter=20000,
             importance_prune_interval: int = 1000,
+            importance_score_resize=None,
             importance_prune_type="comprehensive",
             importance_prune_percent=0.1,
             importance_prune_thr_important_score=None,
@@ -190,6 +202,7 @@ class ImportancePruner(DensifierWrapper):
         self.importance_prune_from_iter = importance_prune_from_iter
         self.importance_prune_until_iter = importance_prune_until_iter
         self.importance_prune_interval = importance_prune_interval
+        self.resize = importance_score_resize
         self.prune_percent = importance_prune_percent
         self.prune_thr_important_score = importance_prune_thr_important_score
         self.prune_thr_v_important_score = importance_prune_thr_v_important_score
@@ -205,6 +218,7 @@ class ImportancePruner(DensifierWrapper):
         if self.importance_prune_from_iter <= step <= self.importance_prune_until_iter and step % self.importance_prune_interval == 0:
             remove_mask = prune_gaussians(
                 self.model, self.dataset,
+                self.resize,
                 self.prune_type, self.prune_percent,
                 self.prune_thr_important_score, self.prune_thr_v_important_score,
                 self.prune_thr_max_v_important_score, self.prune_thr_count,
@@ -222,13 +236,14 @@ def BaseImportancePruningTrainer(
         importance_prune_from_iter=15000,
         importance_prune_until_iter=20000,
         importance_prune_interval: int = 1000,
+        importance_score_resize=None,
         importance_prune_type="comprehensive",
         importance_prune_percent=0.1,
         importance_prune_thr_important_score=None,
         importance_prune_thr_v_important_score=3.0,
         importance_prune_thr_max_v_important_score=None,
         importance_prune_thr_count=1,
-        importance_prune_thr_T_alpha=0.1,
+        importance_prune_thr_T_alpha=1.0,
         importance_prune_thr_T_alpha_avg=0.001,
         importance_v_pow=0.1,
         **kwargs):
@@ -240,6 +255,7 @@ def BaseImportancePruningTrainer(
             importance_prune_from_iter=importance_prune_from_iter,
             importance_prune_until_iter=importance_prune_until_iter,
             importance_prune_interval=importance_prune_interval,
+            importance_score_resize=importance_score_resize,
             importance_prune_type=importance_prune_type,
             importance_prune_percent=importance_prune_percent,
             importance_prune_thr_important_score=importance_prune_thr_important_score,
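Callers that drive pruning themselves can pass the same value straight to prune_gaussians, which forwards it to prune_list as shown above. A hedged sketch, assuming prune_gaussians is importable from reduced_3dgs.importance.trainer as the file path suggests; gaussians and dataset are placeholders for a trained GaussianModel and its CameraDataset:

# Hedged sketch of calling the updated prune_gaussians directly
# (assumed import path; `gaussians` and `dataset` are placeholders,
# not defined here).
from reduced_3dgs.importance.trainer import prune_gaussians

remove_mask = prune_gaussians(
    gaussians, dataset,
    resize=1600,                    # new in 1.9.4; hypothetical value capping the scoring resolution
    prune_type="comprehensive",     # default shown in this diff
    prune_percent=0.1,
)
# remove_mask marks the Gaussians selected for removal, as used by
# ImportancePruner in this file.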
{reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/reduced_3dgs.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: reduced_3dgs
-Version: 1.9.3
+Version: 1.9.4
 Summary: Refactored code for the paper "Reducing the Memory Footprint of 3D Gaussian Splatting"
 Home-page: https://github.com/yindaheng98/reduced-3dgs
 Author: yindaheng98
{reduced_3dgs-1.9.3 → reduced_3dgs-1.9.4}/setup.py
@@ -60,7 +60,7 @@ if os.name == 'nt':
 
 setup(
     name="reduced_3dgs",
-    version='1.9.3',
+    version='1.9.4',
     author='yindaheng98',
     author_email='yindaheng98@gmail.com',
     url='https://github.com/yindaheng98/reduced-3dgs',