reduced-3dgs 1.9.0.tar.gz → 1.9.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of reduced-3dgs might be problematic.

Files changed (47)
  1. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/PKG-INFO +1 -1
  2. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/reduced_3dgs/importance/combinations.py +19 -3
  3. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/reduced_3dgs/importance/trainer.py +71 -10
  4. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/reduced_3dgs.egg-info/PKG-INFO +1 -1
  5. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/setup.py +1 -1
  6. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/LICENSE.md +0 -0
  7. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/README.md +0 -0
  8. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/reduced_3dgs/__init__.py +0 -0
  9. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/reduced_3dgs/combinations.py +0 -0
  10. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/reduced_3dgs/importance/__init__.py +0 -0
  11. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/reduced_3dgs/pruning/__init__.py +0 -0
  12. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/reduced_3dgs/pruning/combinations.py +0 -0
  13. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/reduced_3dgs/pruning/trainer.py +0 -0
  14. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/reduced_3dgs/quantization/__init__.py +0 -0
  15. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/reduced_3dgs/quantization/abc.py +0 -0
  16. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/reduced_3dgs/quantization/exclude_zeros.py +0 -0
  17. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/reduced_3dgs/quantization/quantizer.py +0 -0
  18. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/reduced_3dgs/quantization/wrapper.py +0 -0
  19. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/reduced_3dgs/quantize.py +0 -0
  20. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/reduced_3dgs/shculling/__init__.py +0 -0
  21. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/reduced_3dgs/shculling/gaussian_model.py +0 -0
  22. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/reduced_3dgs/shculling/trainer.py +0 -0
  23. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/reduced_3dgs/train.py +0 -0
  24. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/reduced_3dgs.egg-info/SOURCES.txt +0 -0
  25. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/reduced_3dgs.egg-info/dependency_links.txt +0 -0
  26. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/reduced_3dgs.egg-info/requires.txt +0 -0
  27. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/reduced_3dgs.egg-info/top_level.txt +0 -0
  28. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/setup.cfg +0 -0
  29. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/submodules/diff-gaussian-rasterization/cuda_rasterizer/backward.cu +0 -0
  30. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/submodules/diff-gaussian-rasterization/cuda_rasterizer/forward.cu +0 -0
  31. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/submodules/diff-gaussian-rasterization/cuda_rasterizer/rasterizer_impl.cu +0 -0
  32. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/submodules/diff-gaussian-rasterization/diff_gaussian_rasterization/__init__.py +0 -0
  33. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/submodules/diff-gaussian-rasterization/ext.cpp +0 -0
  34. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/submodules/diff-gaussian-rasterization/rasterize_points.cu +0 -0
  35. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/submodules/diff-gaussian-rasterization/reduced_3dgs/kmeans.cu +0 -0
  36. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/submodules/diff-gaussian-rasterization/reduced_3dgs/redundancy_score.cu +0 -0
  37. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/submodules/diff-gaussian-rasterization/reduced_3dgs/sh_culling.cu +0 -0
  38. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/submodules/diff-gaussian-rasterization/reduced_3dgs.cu +0 -0
  39. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/submodules/gaussian-importance/cuda_rasterizer/backward.cu +0 -0
  40. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/submodules/gaussian-importance/cuda_rasterizer/forward.cu +0 -0
  41. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/submodules/gaussian-importance/cuda_rasterizer/rasterizer_impl.cu +0 -0
  42. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/submodules/gaussian-importance/diff_gaussian_rasterization/__init__.py +0 -0
  43. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/submodules/gaussian-importance/ext.cpp +0 -0
  44. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/submodules/gaussian-importance/rasterize_points.cu +0 -0
  45. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/submodules/simple-knn/ext.cpp +0 -0
  46. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/submodules/simple-knn/simple_knn.cu +0 -0
  47. {reduced_3dgs-1.9.0 → reduced_3dgs-1.9.1}/submodules/simple-knn/spatial.cu +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: reduced_3dgs
-Version: 1.9.0
+Version: 1.9.1
 Summary: Refactored code for the paper "Reducing the Memory Footprint of 3D Gaussian Splatting"
 Home-page: https://github.com/yindaheng98/reduced-3dgs
 Author: yindaheng98
reduced_3dgs/importance/combinations.py
@@ -10,9 +10,17 @@ def BaseImportancePrunerInDensifyTrainer(
         scene_extent: float,
         dataset: List[Camera],
         *args,
-        importance_prune_from_iter=1000,
-        importance_prune_until_iter=15000,
-        importance_prune_interval=100,
+        importance_prune_from_iter=15000,
+        importance_prune_until_iter=20000,
+        importance_prune_interval: int = 1000,
+        importance_prune_type="comprehensive",
+        importance_prune_percent=0.1,
+        importance_prune_thr_important_score=None,
+        importance_prune_thr_v_important_score=1.0,
+        importance_prune_thr_max_v_important_score=None,
+        importance_prune_thr_count=1,
+        importance_prune_thr_T_alpha=0.01,
+        importance_v_pow=0.1,
         **kwargs):
     return DensificationTrainerWrapper(
         lambda model, scene_extent: ImportancePruner(
@@ -21,6 +29,14 @@ def BaseImportancePrunerInDensifyTrainer(
             importance_prune_from_iter=importance_prune_from_iter,
             importance_prune_until_iter=importance_prune_until_iter,
             importance_prune_interval=importance_prune_interval,
+            importance_prune_type=importance_prune_type,
+            importance_prune_percent=importance_prune_percent,
+            importance_prune_thr_important_score=importance_prune_thr_important_score,
+            importance_prune_thr_v_important_score=importance_prune_thr_v_important_score,
+            importance_prune_thr_max_v_important_score=importance_prune_thr_max_v_important_score,
+            importance_prune_thr_count=importance_prune_thr_count,
+            importance_prune_thr_T_alpha=importance_prune_thr_T_alpha,
+            importance_v_pow=importance_v_pow,
         ),
         model,
         scene_extent,
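
The net effect of this hunk is that the densification-trainer factory now exposes the full set of importance-pruning options, not just the iteration schedule, and forwards them to the ImportancePruner it constructs. Below is a minimal, hypothetical call sketch: only the keyword names and defaults come from the diff; the import path and the positional arguments ahead of scene_extent are assumptions.

    # Hypothetical usage sketch, not taken from the package's documentation.
    from reduced_3dgs.importance.combinations import BaseImportancePrunerInDensifyTrainer

    def build_trainer(model, scene_extent, cameras):
        # Override only the schedule and one threshold; the remaining keywords
        # keep the new defaults (comprehensive pruning, thr_T_alpha=0.01, ...).
        return BaseImportancePrunerInDensifyTrainer(
            model, scene_extent, cameras,
            importance_prune_from_iter=15000,
            importance_prune_interval=1000,
            importance_prune_thr_v_important_score=1.0,
        )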
reduced_3dgs/importance/trainer.py
@@ -116,22 +116,45 @@ def score2mask(percent, import_score: list, threshold=None):
     return prune_mask


-def prune_gaussians(gaussians: GaussianModel, dataset: CameraDataset, prune_type="important_score", prune_percent=0.1, prune_thr=None, v_pow=0.1):
+def prune_gaussians(
+        gaussians: GaussianModel, dataset: CameraDataset,
+        prune_type="comprehensive",
+        prune_percent=0.1,
+        prune_thr_important_score=None,
+        prune_thr_v_important_score=1.0,
+        prune_thr_max_v_important_score=None,
+        prune_thr_count=1,
+        prune_thr_T_alpha=0.01,
+        v_pow=0.1):
     gaussian_list, opacity_imp_list, T_alpha_imp_list = prune_list(gaussians, dataset)
     match prune_type:
         case "important_score":
-            mask = score2mask(prune_percent, opacity_imp_list, prune_thr)
+            mask = score2mask(prune_percent, opacity_imp_list, prune_thr_important_score)
         case "v_important_score":
             v_list = calculate_v_imp_score(gaussians, opacity_imp_list, v_pow)
-            mask = score2mask(prune_percent, v_list, prune_thr)
+            mask = score2mask(prune_percent, v_list, prune_thr_v_important_score)
         case "max_v_important_score":
             v_list = opacity_imp_list * torch.max(gaussians.get_scaling, dim=1)[0]
-            mask = score2mask(prune_percent, v_list, prune_thr)
+            mask = score2mask(prune_percent, v_list, prune_thr_max_v_important_score)
         case "count":
-            mask = score2mask(prune_percent, gaussian_list, prune_thr)
+            mask = score2mask(prune_percent, gaussian_list, prune_thr_count)
         case "T_alpha":
             # new importance score defined by doji
-            mask = score2mask(prune_percent, T_alpha_imp_list, prune_thr)
+            mask = score2mask(prune_percent, T_alpha_imp_list, prune_thr_T_alpha)
+        case "comprehensive":
+            mask = torch.zeros_like(gaussian_list, dtype=torch.bool)
+            if prune_thr_important_score is not None:
+                mask |= score2mask(prune_percent, opacity_imp_list, prune_thr_important_score)
+            if prune_thr_v_important_score is not None:
+                v_list = calculate_v_imp_score(gaussians, opacity_imp_list, v_pow)
+                mask |= score2mask(prune_percent, v_list, prune_thr_v_important_score)
+            if prune_thr_max_v_important_score is not None:
+                v_list = opacity_imp_list * torch.max(gaussians.get_scaling, dim=1)[0]
+                mask |= score2mask(prune_percent, v_list, prune_thr_max_v_important_score)
+            if prune_thr_count is not None:
+                mask |= score2mask(prune_percent, gaussian_list, prune_thr_count)
+            if prune_thr_T_alpha is not None:
+                mask |= score2mask(prune_percent, T_alpha_imp_list, prune_thr_T_alpha)
         case _:
             raise Exception("Unsupportive prunning method")
     return mask
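
The new "comprehensive" branch is essentially a union of the existing criteria: every threshold that is not None contributes a boolean mask via score2mask, and the masks are OR-ed together, so a Gaussian is pruned if any enabled criterion flags it. The following self-contained sketch illustrates that union logic on toy tensors; select_lowest is a hypothetical stand-in for score2mask, whose body is not part of this diff, so its assumed semantics (threshold cut-off, else lowest percent) may differ from the package's.

    import torch

    def select_lowest(percent, scores, threshold=None):
        # Hypothetical stand-in for score2mask: flag scores below `threshold`,
        # or the lowest `percent` fraction when no threshold is given.
        if threshold is not None:
            return scores < threshold
        k = int(percent * scores.numel())
        mask = torch.zeros_like(scores, dtype=torch.bool)
        mask[torch.argsort(scores)[:k]] = True
        return mask

    n = 1000
    t_alpha_score = torch.rand(n)                   # toy per-Gaussian scores
    hit_count = torch.randint(0, 10, (n,)).float()  # toy visibility counts

    # "comprehensive": OR the mask of every criterion whose threshold is set,
    # mirroring the repeated `mask |= score2mask(...)` pattern in the diff.
    mask = torch.zeros(n, dtype=torch.bool)
    mask |= select_lowest(0.1, t_alpha_score, threshold=0.01)  # T_alpha criterion
    mask |= select_lowest(0.1, hit_count, threshold=1)         # count criterion
    print(int(mask.sum()), "Gaussians flagged for pruning")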
@@ -144,17 +167,39 @@ class ImportancePruner(DensifierWrapper):
             importance_prune_from_iter=15000,
             importance_prune_until_iter=20000,
             importance_prune_interval: int = 1000,
+            importance_prune_type="comprehensive",
+            importance_prune_percent=0.1,
+            importance_prune_thr_important_score=None,
+            importance_prune_thr_v_important_score=1.0,
+            importance_prune_thr_max_v_important_score=None,
+            importance_prune_thr_count=1,
+            importance_prune_thr_T_alpha=0.01,
+            importance_v_pow=0.1
     ):
         super().__init__(base_densifier)
         self.dataset = dataset
         self.importance_prune_from_iter = importance_prune_from_iter
         self.importance_prune_until_iter = importance_prune_until_iter
         self.importance_prune_interval = importance_prune_interval
+        self.prune_percent = importance_prune_percent
+        self.prune_thr_important_score = importance_prune_thr_important_score
+        self.prune_thr_v_important_score = importance_prune_thr_v_important_score
+        self.prune_thr_max_v_important_score = importance_prune_thr_max_v_important_score
+        self.prune_thr_count = importance_prune_thr_count
+        self.prune_thr_T_alpha = importance_prune_thr_T_alpha
+        self.v_pow = importance_v_pow
+        self.prune_type = importance_prune_type

     def densify_and_prune(self, loss, out, camera, step: int):
         ret = super().densify_and_prune(loss, out, camera, step)
         if self.importance_prune_from_iter <= step <= self.importance_prune_until_iter and step % self.importance_prune_interval == 0:
-            remove_mask = prune_gaussians(self.model, self.dataset)
+            remove_mask = prune_gaussians(
+                self.model, self.dataset,
+                self.prune_type, self.prune_percent,
+                self.prune_thr_important_score, self.prune_thr_v_important_score,
+                self.prune_thr_max_v_important_score, self.prune_thr_count,
+                self.prune_thr_T_alpha, self.v_pow,
+            )
             ret = ret._replace(remove_mask=remove_mask if ret.remove_mask is None else torch.logical_or(ret.remove_mask, remove_mask))
         return ret

@@ -164,9 +209,17 @@ def BaseImportancePruningTrainer(
         scene_extent: float,
         dataset: List[Camera],
         *args,
-        importance_prune_from_iter=1000,
-        importance_prune_until_iter=15000,
-        importance_prune_interval: int = 100,
+        importance_prune_from_iter=15000,
+        importance_prune_until_iter=20000,
+        importance_prune_interval: int = 1000,
+        importance_prune_type="comprehensive",
+        importance_prune_percent=0.1,
+        importance_prune_thr_important_score=None,
+        importance_prune_thr_v_important_score=1.0,
+        importance_prune_thr_max_v_important_score=None,
+        importance_prune_thr_count=1,
+        importance_prune_thr_T_alpha=0.01,
+        importance_v_pow=0.1,
         **kwargs):
     return DensificationTrainer(
         model, scene_extent,
@@ -176,5 +229,13 @@ def BaseImportancePruningTrainer(
             importance_prune_from_iter=importance_prune_from_iter,
             importance_prune_until_iter=importance_prune_until_iter,
             importance_prune_interval=importance_prune_interval,
+            importance_prune_type=importance_prune_type,
+            importance_prune_percent=importance_prune_percent,
+            importance_prune_thr_important_score=importance_prune_thr_important_score,
+            importance_prune_thr_v_important_score=importance_prune_thr_v_important_score,
+            importance_prune_thr_max_v_important_score=importance_prune_thr_max_v_important_score,
+            importance_prune_thr_count=importance_prune_thr_count,
+            importance_prune_thr_T_alpha=importance_prune_thr_T_alpha,
+            importance_v_pow=importance_v_pow,
         ), *args, **kwargs
     )
reduced_3dgs.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: reduced_3dgs
-Version: 1.9.0
+Version: 1.9.1
 Summary: Refactored code for the paper "Reducing the Memory Footprint of 3D Gaussian Splatting"
 Home-page: https://github.com/yindaheng98/reduced-3dgs
 Author: yindaheng98
setup.py
@@ -60,7 +60,7 @@ if os.name == 'nt':

 setup(
     name="reduced_3dgs",
-    version='1.9.0',
+    version='1.9.1',
     author='yindaheng98',
     author_email='yindaheng98@gmail.com',
     url='https://github.com/yindaheng98/reduced-3dgs',