reduced-3dgs 1.9.2-cp311-cp311-win_amd64.whl → 1.9.4-cp311-cp311-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of reduced-3dgs might be problematic.
- reduced_3dgs/combinations.py +46 -6
- reduced_3dgs/diff_gaussian_rasterization/_C.cp311-win_amd64.pyd +0 -0
- reduced_3dgs/importance/combinations.py +5 -1
- reduced_3dgs/importance/diff_gaussian_rasterization/_C.cp311-win_amd64.pyd +0 -0
- reduced_3dgs/importance/trainer.py +35 -6
- reduced_3dgs/simple_knn/_C.cp311-win_amd64.pyd +0 -0
- {reduced_3dgs-1.9.2.dist-info → reduced_3dgs-1.9.4.dist-info}/METADATA +1 -1
- {reduced_3dgs-1.9.2.dist-info → reduced_3dgs-1.9.4.dist-info}/RECORD +11 -11
- {reduced_3dgs-1.9.2.dist-info → reduced_3dgs-1.9.4.dist-info}/WHEEL +1 -1
- {reduced_3dgs-1.9.2.dist-info → reduced_3dgs-1.9.4.dist-info}/licenses/LICENSE.md +0 -0
- {reduced_3dgs-1.9.2.dist-info → reduced_3dgs-1.9.4.dist-info}/top_level.txt +0 -0
reduced_3dgs/combinations.py
CHANGED
@@ -17,9 +17,19 @@ def BaseFullPruningTrainer(
         scene_extent: float,
         dataset: List[Camera],
         *args,
-        importance_prune_from_iter=
-        importance_prune_until_iter=
-        importance_prune_interval: int =
+        importance_prune_from_iter=15000,
+        importance_prune_until_iter=20000,
+        importance_prune_interval: int = 1000,
+        importance_score_resize=None,
+        importance_prune_type="comprehensive",
+        importance_prune_percent=0.1,
+        importance_prune_thr_important_score=None,
+        importance_prune_thr_v_important_score=3.0,
+        importance_prune_thr_max_v_important_score=None,
+        importance_prune_thr_count=1,
+        importance_prune_thr_T_alpha=1.0,
+        importance_prune_thr_T_alpha_avg=0.001,
+        importance_v_pow=0.1,
         **kwargs):
     return PruningTrainerWrapper(
         lambda model, scene_extent, dataset: ImportancePruner(
@@ -28,6 +38,16 @@ def BaseFullPruningTrainer(
             importance_prune_from_iter=importance_prune_from_iter,
             importance_prune_until_iter=importance_prune_until_iter,
             importance_prune_interval=importance_prune_interval,
+            importance_score_resize=importance_score_resize,
+            importance_prune_type=importance_prune_type,
+            importance_prune_percent=importance_prune_percent,
+            importance_prune_thr_important_score=importance_prune_thr_important_score,
+            importance_prune_thr_v_important_score=importance_prune_thr_v_important_score,
+            importance_prune_thr_max_v_important_score=importance_prune_thr_max_v_important_score,
+            importance_prune_thr_count=importance_prune_thr_count,
+            importance_prune_thr_T_alpha=importance_prune_thr_T_alpha,
+            importance_prune_thr_T_alpha_avg=importance_prune_thr_T_alpha_avg,
+            importance_v_pow=importance_v_pow,
         ),
         model, scene_extent, dataset,
         *args, **kwargs
@@ -39,9 +59,19 @@ def BaseFullPrunerInDensifyTrainer(
         scene_extent: float,
         dataset: List[Camera],
         *args,
-        importance_prune_from_iter=
-        importance_prune_until_iter=
-        importance_prune_interval: int =
+        importance_prune_from_iter=15000,
+        importance_prune_until_iter=20000,
+        importance_prune_interval: int = 1000,
+        importance_score_resize=None,
+        importance_prune_type="comprehensive",
+        importance_prune_percent=0.1,
+        importance_prune_thr_important_score=None,
+        importance_prune_thr_v_important_score=3.0,
+        importance_prune_thr_max_v_important_score=None,
+        importance_prune_thr_count=1,
+        importance_prune_thr_T_alpha=1.0,
+        importance_prune_thr_T_alpha_avg=0.001,
+        importance_v_pow=0.1,
         **kwargs):
     return PrunerInDensifyTrainerWrapper(
         lambda model, scene_extent, dataset: ImportancePruner(
@@ -50,6 +80,16 @@ def BaseFullPrunerInDensifyTrainer(
             importance_prune_from_iter=importance_prune_from_iter,
             importance_prune_until_iter=importance_prune_until_iter,
             importance_prune_interval=importance_prune_interval,
+            importance_score_resize=importance_score_resize,
+            importance_prune_type=importance_prune_type,
+            importance_prune_percent=importance_prune_percent,
+            importance_prune_thr_important_score=importance_prune_thr_important_score,
+            importance_prune_thr_v_important_score=importance_prune_thr_v_important_score,
+            importance_prune_thr_max_v_important_score=importance_prune_thr_max_v_important_score,
+            importance_prune_thr_count=importance_prune_thr_count,
+            importance_prune_thr_T_alpha=importance_prune_thr_T_alpha,
+            importance_prune_thr_T_alpha_avg=importance_prune_thr_T_alpha_avg,
+            importance_v_pow=importance_v_pow,
         ),
         model, scene_extent, dataset,
         *args, **kwargs
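The hunks above widen the keyword surface of BaseFullPruningTrainer and BaseFullPrunerInDensifyTrainer so that every ImportancePruner option, including the new importance_score_resize and importance_prune_thr_T_alpha_avg, can be set from the trainer constructors. A minimal usage sketch follows; the keyword names are taken from the diff, while the leading positionals (model, scene_extent, dataset) and the chosen values are placeholders assumed for illustration, not values prescribed by the package.

# Hedged sketch: pass the new 1.9.4 options through a full pruning trainer.
# `model`, `scene_extent` and `dataset` are assumed to be prepared elsewhere,
# and the positional prefix is inferred from the visible signature fragment.
from reduced_3dgs.combinations import BaseFullPruningTrainer

trainer = BaseFullPruningTrainer(
    model, scene_extent, dataset,
    importance_score_resize=None,              # new: downscale cameras before importance scoring
    importance_prune_thr_T_alpha=1.0,
    importance_prune_thr_T_alpha_avg=0.001,    # new: threshold on the averaged T_alpha score
    importance_v_pow=0.1,
)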
reduced_3dgs/diff_gaussian_rasterization/_C.cp311-win_amd64.pyd
CHANGED
Binary file
reduced_3dgs/importance/combinations.py
CHANGED
@@ -13,13 +13,15 @@ def BaseImportancePrunerInDensifyTrainer(
         importance_prune_from_iter=15000,
         importance_prune_until_iter=20000,
         importance_prune_interval: int = 1000,
+        importance_score_resize=None,
         importance_prune_type="comprehensive",
         importance_prune_percent=0.1,
         importance_prune_thr_important_score=None,
         importance_prune_thr_v_important_score=3.0,
         importance_prune_thr_max_v_important_score=None,
         importance_prune_thr_count=1,
-        importance_prune_thr_T_alpha=0
+        importance_prune_thr_T_alpha=1.0,
+        importance_prune_thr_T_alpha_avg=0.001,
         importance_v_pow=0.1,
         **kwargs):
     return DensificationTrainerWrapper(
@@ -29,6 +31,7 @@ def BaseImportancePrunerInDensifyTrainer(
             importance_prune_from_iter=importance_prune_from_iter,
             importance_prune_until_iter=importance_prune_until_iter,
             importance_prune_interval=importance_prune_interval,
+            importance_score_resize=importance_score_resize,
             importance_prune_type=importance_prune_type,
             importance_prune_percent=importance_prune_percent,
             importance_prune_thr_important_score=importance_prune_thr_important_score,
@@ -36,6 +39,7 @@ def BaseImportancePrunerInDensifyTrainer(
             importance_prune_thr_max_v_important_score=importance_prune_thr_max_v_important_score,
             importance_prune_thr_count=importance_prune_thr_count,
             importance_prune_thr_T_alpha=importance_prune_thr_T_alpha,
+            importance_prune_thr_T_alpha_avg=importance_prune_thr_T_alpha_avg,
             importance_v_pow=importance_v_pow,
         ),
         model,
reduced_3dgs/importance/diff_gaussian_rasterization/_C.cp311-win_amd64.pyd
CHANGED
Binary file
reduced_3dgs/importance/trainer.py
CHANGED
@@ -1,8 +1,9 @@
 import math
-from typing import
+from typing import List
 import torch
 
 from gaussian_splatting import Camera, GaussianModel
+from gaussian_splatting.camera import build_camera
 from gaussian_splatting.trainer import AbstractDensifier, DensifierWrapper, DensificationTrainer, NoopDensifier
 from gaussian_splatting.dataset import CameraDataset
 from .diff_gaussian_rasterization import GaussianRasterizationSettings, GaussianRasterizer
@@ -75,11 +76,20 @@ def count_render(self: GaussianModel, viewpoint_camera: Camera):
     }
 
 
-def prune_list(model: GaussianModel, dataset: CameraDataset):
+def prune_list(model: GaussianModel, dataset: CameraDataset, resize=None):
     gaussian_count = torch.zeros(model.get_xyz.shape[0], device=model.get_xyz.device, dtype=torch.int)
     opacity_important_score = torch.zeros(model.get_xyz.shape[0], device=model.get_xyz.device, dtype=torch.float)
     T_alpha_important_score = torch.zeros(model.get_xyz.shape[0], device=model.get_xyz.device, dtype=torch.float)
     for camera in dataset:
+        if resize is not None:
+            height, width = camera.image_height, camera.image_width
+            scale = resize / max(height, width)
+            height, width = int(height * scale), int(width * scale)
+            camera = build_camera(
+                image_height=height, image_width=width,
+                FoVx=camera.FoVx, FoVy=camera.FoVy,
+                R=camera.R, T=camera.T,
+                device=camera.R.device)
         out = count_render(model, camera)
         gaussian_count += out["gaussians_count"]
         opacity_important_score += out["opacity_important_score"]
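The new resize path in prune_list rebuilds each camera so that its longer image side equals importance_score_resize before the counting render, which keeps the aspect ratio and makes the importance statistics cheaper to gather on high-resolution datasets. A minimal sketch of the rescaling rule, assuming only a camera-like object with image_height/image_width attributes:

# Sketch of the rescaling rule added to prune_list (same arithmetic as the diff above).
def rescaled_size(height: int, width: int, resize: int) -> tuple[int, int]:
    # Scale so the longer image side equals `resize`, preserving aspect ratio.
    scale = resize / max(height, width)
    return int(height * scale), int(width * scale)

# Example: a 1920x1080 camera scored with resize=960 is rendered at 960x540.
assert rescaled_size(1080, 1920, 960) == (540, 960)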
@@ -118,6 +128,7 @@ def score2mask(percent, import_score: list, threshold=None):
 
 def prune_gaussians(
         gaussians: GaussianModel, dataset: CameraDataset,
+        resize=None,
         prune_type="comprehensive",
         prune_percent=0.1,
         prune_thr_important_score=None,
@@ -125,8 +136,9 @@ def prune_gaussians(
         prune_thr_max_v_important_score=None,
         prune_thr_count=None,
         prune_thr_T_alpha=None,
+        prune_thr_T_alpha_avg=None,
         v_pow=0.1):
-    gaussian_list, opacity_imp_list, T_alpha_imp_list = prune_list(gaussians, dataset)
+    gaussian_list, opacity_imp_list, T_alpha_imp_list = prune_list(gaussians, dataset, resize)
     match prune_type:
         case "important_score":
             mask = score2mask(prune_percent, opacity_imp_list, prune_thr_important_score)
@@ -141,6 +153,10 @@ def prune_gaussians(
         case "T_alpha":
             # new importance score defined by doji
             mask = score2mask(prune_percent, T_alpha_imp_list, prune_thr_T_alpha)
+        case "T_alpha_avg":
+            v_list = T_alpha_imp_list / gaussian_list
+            v_list[gaussian_list <= 0] = 0
+            mask = score2mask(prune_percent, v_list, prune_thr_T_alpha_avg)
         case "comprehensive":
             mask = torch.zeros_like(gaussian_list, dtype=torch.bool)
             if prune_thr_important_score is not None:
@@ -155,6 +171,10 @@ def prune_gaussians(
                 mask |= score2mask(prune_percent, gaussian_list, prune_thr_count)
             if prune_thr_T_alpha is not None:
                 mask |= score2mask(prune_percent, T_alpha_imp_list, prune_thr_T_alpha)
+            if prune_thr_T_alpha_avg is not None:
+                v_list = T_alpha_imp_list / gaussian_list
+                v_list[gaussian_list <= 0] = 0
+                mask |= score2mask(prune_percent, v_list, prune_thr_T_alpha_avg)
         case _:
             raise Exception("Unsupportive prunning method")
     return mask
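The new "T_alpha_avg" score normalizes the accumulated T_alpha importance by the number of render hits per Gaussian, with never-hit Gaussians clamped to zero, and it is also folded into the "comprehensive" mode when prune_thr_T_alpha_avg is set. A standalone sketch of that computation, using the same tensor names as the diff:

# Sketch of the "T_alpha_avg" score: average accumulated T_alpha importance per hit.
import torch

def t_alpha_avg_score(T_alpha_imp_list: torch.Tensor, gaussian_list: torch.Tensor) -> torch.Tensor:
    v_list = T_alpha_imp_list / gaussian_list
    v_list[gaussian_list <= 0] = 0  # Gaussians that were never hit get a zero score
    return v_list

counts = torch.tensor([4, 0, 2], dtype=torch.int)
scores = torch.tensor([2.0, 0.0, 1.0])
print(t_alpha_avg_score(scores, counts))  # tensor([0.5000, 0.0000, 0.5000])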
@@ -167,25 +187,29 @@ class ImportancePruner(DensifierWrapper):
             importance_prune_from_iter=15000,
             importance_prune_until_iter=20000,
             importance_prune_interval: int = 1000,
+            importance_score_resize=None,
             importance_prune_type="comprehensive",
             importance_prune_percent=0.1,
             importance_prune_thr_important_score=None,
             importance_prune_thr_v_important_score=3.0,
             importance_prune_thr_max_v_important_score=None,
             importance_prune_thr_count=1,
-            importance_prune_thr_T_alpha=
+            importance_prune_thr_T_alpha=1,
+            importance_prune_thr_T_alpha_avg=0.001,
             importance_v_pow=0.1):
         super().__init__(base_densifier)
         self.dataset = dataset
         self.importance_prune_from_iter = importance_prune_from_iter
         self.importance_prune_until_iter = importance_prune_until_iter
         self.importance_prune_interval = importance_prune_interval
+        self.resize = importance_score_resize
         self.prune_percent = importance_prune_percent
         self.prune_thr_important_score = importance_prune_thr_important_score
         self.prune_thr_v_important_score = importance_prune_thr_v_important_score
         self.prune_thr_max_v_important_score = importance_prune_thr_max_v_important_score
         self.prune_thr_count = importance_prune_thr_count
         self.prune_thr_T_alpha = importance_prune_thr_T_alpha
+        self.prune_thr_T_alpha_avg = importance_prune_thr_T_alpha_avg
         self.v_pow = importance_v_pow
         self.prune_type = importance_prune_type
 
@@ -194,10 +218,11 @@ class ImportancePruner(DensifierWrapper):
         if self.importance_prune_from_iter <= step <= self.importance_prune_until_iter and step % self.importance_prune_interval == 0:
             remove_mask = prune_gaussians(
                 self.model, self.dataset,
+                self.resize,
                 self.prune_type, self.prune_percent,
                 self.prune_thr_important_score, self.prune_thr_v_important_score,
                 self.prune_thr_max_v_important_score, self.prune_thr_count,
-                self.prune_thr_T_alpha, self.v_pow,
+                self.prune_thr_T_alpha, self.prune_thr_T_alpha_avg, self.v_pow,
             )
             ret = ret._replace(remove_mask=remove_mask if ret.remove_mask is None else torch.logical_or(ret.remove_mask, remove_mask))
         return ret
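Because prune_gaussians is called positionally here, the inserted self.resize and self.prune_thr_T_alpha_avg arguments must line up with the widened signature shown earlier. A keyword-style equivalent, written as a sketch to be read against the method above rather than as the package's own code, makes that mapping explicit:

# Hedged sketch: the same call with explicit keywords, matching the
# prune_gaussians signature from the hunks above.
remove_mask = prune_gaussians(
    self.model, self.dataset,
    resize=self.resize,
    prune_type=self.prune_type,
    prune_percent=self.prune_percent,
    prune_thr_important_score=self.prune_thr_important_score,
    prune_thr_v_important_score=self.prune_thr_v_important_score,
    prune_thr_max_v_important_score=self.prune_thr_max_v_important_score,
    prune_thr_count=self.prune_thr_count,
    prune_thr_T_alpha=self.prune_thr_T_alpha,
    prune_thr_T_alpha_avg=self.prune_thr_T_alpha_avg,
    v_pow=self.v_pow,
)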
@@ -211,13 +236,15 @@ def BaseImportancePruningTrainer(
         importance_prune_from_iter=15000,
         importance_prune_until_iter=20000,
         importance_prune_interval: int = 1000,
+        importance_score_resize=None,
         importance_prune_type="comprehensive",
         importance_prune_percent=0.1,
         importance_prune_thr_important_score=None,
         importance_prune_thr_v_important_score=3.0,
         importance_prune_thr_max_v_important_score=None,
         importance_prune_thr_count=1,
-        importance_prune_thr_T_alpha=0
+        importance_prune_thr_T_alpha=1.0,
+        importance_prune_thr_T_alpha_avg=0.001,
         importance_v_pow=0.1,
         **kwargs):
     return DensificationTrainer(
@@ -228,6 +255,7 @@ def BaseImportancePruningTrainer(
             importance_prune_from_iter=importance_prune_from_iter,
             importance_prune_until_iter=importance_prune_until_iter,
             importance_prune_interval=importance_prune_interval,
+            importance_score_resize=importance_score_resize,
             importance_prune_type=importance_prune_type,
             importance_prune_percent=importance_prune_percent,
             importance_prune_thr_important_score=importance_prune_thr_important_score,
@@ -235,6 +263,7 @@ def BaseImportancePruningTrainer(
             importance_prune_thr_max_v_important_score=importance_prune_thr_max_v_important_score,
             importance_prune_thr_count=importance_prune_thr_count,
             importance_prune_thr_T_alpha=importance_prune_thr_T_alpha,
+            importance_prune_thr_T_alpha_avg=importance_prune_thr_T_alpha_avg,
             importance_v_pow=importance_v_pow,
         ), *args, **kwargs
     )
reduced_3dgs/simple_knn/_C.cp311-win_amd64.pyd
CHANGED
Binary file
{reduced_3dgs-1.9.2.dist-info → reduced_3dgs-1.9.4.dist-info}/RECORD
CHANGED
@@ -1,13 +1,13 @@
 reduced_3dgs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-reduced_3dgs/combinations.py,sha256=
+reduced_3dgs/combinations.py,sha256=k4ErxpAscCqJMtVXZ29KGQjw2VoZMV0W3V4u3nj0e-Y,9401
 reduced_3dgs/quantize.py,sha256=Y44qHyFdOIqke7NoeqXmyKloS43j-al74ZiNsuZZHbM,2527
 reduced_3dgs/train.py,sha256=jXHdXk05o_ebHjx_VBzcY6fRNn9EdKve6Tf5YC5an0o,9803
-reduced_3dgs/diff_gaussian_rasterization/_C.cp311-win_amd64.pyd,sha256=
+reduced_3dgs/diff_gaussian_rasterization/_C.cp311-win_amd64.pyd,sha256=mNCYXaE2Dq-zOfK19nMCcdVx2WvXTZqx574PqK9canY,1632256
 reduced_3dgs/diff_gaussian_rasterization/__init__.py,sha256=oV6JjTc-50MscX4XHeIWSgLr3l8Y25knBIs-0gRbJr4,7932
 reduced_3dgs/importance/__init__.py,sha256=neJsbY5cLikEGBQGdR4MjwCQ5VWVikT1357DwL0EtWU,289
-reduced_3dgs/importance/combinations.py,sha256=
-reduced_3dgs/importance/trainer.py,sha256=
-reduced_3dgs/importance/diff_gaussian_rasterization/_C.cp311-win_amd64.pyd,sha256=
+reduced_3dgs/importance/combinations.py,sha256=eAdykeTdvRGCHxskjILQnZVaqQVvwC-0wMxdgYMeeDs,2922
+reduced_3dgs/importance/trainer.py,sha256=Sj4ORvoYtFT7z3hifzFZDfhFyqumHraXyk3vMVtk0AU,12661
+reduced_3dgs/importance/diff_gaussian_rasterization/_C.cp311-win_amd64.pyd,sha256=OvadjumWCD_wdPZuwU90YR1G11p1tNOjGsVlAsIfjDg,1315840
 reduced_3dgs/importance/diff_gaussian_rasterization/__init__.py,sha256=Tix8auyXBb_QFQtXrV3sLE9kdnl5zgHH0BbqcFzDp84,12850
 reduced_3dgs/pruning/__init__.py,sha256=E_YxJ9cDV_B6EJbYUBEcuRYMIht_C72rI1VJUXFCLpM,201
 reduced_3dgs/pruning/combinations.py,sha256=UivTfbSMmaWYVi9E4OF-_AZA-WBWniMiX-wKUftezF8,2331
@@ -20,9 +20,9 @@ reduced_3dgs/quantization/wrapper.py,sha256=cyXqfJgo9b3fS7DYXxOk5LmQudvrEhweOebF
 reduced_3dgs/shculling/__init__.py,sha256=nP2BejDCUdCmJNRbg0hfhHREO6jyZXwIcRiw6ttVgqo,149
 reduced_3dgs/shculling/gaussian_model.py,sha256=f8QWaL09vaV9Tcf6Dngjg_Fmk1wTQPAjWhuhI_N02Y8,2877
 reduced_3dgs/shculling/trainer.py,sha256=9hwR77djhZpyf-URhwKHjnLbe0ZAOS-DIw58RzkcHXQ,6369
-reduced_3dgs/simple_knn/_C.cp311-win_amd64.pyd,sha256=
-reduced_3dgs-1.9.
-reduced_3dgs-1.9.
-reduced_3dgs-1.9.
-reduced_3dgs-1.9.
-reduced_3dgs-1.9.
+reduced_3dgs/simple_knn/_C.cp311-win_amd64.pyd,sha256=ZeG0NTTKvaG1CKRE0o9rE_2uuVfMiYkVogeuReFW8dI,1263616
+reduced_3dgs-1.9.4.dist-info/licenses/LICENSE.md,sha256=LQ4_LAqlncGkg_mQy5ykMAFtQDSPB0eKmIEtBut0yjw,4916
+reduced_3dgs-1.9.4.dist-info/METADATA,sha256=AxCPAYi-0sHw1PwzbbNWsK5GL1pdc8VoCnzSYXcjFik,13014
+reduced_3dgs-1.9.4.dist-info/WHEEL,sha256=ge07nwgZNrHM7F-h6ntB4NDhu5ar8XLYRt0vb6q09YM,101
+reduced_3dgs-1.9.4.dist-info/top_level.txt,sha256=PpU5aT3-baSCdqCtTaZknoB32H93UeKCkYDkRCCZMEI,13
+reduced_3dgs-1.9.4.dist-info/RECORD,,
{reduced_3dgs-1.9.2.dist-info → reduced_3dgs-1.9.4.dist-info}/licenses/LICENSE.md
File without changes
{reduced_3dgs-1.9.2.dist-info → reduced_3dgs-1.9.4.dist-info}/top_level.txt
File without changes