reduced-3dgs 1.10.1 → 1.10.3 (cp312-cp312-win_amd64.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of reduced-3dgs might be problematic.

reduced_3dgs/prepare.py ADDED
@@ -0,0 +1,105 @@
+ from gaussian_splatting import GaussianModel
+ from gaussian_splatting.dataset import CameraDataset
+ from gaussian_splatting.dataset.colmap import colmap_init
+ from gaussian_splatting.trainer import AbstractTrainer
+ from gaussian_splatting.trainer.extensions import ScaleRegularizeTrainerWrapper
+ from reduced_3dgs.quantization import VectorQuantizeTrainerWrapper
+ from reduced_3dgs.shculling import VariableSHGaussianModel, SHCullingTrainer
+ from reduced_3dgs.pruning import PruningTrainer
+ from reduced_3dgs.combinations import PrunerInDensifyTrainer, SHCullingDensificationTrainer, SHCullingPruningTrainer, SHCullingPrunerInDensifyTrainer
+ from reduced_3dgs.combinations import CameraTrainableVariableSHGaussianModel, CameraSHCullingTrainer, CameraPruningTrainer
+ from reduced_3dgs.combinations import CameraPrunerInDensifyTrainer, CameraSHCullingDensifyTrainer, CameraSHCullingPruningTrainer, CameraSHCullingPruningDensifyTrainer
+
+
+ def prepare_gaussians(sh_degree: int, source: str, device: str, trainable_camera: bool = False, load_ply: str = None) -> GaussianModel:
+     if trainable_camera:
+         gaussians = CameraTrainableVariableSHGaussianModel(sh_degree).to(device)
+         gaussians.load_ply(load_ply) if load_ply else colmap_init(gaussians, source)
+     else:
+         gaussians = VariableSHGaussianModel(sh_degree).to(device)
+         gaussians.load_ply(load_ply) if load_ply else colmap_init(gaussians, source)
+     return gaussians
+
+
+ modes = {
+     "shculling": SHCullingTrainer,
+     "pruning": PruningTrainer,
+     "densify-pruning": PrunerInDensifyTrainer,
+     "densify-shculling": SHCullingDensificationTrainer,
+     "prune-shculling": SHCullingPruningTrainer,
+     "densify-prune-shculling": SHCullingPrunerInDensifyTrainer,
+     "camera-shculling": CameraSHCullingTrainer,
+     "camera-pruning": CameraPruningTrainer,
+     "camera-densify-pruning": CameraPrunerInDensifyTrainer,
+     "camera-densify-shculling": CameraSHCullingDensifyTrainer,
+     "camera-prune-shculling": CameraSHCullingPruningTrainer,
+     "camera-densify-prune-shculling": CameraSHCullingPruningDensifyTrainer,
+ }
+
+
+ def prepare_quantizer(
+         gaussians: GaussianModel,
+         scene_extent: float,
+         dataset: CameraDataset,
+         base_constructor,
+         load_quantized: str = None,
+
+         num_clusters=256,
+         num_clusters_rotation_re=None,
+         num_clusters_rotation_im=None,
+         num_clusters_opacity=None,
+         num_clusters_scaling=None,
+         num_clusters_features_dc=None,
+         num_clusters_features_rest=[],
+
+         quantize_from_iter=5000,
+         quantize_until_iter=30000,
+         quantize_interval=1000,
+         **configs):
+     trainer = VectorQuantizeTrainerWrapper(
+         base_constructor(
+             gaussians,
+             scene_extent=scene_extent,
+             dataset=dataset,
+             **configs
+         ),
+
+         num_clusters=num_clusters,
+         num_clusters_rotation_re=num_clusters_rotation_re,
+         num_clusters_rotation_im=num_clusters_rotation_im,
+         num_clusters_opacity=num_clusters_opacity,
+         num_clusters_scaling=num_clusters_scaling,
+         num_clusters_features_dc=num_clusters_features_dc,
+         num_clusters_features_rest=num_clusters_features_rest,
+
+         quantize_from_iter=quantize_from_iter,
+         quantize_until_iter=quantize_until_iter,
+         quantize_interval=quantize_interval,
+     )
+     if load_quantized:
+         trainer.quantizer.load_quantized(load_quantized)
+     return trainer, trainer.quantizer
+
+
+ def prepare_trainer(gaussians: GaussianModel, dataset: CameraDataset, mode: str, with_scale_reg=False, quantize: bool = False, load_quantized: str = None, configs={}) -> AbstractTrainer:
+     constructor = modes[mode]
+     if with_scale_reg:
+         constructor = lambda *args, **kwargs: ScaleRegularizeTrainerWrapper(modes[mode], *args, **kwargs)
+     if quantize:
+         trainer, quantizer = prepare_quantizer(
+             gaussians,
+             scene_extent=dataset.scene_extent(),
+             dataset=dataset,
+             base_constructor=modes[mode],
+             load_quantized=load_quantized,
+             **configs
+         )
+     else:
+         trainer = constructor(
+             gaussians,
+             scene_extent=dataset.scene_extent(),
+             dataset=dataset,
+             **configs
+         )
+         quantizer = None
+     return trainer, quantizer
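
The new module collects the mode registry and the prepare_* helpers that reduced_3dgs/train.py previously defined inline (see the train.py hunk below). As orientation, here is a minimal sketch of how these helpers appear to compose, mirroring the call pattern in train.py from this diff; the source path, device, SH degree, and mode string are illustrative placeholders, not package defaults:

    # Sketch only (not from the package docs): mirrors the call pattern in
    # reduced_3dgs/train.py as shown in this diff. "data/scene", "cuda",
    # sh_degree=3 and the mode string are illustrative placeholder values.
    from gaussian_splatting.prepare import prepare_dataset
    from reduced_3dgs.prepare import modes, prepare_gaussians, prepare_trainer

    mode = "densify-prune-shculling"
    assert mode in modes                     # single registry of trainer constructors
    trainable_camera = "camera" in mode      # camera-* modes use the camera-trainable model
    dataset = prepare_dataset(source="data/scene", device="cuda",
                              trainable_camera=trainable_camera,
                              load_camera=None, load_depth=False)
    gaussians = prepare_gaussians(sh_degree=3, source="data/scene", device="cuda",
                                  trainable_camera=trainable_camera)
    trainer, quantizer = prepare_trainer(gaussians, dataset, mode=mode, quantize=False)
    # quantizer is None when quantize=False; with quantize=True it is trainer.quantizer
    # from the VectorQuantizeTrainerWrapper built by prepare_quantizer above.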
reduced_3dgs/quantization/quantizer.py CHANGED
@@ -206,8 +206,7 @@ class VectorQuantizer(AbstractQuantizer):
          ids_dict = self.find_nearest_cluster_id(model, self._codebook_dict)
          return ids_dict, codebook_dict
 
-     def save_quantized(self, model: GaussianModel, ply_path: str):
-         ids_dict, codebook_dict = self.quantize(model, update_codebook=False)
+     def ply_dtype(self, max_sh_degree: int):
          dtype_full = [
              ('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
              ('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'),
@@ -217,13 +216,16 @@ class VectorQuantizer(AbstractQuantizer):
              ('scale', self.force_code_dtype or compute_uint_dtype(self.num_clusters_scaling)),
              ('f_dc', self.force_code_dtype or compute_uint_dtype(self.num_clusters_features_dc)),
          ]
-         for sh_degree in range(model.max_sh_degree):
+         for sh_degree in range(max_sh_degree):
              force_code_dtype = self.force_code_dtype or compute_uint_dtype(self.num_clusters_features_rest[sh_degree])
              dtype_full.extend([
                  (f'f_rest_{sh_degree}_0', force_code_dtype),
                  (f'f_rest_{sh_degree}_1', force_code_dtype),
                  (f'f_rest_{sh_degree}_2', force_code_dtype),
              ])
+         return dtype_full
+
+     def ply_data(self, model: GaussianModel, ids_dict: Dict[str, torch.Tensor]):
          data_full = [
              *np.array_split(model._xyz.detach().cpu().numpy(), 3, axis=1),
              *np.array_split(torch.zeros_like(model._xyz).detach().cpu().numpy(), 3, axis=1),
@@ -236,6 +238,12 @@ class VectorQuantizer(AbstractQuantizer):
          for sh_degree in range(model.max_sh_degree):
              features_rest = ids_dict[f'features_rest_{sh_degree}'].cpu().numpy()
              data_full.extend(np.array_split(features_rest, 3, axis=1))
+         return data_full
+
+     def save_quantized(self, model: GaussianModel, ply_path: str):
+         ids_dict, codebook_dict = self.quantize(model, update_codebook=False)
+         dtype_full = self.ply_dtype(model.max_sh_degree)
+         data_full = self.ply_data(model, ids_dict)
 
          elements = np.rec.fromarrays([data.squeeze(-1) for data in data_full], dtype=dtype_full)
          el = PlyElement.describe(elements, 'vertex')
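
The save_quantized logic is split into ply_dtype (the PLY field layout for a given SH degree) and ply_data (the per-Gaussian column arrays), with save_quantized now composing the two. A hedged sketch of what this exposes, assuming an already-constructed VectorQuantizer named quantizer and a GaussianModel named model (both assumed, not shown in this diff; the output path is illustrative):

    # Sketch only: `quantizer` and `model` are assumed to already exist.
    ids_dict, codebook_dict = quantizer.quantize(model, update_codebook=False)
    dtype_full = quantizer.ply_dtype(model.max_sh_degree)   # [(field_name, dtype), ...]
    data_full = quantizer.ply_data(model, ids_dict)         # one column array per field
    assert len(dtype_full) == len(data_full)                # np.rec.fromarrays needs matching lengths
    quantizer.save_quantized(model, "point_cloud.ply")      # public entry point is unchanged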
reduced_3dgs/quantize.py CHANGED
@@ -13,6 +13,7 @@ def copy_not_exists(source, destination):
 
 
  def quantize(source, destination, iteration, sh_degree, device, **kwargs):
+     os.makedirs(destination, exist_ok=True)
      copy_not_exists(os.path.join(source, "cfg_args"), os.path.join(destination, "cfg_args"))
      copy_not_exists(os.path.join(source, "cameras.json"), os.path.join(destination, "cameras.json"))
 
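The added os.makedirs call creates the destination directory before cfg_args and cameras.json are copied into it, and exist_ok=True keeps repeated runs from failing. A trivial, self-contained sketch of the failure mode it guards against; all paths here are temporary stand-ins, not package paths:

    # Sketch only: copying into a directory that does not exist yet raises
    # FileNotFoundError, so the destination is created (idempotently) first.
    import os
    import shutil
    import tempfile

    src_dir = tempfile.mkdtemp()
    src = os.path.join(src_dir, "cfg_args")
    open(src, "w").close()                        # stand-in for the trained model's cfg_args
    dst_dir = os.path.join(src_dir, "quantized")  # does not exist yet
    os.makedirs(dst_dir, exist_ok=True)           # the fix: create it before copying
    shutil.copy(src, os.path.join(dst_dir, "cfg_args"))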
reduced_3dgs/train.py CHANGED
@@ -1,129 +1,22 @@
  import os
  import random
  import shutil
- from typing import List, Tuple
+ from typing import List
  import torch
  from tqdm import tqdm
  from gaussian_splatting import GaussianModel
  from gaussian_splatting.dataset import CameraDataset
  from gaussian_splatting.utils import psnr
- from gaussian_splatting.dataset.colmap import colmap_init
  from gaussian_splatting.trainer import AbstractTrainer
- from gaussian_splatting.trainer.extensions import ScaleRegularizeTrainerWrapper
- from gaussian_splatting.train import prepare_dataset, save_cfg_args
- from reduced_3dgs.quantization import AbstractQuantizer, VectorQuantizeTrainerWrapper
- from reduced_3dgs.shculling import VariableSHGaussianModel, SHCullingTrainer
- from reduced_3dgs.pruning import PruningTrainer
- from reduced_3dgs.combinations import PrunerInDensifyTrainer, SHCullingDensificationTrainer, SHCullingPruningTrainer, SHCullingPrunerInDensifyTrainer
- from reduced_3dgs.combinations import CameraTrainableVariableSHGaussianModel, CameraSHCullingTrainer, CameraPruningTrainer
- from reduced_3dgs.combinations import CameraPrunerInDensifyTrainer, CameraSHCullingDensifyTrainer, CameraSHCullingPruningTrainer, CameraSHCullingPruningDensifyTrainer
+ from gaussian_splatting.prepare import prepare_dataset
+ from gaussian_splatting.train import save_cfg_args
+ from reduced_3dgs.quantization import AbstractQuantizer
+ from reduced_3dgs.prepare import modes, prepare_gaussians, prepare_trainer
 
 
- basemodes = {
-     "shculling": SHCullingTrainer,
-     "pruning": PruningTrainer,
-     "densify-pruning": PrunerInDensifyTrainer,
-     "densify-shculling": SHCullingDensificationTrainer,
-     "prune-shculling": SHCullingPruningTrainer,
-     "densify-prune-shculling": SHCullingPrunerInDensifyTrainer,
- }
- cameramodes = {
-     "camera-shculling": CameraSHCullingTrainer,
-     "camera-pruning": CameraPruningTrainer,
-     "camera-densify-pruning": CameraPrunerInDensifyTrainer,
-     "camera-densify-shculling": CameraSHCullingDensifyTrainer,
-     "camera-prune-shculling": CameraSHCullingPruningTrainer,
-     "camera-densify-prune-shculling": CameraSHCullingPruningDensifyTrainer,
- }
-
-
- def prepare_quantizer(
-         gaussians: GaussianModel,
-         scene_extent: float,
-         dataset: CameraDataset,
-         base_constructor,
-         load_quantized: str = None,
-
-         num_clusters=256,
-         num_clusters_rotation_re=None,
-         num_clusters_rotation_im=None,
-         num_clusters_opacity=None,
-         num_clusters_scaling=None,
-         num_clusters_features_dc=None,
-         num_clusters_features_rest=[],
-
-         quantize_from_iter=5000,
-         quantize_until_iter=30000,
-         quantize_interval=1000,
-         **configs):
-     trainer = VectorQuantizeTrainerWrapper(
-         base_constructor(
-             gaussians,
-             scene_extent=scene_extent,
-             dataset=dataset,
-             **configs
-         ),
-
-         num_clusters=num_clusters,
-         num_clusters_rotation_re=num_clusters_rotation_re,
-         num_clusters_rotation_im=num_clusters_rotation_im,
-         num_clusters_opacity=num_clusters_opacity,
-         num_clusters_scaling=num_clusters_scaling,
-         num_clusters_features_dc=num_clusters_features_dc,
-         num_clusters_features_rest=num_clusters_features_rest,
-
-         quantize_from_iter=quantize_from_iter,
-         quantize_until_iter=quantize_until_iter,
-         quantize_interval=quantize_interval,
-     )
-     if load_quantized:
-         trainer.quantizer.load_quantized(load_quantized)
-     return trainer, trainer.quantizer
-
-
- def prepare_gaussians(sh_degree: int, source: str, device: str, trainable_camera: bool = False, load_ply: str = None) -> Tuple[CameraDataset, GaussianModel, AbstractTrainer]:
-     if trainable_camera:
-         gaussians = CameraTrainableVariableSHGaussianModel(sh_degree).to(device)
-         gaussians.load_ply(load_ply) if load_ply else colmap_init(gaussians, source)
-     else:
-         gaussians = VariableSHGaussianModel(sh_degree).to(device)
-         gaussians.load_ply(load_ply) if load_ply else colmap_init(gaussians, source)
-     return gaussians
-
-
- def prepare_trainer(gaussians: GaussianModel, dataset: CameraDataset, mode: str, with_scale_reg=False, quantize: bool = False, load_quantized: str = None, configs={}) -> AbstractTrainer:
-     if mode in basemodes:
-         modes = basemodes
-     elif mode in cameramodes:
-         modes = cameramodes
-     else:
-         raise ValueError(f"Unknown mode: {mode}")
-     constructor = modes[mode]
-     if with_scale_reg:
-         constructor = lambda *args, **kwargs: ScaleRegularizeTrainerWrapper(modes[mode], *args, **kwargs)
-     if quantize:
-         trainer, quantizer = prepare_quantizer(
-             gaussians,
-             scene_extent=dataset.scene_extent(),
-             dataset=dataset,
-             base_constructor=modes[mode],
-             load_quantized=load_quantized,
-             **configs
-         )
-     else:
-         trainer = constructor(
-             gaussians,
-             scene_extent=dataset.scene_extent(),
-             dataset=dataset,
-             **configs
-         )
-         quantizer = None
-     return trainer, quantizer
-
-
- def prepare_training(sh_degree: int, source: str, device: str, mode: str, load_ply: str = None, load_camera: str = None, load_depth=False, with_scale_reg=False, quantize: bool = False, load_quantized: str = None, configs={}) -> Tuple[CameraDataset, GaussianModel, AbstractTrainer]:
-     dataset = prepare_dataset(source=source, device=device, trainable_camera=mode in cameramodes, load_camera=load_camera, load_depth=load_depth)
-     gaussians = prepare_gaussians(sh_degree=sh_degree, source=source, device=device, trainable_camera=mode in cameramodes, load_ply=load_ply)
+ def prepare_training(sh_degree: int, source: str, device: str, mode: str, trainable_camera: bool = False, load_ply: str = None, load_camera: str = None, load_depth=False, with_scale_reg=False, quantize: bool = False, load_quantized: str = None, configs={}):
+     dataset = prepare_dataset(source=source, device=device, trainable_camera=trainable_camera, load_camera=load_camera, load_depth=load_depth)
+     gaussians = prepare_gaussians(sh_degree=sh_degree, source=source, device=device, trainable_camera=trainable_camera, load_ply=load_ply)
      trainer, quantizer = prepare_trainer(gaussians=gaussians, dataset=dataset, mode=mode, with_scale_reg=with_scale_reg, quantize=quantize, load_quantized=load_quantized, configs=configs)
      return dataset, gaussians, trainer, quantizer
 
@@ -166,7 +59,7 @@ def training(dataset: CameraDataset, gaussians: GaussianModel, trainer: Abstract
 
 
  if __name__ == "__main__":
-     from argparse import ArgumentParser, Namespace
+     from argparse import ArgumentParser
      parser = ArgumentParser()
      parser.add_argument("--sh_degree", default=3, type=int)
      parser.add_argument("-s", "--source", required=True, type=str)
@@ -178,7 +71,7 @@ if __name__ == "__main__":
      parser.add_argument("--no_depth_data", action='store_true')
      parser.add_argument("--with_scale_reg", action="store_true")
      parser.add_argument("--load_quantized", default=None, type=str)
-     parser.add_argument("--mode", choices=list(basemodes.keys()) + list(cameramodes.keys()), default="densify-prune-shculling")
+     parser.add_argument("--mode", choices=list(modes), default="densify-prune-shculling")
      parser.add_argument("--save_iterations", nargs="+", type=int, default=[7000, 30000])
      parser.add_argument("--device", default="cuda", type=str)
      parser.add_argument("--empty_cache_every_step", action='store_true')
@@ -189,7 +82,7 @@
 
      configs = {o.split("=", 1)[0]: eval(o.split("=", 1)[1]) for o in args.option}
      dataset, gaussians, trainer, quantizer = prepare_training(
-         sh_degree=args.sh_degree, source=args.source, device=args.device, mode=args.mode,
+         sh_degree=args.sh_degree, source=args.source, device=args.device, mode=args.mode, trainable_camera="camera" in args.mode,
          load_ply=args.load_ply, load_camera=args.load_camera, load_depth=not args.no_depth_data, with_scale_reg=args.with_scale_reg,
          quantize=args.quantize, load_quantized=args.load_quantized, configs=configs)
      dataset.save_cameras(os.path.join(args.destination, "cameras.json"))
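
prepare_training no longer infers camera trainability from membership in a cameramodes dict; the caller passes trainable_camera explicitly, and the __main__ block derives it from the mode name. A sketch of the resulting call, using placeholder source/device values and only the keyword arguments shown in this diff:

    # Sketch only: mirrors the updated __main__ block; "data/scene" and "cuda"
    # are illustrative placeholders.
    from reduced_3dgs.prepare import modes
    from reduced_3dgs.train import prepare_training

    mode = "camera-densify-prune-shculling"
    assert mode in modes                       # single registry replaces basemodes/cameramodes
    dataset, gaussians, trainer, quantizer = prepare_training(
        sh_degree=3, source="data/scene", device="cuda", mode=mode,
        trainable_camera="camera" in mode,     # camera trainability now derived from the mode name
    )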
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: reduced_3dgs
- Version: 1.10.1
+ Version: 1.10.3
  Summary: Refactored code for the paper "Reducing the Memory Footprint of 3D Gaussian Splatting"
  Home-page: https://github.com/yindaheng98/reduced-3dgs
  Author: yindaheng98
@@ -1,13 +1,14 @@
  reduced_3dgs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  reduced_3dgs/combinations.py,sha256=k4ErxpAscCqJMtVXZ29KGQjw2VoZMV0W3V4u3nj0e-Y,9401
- reduced_3dgs/quantize.py,sha256=Y44qHyFdOIqke7NoeqXmyKloS43j-al74ZiNsuZZHbM,2527
- reduced_3dgs/train.py,sha256=kXgUZqSGc09NNmGNCoSe3Dv-VMBuQ6XjVNJhjmm0g3E,10082
- reduced_3dgs/diff_gaussian_rasterization/_C.cp312-win_amd64.pyd,sha256=abu4zXWAobWzWcuvBTv5kQqqe6TJ4RDm817Mjdpli5w,1640448
+ reduced_3dgs/prepare.py,sha256=MFUUckRHKfgcva4ZOBxfPFyE95N-OlCQLplpmEPuzOk,4440
+ reduced_3dgs/quantize.py,sha256=BVqBb2tQgiP3hap5-OByD8VELtJJGfEeFzaVFyzCJZU,2572
+ reduced_3dgs/train.py,sha256=yRQPQv-hTOBQN-tqGHYs2aIZ0AbWq158CXpthYN2cfw,5666
+ reduced_3dgs/diff_gaussian_rasterization/_C.cp312-win_amd64.pyd,sha256=1n2kPV5fkgfh94MJXmKd1-f-aQo2FdqXPr8g8jMsngQ,1640448
  reduced_3dgs/diff_gaussian_rasterization/__init__.py,sha256=oV6JjTc-50MscX4XHeIWSgLr3l8Y25knBIs-0gRbJr4,7932
  reduced_3dgs/importance/__init__.py,sha256=neJsbY5cLikEGBQGdR4MjwCQ5VWVikT1357DwL0EtWU,289
  reduced_3dgs/importance/combinations.py,sha256=eAdykeTdvRGCHxskjILQnZVaqQVvwC-0wMxdgYMeeDs,2922
  reduced_3dgs/importance/trainer.py,sha256=Sj4ORvoYtFT7z3hifzFZDfhFyqumHraXyk3vMVtk0AU,12661
- reduced_3dgs/importance/diff_gaussian_rasterization/_C.cp312-win_amd64.pyd,sha256=5ZxCa8jgG1bC4Ub77UrmNv-iLxvxTyxokQQVkqTNHZc,1320448
+ reduced_3dgs/importance/diff_gaussian_rasterization/_C.cp312-win_amd64.pyd,sha256=2TshQzpg0ERxwfB6M9qQfp14KW4QVsxpmEtmHoT2nFw,1320448
  reduced_3dgs/importance/diff_gaussian_rasterization/__init__.py,sha256=Tix8auyXBb_QFQtXrV3sLE9kdnl5zgHH0BbqcFzDp84,12850
  reduced_3dgs/pruning/__init__.py,sha256=E_YxJ9cDV_B6EJbYUBEcuRYMIht_C72rI1VJUXFCLpM,201
  reduced_3dgs/pruning/combinations.py,sha256=QhXt2C7pTXhwzp9hPL9dVdiQzz0cUQpm5qljqytPEsM,2345
@@ -15,14 +16,14 @@ reduced_3dgs/pruning/trainer.py,sha256=JJml-uYfDfUpbsjRNZbIvnUYYslVgFXkhejbkYSo0
  reduced_3dgs/quantization/__init__.py,sha256=1z1xMn3yj9u7cR9JizGrI3WSyIES_Tqq6oDquvglSeo,225
  reduced_3dgs/quantization/abc.py,sha256=rsi8HFRwQCltWTYiJ3BpygtQDT7hK6J01jKMOboOY8w,1910
  reduced_3dgs/quantization/exclude_zeros.py,sha256=fKSgjHous4OpdI6mQi9z23if9jnbB79w2jChpxkCJWw,2381
- reduced_3dgs/quantization/quantizer.py,sha256=VL96ZTY7G8jUt-fLBJ3gw1MQV46NkHOBJyfUPsmDQxE,16712
+ reduced_3dgs/quantization/quantizer.py,sha256=HZFuYs1Uxq3aXzn9Ow1LSDxF4myh_vfgvovVDJoc2BM,17001
  reduced_3dgs/quantization/wrapper.py,sha256=cyXqfJgo9b3fS7DYXxOk5LmQudvrEhweOebFsjRnXiQ,2549
  reduced_3dgs/shculling/__init__.py,sha256=nP2BejDCUdCmJNRbg0hfhHREO6jyZXwIcRiw6ttVgqo,149
  reduced_3dgs/shculling/gaussian_model.py,sha256=f8QWaL09vaV9Tcf6Dngjg_Fmk1wTQPAjWhuhI_N02Y8,2877
  reduced_3dgs/shculling/trainer.py,sha256=9hwR77djhZpyf-URhwKHjnLbe0ZAOS-DIw58RzkcHXQ,6369
- reduced_3dgs/simple_knn/_C.cp312-win_amd64.pyd,sha256=RQosX1BwSnkCObjwlwRsw2fnDDR7IwHyVOp4cx8IX_Q,1267712
- reduced_3dgs-1.10.1.dist-info/licenses/LICENSE.md,sha256=LQ4_LAqlncGkg_mQy5ykMAFtQDSPB0eKmIEtBut0yjw,4916
- reduced_3dgs-1.10.1.dist-info/METADATA,sha256=6cxgTTpPqokRIa7VNyuXEJ2wN5iyClcSwbY451onMrA,13015
- reduced_3dgs-1.10.1.dist-info/WHEEL,sha256=8UP9x9puWI0P1V_d7K2oMTBqfeLNm21CTzZ_Ptr0NXU,101
- reduced_3dgs-1.10.1.dist-info/top_level.txt,sha256=PpU5aT3-baSCdqCtTaZknoB32H93UeKCkYDkRCCZMEI,13
- reduced_3dgs-1.10.1.dist-info/RECORD,,
+ reduced_3dgs/simple_knn/_C.cp312-win_amd64.pyd,sha256=5ZzPxMlTNRnsLs2p9EFSr93-MI9XQEUFgFo8SRhazxo,1267712
+ reduced_3dgs-1.10.3.dist-info/licenses/LICENSE.md,sha256=LQ4_LAqlncGkg_mQy5ykMAFtQDSPB0eKmIEtBut0yjw,4916
+ reduced_3dgs-1.10.3.dist-info/METADATA,sha256=DGxZflgGcJRjRyeqz0CTkgSUeJpGGAgKDkOJ0mG5Dk4,13015
+ reduced_3dgs-1.10.3.dist-info/WHEEL,sha256=8UP9x9puWI0P1V_d7K2oMTBqfeLNm21CTzZ_Ptr0NXU,101
+ reduced_3dgs-1.10.3.dist-info/top_level.txt,sha256=PpU5aT3-baSCdqCtTaZknoB32H93UeKCkYDkRCCZMEI,13
+ reduced_3dgs-1.10.3.dist-info/RECORD,,