reduced-3dgs 1.10.0__cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of reduced-3dgs might be problematic.
- reduced_3dgs/__init__.py +0 -0
- reduced_3dgs/combinations.py +245 -0
- reduced_3dgs/diff_gaussian_rasterization/_C.cpython-310-x86_64-linux-gnu.so +0 -0
- reduced_3dgs/diff_gaussian_rasterization/__init__.py +235 -0
- reduced_3dgs/importance/__init__.py +3 -0
- reduced_3dgs/importance/combinations.py +63 -0
- reduced_3dgs/importance/diff_gaussian_rasterization/_C.cpython-310-x86_64-linux-gnu.so +0 -0
- reduced_3dgs/importance/diff_gaussian_rasterization/__init__.py +347 -0
- reduced_3dgs/importance/trainer.py +269 -0
- reduced_3dgs/pruning/__init__.py +2 -0
- reduced_3dgs/pruning/combinations.py +65 -0
- reduced_3dgs/pruning/trainer.py +145 -0
- reduced_3dgs/quantization/__init__.py +4 -0
- reduced_3dgs/quantization/abc.py +49 -0
- reduced_3dgs/quantization/exclude_zeros.py +41 -0
- reduced_3dgs/quantization/quantizer.py +289 -0
- reduced_3dgs/quantization/wrapper.py +67 -0
- reduced_3dgs/quantize.py +49 -0
- reduced_3dgs/shculling/__init__.py +2 -0
- reduced_3dgs/shculling/gaussian_model.py +78 -0
- reduced_3dgs/shculling/trainer.py +158 -0
- reduced_3dgs/simple_knn/_C.cpython-310-x86_64-linux-gnu.so +0 -0
- reduced_3dgs/train.py +195 -0
- reduced_3dgs-1.10.0.dist-info/LICENSE.md +93 -0
- reduced_3dgs-1.10.0.dist-info/METADATA +278 -0
- reduced_3dgs-1.10.0.dist-info/RECORD +31 -0
- reduced_3dgs-1.10.0.dist-info/WHEEL +6 -0
- reduced_3dgs-1.10.0.dist-info/top_level.txt +1 -0
- reduced_3dgs.libs/libc10-ff4eddb5.so +0 -0
- reduced_3dgs.libs/libc10_cuda-c675d3fb.so +0 -0
- reduced_3dgs.libs/libcudart-8774224f.so.12.4.127 +0 -0
reduced_3dgs/pruning/trainer.py
ADDED

@@ -0,0 +1,145 @@
from typing import Callable, List
import torch
from gaussian_splatting import GaussianModel, Camera
from gaussian_splatting.trainer import AbstractDensifier, OpacityPruner, DensificationTrainer, NoopDensifier
from reduced_3dgs.diff_gaussian_rasterization._C import sphere_ellipsoid_intersection, allocate_minimum_redundancy_value, find_minimum_projected_pixel_size
from reduced_3dgs.simple_knn._C import distIndex2


def calculate_redundancy_metric(gaussians: GaussianModel, cameras: List[Camera], pixel_scale=1.0, num_neighbours=30):
    # Get minimum projected pixel size
    cube_size = find_minimum_projected_pixel_size(
        torch.stack([camera.full_proj_transform for camera in cameras], dim=0),
        torch.stack([camera.full_proj_transform.inverse() for camera in cameras], dim=0),
        gaussians._xyz,
        torch.tensor([camera.image_height for camera in cameras], device="cuda", dtype=torch.int32),
        torch.tensor([camera.image_width for camera in cameras], device="cuda", dtype=torch.int32)
    )

    scaled_pixel_size = cube_size * pixel_scale
    half_diagonal = scaled_pixel_size * torch.sqrt(torch.tensor([3], device="cuda")) / 2

    # Find neighbours as candidates for the intersection test
    _, indices = distIndex2(gaussians.get_xyz, num_neighbours)
    indices = indices.view(-1, num_neighbours)

    # Do the intersection check
    redundancy_metrics, intersection_mask = sphere_ellipsoid_intersection(gaussians._xyz,
                                                                          gaussians.get_scaling,
                                                                          gaussians.get_rotation,
                                                                          indices,
                                                                          half_diagonal,
                                                                          num_neighbours)
    # We haven't counted the primitive at the center of each sphere, so add 1 to everything
    redundancy_metrics += 1

    indices = torch.cat((torch.arange(gaussians.get_xyz.shape[0], device="cuda", dtype=torch.int).view(-1, 1), indices), dim=1)
    intersection_mask = torch.cat((torch.ones_like(gaussians._opacity, device="cuda", dtype=bool), intersection_mask), dim=1)

    min_redundancy_metrics = allocate_minimum_redundancy_value(redundancy_metrics, indices, intersection_mask, num_neighbours + 1)[0]
    return min_redundancy_metrics, cube_size


def mercy_points(self: GaussianModel, _splatted_num_accum: torch.Tensor, lambda_mercy=2, mercy_minimum=2, mercy_type='redundancy_opacity'):
    mean = _splatted_num_accum.float().mean(dim=0, keepdim=True)
    std = _splatted_num_accum.float().var(dim=0, keepdim=True).sqrt()

    threshold = max((mean + lambda_mercy * std).item(), mercy_minimum)

    mask = (_splatted_num_accum > threshold)

    if mercy_type == 'redundancy_opacity':
        # Prune the 50% of redundant points with the lowest opacity
        mask[mask.clone()] = self.get_opacity[mask].squeeze() < self.get_opacity[mask].median()
    elif mercy_type == 'redundancy_random':
        # Prune 50% of redundant points at random
        mask[mask.clone()] = torch.rand(mask[mask].shape, device="cuda").squeeze() < 0.5
    elif mercy_type == 'opacity':
        # Prune based on opacity alone
        threshold = self.get_opacity.quantile(0.045)
        mask = (self.get_opacity < threshold).squeeze()
    elif mercy_type == 'redundancy_opacity_opacity':
        # Prune based on opacity and on redundancy + opacity (options 1 and 3)
        mask[mask.clone()] = self.get_opacity[mask].squeeze() < self.get_opacity[mask].median()
        threshold = torch.min(self.get_opacity.quantile(0.03), torch.tensor([0.05], device="cuda"))
        mask = torch.logical_or(mask, (self.get_opacity < threshold).squeeze())
    return mask


def mercy_gaussians(
        model: GaussianModel,
        dataset: List[Camera],
        box_size=1.,
        lambda_mercy=1.,
        mercy_minimum=3,
        mercy_type='redundancy_opacity'
):
    _splatted_num_accum, _ = calculate_redundancy_metric(model, dataset, pixel_scale=box_size)
    mask = mercy_points(model, _splatted_num_accum.squeeze(), lambda_mercy, mercy_minimum, mercy_type)
    return mask


class BasePruner(OpacityPruner):
    def __init__(
            self, base_densifier: AbstractDensifier,
            scene_extent,
            dataset: List[Camera],
            *args,
            box_size=1.,
            lambda_mercy=1.,
            mercy_minimum=3,
            mercy_type='redundancy_opacity',
            **kwargs):
        super().__init__(base_densifier, scene_extent, *args, **kwargs)
        self.dataset = dataset
        self.box_size = box_size
        self.lambda_mercy = lambda_mercy
        self.mercy_minimum = mercy_minimum
        self.mercy_type = mercy_type

    def prune(self) -> torch.Tensor:
        remove_mask = mercy_gaussians(self.model, self.dataset, self.box_size, self.lambda_mercy, self.mercy_minimum, self.mercy_type)
        prune_mask = torch.logical_or(super().prune(), remove_mask)
        return prune_mask


def PruningTrainerWrapper(
        noargs_base_densifier_constructor: Callable[[GaussianModel, float, List[Camera]], AbstractDensifier],
        model: GaussianModel,
        scene_extent: float,
        dataset: List[Camera],
        *args,
        prune_from_iter=1000,
        prune_until_iter=15000,
        prune_interval: int = 100,
        box_size=1.,
        lambda_mercy=1.,
        mercy_minimum=3,
        mercy_type='redundancy_opacity',
        **kwargs):
    return DensificationTrainer(
        model, scene_extent,
        BasePruner(
            noargs_base_densifier_constructor(model, scene_extent, dataset),
            scene_extent, dataset,
            prune_from_iter=prune_from_iter,
            prune_until_iter=prune_until_iter,
            prune_interval=prune_interval,
            box_size=box_size,
            lambda_mercy=lambda_mercy,
            mercy_minimum=mercy_minimum,
            mercy_type=mercy_type,
        ), *args, **kwargs
    )


def BasePruningTrainer(
        model: GaussianModel,
        scene_extent: float,
        dataset: List[Camera],
        *args, **kwargs):
    return PruningTrainerWrapper(
        lambda model, scene_extent, dataset: NoopDensifier(model),
        model, scene_extent, dataset,
        *args, **kwargs
    )
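For orientation, a minimal usage sketch of the entry points in this file (not part of the package). The import path follows the file listing above; `cameras`, `scene_extent`, and the .ply path are assumptions standing in for the surrounding gaussian_splatting scene setup.

# Hypothetical usage sketch; `cameras` and `scene_extent` are assumed inputs.
from gaussian_splatting import GaussianModel
from reduced_3dgs.pruning.trainer import BasePruningTrainer, mercy_gaussians

model = GaussianModel(3).to("cuda")     # sh_degree=3, as in reduced_3dgs/quantize.py
model.load_ply("point_cloud.ply")       # placeholder path

# Standalone: boolean mask of the points the redundancy heuristic would prune
prune_mask = mercy_gaussians(model, cameras, box_size=1., mercy_type='redundancy_opacity')

# Or let a trainer apply the mask every `prune_interval` iterations during optimisation
trainer = BasePruningTrainer(model, scene_extent, cameras)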
reduced_3dgs/quantization/abc.py
ADDED

@@ -0,0 +1,49 @@
import abc
from typing import Dict, Tuple

import torch

from gaussian_splatting import GaussianModel
from gaussian_splatting.trainer import AbstractTrainer, TrainerWrapper


class AbstractQuantizer(abc.ABC):

    @abc.abstractmethod
    def quantize(self, model: GaussianModel, update_codebook=True) -> Tuple[Dict[str, torch.Tensor], Dict[str, torch.Tensor]]:
        pass

    @abc.abstractmethod
    def dequantize(self, model: GaussianModel, ids_dict: Dict[str, torch.Tensor], codebook_dict: Dict[str, torch.Tensor], xyz: torch.Tensor = None, replace=False) -> GaussianModel:
        pass

    @abc.abstractmethod
    def save_quantized(self, model: GaussianModel, ply_path: str, codebook_path: str):
        pass

    @abc.abstractmethod
    def load_quantized(self, model: GaussianModel, ply_path: str, codebook_path: str) -> GaussianModel:
        pass


class QuantizeTrainerWrapper(TrainerWrapper, metaclass=abc.ABCMeta):
    def __init__(
            self, base_trainer: AbstractTrainer,
            quantizer: AbstractQuantizer,
            quantize_from_iter=5000,
            quantize_until_iter=30000,
            quantize_interval=1000,
    ):
        super().__init__(base_trainer)
        self.quantizer = quantizer
        self.quantize_from_iter = quantize_from_iter
        self.quantize_until_iter = quantize_until_iter
        self.quantize_interval = quantize_interval

    @property
    def model(self) -> GaussianModel:
        if self.quantize_from_iter <= self.curr_step <= self.quantize_until_iter and self.curr_step % self.quantize_interval == 0:
            with torch.no_grad():
                ids_dict, codebook_dict = self.quantizer.quantize(self.base_trainer.model, update_codebook=True)
                return self.quantizer.dequantize(self.base_trainer.model, ids_dict, codebook_dict)
        return self.base_trainer.model
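The `model` property is where the quantize-in-the-loop behaviour lives: inside the active window, every `quantize_interval`-th step returns a model whose attributes have been snapped to their codebook entries, so training sees the quantization error. Below is a minimal sketch of a conforming quantizer, purely to illustrate the interface; the opacity grid is invented for this sketch, and the package's real implementation is `VectorQuantizer` in quantizer.py.

# Illustrative stub, not part of the package: rounds opacity logits to a fixed grid.
import torch
from reduced_3dgs.quantization.abc import AbstractQuantizer

class GridOpacityQuantizer(AbstractQuantizer):
    def __init__(self, num_levels=16):
        self.codebook = torch.linspace(-10., 10., num_levels)

    def quantize(self, model, update_codebook=True):
        cb = self.codebook.to(model._opacity.device)
        # (N, 1) - (L,) broadcasts to (N, L); pick the nearest grid level per point
        ids = torch.argmin((model._opacity.detach() - cb).abs(), dim=-1)
        return {"opacity": ids}, {"opacity": cb.unsqueeze(-1)}

    def dequantize(self, model, ids_dict, codebook_dict, xyz=None, replace=False):
        with torch.no_grad():
            model._opacity[...] = codebook_dict["opacity"][ids_dict["opacity"]]
        return model

    def save_quantized(self, model, ply_path, codebook_path):
        raise NotImplementedError

    def load_quantized(self, model, ply_path, codebook_path):
        raise NotImplementedError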
reduced_3dgs/quantization/exclude_zeros.py
ADDED

@@ -0,0 +1,41 @@
import torch
from gaussian_splatting import GaussianModel
from .quantizer import VectorQuantizer


class ExcludeZeroSHQuantizer(VectorQuantizer):
    def __init__(self, *args, treat_as_zero=1e-8, **kwargs):
        super(ExcludeZeroSHQuantizer, self).__init__(*args, **kwargs)
        self.treat_as_zero = treat_as_zero

    def zeros_mask(self, values: torch.Tensor):
        return (values.abs() < self.treat_as_zero).all(-1)

    def generate_codebook_exclude_zero(self, values: torch.Tensor, num_clusters=256, init_codebook=None):
        zeros_mask = self.zeros_mask(values)
        if zeros_mask.all():
            return torch.zeros(1, values.shape[1], dtype=values.dtype, device=values.device), torch.zeros(values.shape[0], dtype=torch.long, device=values.device)
        if init_codebook is not None:
            if init_codebook.abs().max() < self.treat_as_zero:
                init_codebook = None
            elif init_codebook.shape[0] > num_clusters - 1:
                init_codebook = init_codebook[-(num_clusters - 1):, ...]
        nonzero_values = values[~zeros_mask]
        nonzero_centers, nonzero_ids = super().generate_codebook(nonzero_values, num_clusters - 1, init_codebook)
        ids = torch.zeros(values.shape[0], dtype=nonzero_ids.dtype, device=nonzero_ids.device)
        ids[~zeros_mask] = nonzero_ids + 1
        centers = torch.cat((torch.zeros(1, values.shape[1], dtype=values.dtype, device=values.device), nonzero_centers), dim=0)
        return centers, ids

    def has_zero(self, values: torch.Tensor):
        return self.zeros_mask(values).any()

    def produce_clusters_degree_features_rest(self, model: GaussianModel, sh_degree, *args, **kwargs):
        features_rest_flatten = model._features_rest.detach().transpose(1, 2).flatten(0, 1)
        sh_idx_start, sh_idx_end = (sh_degree + 1) ** 2 - 1, (sh_degree + 2) ** 2 - 1
        features_rest = features_rest_flatten[:, sh_idx_start:sh_idx_end]
        if self.has_zero(features_rest):
            codebook, ids = self.generate_codebook_exclude_zero(features_rest, self.num_clusters_features_rest[sh_degree], *args, **kwargs)
        else:
            codebook, ids = self.generate_codebook(features_rest, self.num_clusters_features_rest[sh_degree], *args, **kwargs)
        return codebook, ids.reshape(-1, model._features_rest.shape[-1])
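The index bookkeeping in `generate_codebook_exclude_zero` reserves codebook entry 0 for the all-zero SH vector and clusters only the non-zero rows into `num_clusters - 1` centers, hence the `nonzero_ids + 1` shift. A toy, self-contained check of that logic:

# Toy reproduction of the index shift: entry 0 is the zero vector,
# non-zero rows get ids offset by one into the remaining slots.
import torch

values = torch.tensor([[0., 0.], [1., 2.], [0., 0.], [3., 4.]])
zeros_mask = (values.abs() < 1e-8).all(-1)    # [True, False, True, False]
nonzero_ids = torch.tensor([0, 1])            # stand-in for k-means ids of the 2 non-zero rows
ids = torch.zeros(values.shape[0], dtype=torch.long)
ids[~zeros_mask] = nonzero_ids + 1
print(ids)                                    # tensor([0, 1, 0, 2])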
reduced_3dgs/quantization/quantizer.py
ADDED

@@ -0,0 +1,289 @@
import math
import os
from typing import Dict, Tuple
import torch
import torch.nn as nn
import numpy as np
from sklearn.cluster import MiniBatchKMeans as KMeans
from gaussian_splatting import GaussianModel
from plyfile import PlyData, PlyElement
from .abc import AbstractQuantizer


def array2record(array: torch.Tensor, prefix, n_cols, dtype):
    dtype_full = [(f'{prefix}_{i}', dtype) for i in range(n_cols)] if n_cols > 1 else [(prefix, dtype)]
    data_full = map(lambda x: x.squeeze(-1), np.array_split(array.cpu().numpy(), n_cols, axis=1))
    record = np.rec.fromarrays(data_full, dtype=dtype_full)
    return record


def compute_uint_length(n):
    count = 0
    while n >> 1:
        count += 1
        n >>= 1
    return count


def compute_uint_dtype(n):
    bits = compute_uint_length(n)
    n_bytes = bits // 8
    if bits % 8:
        n_bytes += 1
    return f'u{n_bytes}'


class VectorQuantizer(AbstractQuantizer):
    def __init__(
            self,
            num_clusters=256,
            num_clusters_rotation_re=None,
            num_clusters_rotation_im=None,
            num_clusters_opacity=None,
            num_clusters_scaling=None,
            num_clusters_features_dc=None,
            num_clusters_features_rest=[],
            max_sh_degree=3,
            force_code_dtype=None,
            force_codebook_dtype='f4',
            tol=1e-6, max_iter=500,
    ):
        self.num_clusters_rotation_re = num_clusters_rotation_re or num_clusters
        self.num_clusters_rotation_im = num_clusters_rotation_im or num_clusters
        self.num_clusters_opacity = num_clusters_opacity or num_clusters
        self.num_clusters_scaling = num_clusters_scaling or num_clusters
        self.num_clusters_features_dc = num_clusters_features_dc or num_clusters
        self.num_clusters_features_rest = [(num_clusters_features_rest[i] if len(num_clusters_features_rest) > i else num_clusters) for i in range(max_sh_degree)]
        self.force_code_dtype = force_code_dtype
        self.force_codebook_dtype = force_codebook_dtype
        self.tol = tol
        self.max_iter = max_iter

        self._codebook_dict = {}

    def generate_codebook(self, values: torch.Tensor, num_clusters, init_codebook=None):
        kmeans = KMeans(
            n_clusters=num_clusters, tol=self.tol, max_iter=self.max_iter,
            init='k-means++' if init_codebook is None else init_codebook.cpu().numpy(),
            random_state=0, n_init="auto", verbose=0,
            batch_size=256 * os.cpu_count()
        )
        ids = torch.tensor(kmeans.fit_predict(values.cpu().numpy()), device=values.device)
        centers = torch.tensor(kmeans.cluster_centers_, dtype=values.dtype, device=values.device)
        return centers, ids

    def one_nearst(self, points: torch.Tensor, codebook: torch.Tensor, batch=2**16):
        ids = torch.zeros(points.shape[0], dtype=torch.int64, device=points.device)
        for i in range(math.ceil(points.shape[0] / batch)):
            ids[i*batch:i*batch+batch] = torch.argmin(torch.cdist(points[i*batch:i*batch+batch, ...], codebook), dim=1)
        return ids

    def produce_clusters_features_dc(self, model: GaussianModel, *args, **kwargs):
        codebook, ids = self.generate_codebook(model._features_dc.detach().squeeze(1), self.num_clusters_features_dc, *args, **kwargs)
        return codebook, ids.unsqueeze(1)

    def find_nearest_cluster_id_features_dc(self, model: GaussianModel, codebook: torch.Tensor):
        return self.one_nearst(model._features_dc.detach().squeeze(1), codebook).unsqueeze(1)

    def produce_clusters_degree_features_rest(self, model: GaussianModel, sh_degree, *args, **kwargs):
        features_rest_flatten = model._features_rest.detach().transpose(1, 2).flatten(0, 1)
        sh_idx_start, sh_idx_end = (sh_degree + 1) ** 2 - 1, (sh_degree + 2) ** 2 - 1
        features_rest = features_rest_flatten[:, sh_idx_start:sh_idx_end]
        codebook, ids = self.generate_codebook(features_rest, self.num_clusters_features_rest[sh_degree], *args, **kwargs)
        return codebook, ids.reshape(-1, model._features_rest.shape[-1])

    def find_nearest_cluster_id_degree_features_rest(self, model: GaussianModel, sh_degree, codebook: torch.Tensor):
        features_rest_flatten = model._features_rest.detach().transpose(1, 2).flatten(0, 1)
        sh_idx_start, sh_idx_end = (sh_degree + 1) ** 2 - 1, (sh_degree + 2) ** 2 - 1
        features_rest = features_rest_flatten[:, sh_idx_start:sh_idx_end]
        ids = self.one_nearst(features_rest, codebook)
        return ids.reshape(-1, model._features_rest.shape[-1])

    def produce_clusters_rotation_re(self, model: GaussianModel, *args, **kwargs):
        return self.generate_codebook(model.get_rotation.detach()[:, 0:1], self.num_clusters_rotation_re, *args, **kwargs)

    def find_nearest_cluster_id_rotation_re(self, model: GaussianModel, codebook: torch.Tensor):
        return self.one_nearst(model.get_rotation.detach()[:, 0:1], codebook)

    def produce_clusters_rotation_im(self, model: GaussianModel, *args, **kwargs):
        return self.generate_codebook(model.get_rotation.detach()[:, 1:], self.num_clusters_rotation_im, *args, **kwargs)

    def find_nearest_cluster_id_rotation_im(self, model: GaussianModel, codebook: torch.Tensor):
        return self.one_nearst(model.get_rotation.detach()[:, 1:], codebook)

    def produce_clusters_opacity(self, model: GaussianModel, *args, **kwargs):
        return self.generate_codebook(model._opacity.detach(), self.num_clusters_opacity, *args, **kwargs)

    def find_nearest_cluster_id_opacity(self, model: GaussianModel, codebook: torch.Tensor):
        return self.one_nearst(model._opacity.detach(), codebook)

    def produce_clusters_scaling(self, model: GaussianModel, *args, **kwargs):
        centers, ids = self.generate_codebook(model.get_scaling.detach(), self.num_clusters_scaling, *args, **kwargs)
        centers_log = model.scaling_inverse_activation(centers)
        return centers_log, ids

    def find_nearest_cluster_id_scaling(self, model: GaussianModel, codebook: torch.Tensor):
        return self.one_nearst(model.get_scaling.detach(), model.scaling_activation(codebook))

    def produce_clusters(self, model: GaussianModel, init_codebook_dict={}):
        codebook_dict: Dict[str, torch.Tensor] = {}
        ids_dict: Dict[str, torch.Tensor] = {}
        init_codebook_dict = {
            "features_dc": None,
            **{f"features_rest_{sh_degree}": None for sh_degree in range(model.max_sh_degree)},
            "rotation_re": None,
            "rotation_im": None,
            "opacity": None,
            "scaling": None,
            **init_codebook_dict
        }

        codebook_dict["features_dc"], ids_dict["features_dc"] = self.produce_clusters_features_dc(model, init_codebook=init_codebook_dict["features_dc"])
        for sh_degree in range(model.max_sh_degree):
            codebook_dict[f"features_rest_{sh_degree}"], ids_dict[f"features_rest_{sh_degree}"] = self.produce_clusters_degree_features_rest(
                model, sh_degree, init_codebook=init_codebook_dict[f"features_rest_{sh_degree}"]
            )
        codebook_dict["rotation_re"], ids_dict["rotation_re"] = self.produce_clusters_rotation_re(model, init_codebook=init_codebook_dict["rotation_re"])
        codebook_dict["rotation_im"], ids_dict["rotation_im"] = self.produce_clusters_rotation_im(model, init_codebook=init_codebook_dict["rotation_im"])
        codebook_dict["opacity"], ids_dict["opacity"] = self.produce_clusters_opacity(model, init_codebook=init_codebook_dict["opacity"])
        codebook_dict["scaling"], ids_dict["scaling"] = self.produce_clusters_scaling(model, init_codebook=init_codebook_dict["scaling"])
        return codebook_dict, ids_dict

    def find_nearest_cluster_id(self, model: GaussianModel, codebook_dict={}):
        ids_dict: Dict[str, torch.Tensor] = {}
        ids_dict["features_dc"] = self.find_nearest_cluster_id_features_dc(model, codebook=codebook_dict["features_dc"])
        for sh_degree in range(model.max_sh_degree):
            ids_dict[f"features_rest_{sh_degree}"] = self.find_nearest_cluster_id_degree_features_rest(
                model, sh_degree, codebook=codebook_dict[f"features_rest_{sh_degree}"]
            )
        ids_dict["rotation_re"] = self.find_nearest_cluster_id_rotation_re(model, codebook=codebook_dict["rotation_re"])
        ids_dict["rotation_im"] = self.find_nearest_cluster_id_rotation_im(model, codebook=codebook_dict["rotation_im"])
        ids_dict["opacity"] = self.find_nearest_cluster_id_opacity(model, codebook=codebook_dict["opacity"])
        ids_dict["scaling"] = self.find_nearest_cluster_id_scaling(model, codebook=codebook_dict["scaling"])
        return ids_dict

    def dequantize(self, model: GaussianModel, ids_dict: Dict[str, torch.Tensor], codebook_dict: Dict[str, torch.Tensor], xyz: torch.Tensor = None, replace=False) -> GaussianModel:
        opacity = codebook_dict["opacity"][ids_dict["opacity"], ...]
        scaling = codebook_dict["scaling"][ids_dict["scaling"], ...]

        rotation = torch.cat((
            codebook_dict["rotation_re"][ids_dict["rotation_re"], ...],
            codebook_dict["rotation_im"][ids_dict["rotation_im"], ...],
        ), dim=1)

        features_dc = codebook_dict["features_dc"][ids_dict["features_dc"], ...]
        features_rest = []
        for sh_degree in range(model.max_sh_degree):
            features_rest.append(codebook_dict[f"features_rest_{sh_degree}"][ids_dict[f"features_rest_{sh_degree}"], ...])
        features_rest = torch.cat(features_rest, dim=2).transpose(1, 2)

        with torch.no_grad():
            if replace:
                if xyz is not None:
                    model._xyz = nn.Parameter(xyz)
                model._opacity = nn.Parameter(opacity)
                model._scaling = nn.Parameter(scaling)
                model._rotation = nn.Parameter(rotation)
                model._features_dc = nn.Parameter(features_dc)
                model._features_rest = nn.Parameter(features_rest)
            else:
                if xyz is not None:
                    model._xyz[...] = xyz
                model._opacity[...] = opacity
                model._scaling[...] = scaling
                model._rotation[...] = rotation
                model._features_dc[...] = features_dc
                model._features_rest[...] = features_rest
        return model

    def quantize(self, model: GaussianModel, update_codebook=True) -> Tuple[Dict[str, torch.Tensor], Dict[str, torch.Tensor]]:
        if self._codebook_dict == {} or update_codebook:
            codebook_dict, ids_dict = self.produce_clusters(model, self._codebook_dict)
            self._codebook_dict = codebook_dict
        else:
            codebook_dict = self._codebook_dict
            ids_dict = self.find_nearest_cluster_id(model, self._codebook_dict)
        return ids_dict, codebook_dict

    def save_quantized(self, model: GaussianModel, ply_path: str):
        ids_dict, codebook_dict = self.quantize(model, update_codebook=False)
        dtype_full = [
            ('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
            ('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'),
            ('rot_re', self.force_code_dtype or compute_uint_dtype(self.num_clusters_rotation_re)),
            ('rot_im', self.force_code_dtype or compute_uint_dtype(self.num_clusters_rotation_im)),
            ('opacity', self.force_code_dtype or compute_uint_dtype(self.num_clusters_opacity)),
            ('scale', self.force_code_dtype or compute_uint_dtype(self.num_clusters_scaling)),
            ('f_dc', self.force_code_dtype or compute_uint_dtype(self.num_clusters_features_dc)),
        ]
        for sh_degree in range(model.max_sh_degree):
            force_code_dtype = self.force_code_dtype or compute_uint_dtype(self.num_clusters_features_rest[sh_degree])
            dtype_full.extend([
                (f'f_rest_{sh_degree}_0', force_code_dtype),
                (f'f_rest_{sh_degree}_1', force_code_dtype),
                (f'f_rest_{sh_degree}_2', force_code_dtype),
            ])
        data_full = [
            *np.array_split(model._xyz.detach().cpu().numpy(), 3, axis=1),
            *np.array_split(torch.zeros_like(model._xyz).detach().cpu().numpy(), 3, axis=1),
            ids_dict["rotation_re"].unsqueeze(-1).cpu().numpy(),
            ids_dict["rotation_im"].unsqueeze(-1).cpu().numpy(),
            ids_dict["opacity"].unsqueeze(-1).cpu().numpy(),
            ids_dict["scaling"].unsqueeze(-1).cpu().numpy(),
            ids_dict["features_dc"].cpu().numpy(),
        ]
        for sh_degree in range(model.max_sh_degree):
            features_rest = ids_dict[f'features_rest_{sh_degree}'].cpu().numpy()
            data_full.extend(np.array_split(features_rest, 3, axis=1))

        elements = np.rec.fromarrays([data.squeeze(-1) for data in data_full], dtype=dtype_full)
        el = PlyElement.describe(elements, 'vertex')

        cb = [
            PlyElement.describe(array2record(codebook_dict["rotation_re"], "rot_re", 1, self.force_codebook_dtype), 'codebook_rot_re'),
            PlyElement.describe(array2record(codebook_dict["rotation_im"], "rot_im", 3, self.force_codebook_dtype), 'codebook_rot_im'),
            PlyElement.describe(array2record(codebook_dict["opacity"], "opacity", 1, self.force_codebook_dtype), 'codebook_opacity'),
            PlyElement.describe(array2record(codebook_dict["scaling"], "scaling", 3, self.force_codebook_dtype), 'codebook_scaling'),
            PlyElement.describe(array2record(codebook_dict["features_dc"], "f_dc", 3, self.force_codebook_dtype), 'codebook_f_dc'),
        ]
        for sh_degree in range(model.max_sh_degree):
            features_rest = codebook_dict[f'features_rest_{sh_degree}']
            n_channels = (sh_degree + 2) ** 2 - (sh_degree + 1) ** 2
            cb.append(PlyElement.describe(array2record(features_rest, f'f_rest_{sh_degree}', n_channels, self.force_codebook_dtype), f'codebook_f_rest_{sh_degree}'))

        PlyData([el, *cb]).write(ply_path)

    def load_quantized(self, model: GaussianModel, ply_path: str) -> GaussianModel:
        plydata = PlyData.read(ply_path)

        ids_dict = {}
        elements = plydata['vertex']
        kwargs = dict(dtype=torch.long, device=model._xyz.device)
        ids_dict["rotation_re"] = torch.tensor(elements["rot_re"].copy(), **kwargs)
        ids_dict["rotation_im"] = torch.tensor(elements["rot_im"].copy(), **kwargs)
        ids_dict["opacity"] = torch.tensor(elements["opacity"].copy(), **kwargs)
        ids_dict["scaling"] = torch.tensor(elements["scale"].copy(), **kwargs)
        ids_dict["features_dc"] = torch.tensor(elements["f_dc"].copy(), **kwargs).unsqueeze(-1)
        for sh_degree in range(model.max_sh_degree):
            ids_dict[f'features_rest_{sh_degree}'] = torch.tensor(np.stack([elements[f'f_rest_{sh_degree}_{ch}'] for ch in range(3)], axis=1), **kwargs)

        codebook_dict = {}
        kwargs = dict(dtype=torch.float32, device=model._xyz.device)
        codebook_dict["rotation_re"] = torch.tensor(plydata["codebook_rot_re"]["rot_re"], **kwargs).unsqueeze(-1)
        codebook_dict["rotation_im"] = torch.tensor(np.stack([plydata["codebook_rot_im"][f'rot_im_{ch}'] for ch in range(3)], axis=1), **kwargs)
        codebook_dict["opacity"] = torch.tensor(plydata["codebook_opacity"]["opacity"], **kwargs).unsqueeze(-1)
        codebook_dict["scaling"] = torch.tensor(np.stack([plydata["codebook_scaling"][f'scaling_{ch}'] for ch in range(3)], axis=1), **kwargs)
        codebook_dict["features_dc"] = torch.tensor(np.stack([plydata["codebook_f_dc"][f'f_dc_{ch}'] for ch in range(3)], axis=1), **kwargs)
        for sh_degree in range(model.max_sh_degree):
            n_channels = (sh_degree + 2) ** 2 - (sh_degree + 1) ** 2
            codebook_dict[f'features_rest_{sh_degree}'] = torch.tensor(np.stack([plydata[f"codebook_f_rest_{sh_degree}"][f'f_rest_{sh_degree}_{ch}'] for ch in range(n_channels)], axis=1), **kwargs)

        self._codebook_dict = codebook_dict

        xyz = torch.stack([
            torch.tensor(elements["x"].copy(), **kwargs),
            torch.tensor(elements["y"].copy(), **kwargs),
            torch.tensor(elements["z"].copy(), **kwargs),
        ], dim=1)
        return self.dequantize(model, ids_dict, codebook_dict, xyz=xyz, replace=True)
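Two points worth making concrete. First, `compute_uint_dtype` picks the narrowest unsigned PLY dtype for the cluster ids: the default 256 clusters yield ids 0..255, which fit in 'u1'. Second, `save_quantized` and `load_quantized` round-trip through a single .ply that stores the per-point codes in the 'vertex' element and each codebook as its own element. A hedged sketch, assuming a trained `model` is already in scope; the output path is a placeholder.

from reduced_3dgs.quantization.quantizer import VectorQuantizer, compute_uint_dtype

assert compute_uint_dtype(256) == 'u1'     # 8-bit codes
assert compute_uint_dtype(65536) == 'u2'   # 16-bit codes

quantizer = VectorQuantizer(num_clusters=256)
ids_dict, codebook_dict = quantizer.quantize(model, update_codebook=True)  # fit codebooks
quantizer.save_quantized(model, "point_cloud_quantized.ply")               # codes + codebooks in one .ply
model = quantizer.load_quantized(model, "point_cloud_quantized.ply")       # codes -> attributes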
reduced_3dgs/quantization/wrapper.py
ADDED

@@ -0,0 +1,67 @@
from gaussian_splatting import GaussianModel
from gaussian_splatting.trainer import AbstractTrainer, Trainer
from .abc import QuantizeTrainerWrapper
from .exclude_zeros import ExcludeZeroSHQuantizer


def VectorQuantizeTrainerWrapper(
        base_trainer: AbstractTrainer,
        num_clusters=256,
        num_clusters_rotation_re=None,
        num_clusters_rotation_im=None,
        num_clusters_opacity=None,
        num_clusters_scaling=None,
        num_clusters_features_dc=None,
        num_clusters_features_rest=[],
        quantize_from_iter=5000,
        quantize_until_iter=30000,
        quantize_interval=1000,
        treat_as_zero=1e-8,
):
    return QuantizeTrainerWrapper(
        base_trainer, ExcludeZeroSHQuantizer(
            num_clusters=num_clusters,
            num_clusters_rotation_re=num_clusters_rotation_re,
            num_clusters_rotation_im=num_clusters_rotation_im,
            num_clusters_opacity=num_clusters_opacity,
            num_clusters_scaling=num_clusters_scaling,
            num_clusters_features_dc=num_clusters_features_dc,
            num_clusters_features_rest=num_clusters_features_rest,
            treat_as_zero=treat_as_zero,
        ),
        quantize_from_iter=quantize_from_iter,
        quantize_until_iter=quantize_until_iter,
        quantize_interval=quantize_interval,
    )


def VectorQuantizeTrainer(
        model: GaussianModel,
        scene_extent: float,
        num_clusters=256,
        num_clusters_rotation_re=None,
        num_clusters_rotation_im=None,
        num_clusters_opacity=None,
        num_clusters_scaling=None,
        num_clusters_features_dc=None,
        num_clusters_features_rest=[],
        quantize_from_iter=5000,
        quantize_until_iter=30000,
        quantize_interval=1000,
        treat_as_zero=1e-8,
        *args, **kwargs):
    return VectorQuantizeTrainerWrapper(
        Trainer(model, scene_extent, *args, **kwargs),
        num_clusters=num_clusters,
        num_clusters_rotation_re=num_clusters_rotation_re,
        num_clusters_rotation_im=num_clusters_rotation_im,
        num_clusters_opacity=num_clusters_opacity,
        num_clusters_scaling=num_clusters_scaling,
        num_clusters_features_dc=num_clusters_features_dc,
        num_clusters_features_rest=num_clusters_features_rest,
        treat_as_zero=treat_as_zero,
        quantize_from_iter=quantize_from_iter,
        quantize_until_iter=quantize_until_iter,
        quantize_interval=quantize_interval,
    )
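A usage sketch for these constructors; `model`, `scene_extent`, and the loop that consumes `trainer` come from gaussian_splatting and are assumptions here.

from reduced_3dgs.quantization.wrapper import VectorQuantizeTrainer

trainer = VectorQuantizeTrainer(
    model, scene_extent,
    num_clusters=256,
    quantize_from_iter=5000,
    quantize_until_iter=30000,
    quantize_interval=1000,
)
# During iterations 5000..30000, every 1000th access to `trainer.model` snaps the
# parameters to the current codebooks (see QuantizeTrainerWrapper.model in abc.py).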
reduced_3dgs/quantize.py
ADDED

@@ -0,0 +1,49 @@
import os
import shutil
from gaussian_splatting import GaussianModel
from reduced_3dgs.quantization import ExcludeZeroSHQuantizer as VectorQuantizer


def copy_not_exists(source, destination):
    if os.path.exists(destination):
        if os.path.samefile(source, destination):
            return
        os.remove(destination)
    shutil.copy(source, destination)


def quantize(source, destination, iteration, sh_degree, device, **kwargs):
    copy_not_exists(os.path.join(source, "cfg_args"), os.path.join(destination, "cfg_args"))
    copy_not_exists(os.path.join(source, "cameras.json"), os.path.join(destination, "cameras.json"))

    input = os.path.join(source, "point_cloud", "iteration_" + str(iteration), "point_cloud.ply")
    output = os.path.join(destination, "point_cloud", "iteration_" + str(iteration), "point_cloud_quantized.ply")
    os.makedirs(os.path.join(destination, "point_cloud", "iteration_" + str(iteration)), exist_ok=True)
    gaussians = GaussianModel(sh_degree).to(device)
    gaussians.load_ply(input)
    quantizer = VectorQuantizer(**kwargs)
    quantizer.save_quantized(gaussians, output)
    ids_dict, codebook_dict = quantizer.quantize(gaussians, update_codebook=False)
    gaussians = quantizer.dequantize(gaussians, ids_dict, codebook_dict)
    quantizer.load_quantized(gaussians, output)
    output = os.path.join(destination, "point_cloud", "iteration_" + str(iteration), "point_cloud.ply")
    gaussians.save_ply(output)


if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-s", "--source", required=True, type=str)
    parser.add_argument("-d", "--destination", required=True, type=str)
    parser.add_argument("-i", "--iteration", default=30000, type=int)
    parser.add_argument("--sh_degree", type=int, default=3)
    parser.add_argument("--device", type=str, default="cuda")
    parser.add_argument("--num_clusters", type=int, default=256)
    parser.add_argument("--num_clusters_rotation_re", type=int, default=None)
    parser.add_argument("--num_clusters_rotation_im", type=int, default=None)
    parser.add_argument("--num_clusters_opacity", type=int, default=None)
    parser.add_argument("--num_clusters_scaling", type=int, default=None)
    parser.add_argument("--num_clusters_features_dc", type=int, default=None)
    parser.add_argument("--num_clusters_features_rest", nargs="+", type=int, default=[])
    args = parser.parse_args()
    quantize(**vars(args))
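Because the entry point just forwards the parsed arguments via `quantize(**vars(args))`, the function can equally be called directly from Python; the directory names below are placeholders.

from reduced_3dgs.quantize import quantize

quantize(
    source="output/my_scene",        # placeholder trained-model directory
    destination="output/my_scene_q",
    iteration=30000,
    sh_degree=3,
    device="cuda",
    num_clusters=256,                # forwarded to the quantizer via **kwargs
)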