reduced-3dgs 1.10.0__cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of reduced-3dgs has been flagged as potentially problematic.
- reduced_3dgs/__init__.py +0 -0
- reduced_3dgs/combinations.py +245 -0
- reduced_3dgs/diff_gaussian_rasterization/_C.cpython-310-x86_64-linux-gnu.so +0 -0
- reduced_3dgs/diff_gaussian_rasterization/__init__.py +235 -0
- reduced_3dgs/importance/__init__.py +3 -0
- reduced_3dgs/importance/combinations.py +63 -0
- reduced_3dgs/importance/diff_gaussian_rasterization/_C.cpython-310-x86_64-linux-gnu.so +0 -0
- reduced_3dgs/importance/diff_gaussian_rasterization/__init__.py +347 -0
- reduced_3dgs/importance/trainer.py +269 -0
- reduced_3dgs/pruning/__init__.py +2 -0
- reduced_3dgs/pruning/combinations.py +65 -0
- reduced_3dgs/pruning/trainer.py +145 -0
- reduced_3dgs/quantization/__init__.py +4 -0
- reduced_3dgs/quantization/abc.py +49 -0
- reduced_3dgs/quantization/exclude_zeros.py +41 -0
- reduced_3dgs/quantization/quantizer.py +289 -0
- reduced_3dgs/quantization/wrapper.py +67 -0
- reduced_3dgs/quantize.py +49 -0
- reduced_3dgs/shculling/__init__.py +2 -0
- reduced_3dgs/shculling/gaussian_model.py +78 -0
- reduced_3dgs/shculling/trainer.py +158 -0
- reduced_3dgs/simple_knn/_C.cpython-310-x86_64-linux-gnu.so +0 -0
- reduced_3dgs/train.py +195 -0
- reduced_3dgs-1.10.0.dist-info/LICENSE.md +93 -0
- reduced_3dgs-1.10.0.dist-info/METADATA +278 -0
- reduced_3dgs-1.10.0.dist-info/RECORD +31 -0
- reduced_3dgs-1.10.0.dist-info/WHEEL +6 -0
- reduced_3dgs-1.10.0.dist-info/top_level.txt +1 -0
- reduced_3dgs.libs/libc10-ff4eddb5.so +0 -0
- reduced_3dgs.libs/libc10_cuda-c675d3fb.so +0 -0
- reduced_3dgs.libs/libcudart-8774224f.so.12.4.127 +0 -0
reduced_3dgs/shculling/gaussian_model.py
ADDED
@@ -0,0 +1,78 @@
+import torch
+import torch.nn as nn
+from gaussian_splatting import GaussianModel
+
+
+class VariableSHGaussianModel(GaussianModel):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self._degrees = torch.empty(0)
+
+    def to(self, device):
+        self._degrees = self._degrees.to(device)
+        return super().to(device)
+
+    @property
+    def get_features(self):
+        features_dc = self._features_dc
+        # compute SH according to self._degrees
+        n_SH = (self._degrees + 1) ** 2 - 1
+        indices = torch.arange((self.max_sh_degree + 1) ** 2 - 1, device=n_SH.device).expand(n_SH.shape[0], -1) < n_SH.unsqueeze(-1)
+        features_rest = torch.zeros_like(self._features_rest)
+        features_rest[indices, :] = self._features_rest[indices, :]
+        with torch.no_grad():
+            self._features_rest[~indices, :] = 0
+            if self._features_rest.grad is not None:
+                self._features_rest.grad[~indices, :] = 0
+        return torch.cat((features_dc, features_rest), dim=1)
+
+    def init_degrees(self):
+        self._degrees = torch.zeros(self._xyz.shape[0], dtype=torch.int, device=self._xyz.device) + self.max_sh_degree
+
+    def create_from_pcd(self, *args, **kwargs):
+        super().create_from_pcd(*args, **kwargs)
+        self.init_degrees()
+
+    def load_ply(self, *args, **kwargs):
+        super().load_ply(*args, **kwargs)
+        self.init_degrees()
+
+    def update_points_add(
+        self,
+        xyz: nn.Parameter,
+        features_dc: nn.Parameter,
+        features_rest: nn.Parameter,
+        scaling: nn.Parameter,
+        rotation: nn.Parameter,
+        opacity: nn.Parameter,
+    ):
+        super().update_points_add(
+            xyz=xyz,
+            features_dc=features_dc,
+            features_rest=features_rest,
+            scaling=scaling,
+            rotation=rotation,
+            opacity=opacity,
+        )
+        with torch.no_grad():
+            self._degrees = torch.cat((self._degrees, torch.zeros(xyz.shape[0]-self._degrees.shape[0], dtype=self._degrees.dtype, device=xyz.device) + self.max_sh_degree))
+
+    def update_points_remove(
+        self, removed_mask: torch.Tensor,
+        xyz: nn.Parameter,
+        features_dc: nn.Parameter,
+        features_rest: nn.Parameter,
+        scaling: nn.Parameter,
+        rotation: nn.Parameter,
+        opacity: nn.Parameter,
+    ):
+        super().update_points_remove(
+            removed_mask=removed_mask,
+            xyz=xyz,
+            features_dc=features_dc,
+            features_rest=features_rest,
+            scaling=scaling,
+            rotation=rotation,
+            opacity=opacity,
+        )
+        with torch.no_grad():
+            self._degrees = self._degrees[~removed_mask]
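The get_features property above implements per-Gaussian SH band selection: a Gaussian assigned degree d keeps only its first (d + 1)^2 - 1 "rest" coefficients, and the higher bands are zeroed. A minimal standalone sketch of that mask computation, with toy sizes and hypothetical values rather than real model state:

import torch

max_sh_degree = 3
degrees = torch.tensor([0, 1, 2, 3], dtype=torch.int)  # per-Gaussian SH degree

# Active "rest" coefficients per Gaussian: (d + 1)^2 - 1 -> [0, 3, 8, 15]
n_SH = (degrees + 1) ** 2 - 1

# (num_gaussians, 15) boolean mask: True where the coefficient index falls
# below the per-Gaussian count, i.e. the coefficient stays active
indices = torch.arange((max_sh_degree + 1) ** 2 - 1).expand(n_SH.shape[0], -1) < n_SH.unsqueeze(-1)

features_rest = torch.randn(4, 15, 3)           # hypothetical SH rest coefficients
masked = torch.zeros_like(features_rest)
masked[indices, :] = features_rest[indices, :]  # higher bands remain zero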
reduced_3dgs/shculling/trainer.py
ADDED
@@ -0,0 +1,158 @@
+from typing import List
+import torch
+
+from gaussian_splatting import Camera
+from gaussian_splatting.trainer import AbstractTrainer, TrainerWrapper, BaseTrainer, Trainer
+from gaussian_splatting.dataset import CameraDataset
+from reduced_3dgs.diff_gaussian_rasterization._C import calculate_colours_variance
+
+from .gaussian_model import VariableSHGaussianModel
+
+
+def _low_variance_colour_culling(self: VariableSHGaussianModel, threshold, weighted_variance: torch.Tensor, weighted_mean: torch.Tensor):
+    original_degrees = torch.zeros_like(self._degrees)
+    original_degrees.copy_(self._degrees)
+
+    # Uniform colour culling
+    weighted_colour_std = weighted_variance.sqrt()
+    weighted_colour_std[weighted_colour_std.isnan()] = 0
+    weighted_colour_std = weighted_colour_std.mean(dim=2).squeeze()
+
+    std_mask = weighted_colour_std < threshold
+    self._features_dc[std_mask] = (weighted_mean[std_mask] - 0.5) / 0.28209479177387814
+    self._degrees[std_mask] = 0
+    self._features_rest[std_mask] = 0
+
+
+def _low_distance_colour_culling(self: VariableSHGaussianModel, threshold, colour_distances: torch.Tensor):
+    colour_distances[colour_distances.isnan()] = 0
+
+    # Loop from active_sh_degree - 1 to 0, since the comparisons
+    # are always done based on the max band that corresponds to active_sh_degree
+    for sh_degree in range(self.active_sh_degree - 1, 0, -1):
+        coeffs_num = (sh_degree+1)**2 - 1
+        mask = colour_distances[:, sh_degree] < threshold
+        self._degrees[mask] = torch.min(
+            torch.tensor([sh_degree], device="cuda", dtype=int),
+            self._degrees[mask]
+        ).int()
+
+        # Zero-out the associated SH coefficients for clarity,
+        # as they won't be used in rasterisation due to the degrees field
+        self._features_rest[mask, coeffs_num:] = 0
+
+
+def _cull_sh_bands(self: VariableSHGaussianModel, cameras: List[Camera], threshold=0, std_threshold=0.):
+    camera_positions = torch.stack([cam.camera_center for cam in cameras], dim=0)
+    camera_viewmatrices = torch.stack([cam.world_view_transform for cam in cameras], dim=0)
+    camera_projmatrices = torch.stack([cam.full_proj_transform for cam in cameras], dim=0)
+    camera_fovx = torch.tensor([camera.FoVx for camera in cameras], device="cuda", dtype=torch.float32)
+    camera_fovy = torch.tensor([camera.FoVy for camera in cameras], device="cuda", dtype=torch.float32)
+    image_height = torch.tensor([camera.image_height for camera in cameras], device="cuda", dtype=torch.int32)
+    image_width = torch.tensor([camera.image_width for camera in cameras], device="cuda", dtype=torch.int32)
+
+    # Wrapping in a function since it's called with the same parameters twice
+    def run_calculate_colours_variance():
+        return calculate_colours_variance(
+            camera_positions,
+            self.get_xyz,
+            self._opacity,
+            self.get_scaling,
+            self.get_rotation,
+            camera_viewmatrices,
+            camera_projmatrices,
+            torch.tan(camera_fovx*0.5),
+            torch.tan(camera_fovy*0.5),
+            image_height,
+            image_width,
+            self.get_features,
+            self._degrees,
+            self.active_sh_degree)
+
+    _, weighted_variance, weighted_mean = run_calculate_colours_variance()
+    _low_variance_colour_culling(self, std_threshold, weighted_variance, weighted_mean)
+
+    # Recalculate to account for the changed values
+    colour_distances, _, _ = run_calculate_colours_variance()
+    _low_distance_colour_culling(self, threshold, colour_distances)
+
+
+def cull_sh_bands(self: VariableSHGaussianModel, cameras: List[Camera], threshold=0, std_threshold=0.):
+    with torch.no_grad():
+        _cull_sh_bands(self, cameras, threshold, std_threshold)
+
+
+class SHCuller(TrainerWrapper):
+    def __init__(
+        self, base_trainer: AbstractTrainer,
+        dataset: CameraDataset,
+        cdist_threshold: float = 6,
+        std_threshold: float = 0.04,
+        cull_at_steps=[15000],
+    ):
+        super().__init__(base_trainer)
+        assert isinstance(self.model, VariableSHGaussianModel)
+        self.dataset = dataset
+        self.cdist_threshold = cdist_threshold
+        self.std_threshold = std_threshold
+        self.cull_at_steps = cull_at_steps
+
+    def optim_step(self):
+        ret = super().optim_step()
+        if self.curr_step in self.cull_at_steps:
+            cull_sh_bands(self.model, self.dataset, self.cdist_threshold, self.std_threshold)
+        return ret
+
+
+def SHCullingTrainerWrapper(
+        base_trainer_constructor,
+        model: VariableSHGaussianModel,
+        scene_extent: float,
+        dataset: CameraDataset,
+        cdist_threshold: float = 6,
+        std_threshold: float = 0.04,
+        cull_at_steps=[15000],
+        *args, **kwargs):
+    return SHCuller(
+        base_trainer_constructor(model, scene_extent, dataset, *args, **kwargs),
+        dataset,
+        cdist_threshold=cdist_threshold,
+        std_threshold=std_threshold,
+        cull_at_steps=cull_at_steps,
+    )
+
+
+def BaseSHCullingTrainer(
+        model: VariableSHGaussianModel,
+        scene_extent: float,
+        dataset: CameraDataset,
+        cdist_threshold: float = 6,
+        std_threshold: float = 0.04,
+        cull_at_steps=[15000],
+        *args, **kwargs):
+    return SHCullingTrainerWrapper(
+        lambda model, scene_extent, dataset, *args, **kwargs: BaseTrainer(model, scene_extent, *args, **kwargs),
+        model, scene_extent, dataset,
+        cdist_threshold=cdist_threshold,
+        std_threshold=std_threshold,
+        cull_at_steps=cull_at_steps,
+        *args, **kwargs,
+    )
+
+
+def SHCullingTrainer(
+        model: VariableSHGaussianModel,
+        scene_extent: float,
+        dataset: CameraDataset,
+        cdist_threshold: float = 6,
+        std_threshold: float = 0.04,
+        cull_at_steps=[15000],
+        *args, **kwargs):
+    return SHCullingTrainerWrapper(
+        lambda model, scene_extent, dataset, *args, **kwargs: Trainer(model, scene_extent, *args, **kwargs),
+        model, scene_extent, dataset,
+        cdist_threshold=cdist_threshold,
+        std_threshold=std_threshold,
+        cull_at_steps=cull_at_steps,
+        *args, **kwargs,
+    )
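The constant 0.28209479177387814 in _low_variance_colour_culling is the degree-0 SH basis value C0 = 1 / (2 * sqrt(pi)). Standard 3DGS maps the DC coefficient to colour as colour = C0 * f_dc + 0.5, so low-variance Gaussians simply get their mean colour baked into the DC band via the inverse mapping. A self-contained round-trip check, with hypothetical colour values:

import math
import torch

C0 = 1.0 / (2.0 * math.sqrt(math.pi))  # 0.28209479177387814

mean_colour = torch.tensor([0.2, 0.5, 0.9])  # hypothetical weighted mean RGB
f_dc = (mean_colour - 0.5) / C0              # inverse of colour = C0 * f_dc + 0.5
assert torch.allclose(C0 * f_dc + 0.5, mean_colour)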
reduced_3dgs/train.py
ADDED
@@ -0,0 +1,195 @@
+import os
+import random
+import shutil
+from typing import List, Tuple
+import torch
+from tqdm import tqdm
+from argparse import Namespace
+from gaussian_splatting import GaussianModel
+from gaussian_splatting.dataset import CameraDataset, JSONCameraDataset, TrainableCameraDataset
+from gaussian_splatting.utils import psnr
+from gaussian_splatting.dataset.colmap import ColmapCameraDataset, ColmapTrainableCameraDataset, colmap_init
+from gaussian_splatting.trainer import AbstractTrainer
+from gaussian_splatting.trainer.extensions import ScaleRegularizeTrainerWrapper
+from reduced_3dgs.quantization import AbstractQuantizer, VectorQuantizeTrainerWrapper
+from reduced_3dgs.shculling import VariableSHGaussianModel, SHCullingTrainer
+from reduced_3dgs.pruning import PruningTrainer
+from reduced_3dgs.combinations import PrunerInDensifyTrainer, SHCullingDensificationTrainer, SHCullingPruningTrainer, SHCullingPrunerInDensifyTrainer
+from reduced_3dgs.combinations import CameraTrainableVariableSHGaussianModel, CameraSHCullingTrainer, CameraPruningTrainer
+from reduced_3dgs.combinations import CameraPrunerInDensifyTrainer, CameraSHCullingDensifyTrainer, CameraSHCullingPruningTrainer, CameraSHCullingPruningDensifyTrainer
+
+
+basemodes = {
+    "shculling": SHCullingTrainer,
+    "pruning": PruningTrainer,
+    "densify-pruning": PrunerInDensifyTrainer,
+    "densify-shculling": SHCullingDensificationTrainer,
+    "prune-shculling": SHCullingPruningTrainer,
+    "densify-prune-shculling": SHCullingPrunerInDensifyTrainer,
+}
+cameramodes = {
+    "camera-shculling": CameraSHCullingTrainer,
+    "camera-pruning": CameraPruningTrainer,
+    "camera-densify-pruning": CameraPrunerInDensifyTrainer,
+    "camera-densify-shculling": CameraSHCullingDensifyTrainer,
+    "camera-prune-shculling": CameraSHCullingPruningTrainer,
+    "camera-densify-prune-shculling": CameraSHCullingPruningDensifyTrainer,
+}
+
+
+def prepare_quantizer(
+        gaussians: GaussianModel,
+        scene_extent: float,
+        dataset: CameraDataset,
+        base_constructor,
+        load_quantized: str = None,
+
+        num_clusters=256,
+        num_clusters_rotation_re=None,
+        num_clusters_rotation_im=None,
+        num_clusters_opacity=None,
+        num_clusters_scaling=None,
+        num_clusters_features_dc=None,
+        num_clusters_features_rest=[],
+
+        quantize_from_iter=5000,
+        quantize_until_iter=30000,
+        quantize_interval=1000,
+        **configs):
+    trainer = VectorQuantizeTrainerWrapper(
+        base_constructor(
+            gaussians,
+            scene_extent=scene_extent,
+            dataset=dataset,
+            **configs
+        ),
+
+        num_clusters=num_clusters,
+        num_clusters_rotation_re=num_clusters_rotation_re,
+        num_clusters_rotation_im=num_clusters_rotation_im,
+        num_clusters_opacity=num_clusters_opacity,
+        num_clusters_scaling=num_clusters_scaling,
+        num_clusters_features_dc=num_clusters_features_dc,
+        num_clusters_features_rest=num_clusters_features_rest,
+
+        quantize_from_iter=quantize_from_iter,
+        quantize_until_iter=quantize_until_iter,
+        quantize_interval=quantize_interval,
+    )
+    if load_quantized:
+        trainer.quantizer.load_quantized(load_quantized)
+    return trainer, trainer.quantizer
+
+
+def prepare_training(sh_degree: int, source: str, device: str, mode: str, load_ply: str = None, load_camera: str = None, load_depth=False, with_scale_reg=False, quantize: bool = False, load_quantized: str = None, configs={}) -> Tuple[CameraDataset, GaussianModel, AbstractTrainer, AbstractQuantizer]:
+    quantizer = None
+    if mode in basemodes:
+        gaussians = VariableSHGaussianModel(sh_degree).to(device)
+        gaussians.load_ply(load_ply) if load_ply else colmap_init(gaussians, source)
+        dataset = (JSONCameraDataset(load_camera, load_depth=load_depth) if load_camera else ColmapCameraDataset(source, load_depth=load_depth)).to(device)
+        modes = basemodes
+    elif mode in cameramodes:
+        gaussians = CameraTrainableVariableSHGaussianModel(sh_degree).to(device)
+        gaussians.load_ply(load_ply) if load_ply else colmap_init(gaussians, source)
+        dataset = (TrainableCameraDataset.from_json(load_camera, load_depth=load_depth) if load_camera else ColmapTrainableCameraDataset(source, load_depth=load_depth)).to(device)
+        modes = cameramodes
+    else:
+        raise ValueError(f"Unknown mode: {mode}")
+    constructor = modes[mode]
+    if with_scale_reg:
+        constructor = lambda *args, **kwargs: ScaleRegularizeTrainerWrapper(modes[mode], *args, **kwargs)
+    if quantize:
+        trainer, quantizer = prepare_quantizer(
+            gaussians,
+            scene_extent=dataset.scene_extent(),
+            dataset=dataset,
+            base_constructor=modes[mode],
+            load_quantized=load_quantized,
+            **configs
+        )
+    else:
+        trainer = constructor(
+            gaussians,
+            scene_extent=dataset.scene_extent(),
+            dataset=dataset,
+            **configs
+        )
+    return dataset, gaussians, trainer, quantizer
+
+
+def save_cfg_args(destination: str, sh_degree: int, source: str):
+    os.makedirs(destination, exist_ok=True)
+    with open(os.path.join(destination, "cfg_args"), 'w') as cfg_log_f:
+        cfg_log_f.write(str(Namespace(sh_degree=sh_degree, source_path=source)))
+
+
+def training(dataset: CameraDataset, gaussians: GaussianModel, trainer: AbstractTrainer, quantizer: AbstractQuantizer, destination: str, iteration: int, save_iterations: List[int], device: str, empty_cache_every_step=False):
+    shutil.rmtree(os.path.join(destination, "point_cloud"), ignore_errors=True)  # remove the previous point cloud
+    pbar = tqdm(range(1, iteration+1))
+    epoch = list(range(len(dataset)))
+    epoch_psnr = torch.empty(3, 0, device=device)
+    ema_loss_for_log = 0.0
+    avg_psnr_for_log = 0.0
+    for step in pbar:
+        epoch_idx = step % len(dataset)
+        if epoch_idx == 0:
+            avg_psnr_for_log = epoch_psnr.mean().item()
+            epoch_psnr = torch.empty(3, 0, device=device)
+            random.shuffle(epoch)
+        idx = epoch[epoch_idx]
+        loss, out = trainer.step(dataset[idx])
+        if empty_cache_every_step:
+            torch.cuda.empty_cache()
+        with torch.no_grad():
+            ema_loss_for_log = 0.4 * loss.item() + 0.6 * ema_loss_for_log
+            epoch_psnr = torch.concat([epoch_psnr, psnr(out["render"], dataset[idx].ground_truth_image)], dim=1)
+            if step % 10 == 0:
+                pbar.set_postfix({'epoch': step // len(dataset), 'loss': ema_loss_for_log, 'psnr': avg_psnr_for_log, 'n': gaussians._xyz.shape[0]})
+            if step in save_iterations:
+                save_path = os.path.join(destination, "point_cloud", "iteration_" + str(step))
+                os.makedirs(save_path, exist_ok=True)
+                gaussians.save_ply(os.path.join(save_path, "point_cloud.ply"))
+                dataset.save_cameras(os.path.join(destination, "cameras.json"))
+                if quantizer:
+                    quantizer.save_quantized(gaussians, os.path.join(save_path, "point_cloud_quantized.ply"))
+    save_path = os.path.join(destination, "point_cloud", "iteration_" + str(iteration))
+    os.makedirs(save_path, exist_ok=True)
+    gaussians.save_ply(os.path.join(save_path, "point_cloud.ply"))
+    dataset.save_cameras(os.path.join(destination, "cameras.json"))
+    if quantizer:
+        quantizer.save_quantized(gaussians, os.path.join(save_path, "point_cloud_quantized.ply"))
+
+
+if __name__ == "__main__":
+    from argparse import ArgumentParser, Namespace
+    parser = ArgumentParser()
+    parser.add_argument("--sh_degree", default=3, type=int)
+    parser.add_argument("-s", "--source", required=True, type=str)
+    parser.add_argument("-d", "--destination", required=True, type=str)
+    parser.add_argument("-i", "--iteration", default=30000, type=int)
+    parser.add_argument("-l", "--load_ply", default=None, type=str)
+    parser.add_argument("--load_camera", default=None, type=str)
+    parser.add_argument("--quantize", action='store_true')
+    parser.add_argument("--no_depth_data", action='store_true')
+    parser.add_argument("--with_scale_reg", action="store_true")
+    parser.add_argument("--load_quantized", default=None, type=str)
+    parser.add_argument("--mode", choices=list(basemodes.keys()) + list(cameramodes.keys()), default="densify-prune-shculling")
+    parser.add_argument("--save_iterations", nargs="+", type=int, default=[7000, 30000])
+    parser.add_argument("--device", default="cuda", type=str)
+    parser.add_argument("--empty_cache_every_step", action='store_true')
+    parser.add_argument("-o", "--option", default=[], action='append', type=str)
+    args = parser.parse_args()
+    save_cfg_args(args.destination, args.sh_degree, args.source)
+    torch.autograd.set_detect_anomaly(False)
+
+    configs = {o.split("=", 1)[0]: eval(o.split("=", 1)[1]) for o in args.option}
+    dataset, gaussians, trainer, quantizer = prepare_training(
+        sh_degree=args.sh_degree, source=args.source, device=args.device, mode=args.mode,
+        load_ply=args.load_ply, load_camera=args.load_camera, load_depth=not args.no_depth_data, with_scale_reg=args.with_scale_reg,
+        quantize=args.quantize, load_quantized=args.load_quantized, configs=configs)
+    dataset.save_cameras(os.path.join(args.destination, "cameras.json"))
+    torch.cuda.empty_cache()
+    training(
+        dataset=dataset, gaussians=gaussians, trainer=trainer, quantizer=quantizer,
+        destination=args.destination, iteration=args.iteration, save_iterations=args.save_iterations,
+        device=args.device)
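Extra trainer parameters arrive through the -o/--option flag: each key=value string is split once on "=", the value is evaluated with eval (so Python literals such as lists and floats work, which also means the flag is only safe on trusted command lines), and the resulting dict is forwarded as keyword arguments (**configs) to the selected trainer constructor. A small sketch of what that parsing produces, mirroring the comprehension above with option names that exist in this package:

options = ["cull_at_steps=[15000]", "std_threshold=0.04", "quantize_interval=1000"]
configs = {o.split("=", 1)[0]: eval(o.split("=", 1)[1]) for o in options}
# configs == {'cull_at_steps': [15000], 'std_threshold': 0.04, 'quantize_interval': 1000}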
reduced_3dgs-1.10.0.dist-info/LICENSE.md
ADDED
@@ -0,0 +1,93 @@
+The Reduced 3DGS code is an extension of the 3DGS code; the original 3DGS license applies to the original part of the code. This license is reproduced below.
+
+Gaussian-Splatting License
+===========================
+
+**Inria** and **the Max Planck Institut for Informatik (MPII)** hold all the ownership rights on the *Software* named **gaussian-splatting**.
+The *Software* is in the process of being registered with the Agence pour la Protection des
+Programmes (APP).
+
+The *Software* is still being developed by the *Licensor*.
+
+*Licensor*'s goal is to allow the research community to use, test and evaluate
+the *Software*.
+
+## 1. Definitions
+
+*Licensee* means any person or entity that uses the *Software* and distributes
+its *Work*.
+
+*Licensor* means the owners of the *Software*, i.e. Inria and MPII.
+
+*Software* means the original work of authorship made available under this
+License, i.e. gaussian-splatting.
+
+*Work* means the *Software* and any additions to or derivative works of the
+*Software* that are made available under this License.
+
+
+## 2. Purpose
+This license is intended to define the rights granted to the *Licensee* by
+Licensors under the *Software*.
+
+## 3. Rights granted
+
+For the above reasons Licensors have decided to distribute the *Software*.
+Licensors grant non-exclusive rights to use the *Software* for research purposes
+to research users (both academic and industrial), free of charge, without right
+to sublicense. The *Software* may be used "non-commercially", i.e., for research
+and/or evaluation purposes only.
+
+Subject to the terms and conditions of this License, you are granted a
+non-exclusive, royalty-free, license to reproduce, prepare derivative works of,
+publicly display, publicly perform and distribute its *Work* and any resulting
+derivative works in any form.
+
+## 4. Limitations
+
+**4.1 Redistribution.** You may reproduce or distribute the *Work* only if (a) you do
+so under this License, (b) you include a complete copy of this License with
+your distribution, and (c) you retain without modification any copyright,
+patent, trademark, or attribution notices that are present in the *Work*.
+
+**4.2 Derivative Works.** You may specify that additional or different terms apply
+to the use, reproduction, and distribution of your derivative works of the *Work*
+("Your Terms") only if (a) Your Terms provide that the use limitation in
+Section 2 applies to your derivative works, and (b) you identify the specific
+derivative works that are subject to Your Terms. Notwithstanding Your Terms,
+this License (including the redistribution requirements in Section 3.1) will
+continue to apply to the *Work* itself.
+
+**4.3** Any other use without prior consent of Licensors is prohibited. Research
+users explicitly acknowledge having received from Licensors all information
+allowing them to appreciate the adequacy between the *Software* and their needs and
+to undertake all necessary precautions for its execution and use.
+
+**4.4** The *Software* is provided both as a compiled library file and as source
+code. In case of using the *Software* for a publication or other results obtained
+through the use of the *Software*, users are strongly encouraged to cite the
+corresponding publications as explained in the documentation of the *Software*.
+
+## 5. Disclaimer
+
+THE USER CANNOT USE, EXPLOIT OR DISTRIBUTE THE *SOFTWARE* FOR COMMERCIAL PURPOSES
+WITHOUT PRIOR AND EXPLICIT CONSENT OF LICENSORS. YOU MUST CONTACT INRIA FOR ANY
+UNAUTHORIZED USE: stip-sophia.transfert@inria.fr . ANY SUCH ACTION WILL
+CONSTITUTE A FORGERY. THIS *SOFTWARE* IS PROVIDED "AS IS" WITHOUT ANY WARRANTIES
+OF ANY NATURE AND ANY EXPRESS OR IMPLIED WARRANTIES, WITH REGARDS TO COMMERCIAL
+USE, PROFESSIONAL USE, LEGAL OR NOT, OR OTHER, OR COMMERCIALISATION OR
+ADAPTATION. UNLESS EXPLICITLY PROVIDED BY LAW, IN NO EVENT, SHALL INRIA OR THE
+AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+GOODS OR SERVICES, LOSS OF USE, DATA, OR PROFITS OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE *SOFTWARE* OR THE USE OR OTHER DEALINGS IN THE *SOFTWARE*.
+
+## 6. Files subject to permissive licenses
+The contents of the file ```utils/loss_utils.py``` are based on publicly available code authored by Evan Su, which falls under the permissive MIT license.
+
+Title: pytorch-ssim\
+Project code: https://github.com/Po-Hsun-Su/pytorch-ssim\
+Copyright Evan Su, 2017\
+License: https://github.com/Po-Hsun-Su/pytorch-ssim/blob/master/LICENSE.txt (MIT)