reduced-3dgs 1.10.0.tar.gz → 1.10.2.tar.gz
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release: this version of reduced-3dgs might be problematic.
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/PKG-INFO +1 -1
- reduced_3dgs-1.10.2/reduced_3dgs/prepare.py +105 -0
- reduced_3dgs-1.10.2/reduced_3dgs/train.py +93 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/reduced_3dgs.egg-info/PKG-INFO +1 -1
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/reduced_3dgs.egg-info/SOURCES.txt +1 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/setup.py +1 -1
- reduced_3dgs-1.10.0/reduced_3dgs/train.py +0 -195
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/LICENSE.md +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/README.md +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/reduced_3dgs/__init__.py +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/reduced_3dgs/combinations.py +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/reduced_3dgs/importance/__init__.py +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/reduced_3dgs/importance/combinations.py +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/reduced_3dgs/importance/trainer.py +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/reduced_3dgs/pruning/__init__.py +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/reduced_3dgs/pruning/combinations.py +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/reduced_3dgs/pruning/trainer.py +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/reduced_3dgs/quantization/__init__.py +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/reduced_3dgs/quantization/abc.py +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/reduced_3dgs/quantization/exclude_zeros.py +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/reduced_3dgs/quantization/quantizer.py +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/reduced_3dgs/quantization/wrapper.py +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/reduced_3dgs/quantize.py +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/reduced_3dgs/shculling/__init__.py +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/reduced_3dgs/shculling/gaussian_model.py +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/reduced_3dgs/shculling/trainer.py +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/reduced_3dgs.egg-info/dependency_links.txt +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/reduced_3dgs.egg-info/requires.txt +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/reduced_3dgs.egg-info/top_level.txt +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/setup.cfg +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/submodules/diff-gaussian-rasterization/cuda_rasterizer/backward.cu +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/submodules/diff-gaussian-rasterization/cuda_rasterizer/forward.cu +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/submodules/diff-gaussian-rasterization/cuda_rasterizer/rasterizer_impl.cu +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/submodules/diff-gaussian-rasterization/diff_gaussian_rasterization/__init__.py +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/submodules/diff-gaussian-rasterization/ext.cpp +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/submodules/diff-gaussian-rasterization/rasterize_points.cu +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/submodules/diff-gaussian-rasterization/reduced_3dgs/kmeans.cu +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/submodules/diff-gaussian-rasterization/reduced_3dgs/redundancy_score.cu +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/submodules/diff-gaussian-rasterization/reduced_3dgs/sh_culling.cu +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/submodules/diff-gaussian-rasterization/reduced_3dgs.cu +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/submodules/gaussian-importance/cuda_rasterizer/backward.cu +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/submodules/gaussian-importance/cuda_rasterizer/forward.cu +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/submodules/gaussian-importance/cuda_rasterizer/rasterizer_impl.cu +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/submodules/gaussian-importance/diff_gaussian_rasterization/__init__.py +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/submodules/gaussian-importance/ext.cpp +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/submodules/gaussian-importance/rasterize_points.cu +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/submodules/simple-knn/ext.cpp +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/submodules/simple-knn/simple_knn.cu +0 -0
- {reduced_3dgs-1.10.0 → reduced_3dgs-1.10.2}/submodules/simple-knn/spatial.cu +0 -0
reduced_3dgs-1.10.2/reduced_3dgs/prepare.py
ADDED
@@ -0,0 +1,105 @@
+from gaussian_splatting import GaussianModel
+from gaussian_splatting.dataset import CameraDataset
+from gaussian_splatting.dataset.colmap import colmap_init
+from gaussian_splatting.trainer import AbstractTrainer
+from gaussian_splatting.trainer.extensions import ScaleRegularizeTrainerWrapper
+from reduced_3dgs.quantization import VectorQuantizeTrainerWrapper
+from reduced_3dgs.shculling import VariableSHGaussianModel, SHCullingTrainer
+from reduced_3dgs.pruning import PruningTrainer
+from reduced_3dgs.combinations import PrunerInDensifyTrainer, SHCullingDensificationTrainer, SHCullingPruningTrainer, SHCullingPrunerInDensifyTrainer
+from reduced_3dgs.combinations import CameraTrainableVariableSHGaussianModel, CameraSHCullingTrainer, CameraPruningTrainer
+from reduced_3dgs.combinations import CameraPrunerInDensifyTrainer, CameraSHCullingDensifyTrainer, CameraSHCullingPruningTrainer, CameraSHCullingPruningDensifyTrainer
+
+
+def prepare_gaussians(sh_degree: int, source: str, device: str, trainable_camera: bool = False, load_ply: str = None) -> GaussianModel:
+    if trainable_camera:
+        gaussians = CameraTrainableVariableSHGaussianModel(sh_degree).to(device)
+        gaussians.load_ply(load_ply) if load_ply else colmap_init(gaussians, source)
+    else:
+        gaussians = VariableSHGaussianModel(sh_degree).to(device)
+        gaussians.load_ply(load_ply) if load_ply else colmap_init(gaussians, source)
+    return gaussians
+
+
+modes = {
+    "shculling": SHCullingTrainer,
+    "pruning": PruningTrainer,
+    "densify-pruning": PrunerInDensifyTrainer,
+    "densify-shculling": SHCullingDensificationTrainer,
+    "prune-shculling": SHCullingPruningTrainer,
+    "densify-prune-shculling": SHCullingPrunerInDensifyTrainer,
+    "camera-shculling": CameraSHCullingTrainer,
+    "camera-pruning": CameraPruningTrainer,
+    "camera-densify-pruning": CameraPrunerInDensifyTrainer,
+    "camera-densify-shculling": CameraSHCullingDensifyTrainer,
+    "camera-prune-shculling": CameraSHCullingPruningTrainer,
+    "camera-densify-prune-shculling": CameraSHCullingPruningDensifyTrainer,
+}
+
+
+def prepare_quantizer(
+        gaussians: GaussianModel,
+        scene_extent: float,
+        dataset: CameraDataset,
+        base_constructor,
+        load_quantized: str = None,
+
+        num_clusters=256,
+        num_clusters_rotation_re=None,
+        num_clusters_rotation_im=None,
+        num_clusters_opacity=None,
+        num_clusters_scaling=None,
+        num_clusters_features_dc=None,
+        num_clusters_features_rest=[],
+
+        quantize_from_iter=5000,
+        quantize_until_iter=30000,
+        quantize_interval=1000,
+        **configs):
+    trainer = VectorQuantizeTrainerWrapper(
+        base_constructor(
+            gaussians,
+            scene_extent=scene_extent,
+            dataset=dataset,
+            **configs
+        ),
+
+        num_clusters=num_clusters,
+        num_clusters_rotation_re=num_clusters_rotation_re,
+        num_clusters_rotation_im=num_clusters_rotation_im,
+        num_clusters_opacity=num_clusters_opacity,
+        num_clusters_scaling=num_clusters_scaling,
+        num_clusters_features_dc=num_clusters_features_dc,
+        num_clusters_features_rest=num_clusters_features_rest,
+
+        quantize_from_iter=quantize_from_iter,
+        quantize_until_iter=quantize_until_iter,
+        quantize_interval=quantize_interval,
+    )
+    if load_quantized:
+        trainer.quantizer.load_quantized(load_quantized)
+    return trainer, trainer.quantizer
+
+
+def prepare_trainer(gaussians: GaussianModel, dataset: CameraDataset, mode: str, with_scale_reg=False, quantize: bool = False, load_quantized: str = None, configs={}) -> AbstractTrainer:
+    constructor = modes[mode]
+    if with_scale_reg:
+        constructor = lambda *args, **kwargs: ScaleRegularizeTrainerWrapper(modes[mode], *args, **kwargs)
+    if quantize:
+        trainer, quantizer = prepare_quantizer(
+            gaussians,
+            scene_extent=dataset.scene_extent(),
+            dataset=dataset,
+            base_constructor=modes[mode],
+            load_quantized=load_quantized,
+            **configs
+        )
+    else:
+        trainer = constructor(
+            gaussians,
+            scene_extent=dataset.scene_extent(),
+            dataset=dataset,
+            **configs
+        )
+        quantizer = None
+    return trainer, quantizer
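For orientation, a minimal usage sketch of the new helpers follows. It assumes a COLMAP-style dataset directory and builds the dataset through gaussian_splatting.prepare.prepare_dataset, mirroring how the accompanying train.py wires things together; the path and mode string are placeholders, not values taken from this release.

```python
# Hypothetical usage of reduced_3dgs.prepare; "data/scene" and the mode name
# are placeholder inputs, and the calls mirror those made in train.py below.
from gaussian_splatting.prepare import prepare_dataset
from reduced_3dgs.prepare import prepare_gaussians, prepare_trainer

dataset = prepare_dataset(source="data/scene", device="cuda",
                          trainable_camera=False, load_camera=None, load_depth=False)
gaussians = prepare_gaussians(sh_degree=3, source="data/scene", device="cuda")
trainer, quantizer = prepare_trainer(
    gaussians, dataset=dataset, mode="densify-prune-shculling",
    quantize=False)  # quantizer stays None unless quantize=True
```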
reduced_3dgs-1.10.2/reduced_3dgs/train.py
ADDED
@@ -0,0 +1,93 @@
+import os
+import random
+import shutil
+from typing import List
+import torch
+from tqdm import tqdm
+from gaussian_splatting import GaussianModel
+from gaussian_splatting.dataset import CameraDataset
+from gaussian_splatting.utils import psnr
+from gaussian_splatting.trainer import AbstractTrainer
+from gaussian_splatting.prepare import prepare_dataset
+from gaussian_splatting.train import save_cfg_args
+from reduced_3dgs.quantization import AbstractQuantizer
+from reduced_3dgs.prepare import modes, prepare_gaussians, prepare_trainer
+
+
+def prepare_training(sh_degree: int, source: str, device: str, mode: str, trainable_camera: bool = False, load_ply: str = None, load_camera: str = None, load_depth=False, with_scale_reg=False, quantize: bool = False, load_quantized: str = None, configs={}):
+    dataset = prepare_dataset(source=source, device=device, trainable_camera=trainable_camera, load_camera=load_camera, load_depth=load_depth)
+    gaussians = prepare_gaussians(sh_degree=sh_degree, source=source, device=device, trainable_camera=trainable_camera, load_ply=load_ply)
+    trainer, quantizer = prepare_trainer(gaussians=gaussians, dataset=dataset, mode=mode, with_scale_reg=with_scale_reg, quantize=quantize, load_quantized=load_quantized, configs=configs)
+    return dataset, gaussians, trainer, quantizer
+
+
+def training(dataset: CameraDataset, gaussians: GaussianModel, trainer: AbstractTrainer, quantizer: AbstractQuantizer, destination: str, iteration: int, save_iterations: List[int], device: str, empty_cache_every_step=False):
+    shutil.rmtree(os.path.join(destination, "point_cloud"), ignore_errors=True)  # remove the previous point cloud
+    pbar = tqdm(range(1, iteration+1))
+    epoch = list(range(len(dataset)))
+    epoch_psnr = torch.empty(3, 0, device=device)
+    ema_loss_for_log = 0.0
+    avg_psnr_for_log = 0.0
+    for step in pbar:
+        epoch_idx = step % len(dataset)
+        if epoch_idx == 0:
+            avg_psnr_for_log = epoch_psnr.mean().item()
+            epoch_psnr = torch.empty(3, 0, device=device)
+            random.shuffle(epoch)
+        idx = epoch[epoch_idx]
+        loss, out = trainer.step(dataset[idx])
+        if empty_cache_every_step:
+            torch.cuda.empty_cache()
+        with torch.no_grad():
+            ema_loss_for_log = 0.4 * loss.item() + 0.6 * ema_loss_for_log
+            epoch_psnr = torch.concat([epoch_psnr, psnr(out["render"], dataset[idx].ground_truth_image)], dim=1)
+            if step % 10 == 0:
+                pbar.set_postfix({'epoch': step // len(dataset), 'loss': ema_loss_for_log, 'psnr': avg_psnr_for_log, 'n': gaussians._xyz.shape[0]})
+            if step in save_iterations:
+                save_path = os.path.join(destination, "point_cloud", "iteration_" + str(step))
+                os.makedirs(save_path, exist_ok=True)
+                gaussians.save_ply(os.path.join(save_path, "point_cloud.ply"))
+                dataset.save_cameras(os.path.join(destination, "cameras.json"))
+                if quantizer:
+                    quantizer.save_quantized(gaussians, os.path.join(save_path, "point_cloud_quantized.ply"))
+    save_path = os.path.join(destination, "point_cloud", "iteration_" + str(iteration))
+    os.makedirs(save_path, exist_ok=True)
+    gaussians.save_ply(os.path.join(save_path, "point_cloud.ply"))
+    dataset.save_cameras(os.path.join(destination, "cameras.json"))
+    if quantizer:
+        quantizer.save_quantized(gaussians, os.path.join(save_path, "point_cloud_quantized.ply"))
+
+
+if __name__ == "__main__":
+    from argparse import ArgumentParser
+    parser = ArgumentParser()
+    parser.add_argument("--sh_degree", default=3, type=int)
+    parser.add_argument("-s", "--source", required=True, type=str)
+    parser.add_argument("-d", "--destination", required=True, type=str)
+    parser.add_argument("-i", "--iteration", default=30000, type=int)
+    parser.add_argument("-l", "--load_ply", default=None, type=str)
+    parser.add_argument("--load_camera", default=None, type=str)
+    parser.add_argument("--quantize", action='store_true')
+    parser.add_argument("--no_depth_data", action='store_true')
+    parser.add_argument("--with_scale_reg", action="store_true")
+    parser.add_argument("--load_quantized", default=None, type=str)
+    parser.add_argument("--mode", choices=list(modes), default="densify-prune-shculling")
+    parser.add_argument("--save_iterations", nargs="+", type=int, default=[7000, 30000])
+    parser.add_argument("--device", default="cuda", type=str)
+    parser.add_argument("--empty_cache_every_step", action='store_true')
+    parser.add_argument("-o", "--option", default=[], action='append', type=str)
+    args = parser.parse_args()
+    save_cfg_args(args.destination, args.sh_degree, args.source)
+    torch.autograd.set_detect_anomaly(False)
+
+    configs = {o.split("=", 1)[0]: eval(o.split("=", 1)[1]) for o in args.option}
+    dataset, gaussians, trainer, quantizer = prepare_training(
+        sh_degree=args.sh_degree, source=args.source, device=args.device, mode=args.mode, trainable_camera="camera" in args.mode,
+        load_ply=args.load_ply, load_camera=args.load_camera, load_depth=not args.no_depth_data, with_scale_reg=args.with_scale_reg,
+        quantize=args.quantize, load_quantized=args.load_quantized, configs=configs)
+    dataset.save_cameras(os.path.join(args.destination, "cameras.json"))
+    torch.cuda.empty_cache()
+    training(
+        dataset=dataset, gaussians=gaussians, trainer=trainer, quantizer=quantizer,
+        destination=args.destination, iteration=args.iteration, save_iterations=args.save_iterations,
+        device=args.device)
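The -o/--option flag shown above collects key=value pairs, eval-ing each value into the configs dict that is forwarded to the trainer constructor. A small stand-alone illustration of that parsing step, using made-up option names rather than options documented by this package:

```python
# Reproduces the comprehension from train.py's __main__ block; the keys below
# are hypothetical examples, not options defined by reduced_3dgs itself.
options = ["iteration_count=30000", "lambda_dssim=0.2"]
configs = {o.split("=", 1)[0]: eval(o.split("=", 1)[1]) for o in options}
print(configs)  # {'iteration_count': 30000, 'lambda_dssim': 0.2}
```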
reduced_3dgs-1.10.0/reduced_3dgs/train.py
REMOVED
@@ -1,195 +0,0 @@
-import os
-import random
-import shutil
-from typing import List, Tuple
-import torch
-from tqdm import tqdm
-from argparse import Namespace
-from gaussian_splatting import GaussianModel
-from gaussian_splatting.dataset import CameraDataset, JSONCameraDataset, TrainableCameraDataset
-from gaussian_splatting.utils import psnr
-from gaussian_splatting.dataset.colmap import ColmapCameraDataset, ColmapTrainableCameraDataset, colmap_init
-from gaussian_splatting.trainer import AbstractTrainer
-from gaussian_splatting.trainer.extensions import ScaleRegularizeTrainerWrapper
-from reduced_3dgs.quantization import AbstractQuantizer, VectorQuantizeTrainerWrapper
-from reduced_3dgs.shculling import VariableSHGaussianModel, SHCullingTrainer
-from reduced_3dgs.pruning import PruningTrainer
-from reduced_3dgs.combinations import PrunerInDensifyTrainer, SHCullingDensificationTrainer, SHCullingPruningTrainer, SHCullingPrunerInDensifyTrainer
-from reduced_3dgs.combinations import CameraTrainableVariableSHGaussianModel, CameraSHCullingTrainer, CameraPruningTrainer
-from reduced_3dgs.combinations import CameraPrunerInDensifyTrainer, CameraSHCullingDensifyTrainer, CameraSHCullingPruningTrainer, CameraSHCullingPruningDensifyTrainer
-
-
-basemodes = {
-    "shculling": SHCullingTrainer,
-    "pruning": PruningTrainer,
-    "densify-pruning": PrunerInDensifyTrainer,
-    "densify-shculling": SHCullingDensificationTrainer,
-    "prune-shculling": SHCullingPruningTrainer,
-    "densify-prune-shculling": SHCullingPrunerInDensifyTrainer,
-}
-cameramodes = {
-    "camera-shculling": CameraSHCullingTrainer,
-    "camera-pruning": CameraPruningTrainer,
-    "camera-densify-pruning": CameraPrunerInDensifyTrainer,
-    "camera-densify-shculling": CameraSHCullingDensifyTrainer,
-    "camera-prune-shculling": CameraSHCullingPruningTrainer,
-    "camera-densify-prune-shculling": CameraSHCullingPruningDensifyTrainer,
-}
-
-
-def prepare_quantizer(
-        gaussians: GaussianModel,
-        scene_extent: float,
-        dataset: CameraDataset,
-        base_constructor,
-        load_quantized: str = None,
-
-        num_clusters=256,
-        num_clusters_rotation_re=None,
-        num_clusters_rotation_im=None,
-        num_clusters_opacity=None,
-        num_clusters_scaling=None,
-        num_clusters_features_dc=None,
-        num_clusters_features_rest=[],
-
-        quantize_from_iter=5000,
-        quantize_until_iter=30000,
-        quantize_interval=1000,
-        **configs):
-    trainer = VectorQuantizeTrainerWrapper(
-        base_constructor(
-            gaussians,
-            scene_extent=scene_extent,
-            dataset=dataset,
-            **configs
-        ),
-
-        num_clusters=num_clusters,
-        num_clusters_rotation_re=num_clusters_rotation_re,
-        num_clusters_rotation_im=num_clusters_rotation_im,
-        num_clusters_opacity=num_clusters_opacity,
-        num_clusters_scaling=num_clusters_scaling,
-        num_clusters_features_dc=num_clusters_features_dc,
-        num_clusters_features_rest=num_clusters_features_rest,
-
-        quantize_from_iter=quantize_from_iter,
-        quantize_until_iter=quantize_until_iter,
-        quantize_interval=quantize_interval,
-    )
-    if load_quantized:
-        trainer.quantizer.load_quantized(load_quantized)
-    return trainer, trainer.quantizer
-
-
-def prepare_training(sh_degree: int, source: str, device: str, mode: str, load_ply: str = None, load_camera: str = None, load_depth=False, with_scale_reg=False, quantize: bool = False, load_quantized: str = None, configs={}) -> Tuple[CameraDataset, GaussianModel, AbstractTrainer]:
-    quantizer = None
-    if mode in basemodes:
-        gaussians = VariableSHGaussianModel(sh_degree).to(device)
-        gaussians.load_ply(load_ply) if load_ply else colmap_init(gaussians, source)
-        dataset = (JSONCameraDataset(load_camera, load_depth=load_depth) if load_camera else ColmapCameraDataset(source, load_depth=load_depth)).to(device)
-        modes = basemodes
-    elif mode in cameramodes:
-        gaussians = CameraTrainableVariableSHGaussianModel(sh_degree).to(device)
-        gaussians.load_ply(load_ply) if load_ply else colmap_init(gaussians, source)
-        dataset = (TrainableCameraDataset.from_json(load_camera, load_depth=load_depth) if load_camera else ColmapTrainableCameraDataset(source, load_depth=load_depth)).to(device)
-        modes = cameramodes
-    else:
-        raise ValueError(f"Unknown mode: {mode}")
-    constructor = modes[mode]
-    if with_scale_reg:
-        constructor = lambda *args, **kwargs: ScaleRegularizeTrainerWrapper(modes[mode], *args, **kwargs)
-    if quantize:
-        trainer, quantizer = prepare_quantizer(
-            gaussians,
-            scene_extent=dataset.scene_extent(),
-            dataset=dataset,
-            base_constructor=modes[mode],
-            load_quantized=load_quantized,
-            **configs
-        )
-    else:
-        trainer = constructor(
-            gaussians,
-            scene_extent=dataset.scene_extent(),
-            dataset=dataset,
-            **configs
-        )
-    return dataset, gaussians, trainer, quantizer
-
-
-def save_cfg_args(destination: str, sh_degree: int, source: str):
-    os.makedirs(destination, exist_ok=True)
-    with open(os.path.join(destination, "cfg_args"), 'w') as cfg_log_f:
-        cfg_log_f.write(str(Namespace(sh_degree=sh_degree, source_path=source)))
-
-
-def training(dataset: CameraDataset, gaussians: GaussianModel, trainer: AbstractTrainer, quantizer: AbstractQuantizer, destination: str, iteration: int, save_iterations: List[int], device: str, empty_cache_every_step=False):
-    shutil.rmtree(os.path.join(destination, "point_cloud"), ignore_errors=True)  # remove the previous point cloud
-    pbar = tqdm(range(1, iteration+1))
-    epoch = list(range(len(dataset)))
-    epoch_psnr = torch.empty(3, 0, device=device)
-    ema_loss_for_log = 0.0
-    avg_psnr_for_log = 0.0
-    for step in pbar:
-        epoch_idx = step % len(dataset)
-        if epoch_idx == 0:
-            avg_psnr_for_log = epoch_psnr.mean().item()
-            epoch_psnr = torch.empty(3, 0, device=device)
-            random.shuffle(epoch)
-        idx = epoch[epoch_idx]
-        loss, out = trainer.step(dataset[idx])
-        if empty_cache_every_step:
-            torch.cuda.empty_cache()
-        with torch.no_grad():
-            ema_loss_for_log = 0.4 * loss.item() + 0.6 * ema_loss_for_log
-            epoch_psnr = torch.concat([epoch_psnr, psnr(out["render"], dataset[idx].ground_truth_image)], dim=1)
-            if step % 10 == 0:
-                pbar.set_postfix({'epoch': step // len(dataset), 'loss': ema_loss_for_log, 'psnr': avg_psnr_for_log, 'n': gaussians._xyz.shape[0]})
-            if step in save_iterations:
-                save_path = os.path.join(destination, "point_cloud", "iteration_" + str(step))
-                os.makedirs(save_path, exist_ok=True)
-                gaussians.save_ply(os.path.join(save_path, "point_cloud.ply"))
-                dataset.save_cameras(os.path.join(destination, "cameras.json"))
-                if quantizer:
-                    quantizer.save_quantized(gaussians, os.path.join(save_path, "point_cloud_quantized.ply"))
-    save_path = os.path.join(destination, "point_cloud", "iteration_" + str(iteration))
-    os.makedirs(save_path, exist_ok=True)
-    gaussians.save_ply(os.path.join(save_path, "point_cloud.ply"))
-    dataset.save_cameras(os.path.join(destination, "cameras.json"))
-    if quantizer:
-        quantizer.save_quantized(gaussians, os.path.join(save_path, "point_cloud_quantized.ply"))
-
-
-if __name__ == "__main__":
-    from argparse import ArgumentParser, Namespace
-    parser = ArgumentParser()
-    parser.add_argument("--sh_degree", default=3, type=int)
-    parser.add_argument("-s", "--source", required=True, type=str)
-    parser.add_argument("-d", "--destination", required=True, type=str)
-    parser.add_argument("-i", "--iteration", default=30000, type=int)
-    parser.add_argument("-l", "--load_ply", default=None, type=str)
-    parser.add_argument("--load_camera", default=None, type=str)
-    parser.add_argument("--quantize", action='store_true')
-    parser.add_argument("--no_depth_data", action='store_true')
-    parser.add_argument("--with_scale_reg", action="store_true")
-    parser.add_argument("--load_quantized", default=None, type=str)
-    parser.add_argument("--mode", choices=list(basemodes.keys()) + list(cameramodes.keys()), default="densify-prune-shculling")
-    parser.add_argument("--save_iterations", nargs="+", type=int, default=[7000, 30000])
-    parser.add_argument("--device", default="cuda", type=str)
-    parser.add_argument("--empty_cache_every_step", action='store_true')
-    parser.add_argument("-o", "--option", default=[], action='append', type=str)
-    args = parser.parse_args()
-    save_cfg_args(args.destination, args.sh_degree, args.source)
-    torch.autograd.set_detect_anomaly(False)
-
-    configs = {o.split("=", 1)[0]: eval(o.split("=", 1)[1]) for o in args.option}
-    dataset, gaussians, trainer, quantizer = prepare_training(
-        sh_degree=args.sh_degree, source=args.source, device=args.device, mode=args.mode,
-        load_ply=args.load_ply, load_camera=args.load_camera, load_depth=not args.no_depth_data, with_scale_reg=args.with_scale_reg,
-        quantize=args.quantize, load_quantized=args.load_quantized, configs=configs)
-    dataset.save_cameras(os.path.join(args.destination, "cameras.json"))
-    torch.cuda.empty_cache()
-    training(
-        dataset=dataset, gaussians=gaussians, trainer=trainer, quantizer=quantizer,
-        destination=args.destination, iteration=args.iteration, save_iterations=args.save_iterations,
-        device=args.device)
All remaining files listed above (+0 -0) were renamed from reduced_3dgs-1.10.0 to reduced_3dgs-1.10.2 without content changes.