reduced-3dgs 1.10.4.tar.gz → 1.10.13.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/PKG-INFO +16 -33
  2. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/README.md +14 -26
  3. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/reduced_3dgs/combinations.py +8 -2
  4. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/reduced_3dgs/importance/combinations.py +8 -2
  5. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/reduced_3dgs/pruning/combinations.py +8 -2
  6. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/reduced_3dgs/quantization/quantizer.py +11 -4
  7. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/reduced_3dgs/train.py +26 -6
  8. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/reduced_3dgs.egg-info/PKG-INFO +16 -33
  9. reduced_3dgs-1.10.13/reduced_3dgs.egg-info/requires.txt +2 -0
  10. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/setup.py +2 -7
  11. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/submodules/simple-knn/simple_knn.cu +1 -0
  12. reduced_3dgs-1.10.4/reduced_3dgs.egg-info/requires.txt +0 -7
  13. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/LICENSE.md +0 -0
  14. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/reduced_3dgs/__init__.py +0 -0
  15. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/reduced_3dgs/importance/__init__.py +0 -0
  16. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/reduced_3dgs/importance/trainer.py +0 -0
  17. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/reduced_3dgs/prepare.py +0 -0
  18. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/reduced_3dgs/pruning/__init__.py +0 -0
  19. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/reduced_3dgs/pruning/trainer.py +0 -0
  20. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/reduced_3dgs/quantization/__init__.py +0 -0
  21. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/reduced_3dgs/quantization/abc.py +0 -0
  22. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/reduced_3dgs/quantization/exclude_zeros.py +0 -0
  23. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/reduced_3dgs/quantization/wrapper.py +0 -0
  24. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/reduced_3dgs/quantize.py +0 -0
  25. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/reduced_3dgs/shculling/__init__.py +0 -0
  26. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/reduced_3dgs/shculling/gaussian_model.py +0 -0
  27. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/reduced_3dgs/shculling/trainer.py +0 -0
  28. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/reduced_3dgs.egg-info/SOURCES.txt +0 -0
  29. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/reduced_3dgs.egg-info/dependency_links.txt +0 -0
  30. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/reduced_3dgs.egg-info/top_level.txt +0 -0
  31. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/setup.cfg +0 -0
  32. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/submodules/diff-gaussian-rasterization/cuda_rasterizer/backward.cu +0 -0
  33. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/submodules/diff-gaussian-rasterization/cuda_rasterizer/forward.cu +0 -0
  34. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/submodules/diff-gaussian-rasterization/cuda_rasterizer/rasterizer_impl.cu +0 -0
  35. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/submodules/diff-gaussian-rasterization/diff_gaussian_rasterization/__init__.py +0 -0
  36. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/submodules/diff-gaussian-rasterization/ext.cpp +0 -0
  37. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/submodules/diff-gaussian-rasterization/rasterize_points.cu +0 -0
  38. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/submodules/diff-gaussian-rasterization/reduced_3dgs/kmeans.cu +0 -0
  39. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/submodules/diff-gaussian-rasterization/reduced_3dgs/redundancy_score.cu +0 -0
  40. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/submodules/diff-gaussian-rasterization/reduced_3dgs/sh_culling.cu +0 -0
  41. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/submodules/diff-gaussian-rasterization/reduced_3dgs.cu +0 -0
  42. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/submodules/gaussian-importance/cuda_rasterizer/backward.cu +0 -0
  43. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/submodules/gaussian-importance/cuda_rasterizer/forward.cu +0 -0
  44. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/submodules/gaussian-importance/cuda_rasterizer/rasterizer_impl.cu +0 -0
  45. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/submodules/gaussian-importance/diff_gaussian_rasterization/__init__.py +0 -0
  46. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/submodules/gaussian-importance/ext.cpp +0 -0
  47. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/submodules/gaussian-importance/rasterize_points.cu +0 -0
  48. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/submodules/simple-knn/ext.cpp +0 -0
  49. {reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/submodules/simple-knn/spatial.cu +0 -0

{reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: reduced_3dgs
- Version: 1.10.4
+ Version: 1.10.13
  Summary: Refactored code for the paper "Reducing the Memory Footprint of 3D Gaussian Splatting"
  Home-page: https://github.com/yindaheng98/reduced-3dgs
  Author: yindaheng98
@@ -8,13 +8,8 @@ Author-email: yindaheng98@gmail.com
  Classifier: Programming Language :: Python :: 3
  Description-Content-Type: text/markdown
  License-File: LICENSE.md
- Requires-Dist: tqdm
- Requires-Dist: plyfile
- Requires-Dist: scikit-learn
- Requires-Dist: torch
- Requires-Dist: torchvision
- Requires-Dist: numpy
  Requires-Dist: gaussian-splatting
+ Requires-Dist: scikit-learn
  Dynamic: author
  Dynamic: author-email
  Dynamic: classifier
@@ -40,47 +35,35 @@ This repository contains the **refactored Python code for [Reduced-3DGS](https:/

  * [Pytorch](https://pytorch.org/) (v2.4 or higher recommended)
  * [CUDA Toolkit](https://developer.nvidia.com/cuda-12-4-0-download-archive) (12.4 recommended, should match with PyTorch version)
+ * (Optional) [cuML](https://github.com/rapidsai/cuml) for faster vector quantization

- ## Install (PyPI)
-
+ (Optional) If you have trouble with [`gaussian-splatting`](https://github.com/yindaheng98/gaussian-splatting), try to install it from source:
  ```sh
- pip install --upgrade reduced-3dgs
+ pip install wheel setuptools
+ pip install --upgrade git+https://github.com/yindaheng98/gaussian-splatting.git@master --no-build-isolation
  ```

- ## Install (Build from source)
-
- ```sh
- pip install --upgrade git+https://github.com/yindaheng98/reduced-3dgs.git@main
- ```
- If you have trouble with [`gaussian-splatting`](https://github.com/yindaheng98/gaussian-splatting), you can install it from source:
- ```sh
- pip install --upgrade git+https://github.com/yindaheng98/gaussian-splatting.git@master
- ```
+ ## PyPI Install

- ## Install (Development)
-
- Install [`gaussian-splatting`](https://github.com/yindaheng98/gaussian-splatting).
- You can download the wheel from [PyPI](https://pypi.org/project/gaussian-splatting/):
  ```shell
- pip install --upgrade gaussian-splatting
+ pip install --upgrade reduced-3dgs
  ```
- Alternatively, install the latest version from the source:
- ```sh
- pip install --upgrade git+https://github.com/yindaheng98/gaussian-splatting.git@master
+ or
+ build latest from source:
+ ```shell
+ pip install wheel setuptools
+ pip install --upgrade git+https://github.com/yindaheng98/reduced-3dgs.git@main --no-build-isolation
  ```

+ ### Development Install
+
  ```shell
  git clone --recursive https://github.com/yindaheng98/reduced-3dgs
  cd reduced-3dgs
- pip install tqdm plyfile scikit-learn numpy tifffile triton xformers
+ pip install scikit-learn
  pip install --target . --upgrade --no-deps .
  ```

- (Optional) If you prefer not to install `gaussian-splatting` in your environment, you can install it in your `reduced-3dgs` directory:
- ```sh
- pip install --target . --no-deps --upgrade git+https://github.com/yindaheng98/gaussian-splatting.git@master
- ```
-
  ## Quick Start

  1. Download the dataset (T&T+DB COLMAP dataset, size 650MB):

{reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/README.md
@@ -13,47 +13,35 @@ This repository contains the **refactored Python code for [Reduced-3DGS](https:/

  * [Pytorch](https://pytorch.org/) (v2.4 or higher recommended)
  * [CUDA Toolkit](https://developer.nvidia.com/cuda-12-4-0-download-archive) (12.4 recommended, should match with PyTorch version)
+ * (Optional) [cuML](https://github.com/rapidsai/cuml) for faster vector quantization

- ## Install (PyPI)
-
- ```sh
- pip install --upgrade reduced-3dgs
- ```
-
- ## Install (Build from source)
-
- ```sh
- pip install --upgrade git+https://github.com/yindaheng98/reduced-3dgs.git@main
- ```
- If you have trouble with [`gaussian-splatting`](https://github.com/yindaheng98/gaussian-splatting), you can install it from source:
+ (Optional) If you have trouble with [`gaussian-splatting`](https://github.com/yindaheng98/gaussian-splatting), try to install it from source:
  ```sh
- pip install --upgrade git+https://github.com/yindaheng98/gaussian-splatting.git@master
+ pip install wheel setuptools
+ pip install --upgrade git+https://github.com/yindaheng98/gaussian-splatting.git@master --no-build-isolation
  ```

- ## Install (Development)
+ ## PyPI Install

- Install [`gaussian-splatting`](https://github.com/yindaheng98/gaussian-splatting).
- You can download the wheel from [PyPI](https://pypi.org/project/gaussian-splatting/):
  ```shell
- pip install --upgrade gaussian-splatting
+ pip install --upgrade reduced-3dgs
  ```
- Alternatively, install the latest version from the source:
- ```sh
- pip install --upgrade git+https://github.com/yindaheng98/gaussian-splatting.git@master
+ or
+ build latest from source:
+ ```shell
+ pip install wheel setuptools
+ pip install --upgrade git+https://github.com/yindaheng98/reduced-3dgs.git@main --no-build-isolation
  ```

+ ### Development Install
+
  ```shell
  git clone --recursive https://github.com/yindaheng98/reduced-3dgs
  cd reduced-3dgs
- pip install tqdm plyfile scikit-learn numpy tifffile triton xformers
+ pip install scikit-learn
  pip install --target . --upgrade --no-deps .
  ```

- (Optional) If you prefer not to install `gaussian-splatting` in your environment, you can install it in your `reduced-3dgs` directory:
- ```sh
- pip install --target . --no-deps --upgrade git+https://github.com/yindaheng98/gaussian-splatting.git@master
- ```
-
  ## Quick Start

  1. Download the dataset (T&T+DB COLMAP dataset, size 650MB):

{reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/reduced_3dgs/combinations.py
@@ -97,11 +97,17 @@ def BaseFullPrunerInDensifyTrainer(


  def DepthFullPruningTrainer(model: GaussianModel, scene_extent: float, dataset: TrainableCameraDataset, *args, **kwargs):
-     return DepthTrainerWrapper(BaseFullPruningTrainer, model, scene_extent, *args, dataset=dataset, **kwargs)
+     return DepthTrainerWrapper(
+         BaseFullPruningTrainer,
+         model, scene_extent, dataset,
+         *args, **kwargs)


  def DepthFullPrunerInDensifyTrainer(model: GaussianModel, scene_extent: float, dataset: TrainableCameraDataset, *args, **kwargs):
-     return DepthTrainerWrapper(BaseFullPrunerInDensifyTrainer, model, scene_extent, *args, dataset=dataset, **kwargs)
+     return DepthTrainerWrapper(
+         BaseFullPrunerInDensifyTrainer,
+         model, scene_extent, dataset,
+         *args, **kwargs)


  def OpacityResetPruningTrainer(

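The wrappers above (and their counterparts in the importance and pruning modules below) now pass `dataset` positionally ahead of `*args` instead of as a keyword. As a general Python illustration of why keyword forwarding next to `*args` is fragile, here is a minimal, hypothetical sketch; `base`, `wrapper_keyword`, and `wrapper_positional` are stand-ins and do not reflect the real `DepthTrainerWrapper` signature, which this diff does not show:

```python
# Hypothetical sketch: forwarding an argument as a keyword alongside *args can
# collide with a positional value the caller already supplied.

def base(model, scene_extent, dataset, *args, **kwargs):
    return model, scene_extent, dataset, args, kwargs

def wrapper_keyword(base_fn, model, scene_extent, *args, dataset=None, **kwargs):
    # dataset is forwarded as a keyword, *args is forwarded separately.
    return base_fn(model, scene_extent, *args, dataset=dataset, **kwargs)

def wrapper_positional(base_fn, model, scene_extent, dataset, *args, **kwargs):
    # dataset is forwarded positionally, before *args, so base_fn sees it exactly once.
    return base_fn(model, scene_extent, dataset, *args, **kwargs)

try:
    # "my_dataset" lands in *args, then dataset=None is also forwarded as a keyword.
    wrapper_keyword(base, "model", 1.0, "my_dataset")
except TypeError as err:
    print(err)  # base() got multiple values for argument 'dataset'

print(wrapper_positional(base, "model", 1.0, "my_dataset"))
```
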
{reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/reduced_3dgs/importance/combinations.py
@@ -52,11 +52,17 @@ def BaseImportancePrunerInDensifyTrainer(


  def DepthImportancePruningTrainer(model: GaussianModel, scene_extent: float, dataset: TrainableCameraDataset, *args, **kwargs):
-     return DepthTrainerWrapper(BaseImportancePruningTrainer, model, scene_extent, *args, dataset=dataset, **kwargs)
+     return DepthTrainerWrapper(
+         BaseImportancePruningTrainer,
+         model, scene_extent, dataset,
+         *args, **kwargs)


  def DepthImportancePrunerInDensifyTrainer(model: GaussianModel, scene_extent: float, dataset: TrainableCameraDataset, *args, **kwargs):
-     return DepthTrainerWrapper(BaseImportancePrunerInDensifyTrainer, model, scene_extent, *args, dataset=dataset, **kwargs)
+     return DepthTrainerWrapper(
+         BaseImportancePrunerInDensifyTrainer,
+         model, scene_extent, dataset,
+         *args, **kwargs)


  ImportancePruningTrainer = DepthImportancePruningTrainer

{reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/reduced_3dgs/pruning/combinations.py
@@ -54,11 +54,17 @@ def BasePrunerInDensifyTrainer(


  def DepthPruningTrainer(model: GaussianModel, scene_extent: float, dataset: TrainableCameraDataset, *args, **kwargs):
-     return DepthTrainerWrapper(BasePruningTrainer, model, scene_extent, *args, dataset=dataset, **kwargs)
+     return DepthTrainerWrapper(
+         BasePruningTrainer,
+         model, scene_extent, dataset,
+         *args, **kwargs)


  def DepthPrunerInDensifyTrainer(model: GaussianModel, scene_extent: float, dataset: TrainableCameraDataset, *args, **kwargs):
-     return DepthTrainerWrapper(BasePrunerInDensifyTrainer, model, scene_extent, *args, dataset=dataset, **kwargs)
+     return DepthTrainerWrapper(
+         BasePrunerInDensifyTrainer,
+         model, scene_extent, dataset,
+         *args, **kwargs)


  PruningTrainer = DepthPruningTrainer

{reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/reduced_3dgs/quantization/quantizer.py
@@ -4,7 +4,15 @@ from typing import Dict, Tuple
  import torch
  import torch.nn as nn
  import numpy as np
- from sklearn.cluster import MiniBatchKMeans as KMeans
+ try:
+     from cuml.cluster import KMeans
+     kmeans_init = 'k-means||'
+ except ImportError:
+     print("Cuml not found, using sklearn's MiniBatchKMeans for quantization.")
+     from sklearn.cluster import MiniBatchKMeans
+     from functools import partial
+     KMeans = partial(MiniBatchKMeans, batch_size=256 * os.cpu_count())
+     kmeans_init = 'k-means++'
  from gaussian_splatting import GaussianModel
  from plyfile import PlyData, PlyElement
  import numpy as np
@@ -65,9 +73,8 @@ class VectorQuantizer(AbstractQuantizer):
      def generate_codebook(self, values: torch.Tensor, num_clusters, init_codebook=None):
          kmeans = KMeans(
              n_clusters=num_clusters, tol=self.tol, max_iter=self.max_iter,
-             init='k-means++' if init_codebook is None else init_codebook.cpu().numpy(),
-             random_state=0, n_init="auto", verbose=0,
-             batch_size=256 * os.cpu_count()
+             init=kmeans_init if init_codebook is None else init_codebook.cpu().numpy(),
+             random_state=0, n_init="auto", verbose=1,
          )
          ids = torch.tensor(kmeans.fit_predict(values.cpu().numpy()), device=values.device)
          centers = torch.tensor(kmeans.cluster_centers_, dtype=values.dtype, device=values.device)

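The quantizer now prefers cuML's GPU `KMeans` and falls back to scikit-learn's `MiniBatchKMeans` when cuML is absent. Below is a minimal, self-contained sketch of that backend-selection pattern; only the import fallback mirrors the diff, while `build_codebook` and its parameter values are illustrative assumptions rather than code from the package:

```python
# Hypothetical sketch of the cuML-first / scikit-learn-fallback pattern shown above.
import os
import numpy as np

try:
    from cuml.cluster import KMeans           # GPU k-means from RAPIDS cuML
    KMEANS_INIT = "k-means||"                  # cuML's scalable initialization
except ImportError:
    from functools import partial
    from sklearn.cluster import MiniBatchKMeans
    # CPU fallback: mini-batch k-means with a batch size scaled to the core count.
    KMeans = partial(MiniBatchKMeans, batch_size=256 * (os.cpu_count() or 1))
    KMEANS_INIT = "k-means++"


def build_codebook(values: np.ndarray, num_clusters: int = 16):
    """Cluster rows of `values` and return (codebook, cluster ids)."""
    kmeans = KMeans(n_clusters=num_clusters, init=KMEANS_INIT, max_iter=100, random_state=0)
    ids = kmeans.fit_predict(values)
    return kmeans.cluster_centers_, ids


if __name__ == "__main__":
    centers, ids = build_codebook(np.random.rand(1000, 3).astype(np.float32))
    print(centers.shape, ids.shape)  # (16, 3) (1000,)
```
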
{reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/reduced_3dgs/train.py
@@ -14,8 +14,12 @@ from reduced_3dgs.quantization import AbstractQuantizer
  from reduced_3dgs.prepare import modes, prepare_gaussians, prepare_trainer


- def prepare_training(sh_degree: int, source: str, device: str, mode: str, trainable_camera: bool = False, load_ply: str = None, load_camera: str = None, load_depth=False, with_scale_reg=False, quantize: bool = False, load_quantized: str = None, configs={}):
-     dataset = prepare_dataset(source=source, device=device, trainable_camera=trainable_camera, load_camera=load_camera, load_depth=load_depth)
+ def prepare_training(
+         sh_degree: int, source: str, device: str, mode: str,
+         trainable_camera: bool = False, load_ply: str = None, load_camera: str = None,
+         load_mask=True, load_depth=True,
+         with_scale_reg=False, quantize: bool = False, load_quantized: str = None, configs={}):
+     dataset = prepare_dataset(source=source, device=device, trainable_camera=trainable_camera, load_camera=load_camera, load_mask=load_mask, load_depth=load_depth)
      gaussians = prepare_gaussians(sh_degree=sh_degree, source=source, device=device, trainable_camera=trainable_camera, load_ply=load_ply)
      trainer, quantizer = prepare_trainer(gaussians=gaussians, dataset=dataset, mode=mode, with_scale_reg=with_scale_reg, quantize=quantize, load_quantized=load_quantized, configs=configs)
      return dataset, gaussians, trainer, quantizer
@@ -23,26 +27,39 @@ def prepare_training(sh_degree: int, source: str, device: str, mode: str, traina


  def training(dataset: CameraDataset, gaussians: GaussianModel, trainer: AbstractTrainer, quantizer: AbstractQuantizer, destination: str, iteration: int, save_iterations: List[int], device: str, empty_cache_every_step=False):
      shutil.rmtree(os.path.join(destination, "point_cloud"), ignore_errors=True) # remove the previous point cloud
-     pbar = tqdm(range(1, iteration+1))
+     pbar = tqdm(range(1, iteration+1), dynamic_ncols=True, desc="Training")
      epoch = list(range(len(dataset)))
      epoch_psnr = torch.empty(3, 0, device=device)
+     epoch_maskpsnr = torch.empty(3, 0, device=device)
      ema_loss_for_log = 0.0
      avg_psnr_for_log = 0.0
+     avg_maskpsnr_for_log = 0.0
      for step in pbar:
          epoch_idx = step % len(dataset)
          if epoch_idx == 0:
              avg_psnr_for_log = epoch_psnr.mean().item()
+             avg_maskpsnr_for_log = epoch_maskpsnr.mean().item()
              epoch_psnr = torch.empty(3, 0, device=device)
+             epoch_maskpsnr = torch.empty(3, 0, device=device)
              random.shuffle(epoch)
          idx = epoch[epoch_idx]
          loss, out = trainer.step(dataset[idx])
          if empty_cache_every_step:
              torch.cuda.empty_cache()
          with torch.no_grad():
+             ground_truth_image = dataset[idx].ground_truth_image
+             rendered_image = out["render"]
+             epoch_psnr = torch.concat([epoch_psnr, psnr(rendered_image, ground_truth_image)], dim=1)
+             if dataset[idx].ground_truth_image_mask is not None:
+                 ground_truth_maskimage = ground_truth_image * dataset[idx].ground_truth_image_mask
+                 rendered_maskimage = rendered_image * dataset[idx].ground_truth_image_mask
+                 epoch_maskpsnr = torch.concat([epoch_maskpsnr, psnr(rendered_maskimage, ground_truth_maskimage)], dim=1)
              ema_loss_for_log = 0.4 * loss.item() + 0.6 * ema_loss_for_log
-             epoch_psnr = torch.concat([epoch_psnr, psnr(out["render"], dataset[idx].ground_truth_image)], dim=1)
          if step % 10 == 0:
-             pbar.set_postfix({'epoch': step // len(dataset), 'loss': ema_loss_for_log, 'psnr': avg_psnr_for_log, 'n': gaussians._xyz.shape[0]})
+             postfix = {'epoch': step // len(dataset), 'loss': ema_loss_for_log, 'psnr': avg_psnr_for_log, 'masked psnr': avg_maskpsnr_for_log, 'n': gaussians._xyz.shape[0]}
+             if avg_maskpsnr_for_log <= 0:
+                 del postfix['masked psnr']
+             pbar.set_postfix(postfix)
          if step in save_iterations:
              save_path = os.path.join(destination, "point_cloud", "iteration_" + str(step))
              os.makedirs(save_path, exist_ok=True)
@@ -68,6 +85,7 @@ if __name__ == "__main__":
      parser.add_argument("-l", "--load_ply", default=None, type=str)
      parser.add_argument("--load_camera", default=None, type=str)
      parser.add_argument("--quantize", action='store_true')
+     parser.add_argument("--no_image_mask", action="store_true")
      parser.add_argument("--no_depth_data", action='store_true')
      parser.add_argument("--with_scale_reg", action="store_true")
      parser.add_argument("--load_quantized", default=None, type=str)
@@ -83,7 +101,9 @@ if __name__ == "__main__":
      configs = {o.split("=", 1)[0]: eval(o.split("=", 1)[1]) for o in args.option}
      dataset, gaussians, trainer, quantizer = prepare_training(
          sh_degree=args.sh_degree, source=args.source, device=args.device, mode=args.mode, trainable_camera="camera" in args.mode,
-         load_ply=args.load_ply, load_camera=args.load_camera, load_depth=not args.no_depth_data, with_scale_reg=args.with_scale_reg,
+         load_ply=args.load_ply, load_camera=args.load_camera,
+         load_mask=not args.no_image_mask, load_depth=not args.no_depth_data,
+         with_scale_reg=args.with_scale_reg,
          quantize=args.quantize, load_quantized=args.load_quantized, configs=configs)
      dataset.save_cameras(os.path.join(args.destination, "cameras.json"))
      torch.cuda.empty_cache()

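The training loop now also logs a PSNR computed on mask-multiplied images whenever a ground-truth mask is available. As a rough standalone illustration, assuming (3, H, W) images in [0, 1] and a per-channel result of shape (3, 1), the hypothetical `per_channel_psnr` below reproduces that bookkeeping; the package's actual `psnr` helper comes from `gaussian_splatting` and may differ in detail:

```python
# Hypothetical helper mirroring the masked-PSNR bookkeeping above.
from typing import Optional
import torch


def per_channel_psnr(rendered: torch.Tensor, ground_truth: torch.Tensor,
                     mask: Optional[torch.Tensor] = None) -> torch.Tensor:
    """PSNR per color channel for (3, H, W) images in [0, 1]; returns shape (3, 1)."""
    if mask is not None:
        # Match the diff above: compare mask-multiplied images, so pixels outside
        # the mask contribute zero error on both sides.
        rendered = rendered * mask
        ground_truth = ground_truth * mask
    mse = ((rendered - ground_truth) ** 2).flatten(1).mean(dim=1, keepdim=True)
    return 10.0 * torch.log10(1.0 / mse.clamp_min(1e-12))


if __name__ == "__main__":
    gt = torch.rand(3, 64, 64)
    render = (gt + 0.05 * torch.randn_like(gt)).clamp(0, 1)
    mask = (torch.rand(1, 64, 64) > 0.3).float()
    epoch_psnr = torch.empty(3, 0)
    epoch_psnr = torch.concat([epoch_psnr, per_channel_psnr(render, gt, mask)], dim=1)
    print(epoch_psnr.shape)  # torch.Size([3, 1])
```
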
{reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/reduced_3dgs.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: reduced_3dgs
- Version: 1.10.4
+ Version: 1.10.13
  Summary: Refactored code for the paper "Reducing the Memory Footprint of 3D Gaussian Splatting"
  Home-page: https://github.com/yindaheng98/reduced-3dgs
  Author: yindaheng98
@@ -8,13 +8,8 @@ Author-email: yindaheng98@gmail.com
  Classifier: Programming Language :: Python :: 3
  Description-Content-Type: text/markdown
  License-File: LICENSE.md
- Requires-Dist: tqdm
- Requires-Dist: plyfile
- Requires-Dist: scikit-learn
- Requires-Dist: torch
- Requires-Dist: torchvision
- Requires-Dist: numpy
  Requires-Dist: gaussian-splatting
+ Requires-Dist: scikit-learn
  Dynamic: author
  Dynamic: author-email
  Dynamic: classifier
@@ -40,47 +35,35 @@ This repository contains the **refactored Python code for [Reduced-3DGS](https:/

  * [Pytorch](https://pytorch.org/) (v2.4 or higher recommended)
  * [CUDA Toolkit](https://developer.nvidia.com/cuda-12-4-0-download-archive) (12.4 recommended, should match with PyTorch version)
+ * (Optional) [cuML](https://github.com/rapidsai/cuml) for faster vector quantization

- ## Install (PyPI)
-
+ (Optional) If you have trouble with [`gaussian-splatting`](https://github.com/yindaheng98/gaussian-splatting), try to install it from source:
  ```sh
- pip install --upgrade reduced-3dgs
+ pip install wheel setuptools
+ pip install --upgrade git+https://github.com/yindaheng98/gaussian-splatting.git@master --no-build-isolation
  ```

- ## Install (Build from source)
-
- ```sh
- pip install --upgrade git+https://github.com/yindaheng98/reduced-3dgs.git@main
- ```
- If you have trouble with [`gaussian-splatting`](https://github.com/yindaheng98/gaussian-splatting), you can install it from source:
- ```sh
- pip install --upgrade git+https://github.com/yindaheng98/gaussian-splatting.git@master
- ```
+ ## PyPI Install

- ## Install (Development)
-
- Install [`gaussian-splatting`](https://github.com/yindaheng98/gaussian-splatting).
- You can download the wheel from [PyPI](https://pypi.org/project/gaussian-splatting/):
  ```shell
- pip install --upgrade gaussian-splatting
+ pip install --upgrade reduced-3dgs
  ```
- Alternatively, install the latest version from the source:
- ```sh
- pip install --upgrade git+https://github.com/yindaheng98/gaussian-splatting.git@master
+ or
+ build latest from source:
+ ```shell
+ pip install wheel setuptools
+ pip install --upgrade git+https://github.com/yindaheng98/reduced-3dgs.git@main --no-build-isolation
  ```

+ ### Development Install
+
  ```shell
  git clone --recursive https://github.com/yindaheng98/reduced-3dgs
  cd reduced-3dgs
- pip install tqdm plyfile scikit-learn numpy tifffile triton xformers
+ pip install scikit-learn
  pip install --target . --upgrade --no-deps .
  ```

- (Optional) If you prefer not to install `gaussian-splatting` in your environment, you can install it in your `reduced-3dgs` directory:
- ```sh
- pip install --target . --no-deps --upgrade git+https://github.com/yindaheng98/gaussian-splatting.git@master
- ```
-
  ## Quick Start

  1. Download the dataset (T&T+DB COLMAP dataset, size 650MB):

reduced_3dgs-1.10.13/reduced_3dgs.egg-info/requires.txt
@@ -0,0 +1,2 @@
+ gaussian-splatting
+ scikit-learn

{reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/setup.py
@@ -60,7 +60,7 @@ if os.name == 'nt':

  setup(
      name="reduced_3dgs",
-     version='1.10.4',
+     version='1.10.13',
      author='yindaheng98',
      author_email='yindaheng98@gmail.com',
      url='https://github.com/yindaheng98/reduced-3dgs',
@@ -97,12 +97,7 @@ setup(
          'build_ext': BuildExtension
      },
      install_requires=[
-         'tqdm',
-         'plyfile',
-         'scikit-learn',
-         'torch',
-         'torchvision',
-         'numpy',
          'gaussian-splatting',
+         'scikit-learn',
      ]
  )

{reduced_3dgs-1.10.4 → reduced_3dgs-1.10.13}/submodules/simple-knn/simple_knn.cu
@@ -12,6 +12,7 @@
  #define BOX_SIZE 1024
  #define BOX_SIZE2 128

+ #include <float.h>
  #include "cuda_runtime.h"
  #include "device_launch_parameters.h"
  #include "simple_knn.h"

reduced_3dgs-1.10.4/reduced_3dgs.egg-info/requires.txt
@@ -1,7 +0,0 @@
- tqdm
- plyfile
- scikit-learn
- torch
- torchvision
- numpy
- gaussian-splatting