gaussian-splatting 1.17.11-cp310-cp310-win_amd64.whl → 1.18.0-cp310-cp310-win_amd64.whl

This diff shows the changes between two publicly released versions of this package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.


This version of gaussian-splatting might be problematic.

@@ -133,7 +133,7 @@ def build_camera(
     )
 
 
-def dict2camera(camera_dict, load_depth=False, device="cuda", custom_data: dict = {}):
+def dict2camera(camera_dict, load_mask=True, load_depth=True, device="cuda", custom_data: dict = {}):
     C2W = torch.zeros((4, 4), device=device)
     C2W[:3, 3] = torch.tensor(camera_dict['position'], dtype=torch.float, device=device)
     C2W[:3, :3] = torch.tensor(camera_dict['rotation'], dtype=torch.float, device=device)
@@ -141,6 +141,12 @@ def dict2camera(camera_dict, load_depth=False, device="cuda", custom_data: dict
     Rt = torch.linalg.inv(C2W)
     T = Rt[:3, 3]
     R = Rt[:3, :3]
+    if load_mask and ('ground_truth_image_mask_path' not in camera_dict or camera_dict['ground_truth_image_mask_path'] is None):
+        logging.warning(f"Value of key 'ground_truth_image_mask_path' is not a valid path, skipping mask loading.")
+    if load_depth and ('ground_truth_depth_path' not in camera_dict or camera_dict['ground_truth_depth_path'] is None):
+        logging.warning(f"Value of key 'ground_truth_depth_path' is not a valid path, skipping depth loading.")
+    if load_depth and ('ground_truth_depth_mask_path' not in camera_dict or camera_dict['ground_truth_depth_mask_path'] is None):
+        logging.warning(f"Value of key 'ground_truth_depth_mask_path' is not a valid path, skipping depth mask loading.")
     return build_camera(
         image_width=camera_dict['width'],
         image_height=camera_dict['height'],
@@ -149,7 +155,7 @@ def dict2camera(camera_dict, load_depth=False, device="cuda", custom_data: dict
         R=R,
         T=T,
         image_path=camera_dict['ground_truth_image_path'] if 'ground_truth_image_path' in camera_dict else None,
-        image_mask_path=camera_dict['ground_truth_image_mask_path'] if 'ground_truth_image_mask_path' in camera_dict else None,
+        image_mask_path=camera_dict['ground_truth_image_mask_path'] if (load_mask and 'ground_truth_image_mask_path' in camera_dict) else None,
         depth_path=camera_dict['ground_truth_depth_path'] if (load_depth and 'ground_truth_depth_path' in camera_dict) else None,
         depth_mask_path=camera_dict['ground_truth_depth_mask_path'] if (load_depth and 'ground_truth_depth_mask_path' in camera_dict) else None,
         device=device,
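With the hunk above, dict2camera now defaults to loading both masks and depth data, and it logs a warning when the corresponding path keys are absent or None instead of skipping them silently. A minimal usage sketch, not part of the package; the import path and the cameras.json layout are assumptions inferred from this diff, and all file paths are placeholders:

# Sketch: exercising the new dict2camera defaults.
import json
import logging

from gaussian_splatting.camera import dict2camera  # module path assumed from the RECORD below

logging.basicConfig(level=logging.WARNING)  # make the new "skipping ... loading" warnings visible

with open("cameras.json") as f:
    camera_dicts = json.load(f)

# 1.18.0 default: mask and depth paths are used whenever the JSON provides them.
camera = dict2camera(camera_dicts[0], device="cuda")

# Explicitly skip mask and depth data for this camera.
camera_rgb_only = dict2camera(camera_dicts[0], load_mask=False, load_depth=False, device="cuda")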
@@ -81,8 +81,8 @@ class TrainableCameraDataset(CameraDataset):
             json.dump(cameras, f, indent=2)
 
     @classmethod
-    def from_json(cls, path, load_depth=False):
-        cameras = JSONCameraDataset(path, load_depth=load_depth)
+    def from_json(cls, path, load_mask=True, load_depth=True):
+        cameras = JSONCameraDataset(path, load_mask=load_mask, load_depth=load_depth)
         exposures = [(torch.tensor(camera['exposure'], dtype=torch.float) if 'exposure' in camera else torch.eye(3, 4)) for camera in cameras.json_cameras]
         return cls(cameras, exposures)
 
@@ -91,8 +91,8 @@ class FixedTrainableCameraDataset(JSONCameraDataset):
     # Same as TrainableCameraDataset, but is fixed
     # Used for loading cameras saved by TrainableCameraDataset
 
-    def __init__(self, path, load_depth=False):
-        super().__init__(path, load_depth=load_depth)
+    def __init__(self, path, load_mask=True, load_depth=True):
+        super().__init__(path, load_mask=load_mask, load_depth=load_depth)
         self.load_exposures()
 
     def to(self, device):
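Both wrappers in this file now accept and forward load_mask alongside load_depth, defaulting to True. A hedged sketch of the call sites; the import path is an assumption based on the RECORD below, and cameras.json is a placeholder:

# Sketch: loading saved cameras with the new flags.
from gaussian_splatting.dataset.camera_trainable import (  # path assumed
    FixedTrainableCameraDataset,
    TrainableCameraDataset,
)

# Trainable cameras; exposure tensors come from the JSON when present.
trainable = TrainableCameraDataset.from_json("cameras.json", load_mask=True, load_depth=True)

# Fixed cameras, ignoring mask and depth files entirely.
fixed = FixedTrainableCameraDataset("cameras.json", load_mask=False, load_depth=False)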
@@ -27,7 +27,7 @@ class ColmapCamera(NamedTuple):
     depth_mask_path: str
 
 
-def parse_colmap_camera(cameras, images, image_dir, depth_dir=None) -> List[ColmapCamera]:
+def parse_colmap_camera(cameras, images, image_dir, load_mask=True, depth_dir=None) -> List[ColmapCamera]:
     parsed_cameras = []
     for _, key in enumerate(cameras):
         extr = cameras[key]
@@ -49,9 +49,11 @@ def parse_colmap_camera(cameras, images, image_dir, depth_dir=None) -> List[Colm
            raise ValueError("Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!")
 
         image_path = os.path.join(image_dir, extr.name)
-        image_mask_path = os.path.join(image_dir, os.path.splitext(extr.name)[0] + '_mask.tiff')
-        if not os.path.exists(image_mask_path):
-            image_mask_path = os.path.splitext(image_mask_path)[0] + '.png'
+        image_mask_path = None
+        if load_mask:
+            image_mask_path = os.path.join(image_dir, os.path.splitext(extr.name)[0] + '_mask.tiff')
+            if not os.path.exists(image_mask_path):
+                image_mask_path = os.path.splitext(image_mask_path)[0] + '.png'
         depth_path, depth_mask_path = None, None
         if depth_dir is not None:
             depth_path = os.path.join(depth_dir, os.path.splitext(extr.name)[0] + '.tiff')
@@ -72,7 +74,7 @@ def parse_colmap_camera(cameras, images, image_dir, depth_dir=None) -> List[Colm
     return parsed_cameras
 
 
-def read_colmap_cameras(colmap_folder, load_depth=False) -> List[ColmapCamera]:
+def read_colmap_cameras(colmap_folder, load_mask=True, load_depth=True) -> List[ColmapCamera]:
     path = colmap_folder
     image_dir = os.path.join(path, "images")
     try:
@@ -86,13 +88,13 @@ def read_colmap_cameras(colmap_folder, load_depth=False) -> List[ColmapCamera]:
         cam_extrinsics = read_images_text(cameras_extrinsic_file)
         cam_intrinsics = read_cameras_text(cameras_intrinsic_file)
     depth_dir = os.path.join(path, "depths") if load_depth else None
-    return parse_colmap_camera(cam_extrinsics, cam_intrinsics, image_dir, depth_dir)
+    return parse_colmap_camera(cam_extrinsics, cam_intrinsics, image_dir, load_mask=load_mask, depth_dir=depth_dir)
 
 
 class ColmapCameraDataset(CameraDataset):
-    def __init__(self, colmap_folder, load_depth=False):
+    def __init__(self, colmap_folder, load_mask=True, load_depth=True):
         super().__init__()
-        self.raw_cameras = read_colmap_cameras(colmap_folder, load_depth=load_depth)
+        self.raw_cameras = read_colmap_cameras(colmap_folder, load_mask=load_mask, load_depth=load_depth)
         self.cameras = [build_camera(**cam._asdict()) for cam in self.raw_cameras]
 
     def to(self, device):
@@ -106,5 +108,5 @@ class ColmapCameraDataset(CameraDataset):
         return self.cameras[idx]
 
 
-def ColmapTrainableCameraDataset(colmap_folder, load_depth=False):
-    return TrainableCameraDataset(ColmapCameraDataset(colmap_folder, load_depth=load_depth))
+def ColmapTrainableCameraDataset(colmap_folder, load_mask=True, load_depth=True):
+    return TrainableCameraDataset(ColmapCameraDataset(colmap_folder, load_mask=load_mask, load_depth=load_depth))
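For COLMAP inputs, masks are only looked up when load_mask is set: parse_colmap_camera still probes images/<name>_mask.tiff and falls back to <name>_mask.png, but skips the lookup entirely when load_mask=False. A usage sketch, assuming the classes can be imported from the colmap dataset module listed in the RECORD below; the scene path is a placeholder:

# Sketch: COLMAP-backed datasets with the new load_mask switch.
from gaussian_splatting.dataset.colmap.dataset import (  # path assumed
    ColmapCameraDataset,
    ColmapTrainableCameraDataset,
)

# Expects an images/ folder (and, with load_depth, a depths/ folder) under the scene root.
dataset = ColmapCameraDataset("data/scene", load_mask=True, load_depth=True)

# Same scene, but without probing for *_mask.tiff / *_mask.png or depth files.
trainable = ColmapTrainableCameraDataset("data/scene", load_mask=False, load_depth=False)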
@@ -35,9 +35,10 @@ class CameraDataset:
 
 
 class JSONCameraDataset(CameraDataset):
-    def __init__(self, path, load_depth=False):
+    def __init__(self, path, load_mask=True, load_depth=True):
         with open(path, 'r') as f:
             self.json_cameras = json.load(f)
+        self.load_mask = load_mask
         self.load_depth = load_depth
         self.load_cameras()
 
@@ -51,7 +52,7 @@ class JSONCameraDataset(CameraDataset):
         return self.load_cameras(device=device)
 
     def load_cameras(self, device=None):
-        self.cameras = [dict2camera(camera, load_depth=self.load_depth, device=device) for camera in self.json_cameras]
+        self.cameras = [dict2camera(camera, load_mask=self.load_mask, load_depth=self.load_depth, device=device) for camera in self.json_cameras]
         return self
 
 
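JSONCameraDataset stores both flags on the instance, so the same mask/depth policy is re-applied whenever load_cameras runs again, for example from to(device). A short sketch; the import path is assumed from the RECORD below and cameras.json is a placeholder:

# Sketch: the stored load_mask/load_depth flags survive device moves.
from gaussian_splatting.dataset.dataset import JSONCameraDataset  # path assumed

ds = JSONCameraDataset("cameras.json", load_mask=False, load_depth=True)
ds = ds.to("cuda")  # rebuilds cameras via load_cameras() with the same load_mask=False policy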
@@ -6,11 +6,11 @@ from .trainer import *
 from .trainer.extensions import ScaleRegularizeTrainerWrapper
 
 
-def prepare_dataset(source: str, device: str, trainable_camera: bool = False, load_camera: str = None, load_depth=False) -> CameraDataset:
+def prepare_dataset(source: str, device: str, trainable_camera: bool = False, load_camera: str = None, load_mask=True, load_depth=True) -> CameraDataset:
     if trainable_camera:
-        dataset = (TrainableCameraDataset.from_json(load_camera, load_depth=load_depth) if load_camera else ColmapTrainableCameraDataset(source, load_depth=load_depth)).to(device)
+        dataset = (TrainableCameraDataset.from_json(load_camera, load_mask=load_mask, load_depth=load_depth) if load_camera else ColmapTrainableCameraDataset(source, load_mask=load_mask, load_depth=load_depth)).to(device)
     else:
-        dataset = (FixedTrainableCameraDataset(load_camera, load_depth=load_depth) if load_camera else ColmapCameraDataset(source, load_depth=load_depth)).to(device)
+        dataset = (FixedTrainableCameraDataset(load_camera, load_mask=load_mask, load_depth=load_depth) if load_camera else ColmapCameraDataset(source, load_mask=load_mask, load_depth=load_depth)).to(device)
     return dataset
 
 
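prepare_dataset threads the new flag through to whichever of the four dataset classes gets selected. The import of prepare_dataset from gaussian_splatting.prepare is confirmed by the render.py hunk below; the source paths in this sketch are placeholders:

# Sketch: the same entry point now controls mask loading for every dataset variant.
from gaussian_splatting.prepare import prepare_dataset

# COLMAP folder, fixed cameras, masks and depths enabled (the new defaults).
dataset = prepare_dataset(source="data/scene", device="cuda")

# Saved cameras.json with trainable cameras, masks and depths disabled.
dataset = prepare_dataset(
    source="data/scene", device="cuda",
    trainable_camera=True, load_camera="output/cameras.json",
    load_mask=False, load_depth=False)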
@@ -12,8 +12,11 @@ from gaussian_splatting.utils.lpipsPyTorch import lpips
 from gaussian_splatting.prepare import prepare_dataset, prepare_gaussians
 
 
-def prepare_rendering(sh_degree: int, source: str, device: str, trainable_camera: bool = False, load_ply: str = None, load_camera: str = None, load_depth=False) -> Tuple[CameraDataset, GaussianModel]:
-    dataset = prepare_dataset(source=source, device=device, trainable_camera=trainable_camera, load_camera=load_camera, load_depth=load_depth)
+def prepare_rendering(
+        sh_degree: int, source: str, device: str,
+        trainable_camera: bool = False, load_ply: str = None, load_camera: str = None,
+        load_depth=False) -> Tuple[CameraDataset, GaussianModel]:
+    dataset = prepare_dataset(source=source, device=device, trainable_camera=trainable_camera, load_camera=load_camera, load_mask=False, load_depth=load_depth)
     gaussians = prepare_gaussians(sh_degree=sh_degree, source=source, device=device, trainable_camera=trainable_camera, load_ply=load_ply)
     return dataset, gaussians
 
@@ -101,5 +104,5 @@ if __name__ == "__main__":
     with torch.no_grad():
         dataset, gaussians = prepare_rendering(
             sh_degree=args.sh_degree, source=args.source, device=args.device, trainable_camera=args.mode == "camera",
-            load_ply=load_ply, load_camera=args.load_camera, load_depth=True)
+            load_ply=load_ply, load_camera=args.load_camera, load_depth=args.save_depth_pcd)
         rendering(dataset, gaussians, save, save_pcd=args.save_depth_pcd, rescale_depth_gt=not args.no_rescale_depth_gt)
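Two behavioural points in the render path: prepare_rendering now always passes load_mask=False, and the script only loads ground-truth depth when --save_depth_pcd is given instead of unconditionally. A sketch of the Python-level call; the module path is assumed from the RECORD below and the file paths are placeholders:

# Sketch: rendering setup in 1.18.0 never loads image masks.
from gaussian_splatting.render import prepare_rendering  # path assumed

dataset, gaussians = prepare_rendering(
    sh_degree=3, source="data/scene", device="cuda",
    load_ply="output/point_cloud.ply", load_camera="output/cameras.json",
    load_depth=True)  # only needed when a depth point cloud will be exported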
@@ -12,8 +12,12 @@ from gaussian_splatting.trainer import AbstractTrainer
 from gaussian_splatting.prepare import basemodes, shliftmodes, prepare_dataset, prepare_gaussians, prepare_trainer
 
 
-def prepare_training(sh_degree: int, source: str, device: str, mode: str, trainable_camera: bool = False, load_ply: str = None, load_camera: str = None, load_depth=False, with_scale_reg=False, configs={}) -> Tuple[CameraDataset, GaussianModel, AbstractTrainer]:
-    dataset = prepare_dataset(source=source, device=device, trainable_camera=trainable_camera, load_camera=load_camera, load_depth=load_depth)
+def prepare_training(
+        sh_degree: int, source: str, device: str, mode: str,
+        trainable_camera: bool = False, load_ply: str = None, load_camera: str = None,
+        load_mask=False, load_depth=False,
+        with_scale_reg=False, configs={}) -> Tuple[CameraDataset, GaussianModel, AbstractTrainer]:
+    dataset = prepare_dataset(source=source, device=device, trainable_camera=trainable_camera, load_camera=load_camera, load_mask=load_mask, load_depth=load_depth)
     gaussians = prepare_gaussians(sh_degree=sh_degree, source=source, device=device, trainable_camera=trainable_camera, load_ply=load_ply)
     trainer = prepare_trainer(gaussians=gaussians, dataset=dataset, mode=mode, trainable_camera=trainable_camera, load_ply=load_ply, with_scale_reg=with_scale_reg, configs=configs)
     return dataset, gaussians, trainer
@@ -65,6 +69,7 @@ if __name__ == "__main__":
     parser.add_argument("-i", "--iteration", default=30000, type=int)
     parser.add_argument("-l", "--load_ply", default=None, type=str)
     parser.add_argument("--load_camera", default=None, type=str)
+    parser.add_argument("--no_image_mask", action="store_true")
     parser.add_argument("--no_depth_data", action="store_true")
     parser.add_argument("--with_scale_reg", action="store_true")
     parser.add_argument("--mode", choices=sorted(list(set(list(basemodes.keys()) + list(shliftmodes.keys())))), default="base")
@@ -78,7 +83,9 @@ if __name__ == "__main__":
     configs = {o.split("=", 1)[0]: eval(o.split("=", 1)[1]) for o in args.option}
     dataset, gaussians, trainer = prepare_training(
         sh_degree=args.sh_degree, source=args.source, device=args.device, mode=args.mode, trainable_camera="camera" in args.mode,
-        load_ply=args.load_ply, load_camera=args.load_camera, load_depth=not args.no_depth_data, with_scale_reg=args.with_scale_reg, configs=configs)
+        load_ply=args.load_ply, load_camera=args.load_camera,
+        load_mask=not args.no_image_mask, load_depth=not args.no_depth_data,
+        with_scale_reg=args.with_scale_reg, configs=configs)
     dataset.save_cameras(os.path.join(args.destination, "cameras.json"))
     torch.cuda.empty_cache()
     training(
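On the training side, the new --no_image_mask flag maps to load_mask=not args.no_image_mask, mirroring --no_depth_data, while prepare_training itself keeps conservative load_mask=False / load_depth=False defaults. A sketch of the programmatic equivalent; the module path is assumed from the RECORD below, the source path and sh_degree value are placeholders, and mode="base" is the CLI default shown above:

# Sketch: programmatic equivalent of the CLI defaults vs. --no_image_mask --no_depth_data.
from gaussian_splatting.train import prepare_training  # path assumed

# CLI default in 1.18.0: masks and depth data are loaded.
dataset, gaussians, trainer = prepare_training(
    sh_degree=3, source="data/scene", device="cuda", mode="base",
    load_mask=True, load_depth=True)

# Equivalent of running with --no_image_mask --no_depth_data.
dataset, gaussians, trainer = prepare_training(
    sh_degree=3, source="data/scene", device="cuda", mode="base",
    load_mask=False, load_depth=False)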
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: gaussian_splatting
-Version: 1.17.11
+Version: 1.18.0
 Summary: Refactored python training and inference code for 3D Gaussian Splatting
 Home-page: https://github.com/yindaheng98/gaussian-splatting
 Author: yindaheng98
@@ -1,20 +1,20 @@
 gaussian_splatting/__init__.py,sha256=CiOZMcyPTAaKtEuMZUhEda_Ad4_RUhmIstB-A3iuOJY,131
-gaussian_splatting/camera.py,sha256=LB44l5ok36jhkWXja2oIvnuBio3pfE5m18aCEFKl2jc,8477
+gaussian_splatting/camera.py,sha256=vo7mu6lyFpIhDqOAgNiJuPan8t_nDJn5cJkAYygLFcA,9243
 gaussian_splatting/camera_trainable.py,sha256=nI6hFFRV2ev7VwLlKUbzEdN9zUmngYZAANGLr1p1yBA,3841
 gaussian_splatting/gaussian_model.py,sha256=_Dy_dDa2prALhVgg428a-O8-8PODg3c_JPkOJJ8X4o8,13275
-gaussian_splatting/prepare.py,sha256=SdljFF2TU-ETkIY2ceHEz9tGA-o4HfHxGsFKIhyB1Ek,3042
-gaussian_splatting/render.py,sha256=2dP7Yr5e5uLJAs9MwRJSBY9eZQjaDtf9XB-HdtPmxPc,5930
-gaussian_splatting/train.py,sha256=VAFlPHB5ETH8BiYhe0LWY2UWXyB16tSRPKUaV5VPp5A,5196
+gaussian_splatting/prepare.py,sha256=GWPRpufg5larcKGNwlRtN22xVs9fp3ptMu11rQeySX8,3141
+gaussian_splatting/render.py,sha256=sh67INAUhEy5lfkmuB6RBTqe08NF-8MMR7QXTy4Ogg8,5990
+gaussian_splatting/train.py,sha256=no945bMVVRylvkkllQIURDplSwm8EydvUhn-wD9Cn2k,5388
 gaussian_splatting/dataset/__init__.py,sha256=-runuT-61P0YVpfV_WXqwUZM1oY0N012YH13Bt3rzSU,138
-gaussian_splatting/dataset/camera_trainable.py,sha256=D8bqeLVcnQ4qrsy3U1s6BVzdG4KCCPGR-Tygj6nLp-Q,4969
-gaussian_splatting/dataset/dataset.py,sha256=mlcIS0pJNdUIT-RObcQNYwgxxTOFJ0OYg3AlhoQ4Mww,2315
+gaussian_splatting/dataset/camera_trainable.py,sha256=Kd8v-_ZJ9dLIQ2QyVOXbmouYf5QjbgOgHNRHVpkgCms,5041
+gaussian_splatting/dataset/dataset.py,sha256=0tmIZ5P7kOEdABiEAXPznkRN91e5rcT5VsAzOLoOuEM,2392
 gaussian_splatting/dataset/colmap/__init__.py,sha256=YEYT2k2WJSqrkkZq4KAJYS9UMgqU6W6TJaeHLRc1CM4,213
-gaussian_splatting/dataset/colmap/dataset.py,sha256=Lq2b3hMdtOmdqPjvEjR6CLukAR7dZBEKMz8yzDD2Bgo,4519
+gaussian_splatting/dataset/colmap/dataset.py,sha256=0UBQ6ynOqElHZSphJ-MSbYQqCwwYZaAXl1y9AY5YKuY,4720
 gaussian_splatting/dataset/colmap/params_init.py,sha256=6_6gZ0Wl4aZrps2PJ_U234sxW5D-vOTfwioVa1FWC-E,1802
 gaussian_splatting/dataset/colmap/read_write_model.py,sha256=TenI7ai5UV7Ksg2vAXvJWnYFwOOo1tlS_633RfCLuQU,23137
-gaussian_splatting/diff_gaussian_rasterization/_C.cp310-win_amd64.pyd,sha256=YN0Oev5kgszT2ukGdSi_Fzkdfff3ugTmOyN3C4f84vs,1287680
+gaussian_splatting/diff_gaussian_rasterization/_C.cp310-win_amd64.pyd,sha256=xBBzubbNtBsa59qXqReMer7oFeshmdxBcZtMMNaueJk,1287680
 gaussian_splatting/diff_gaussian_rasterization/__init__.py,sha256=a9D0IZiPx-Mk1795hSq54T-NYT4MtEN_MZrxeMhw0Eo,6705
-gaussian_splatting/simple_knn/_C.cp310-win_amd64.pyd,sha256=XXsPotAF7TR-3ik38t_erNhxU4iekR6cTtgduLau8bU,1156608
+gaussian_splatting/simple_knn/_C.cp310-win_amd64.pyd,sha256=El47ul_aP-C8jETADhbfzvtNZUg8OlmVkaoIKSCpW1k,1156608
 gaussian_splatting/trainer/__init__.py,sha256=962fEY8A0spSQn5de_d_LkPOjA1PYKrLbuAkxwZo7mI,940
 gaussian_splatting/trainer/abc.py,sha256=kpYnJjLOhsyhE-V2J79EC9nih6MYBcXkmK9cHUA-3ao,4022
 gaussian_splatting/trainer/base.py,sha256=gO1x4m82xrZNl8NZVw2CWYqIvZJIMUWmBtPZQPeyxJ0,3370
@@ -45,8 +45,8 @@ gaussian_splatting/utils/lpipsPyTorch/modules/__init__.py,sha256=47DEQpj8HBSa-_T
 gaussian_splatting/utils/lpipsPyTorch/modules/lpips.py,sha256=YScu0oXIEstCCjJVRItS_R_csUw70sBMFuP8Syl2UdI,1187
 gaussian_splatting/utils/lpipsPyTorch/modules/networks.py,sha256=kqIebq7dAhHypTXweFVEf_RDbN7_Zv7O3MlD-CfRvpg,2788
 gaussian_splatting/utils/lpipsPyTorch/modules/utils.py,sha256=TDcem3E3HqDNN2MT8qlOL_BKVHeO4HRE77JxF-kOWk8,915
-gaussian_splatting-1.17.11.dist-info/licenses/LICENSE.md,sha256=bMuRQKn0u485mx8JBBTJ5Simc-aWHaQsxmoB6jsg5oE,4752
-gaussian_splatting-1.17.11.dist-info/METADATA,sha256=YhuQHoRNZ-SttnBQZ0IumPV8JWSZ4MzMe_hRSm3dUW4,17184
-gaussian_splatting-1.17.11.dist-info/WHEEL,sha256=KUuBC6lxAbHCKilKua8R9W_TM71_-9Sg5uEP3uDWcoU,101
-gaussian_splatting-1.17.11.dist-info/top_level.txt,sha256=uaYrPYXRHhpybgCnsoazTcdhpzZGnLT_vd5eoRzBWWI,19
-gaussian_splatting-1.17.11.dist-info/RECORD,,
+gaussian_splatting-1.18.0.dist-info/licenses/LICENSE.md,sha256=bMuRQKn0u485mx8JBBTJ5Simc-aWHaQsxmoB6jsg5oE,4752
+gaussian_splatting-1.18.0.dist-info/METADATA,sha256=z6Q1TP39OROC_mlfGLSBgZQUip5neZOFgqQKx1Gbj_c,17183
+gaussian_splatting-1.18.0.dist-info/WHEEL,sha256=KUuBC6lxAbHCKilKua8R9W_TM71_-9Sg5uEP3uDWcoU,101
+gaussian_splatting-1.18.0.dist-info/top_level.txt,sha256=uaYrPYXRHhpybgCnsoazTcdhpzZGnLT_vd5eoRzBWWI,19
+gaussian_splatting-1.18.0.dist-info/RECORD,,