learning3d 0.0.6__tar.gz → 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (90)
  1. {learning3d-0.0.6/src/learning3d.egg-info → learning3d-0.1.0}/PKG-INFO +1 -1
  2. {learning3d-0.0.6 → learning3d-0.1.0}/pyproject.toml +1 -1
  3. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/data_utils/dataloaders.py +8 -8
  4. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/examples/test_dcp.py +4 -2
  5. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/examples/test_deepgmr.py +4 -2
  6. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/examples/test_masknet.py +3 -1
  7. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/examples/test_masknet2.py +3 -1
  8. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/examples/test_pcn.py +4 -2
  9. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/examples/test_pcrnet.py +3 -1
  10. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/examples/test_pnlk.py +3 -1
  11. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/examples/test_pointconv.py +3 -1
  12. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/examples/test_pointnet.py +3 -1
  13. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/examples/test_prnet.py +4 -2
  14. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/examples/test_rpmnet.py +3 -1
  15. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/examples/train_PointNetLK.py +4 -2
  16. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/examples/train_dcp.py +4 -2
  17. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/examples/train_deepgmr.py +4 -2
  18. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/examples/train_masknet.py +4 -2
  19. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/examples/train_pcn.py +4 -2
  20. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/examples/train_pcrnet.py +4 -2
  21. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/examples/train_pointconv.py +4 -2
  22. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/examples/train_pointnet.py +4 -2
  23. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/examples/train_prnet.py +4 -2
  24. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/examples/train_rpmnet.py +4 -2
  25. {learning3d-0.0.6 → learning3d-0.1.0/src/learning3d.egg-info}/PKG-INFO +1 -1
  26. {learning3d-0.0.6 → learning3d-0.1.0}/LICENSE +0 -0
  27. {learning3d-0.0.6 → learning3d-0.1.0}/MANIFEST.in +0 -0
  28. {learning3d-0.0.6 → learning3d-0.1.0}/README.md +0 -0
  29. {learning3d-0.0.6 → learning3d-0.1.0}/data/modelnet40_ply_hdf5_2048/shape_names.txt +0 -0
  30. {learning3d-0.0.6 → learning3d-0.1.0}/data/modelnet40_ply_hdf5_2048/test_files.txt +0 -0
  31. {learning3d-0.0.6 → learning3d-0.1.0}/data/modelnet40_ply_hdf5_2048/train_files.txt +0 -0
  32. {learning3d-0.0.6 → learning3d-0.1.0}/requirements.txt +0 -0
  33. {learning3d-0.0.6 → learning3d-0.1.0}/setup.cfg +0 -0
  34. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/__init__.py +0 -0
  35. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/data_utils/__init__.py +0 -0
  36. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/data_utils/user_data.py +0 -0
  37. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/losses/__init__.py +0 -0
  38. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/losses/chamfer_distance.py +0 -0
  39. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/losses/classification.py +0 -0
  40. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/losses/correspondence_loss.py +0 -0
  41. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/losses/cuda/chamfer_distance/__init__.py +0 -0
  42. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/losses/cuda/chamfer_distance/chamfer_distance.cpp +0 -0
  43. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/losses/cuda/chamfer_distance/chamfer_distance.cu +0 -0
  44. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/losses/cuda/chamfer_distance/chamfer_distance.py +0 -0
  45. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/losses/cuda/emd_torch/pkg/emd_loss_layer.py +0 -0
  46. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/losses/cuda/emd_torch/pkg/include/cuda/emd.cuh +0 -0
  47. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/losses/cuda/emd_torch/pkg/include/cuda_helper.h +0 -0
  48. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/losses/cuda/emd_torch/pkg/include/emd.h +0 -0
  49. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/losses/cuda/emd_torch/pkg/layer/__init__.py +0 -0
  50. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/losses/cuda/emd_torch/pkg/layer/emd_loss_layer.py +0 -0
  51. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/losses/cuda/emd_torch/pkg/src/cuda/emd.cu +0 -0
  52. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/losses/cuda/emd_torch/pkg/src/emd.cpp +0 -0
  53. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/losses/cuda/emd_torch/setup.py +0 -0
  54. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/losses/emd.py +0 -0
  55. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/losses/frobenius_norm.py +0 -0
  56. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/losses/rmse_features.py +0 -0
  57. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/models/__init__.py +0 -0
  58. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/models/classifier.py +0 -0
  59. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/models/dcp.py +0 -0
  60. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/models/deepgmr.py +0 -0
  61. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/models/dgcnn.py +0 -0
  62. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/models/masknet.py +0 -0
  63. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/models/masknet2.py +0 -0
  64. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/models/pcn.py +0 -0
  65. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/models/pcrnet.py +0 -0
  66. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/models/pointconv.py +0 -0
  67. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/models/pointnet.py +0 -0
  68. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/models/pointnetlk.py +0 -0
  69. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/models/pooling.py +0 -0
  70. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/models/ppfnet.py +0 -0
  71. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/models/prnet.py +0 -0
  72. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/models/rpmnet.py +0 -0
  73. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/models/segmentation.py +0 -0
  74. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/ops/__init__.py +0 -0
  75. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/ops/data_utils.py +0 -0
  76. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/ops/invmat.py +0 -0
  77. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/ops/quaternion.py +0 -0
  78. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/ops/se3.py +0 -0
  79. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/ops/sinc.py +0 -0
  80. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/ops/so3.py +0 -0
  81. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/ops/transform_functions.py +0 -0
  82. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/utils/__init__.py +0 -0
  83. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/utils/pointconv_util.py +0 -0
  84. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/utils/ppfnet_util.py +0 -0
  85. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/utils/svd.py +0 -0
  86. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d/utils/transformer.py +0 -0
  87. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d.egg-info/SOURCES.txt +0 -0
  88. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d.egg-info/dependency_links.txt +0 -0
  89. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d.egg-info/requires.txt +0 -0
  90. {learning3d-0.0.6 → learning3d-0.1.0}/src/learning3d.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: learning3d
3
- Version: 0.0.6
3
+ Version: 0.1.0
4
4
  Summary: Learning3D: A Modern Library for Deep Learning on 3D Point Clouds Data
5
5
  Author-email: Vinit Sarode <vinitsarode5@gmail.com>
6
6
  Project-URL: Homepage, https://github.com/vinits5/learning3d
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "learning3d"
7
- version = "0.0.6"
7
+ version = "0.1.0"
8
8
  authors = [
9
9
  { name="Vinit Sarode", email="vinitsarode5@gmail.com"},
10
10
  ]
@@ -226,15 +226,15 @@ class ModelNet40Data(Dataset):
226
226
 
227
227
 
228
228
  class ClassificationData(Dataset):
229
- def __init__(self, data_class=ModelNet40Data, root_dir='./'):
229
+ def __init__(self, data_class):
230
230
  super(ClassificationData, self).__init__()
231
- self.set_class(data_class, root_dir)
231
+ self.set_class(data_class)
232
232
 
233
233
  def __len__(self):
234
234
  return len(self.data_class)
235
235
 
236
- def set_class(self, data_class, root_dir):
237
- self.data_class = data_class(root_dir=root_dir)
236
+ def set_class(self, data_class):
237
+ self.data_class = data_class
238
238
 
239
239
  def get_shape(self, label):
240
240
  try:
@@ -247,13 +247,13 @@ class ClassificationData(Dataset):
247
247
 
248
248
 
249
249
  class RegistrationData(Dataset):
250
- def __init__(self, algorithm, root_dir='./', data_class=ModelNet40Data, partial_source=False, partial_template=False, noise=False, additional_params={}):
250
+ def __init__(self, algorithm, data_class, partial_source=False, partial_template=False, noise=False, additional_params={}):
251
251
  super(RegistrationData, self).__init__()
252
252
  available_algorithms = ['PCRNet', 'PointNetLK', 'DCP', 'PRNet', 'iPCRNet', 'RPMNet', 'DeepGMR']
253
253
  if algorithm in available_algorithms: self.algorithm = algorithm
254
254
  else: raise Exception("Algorithm not available for registration.")
255
255
 
256
- self.set_class(data_class, root_dir)
256
+ self.set_class(data_class)
257
257
  self.partial_template = partial_template
258
258
  self.partial_source = partial_source
259
259
  self.noise = noise
@@ -283,8 +283,8 @@ class RegistrationData(Dataset):
283
283
  def __len__(self):
284
284
  return len(self.data_class)
285
285
 
286
- def set_class(self, data_class, root_dir):
287
- self.data_class = data_class(root_dir=root_dir)
286
+ def set_class(self, data_class):
287
+ self.data_class = data_class
288
288
 
289
289
  def __getitem__(self, index):
290
290
  template, label = self.data_class[index]
@@ -88,6 +88,8 @@ def options():
88
88
  metavar='DATASET', help='dataset type (default: modelnet)')
89
89
  parser.add_argument('--num_points', default=1024, type=int,
90
90
  metavar='N', help='points in point-cloud (default: 1024)')
91
+ parser.add_argument('--root_dir', default='./', type=str,
92
+ help='path of the data where modelnet files are downloaded.')
91
93
 
92
94
  # settings for PointNet
93
95
  parser.add_argument('--pointnet', default='tune', type=str, choices=['fixed', 'tune'],
@@ -114,8 +116,8 @@ def main():
114
116
  args = options()
115
117
  torch.backends.cudnn.deterministic = True
116
118
 
117
- trainset = RegistrationData('DCP', ModelNet40Data(train=True))
118
- testset = RegistrationData('DCP', ModelNet40Data(train=False))
119
+ trainset = RegistrationData('DCP', ModelNet40Data(train=True, root_dir=args.root_dir))
120
+ testset = RegistrationData('DCP', ModelNet40Data(train=False, root_dir=args.root_dir))
119
121
  train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=args.workers)
120
122
  test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=args.workers)
121
123
 
@@ -103,6 +103,8 @@ def options():
103
103
  metavar='K', help='No of nearest neighbors to be estimated.')
104
104
  parser.add_argument('--use_rri', default=True, type=bool,
105
105
  help='Find nearest neighbors to estimate features from PointNet.')
106
+ parser.add_argument('--root_dir', default='./', type=str,
107
+ help='path of the data where modelnet files are downloaded.')
106
108
 
107
109
  # settings for on training
108
110
  parser.add_argument('-j', '--workers', default=4, type=int,
@@ -121,8 +123,8 @@ def main():
121
123
  args = options()
122
124
  torch.backends.cudnn.deterministic = True
123
125
 
124
- trainset = RegistrationData('DeepGMR', ModelNet40Data(train=True))
125
- testset = RegistrationData('DeepGMR', ModelNet40Data(train=False))
126
+ trainset = RegistrationData('DeepGMR', ModelNet40Data(train=True, root_dir=args.root_dir))
127
+ testset = RegistrationData('DeepGMR', ModelNet40Data(train=False, root_dir=args.root_dir))
126
128
  train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=args.workers)
127
129
  test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=args.workers)
128
130
 
@@ -117,6 +117,8 @@ def options():
117
117
  help='Add noise in source point clouds.')
118
118
  parser.add_argument('--outliers', default=False, type=bool,
119
119
  help='Add outliers to template point cloud.')
120
+ parser.add_argument('--root_dir', default='./', type=str,
121
+ help='path of the data where modelnet files are downloaded.')
120
122
 
121
123
  # settings for on testing
122
124
  parser.add_argument('-j', '--workers', default=1, type=int,
@@ -137,7 +139,7 @@ def main():
137
139
  args = options()
138
140
  torch.backends.cudnn.deterministic = True
139
141
 
140
- testset = RegistrationData('PointNetLK', ModelNet40Data(train=False, num_points=args.num_points),
142
+ testset = RegistrationData('PointNetLK', ModelNet40Data(train=False, num_points=args.num_points, root_dir=args.root_dir),
141
143
  partial_source=args.partial_source, noise=args.noise,
142
144
  additional_params={'use_masknet': True})
143
145
  test_loader = DataLoader(testset, batch_size=args.test_batch_size, shuffle=False, drop_last=False, num_workers=args.workers)
@@ -120,6 +120,8 @@ def options():
120
120
  help='Add noise in source point clouds.')
121
121
  parser.add_argument('--outliers', default=False, type=bool,
122
122
  help='Add outliers to template point cloud.')
123
+ parser.add_argument('--root_dir', default='./', type=str,
124
+ help='path of the data where modelnet files are downloaded.')
123
125
 
124
126
  # settings for on testing
125
127
  parser.add_argument('-j', '--workers', default=1, type=int,
@@ -140,7 +142,7 @@ def main():
140
142
  args = options()
141
143
  torch.backends.cudnn.deterministic = True
142
144
 
143
- testset = RegistrationData('PointNetLK', ModelNet40Data(train=False, num_points=args.num_points),
145
+ testset = RegistrationData('PointNetLK', ModelNet40Data(train=False, num_points=args.num_points, root_dir=args.root_dir),
144
146
  partial_template=args.partial_template, partial_source=args.partial_source,
145
147
  noise=args.noise, additional_params={'use_masknet': True, 'partial_point_cloud_method': 'planar_crop'})
146
148
  test_loader = DataLoader(testset, batch_size=args.test_batch_size, shuffle=False, drop_last=False, num_workers=args.workers)
@@ -70,6 +70,8 @@ def options():
70
70
  metavar='DATASET', help='dataset type (default: modelnet)')
71
71
  parser.add_argument('--num_points', default=1024, type=int,
72
72
  metavar='N', help='points in point-cloud (default: 1024)')
73
+ parser.add_argument('--root_dir', default='./', type=str,
74
+ help='path of the data where modelnet files are downloaded.')
73
75
 
74
76
  # settings for PCN
75
77
  parser.add_argument('--emb_dims', default=1024, type=int,
@@ -95,8 +97,8 @@ def main():
95
97
  args = options()
96
98
  args.dataset_path = os.path.join(os.getcwd(), os.pardir, os.pardir, 'ModelNet40', 'ModelNet40')
97
99
 
98
- trainset = ClassificationData(ModelNet40Data(train=True))
99
- testset = ClassificationData(ModelNet40Data(train=False))
100
+ trainset = ClassificationData(ModelNet40Data(train=True, root_dir=args.root_dir))
101
+ testset = ClassificationData(ModelNet40Data(train=False, root_dir=args.root_dir))
100
102
  train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=args.workers)
101
103
  test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=args.workers)
102
104
 
@@ -74,6 +74,8 @@ def options():
74
74
  metavar='DATASET', help='dataset type (default: modelnet)')
75
75
  parser.add_argument('--num_points', default=1024, type=int,
76
76
  metavar='N', help='points in point-cloud (default: 1024)')
77
+ parser.add_argument('--root_dir', default='./', type=str,
78
+ help='path of the data where modelnet files are downloaded.')
77
79
 
78
80
  # settings for PointNet
79
81
  parser.add_argument('--emb_dims', default=1024, type=int,
@@ -97,7 +99,7 @@ def options():
97
99
  def main():
98
100
  args = options()
99
101
 
100
- testset = RegistrationData('PCRNet', ModelNet40Data(train=False))
102
+ testset = RegistrationData('PCRNet', ModelNet40Data(train=False, root_dir=args.root_dir))
101
103
  test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=args.workers)
102
104
 
103
105
  if not torch.cuda.is_available():
@@ -74,6 +74,8 @@ def options():
74
74
  metavar='DATASET', help='dataset type (default: modelnet)')
75
75
  parser.add_argument('--num_points', default=1024, type=int,
76
76
  metavar='N', help='points in point-cloud (default: 1024)')
77
+ parser.add_argument('--root_dir', default='./', type=str,
78
+ help='path of the data where modelnet files are downloaded.')
77
79
 
78
80
  # settings for PointNet
79
81
  parser.add_argument('--emb_dims', default=1024, type=int,
@@ -98,7 +100,7 @@ def options():
98
100
  def main():
99
101
  args = options()
100
102
 
101
- testset = RegistrationData('PointNetLK', ModelNet40Data(train=False))
103
+ testset = RegistrationData('PointNetLK', ModelNet40Data(train=False, root_dir=args.root_dir))
102
104
  test_loader = DataLoader(testset, batch_size=8, shuffle=False, drop_last=False, num_workers=args.workers)
103
105
 
104
106
  if not torch.cuda.is_available():
@@ -73,6 +73,8 @@ def options():
73
73
  metavar='DATASET', help='dataset type (default: modelnet)')
74
74
  parser.add_argument('--num_points', default=1024, type=int,
75
75
  metavar='N', help='points in point-cloud (default: 1024)')
76
+ parser.add_argument('--root_dir', default='./', type=str,
77
+ help='path of the data where modelnet files are downloaded.')
76
78
 
77
79
  # settings for PointNet
78
80
  parser.add_argument('--pointnet', default='tune', type=str, choices=['fixed', 'tune'],
@@ -99,7 +101,7 @@ def main():
99
101
  args = options()
100
102
  args.dataset_path = os.path.join(os.getcwd(), os.pardir, os.pardir, 'ModelNet40', 'ModelNet40')
101
103
 
102
- testset = ClassificationData(ModelNet40Data(train=False))
104
+ testset = ClassificationData(ModelNet40Data(train=False, root_dir=args.root_dir))
103
105
  test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=args.workers)
104
106
 
105
107
  if not torch.cuda.is_available():
@@ -73,6 +73,8 @@ def options():
73
73
  metavar='DATASET', help='dataset type (default: modelnet)')
74
74
  parser.add_argument('--num_points', default=1024, type=int,
75
75
  metavar='N', help='points in point-cloud (default: 1024)')
76
+ parser.add_argument('--root_dir', default='./', type=str,
77
+ help='path of the data where modelnet files are downloaded.')
76
78
 
77
79
  # settings for PointNet
78
80
  parser.add_argument('--pointnet', default='tune', type=str, choices=['fixed', 'tune'],
@@ -99,7 +101,7 @@ def main():
99
101
  args = options()
100
102
  args.dataset_path = os.path.join(os.getcwd(), os.pardir, os.pardir, 'ModelNet40', 'ModelNet40')
101
103
 
102
- testset = ClassificationData(ModelNet40Data(train=False))
104
+ testset = ClassificationData(ModelNet40Data(train=False, root_dir=args.root_dir))
103
105
  test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=args.workers)
104
106
 
105
107
  if not torch.cuda.is_available():
@@ -79,6 +79,8 @@ def options():
79
79
  # settings for input data
80
80
  parser.add_argument('--dataset_type', default='modelnet', choices=['modelnet', 'shapenet2'],
81
81
  metavar='DATASET', help='dataset type (default: modelnet)')
82
+ parser.add_argument('--root_dir', default='./', type=str,
83
+ help='path of the data where modelnet files are downloaded.')
82
84
 
83
85
  # settings for PointNet
84
86
  parser.add_argument('--emb_dims', default=512, type=int,
@@ -102,8 +104,8 @@ def main():
102
104
  args = options()
103
105
  torch.backends.cudnn.deterministic = True
104
106
 
105
- trainset = RegistrationData('PRNet', ModelNet40Data(train=True), partial_source=True, partial_template=True)
106
- testset = RegistrationData('PRNet', ModelNet40Data(train=False), partial_source=True, partial_template=True)
107
+ trainset = RegistrationData('PRNet', ModelNet40Data(train=True, root_dir=args.root_dir), partial_source=True, partial_template=True)
108
+ testset = RegistrationData('PRNet', ModelNet40Data(train=False, root_dir=args.root_dir), partial_source=True, partial_template=True)
107
109
  train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=args.workers)
108
110
  test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=args.workers)
109
111
 
@@ -74,6 +74,8 @@ def options():
74
74
  metavar='DATASET', help='dataset type (default: modelnet)')
75
75
  parser.add_argument('--num_points', default=1024, type=int,
76
76
  metavar='N', help='points in point-cloud (default: 1024)')
77
+ parser.add_argument('--root_dir', default='./', type=str,
78
+ help='path of the data where modelnet files are downloaded.')
77
79
 
78
80
  # settings for PointNet
79
81
  parser.add_argument('--emb_dims', default=1024, type=int,
@@ -98,7 +100,7 @@ def options():
98
100
  def main():
99
101
  args = options()
100
102
 
101
- testset = RegistrationData('RPMNet', ModelNet40Data(train=False, num_points=args.num_points, use_normals=True), partial_source=True, partial_template=False)
103
+ testset = RegistrationData('RPMNet', ModelNet40Data(train=False, num_points=args.num_points, use_normals=True, root_dir=args.root_dir), partial_source=True, partial_template=False)
102
104
  test_loader = DataLoader(testset, batch_size=1, shuffle=False, drop_last=False, num_workers=args.workers)
103
105
 
104
106
  if not torch.cuda.is_available():
@@ -147,6 +147,8 @@ def options():
147
147
  metavar='DATASET', help='dataset type (default: modelnet)')
148
148
  parser.add_argument('--num_points', default=1024, type=int,
149
149
  metavar='N', help='points in point-cloud (default: 1024)')
150
+ parser.add_argument('--root_dir', default='./', type=str,
151
+ help='path of the data where modelnet files are downloaded.')
150
152
 
151
153
  # settings for PointNet
152
154
  parser.add_argument('--fine_tune_pointnet', default='tune', type=str, choices=['fixed', 'tune'],
@@ -195,8 +197,8 @@ def main():
195
197
  textio.cprint(str(args))
196
198
 
197
199
 
198
- trainset = RegistrationData('PointNetLK', ModelNet40Data(train=True))
199
- testset = RegistrationData('PointNetLK', ModelNet40Data(train=False))
200
+ trainset = RegistrationData('PointNetLK', ModelNet40Data(train=True, root_dir=args.root_dir))
201
+ testset = RegistrationData('PointNetLK', ModelNet40Data(train=False, root_dir=args.root_dir))
200
202
  train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=args.workers)
201
203
  test_loader = DataLoader(testset, batch_size=8, shuffle=False, drop_last=False, num_workers=args.workers)
202
204
 
@@ -168,6 +168,8 @@ def options():
168
168
  metavar='DATASET', help='dataset type (default: modelnet)')
169
169
  parser.add_argument('--num_points', default=1024, type=int,
170
170
  metavar='N', help='points in point-cloud (default: 1024)')
171
+ parser.add_argument('--root_dir', default='./', type=str,
172
+ help='path of the data where modelnet files are downloaded.')
171
173
 
172
174
  # settings for PointNet
173
175
  parser.add_argument('--pointnet', default='tune', type=str, choices=['fixed', 'tune'],
@@ -214,8 +216,8 @@ def main():
214
216
  textio.cprint(str(args))
215
217
 
216
218
 
217
- trainset = RegistrationData('DCP', ModelNet40Data(train=True))
218
- testset = RegistrationData('DCP', ModelNet40Data(train=False))
219
+ trainset = RegistrationData('DCP', ModelNet40Data(train=True, root_dir=args.root_dir))
220
+ testset = RegistrationData('DCP', ModelNet40Data(train=False, root_dir=args.root_dir))
219
221
  train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=args.workers)
220
222
  test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=args.workers)
221
223
 
@@ -165,6 +165,8 @@ def options():
165
165
  metavar='DATASET', help='dataset type (default: modelnet)')
166
166
  parser.add_argument('--num_points', default=1024, type=int,
167
167
  metavar='N', help='points in point-cloud (default: 1024)')
168
+ parser.add_argument('--root_dir', default='./', type=str,
169
+ help='path of the data where modelnet files are downloaded.')
168
170
 
169
171
  parser.add_argument('--nearest_neighbors', default=20, type=int,
170
172
  metavar='K', help='No of nearest neighbors to be estimated.')
@@ -211,8 +213,8 @@ def main():
211
213
  textio = IOStream('checkpoints/' + args.exp_name + '/run.log')
212
214
  textio.cprint(str(args))
213
215
 
214
- trainset = RegistrationData('DeepGMR', ModelNet40Data(train=True), additional_params={'nearest_neighbors': args.nearest_neighbors})
215
- testset = RegistrationData('DeepGMR', ModelNet40Data(train=False), additional_params={'nearest_neighbors': args.nearest_neighbors})
216
+ trainset = RegistrationData('DeepGMR', ModelNet40Data(train=True, root_dir=args.root_dir), additional_params={'nearest_neighbors': args.nearest_neighbors})
217
+ testset = RegistrationData('DeepGMR', ModelNet40Data(train=False, root_dir=args.root_dir), additional_params={'nearest_neighbors': args.nearest_neighbors})
216
218
  train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=args.workers)
217
219
  test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=args.workers)
218
220
 
@@ -160,6 +160,8 @@ def options():
160
160
  help='Add noise in source point clouds.')
161
161
  parser.add_argument('--outliers', default=False, type=bool,
162
162
  help='Add outliers to template point cloud.')
163
+ parser.add_argument('--root_dir', default='./', type=str,
164
+ help='path of the data where modelnet files are downloaded.')
163
165
 
164
166
  # settings for on training
165
167
  parser.add_argument('--seed', type=int, default=1234)
@@ -202,10 +204,10 @@ def main():
202
204
  textio = IOStream('checkpoints/' + args.exp_name + '/run.log')
203
205
  textio.cprint(str(args))
204
206
 
205
- trainset = RegistrationData(ModelNet40Data(train=True, num_points=args.num_points, unseen=args.unseen),
207
+ trainset = RegistrationData(ModelNet40Data(train=True, num_points=args.num_points, unseen=args.unseen, root_dir=args.root_dir),
206
208
  partial_source=args.partial_source, noise=args.noise, outliers=args.outliers,
207
209
  additional_params={'use_masknet': True})
208
- testset = RegistrationData(ModelNet40Data(train=False, num_points=args.num_points, unseen=args.unseen),
210
+ testset = RegistrationData(ModelNet40Data(train=False, num_points=args.num_points, unseen=args.unseen, root_dir=args.root_dir),
209
211
  partial_source=args.partial_source, noise=args.noise, outliers=args.outliers,
210
212
  additional_params={'use_masknet': True})
211
213
  train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=args.workers)
@@ -138,6 +138,8 @@ def options():
138
138
  metavar='DATASET', help='dataset type (default: modelnet)')
139
139
  parser.add_argument('--num_points', default=1024, type=int,
140
140
  metavar='N', help='points in point-cloud (default: 1024)')
141
+ parser.add_argument('--root_dir', default='./', type=str,
142
+ help='path of the data where modelnet files are downloaded.')
141
143
 
142
144
  # settings for PCN
143
145
  parser.add_argument('--emb_dims', default=1024, type=int,
@@ -183,8 +185,8 @@ def main():
183
185
  textio.cprint(str(args))
184
186
 
185
187
 
186
- trainset = ClassificationData(ModelNet40Data(train=True))
187
- testset = ClassificationData(ModelNet40Data(train=False))
188
+ trainset = ClassificationData(ModelNet40Data(train=True, root_dir=args.root_dir))
189
+ testset = ClassificationData(ModelNet40Data(train=False, root_dir=args.root_dir))
188
190
  train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=args.workers)
189
191
  test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=args.workers)
190
192
 
@@ -147,6 +147,8 @@ def options():
147
147
  metavar='DATASET', help='dataset type (default: modelnet)')
148
148
  parser.add_argument('--num_points', default=1024, type=int,
149
149
  metavar='N', help='points in point-cloud (default: 1024)')
150
+ parser.add_argument('--root_dir', default='./', type=str,
151
+ help='path of the data where modelnet files are downloaded.')
150
152
 
151
153
  # settings for PointNet
152
154
  parser.add_argument('--pointnet', default='tune', type=str, choices=['fixed', 'tune'],
@@ -193,8 +195,8 @@ def main():
193
195
  textio.cprint(str(args))
194
196
 
195
197
 
196
- trainset = RegistrationData('PCRNet', ModelNet40Data(train=True))
197
- testset = RegistrationData('PCRNet', ModelNet40Data(train=False))
198
+ trainset = RegistrationData('PCRNet', ModelNet40Data(train=True, root_dir=args.root_dir))
199
+ testset = RegistrationData('PCRNet', ModelNet40Data(train=False, root_dir=args.root_dir))
198
200
  train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=args.workers)
199
201
  test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=args.workers)
200
202
 
@@ -163,6 +163,8 @@ def options():
163
163
  metavar='DATASET', help='dataset type (default: modelnet)')
164
164
  parser.add_argument('--num_points', default=1024, type=int,
165
165
  metavar='N', help='points in point-cloud (default: 1024)')
166
+ parser.add_argument('--root_dir', default='./', type=str,
167
+ help='path of the data where modelnet files are downloaded.')
166
168
 
167
169
  # settings for PointNet
168
170
  parser.add_argument('--pointnet', default='tune', type=str, choices=['fixed', 'tune'],
@@ -210,8 +212,8 @@ def main():
210
212
  textio.cprint(str(args))
211
213
 
212
214
 
213
- trainset = ClassificationData(ModelNet40Data(train=True))
214
- testset = ClassificationData(ModelNet40Data(train=False))
215
+ trainset = ClassificationData(ModelNet40Data(train=True, root_dir=args.root_dir))
216
+ testset = ClassificationData(ModelNet40Data(train=False, root_dir=args.root_dir))
215
217
  train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=args.workers)
216
218
  test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=args.workers)
217
219
 
@@ -163,6 +163,8 @@ def options():
163
163
  metavar='DATASET', help='dataset type (default: modelnet)')
164
164
  parser.add_argument('--num_points', default=1024, type=int,
165
165
  metavar='N', help='points in point-cloud (default: 1024)')
166
+ parser.add_argument('--root_dir', default='./', type=str,
167
+ help='path of the data where modelnet files are downloaded.')
166
168
 
167
169
  # settings for PointNet
168
170
  parser.add_argument('--pointnet', default='tune', type=str, choices=['fixed', 'tune'],
@@ -210,8 +212,8 @@ def main():
210
212
  textio.cprint(str(args))
211
213
 
212
214
 
213
- trainset = ClassificationData(ModelNet40Data(train=True))
214
- testset = ClassificationData(ModelNet40Data(train=False))
215
+ trainset = ClassificationData(ModelNet40Data(train=True, root_dir=args.root_dir))
216
+ testset = ClassificationData(ModelNet40Data(train=False, root_dir=args.root_dir))
215
217
  train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=args.workers)
216
218
  test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=args.workers)
217
219
 
@@ -157,6 +157,8 @@ def options():
157
157
  metavar='K', help='dim. of the feature vector (default: 1024)')
158
158
  parser.add_argument('--num_iterations', default=3, type=int,
159
159
  help='Number of Iterations')
160
+ parser.add_argument('--root_dir', default='./', type=str,
161
+ help='path of the data where modelnet files are downloaded.')
160
162
 
161
163
  # settings for on training
162
164
  parser.add_argument('--seed', type=int, default=1234)
@@ -195,8 +197,8 @@ def main():
195
197
  textio.cprint(str(args))
196
198
 
197
199
 
198
- trainset = RegistrationData('PRNet', ModelNet40Data(train=True), partial_source=True, partial_template=True)
199
- testset = RegistrationData('PRNet', ModelNet40Data(train=False), partial_source=True, partial_template=True)
200
+ trainset = RegistrationData('PRNet', ModelNet40Data(train=True, root_dir=args.root_dir), partial_source=True, partial_template=True)
201
+ testset = RegistrationData('PRNet', ModelNet40Data(train=False, root_dir=args.root_dir), partial_source=True, partial_template=True)
200
202
  train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=args.workers)
201
203
  test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=args.workers)
202
204
 
@@ -146,6 +146,8 @@ def options():
146
146
  metavar='DATASET', help='dataset type (default: modelnet)')
147
147
  parser.add_argument('--num_points', default=1024, type=int,
148
148
  metavar='N', help='points in point-cloud (default: 1024)')
149
+ parser.add_argument('--root_dir', default='./', type=str,
150
+ help='path of the data where modelnet files are downloaded.')
149
151
 
150
152
  # settings for PointNet
151
153
  parser.add_argument('--fine_tune_pointnet', default='tune', type=str, choices=['fixed', 'tune'],
@@ -194,8 +196,8 @@ def main():
194
196
  textio.cprint(str(args))
195
197
 
196
198
 
197
- trainset = RegistrationData('RPMNet', ModelNet40Data(train=True, num_points=args.num_points, use_normals=True), partial_source=True, partial_template=True)
198
- testset = RegistrationData('RPMNet', ModelNet40Data(train=False, num_points=args.num_points, use_normals=True), partial_source=True, partial_template=True)
199
+ trainset = RegistrationData('RPMNet', ModelNet40Data(train=True, num_points=args.num_points, use_normals=True, root_dir=args.root_dir), partial_source=True, partial_template=True)
200
+ testset = RegistrationData('RPMNet', ModelNet40Data(train=False, num_points=args.num_points, use_normals=True, root_dir=args.root_dir), partial_source=True, partial_template=True)
199
201
  train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=args.workers)
200
202
  test_loader = DataLoader(testset, batch_size=8, shuffle=False, drop_last=False, num_workers=args.workers)
201
203
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: learning3d
3
- Version: 0.0.6
3
+ Version: 0.1.0
4
4
  Summary: Learning3D: A Modern Library for Deep Learning on 3D Point Clouds Data
5
5
  Author-email: Vinit Sarode <vinitsarode5@gmail.com>
6
6
  Project-URL: Homepage, https://github.com/vinits5/learning3d
File without changes
File without changes
File without changes
File without changes
File without changes