learning3d-0.0.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (115)
  1. learning3d/__init__.py +2 -0
  2. learning3d/data_utils/__init__.py +4 -0
  3. learning3d/data_utils/dataloaders.py +454 -0
  4. learning3d/data_utils/user_data.py +119 -0
  5. learning3d/examples/test_dcp.py +139 -0
  6. learning3d/examples/test_deepgmr.py +144 -0
  7. learning3d/examples/test_flownet.py +113 -0
  8. learning3d/examples/test_masknet.py +159 -0
  9. learning3d/examples/test_masknet2.py +162 -0
  10. learning3d/examples/test_pcn.py +118 -0
  11. learning3d/examples/test_pcrnet.py +120 -0
  12. learning3d/examples/test_pnlk.py +121 -0
  13. learning3d/examples/test_pointconv.py +126 -0
  14. learning3d/examples/test_pointnet.py +121 -0
  15. learning3d/examples/test_prnet.py +126 -0
  16. learning3d/examples/test_rpmnet.py +120 -0
  17. learning3d/examples/train_PointNetLK.py +240 -0
  18. learning3d/examples/train_dcp.py +249 -0
  19. learning3d/examples/train_deepgmr.py +244 -0
  20. learning3d/examples/train_flownet.py +259 -0
  21. learning3d/examples/train_masknet.py +239 -0
  22. learning3d/examples/train_pcn.py +216 -0
  23. learning3d/examples/train_pcrnet.py +228 -0
  24. learning3d/examples/train_pointconv.py +245 -0
  25. learning3d/examples/train_pointnet.py +244 -0
  26. learning3d/examples/train_prnet.py +229 -0
  27. learning3d/examples/train_rpmnet.py +228 -0
  28. learning3d/losses/__init__.py +12 -0
  29. learning3d/losses/chamfer_distance.py +51 -0
  30. learning3d/losses/classification.py +14 -0
  31. learning3d/losses/correspondence_loss.py +10 -0
  32. learning3d/losses/cuda/chamfer_distance/__init__.py +1 -0
  33. learning3d/losses/cuda/chamfer_distance/chamfer_distance.cpp +185 -0
  34. learning3d/losses/cuda/chamfer_distance/chamfer_distance.cu +209 -0
  35. learning3d/losses/cuda/chamfer_distance/chamfer_distance.py +66 -0
  36. learning3d/losses/cuda/emd_torch/pkg/emd_loss_layer.py +41 -0
  37. learning3d/losses/cuda/emd_torch/pkg/include/cuda/emd.cuh +347 -0
  38. learning3d/losses/cuda/emd_torch/pkg/include/cuda_helper.h +18 -0
  39. learning3d/losses/cuda/emd_torch/pkg/include/emd.h +54 -0
  40. learning3d/losses/cuda/emd_torch/pkg/layer/__init__.py +1 -0
  41. learning3d/losses/cuda/emd_torch/pkg/layer/emd_loss_layer.py +40 -0
  42. learning3d/losses/cuda/emd_torch/pkg/src/cuda/emd.cu +70 -0
  43. learning3d/losses/cuda/emd_torch/pkg/src/emd.cpp +1 -0
  44. learning3d/losses/cuda/emd_torch/setup.py +29 -0
  45. learning3d/losses/emd.py +16 -0
  46. learning3d/losses/frobenius_norm.py +21 -0
  47. learning3d/losses/rmse_features.py +16 -0
  48. learning3d/models/__init__.py +23 -0
  49. learning3d/models/classifier.py +41 -0
  50. learning3d/models/dcp.py +92 -0
  51. learning3d/models/deepgmr.py +165 -0
  52. learning3d/models/dgcnn.py +92 -0
  53. learning3d/models/flownet3d.py +446 -0
  54. learning3d/models/masknet.py +84 -0
  55. learning3d/models/masknet2.py +264 -0
  56. learning3d/models/pcn.py +164 -0
  57. learning3d/models/pcrnet.py +74 -0
  58. learning3d/models/pointconv.py +108 -0
  59. learning3d/models/pointnet.py +108 -0
  60. learning3d/models/pointnetlk.py +173 -0
  61. learning3d/models/pooling.py +15 -0
  62. learning3d/models/ppfnet.py +102 -0
  63. learning3d/models/prnet.py +431 -0
  64. learning3d/models/rpmnet.py +359 -0
  65. learning3d/models/segmentation.py +38 -0
  66. learning3d/ops/__init__.py +0 -0
  67. learning3d/ops/data_utils.py +45 -0
  68. learning3d/ops/invmat.py +134 -0
  69. learning3d/ops/quaternion.py +218 -0
  70. learning3d/ops/se3.py +157 -0
  71. learning3d/ops/sinc.py +229 -0
  72. learning3d/ops/so3.py +213 -0
  73. learning3d/ops/transform_functions.py +342 -0
  74. learning3d/utils/__init__.py +9 -0
  75. learning3d/utils/lib/build/lib.linux-x86_64-3.5/pointnet2_cuda.cpython-35m-x86_64-linux-gnu.so +0 -0
  76. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/ball_query.o +0 -0
  77. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/ball_query_gpu.o +0 -0
  78. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/group_points.o +0 -0
  79. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/group_points_gpu.o +0 -0
  80. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/interpolate.o +0 -0
  81. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/interpolate_gpu.o +0 -0
  82. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/pointnet2_api.o +0 -0
  83. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/sampling.o +0 -0
  84. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/sampling_gpu.o +0 -0
  85. learning3d/utils/lib/dist/pointnet2-0.0.0-py3.5-linux-x86_64.egg +0 -0
  86. learning3d/utils/lib/pointnet2.egg-info/SOURCES.txt +14 -0
  87. learning3d/utils/lib/pointnet2.egg-info/dependency_links.txt +1 -0
  88. learning3d/utils/lib/pointnet2.egg-info/top_level.txt +1 -0
  89. learning3d/utils/lib/pointnet2_modules.py +160 -0
  90. learning3d/utils/lib/pointnet2_utils.py +318 -0
  91. learning3d/utils/lib/pytorch_utils.py +236 -0
  92. learning3d/utils/lib/setup.py +23 -0
  93. learning3d/utils/lib/src/ball_query.cpp +25 -0
  94. learning3d/utils/lib/src/ball_query_gpu.cu +67 -0
  95. learning3d/utils/lib/src/ball_query_gpu.h +15 -0
  96. learning3d/utils/lib/src/cuda_utils.h +15 -0
  97. learning3d/utils/lib/src/group_points.cpp +36 -0
  98. learning3d/utils/lib/src/group_points_gpu.cu +86 -0
  99. learning3d/utils/lib/src/group_points_gpu.h +22 -0
  100. learning3d/utils/lib/src/interpolate.cpp +65 -0
  101. learning3d/utils/lib/src/interpolate_gpu.cu +233 -0
  102. learning3d/utils/lib/src/interpolate_gpu.h +36 -0
  103. learning3d/utils/lib/src/pointnet2_api.cpp +25 -0
  104. learning3d/utils/lib/src/sampling.cpp +46 -0
  105. learning3d/utils/lib/src/sampling_gpu.cu +253 -0
  106. learning3d/utils/lib/src/sampling_gpu.h +29 -0
  107. learning3d/utils/pointconv_util.py +382 -0
  108. learning3d/utils/ppfnet_util.py +244 -0
  109. learning3d/utils/svd.py +59 -0
  110. learning3d/utils/transformer.py +243 -0
  111. learning3d-0.0.1.dist-info/LICENSE +21 -0
  112. learning3d-0.0.1.dist-info/METADATA +271 -0
  113. learning3d-0.0.1.dist-info/RECORD +115 -0
  114. learning3d-0.0.1.dist-info/WHEEL +5 -0
  115. learning3d-0.0.1.dist-info/top_level.txt +1 -0
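The four hunks below add the example test scripts listed above; the `+1,139`, `+1,144`, `+1,113` and `+1,159` counts match `test_dcp.py`, `test_deepgmr.py`, `test_flownet.py` and `test_masknet.py`. For orientation, here is a minimal sketch of how the packaged pieces fit together, using only imports and constructor arguments that appear in those scripts (it assumes the ModelNet40 data that `ModelNet40Data` expects has been downloaded):

# Minimal sketch: build a DCP model on DGCNN features and run one batch.
# Constructor arguments mirror the example scripts; this is not a documented API.
import torch
from torch.utils.data import DataLoader
from learning3d.models import DGCNN, DCP
from learning3d.data_utils import RegistrationData, ModelNet40Data

testset = RegistrationData('DCP', ModelNet40Data(train=False))
loader = DataLoader(testset, batch_size=2, shuffle=False)

model = DCP(feature_model=DGCNN(emb_dims=512), cycle=True).eval()
template, source, igt = next(iter(loader))
with torch.no_grad():
    output = model(template, source)  # dict: 'est_R', 'est_t', 'transformed_source', ...
print(output['est_R'].shape, output['est_t'].shape)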
learning3d/examples/test_dcp.py
@@ -0,0 +1,139 @@
+ import open3d as o3d
+ import argparse
+ import os
+ import sys
+ import logging
+ import numpy
+ import numpy as np
+ import torch
+ import torch.utils.data
+ import torchvision
+ from torch.utils.data import DataLoader
+ from tensorboardX import SummaryWriter
+ from tqdm import tqdm
+
+ # Only needed if this file is run from inside the examples folder.
+ BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+ if BASE_DIR[-8:] == 'examples':
+     sys.path.append(os.path.join(BASE_DIR, os.pardir))
+     os.chdir(os.path.join(BASE_DIR, os.pardir))
+
+ from learning3d.models import DGCNN, DCP
+ from learning3d.data_utils import RegistrationData, ModelNet40Data
+
+ def get_transformations(igt):
+     R_ba = igt[:, 0:3, 0:3]                            # Ps = R_ba * Pt
+     translation_ba = igt[:, 0:3, 3].unsqueeze(2)       # Ps = Pt + t_ba
+     R_ab = R_ba.permute(0, 2, 1)                       # Pt = R_ab * Ps
+     translation_ab = -torch.bmm(R_ab, translation_ba)  # Pt = Ps + t_ab
+     return R_ab, translation_ab, R_ba, translation_ba
+
+ def display_open3d(template, source, transformed_source):
+     template_ = o3d.geometry.PointCloud()
+     source_ = o3d.geometry.PointCloud()
+     transformed_source_ = o3d.geometry.PointCloud()
+     template_.points = o3d.utility.Vector3dVector(template)
+     source_.points = o3d.utility.Vector3dVector(source + np.array([0, 0, 0]))  # optional display offset
+     transformed_source_.points = o3d.utility.Vector3dVector(transformed_source)
+     template_.paint_uniform_color([1, 0, 0])
+     source_.paint_uniform_color([0, 1, 0])
+     transformed_source_.paint_uniform_color([0, 0, 1])
+     o3d.visualization.draw_geometries([template_, source_, transformed_source_])
+
+ def test_one_epoch(device, model, test_loader):
+     model.eval()
+     test_loss = 0.0
+     pred = 0.0
+     count = 0
+     for i, data in enumerate(tqdm(test_loader)):
+         template, source, igt = data
+         transformations = get_transformations(igt)
+         transformations = [t.to(device) for t in transformations]
+         R_ab, translation_ab, R_ba, translation_ba = transformations
+
+         template = template.to(device)
+         source = source.to(device)
+         igt = igt.to(device)
+
+         output = model(template, source)
+         display_open3d(template.detach().cpu().numpy()[0], source.detach().cpu().numpy()[0], output['transformed_source'].detach().cpu().numpy()[0])
+
+         identity = torch.eye(3).to(device).unsqueeze(0).repeat(template.shape[0], 1, 1)
+         loss_val = torch.nn.functional.mse_loss(torch.matmul(output['est_R'].transpose(2, 1), R_ab), identity) \
+                    + torch.nn.functional.mse_loss(output['est_t'], translation_ab[:, :, 0])
+
+         cycle_loss = torch.nn.functional.mse_loss(torch.matmul(output['est_R_'].transpose(2, 1), R_ba), identity) \
+                      + torch.nn.functional.mse_loss(output['est_t_'], translation_ba[:, :, 0])
+         loss_val = loss_val + cycle_loss * 0.1
+
+         test_loss += loss_val.item()
+         count += 1
+
+     test_loss = float(test_loss) / count
+     return test_loss
+
+ def test(args, model, test_loader):
+     test_loss = test_one_epoch(args.device, model, test_loader)
+
+ def options():
+     parser = argparse.ArgumentParser(description='Point Cloud Registration')
+     parser.add_argument('--exp_name', type=str, default='exp_ipcrnet', metavar='N',
+                         help='Name of the experiment')
+     parser.add_argument('--dataset_path', type=str, default='ModelNet40',
+                         metavar='PATH', help='path to the input dataset')  # like '/path/to/ModelNet40'
+     parser.add_argument('--eval', type=bool, default=False, help='Train or evaluate the network.')
+
+     # settings for input data
+     parser.add_argument('--dataset_type', default='modelnet', choices=['modelnet', 'shapenet2'],
+                         metavar='DATASET', help='dataset type (default: modelnet)')
+     parser.add_argument('--num_points', default=1024, type=int,
+                         metavar='N', help='points in point-cloud (default: 1024)')
+
+     # settings for PointNet
+     parser.add_argument('--pointnet', default='tune', type=str, choices=['fixed', 'tune'],
+                         help='train pointnet (default: tune)')
+     parser.add_argument('--emb_dims', default=512, type=int,
+                         metavar='K', help='dim. of the feature vector (default: 512)')
+     parser.add_argument('--symfn', default='max', choices=['max', 'avg'],
+                         help='symmetric function (default: max)')
+
+     # settings for training
+     parser.add_argument('-j', '--workers', default=4, type=int,
+                         metavar='N', help='number of data loading workers (default: 4)')
+     parser.add_argument('-b', '--batch_size', default=2, type=int,
+                         metavar='N', help='mini-batch size (default: 2)')
+     parser.add_argument('--pretrained', default='learning3d/pretrained/exp_dcp/models/best_model.t7', type=str,
+                         metavar='PATH', help='path to pretrained model file (default: null (no-use))')
+     parser.add_argument('--device', default='cuda:0', type=str,
+                         metavar='DEVICE', help='use CUDA if available')
+
+     args = parser.parse_args()
+     return args
+
+ def main():
+     args = options()
+     torch.backends.cudnn.deterministic = True
+
+     trainset = RegistrationData('DCP', ModelNet40Data(train=True))
+     testset = RegistrationData('DCP', ModelNet40Data(train=False))
+     train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=args.workers)
+     test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=args.workers)
+
+     if not torch.cuda.is_available():
+         args.device = 'cpu'
+     args.device = torch.device(args.device)
+
+     # Create the DGCNN feature extractor and the DCP registration model.
+     dgcnn = DGCNN(emb_dims=args.emb_dims)
+     model = DCP(feature_model=dgcnn, cycle=True)
+     model = model.to(args.device)
+
+     if args.pretrained:
+         assert os.path.isfile(args.pretrained)
+         model.load_state_dict(torch.load(args.pretrained, map_location='cpu'), strict=False)
+         model.to(args.device)
+
+     test(args, model, test_loader)
+
+ if __name__ == '__main__':
+     main()
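`get_transformations` above uses the closed-form inverse of a rigid transform: if Ps = R_ba·Pt + t_ba, then Pt = R_baᵀ·Ps − R_baᵀ·t_ba, so R_ab = R_baᵀ and t_ab = −R_ab·t_ba. A small self-contained check of that identity (the QR-based random rotations are an assumption of the sketch, not part of the script):

# Sketch: verify that (R_ab, t_ab) inverts (R_ba, t_ba) as in get_transformations.
import torch

A = torch.randn(4, 3, 3)
Q, _ = torch.linalg.qr(A)                    # batch of orthogonal matrices
Q = Q * torch.det(Q).sign().view(-1, 1, 1)   # force det = +1 (proper rotations)

t_ba = torch.randn(4, 3, 1)
R_ab = Q.permute(0, 2, 1)                    # inverse rotation = transpose
t_ab = -torch.bmm(R_ab, t_ba)                # inverse translation

# Applying (R_ba, t_ba) then (R_ab, t_ab) must return the original points.
Pt = torch.randn(4, 3, 100)
Ps = torch.bmm(Q, Pt) + t_ba
back = torch.bmm(R_ab, Ps) + t_ab
assert torch.allclose(back, Pt, atol=1e-4)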
learning3d/examples/test_deepgmr.py
@@ -0,0 +1,144 @@
+ import open3d as o3d
+ import argparse
+ import os
+ import sys
+ import logging
+ import math
+ import numpy as np
+ import torch
+ import torch.utils.data
+ import torch.nn.functional as F
+ from torch.utils.data import DataLoader
+ from tensorboardX import SummaryWriter
+ from tqdm import tqdm
+
+ # Only needed if this file is run from inside the examples folder.
+ BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+ if BASE_DIR[-8:] == 'examples':
+     sys.path.append(os.path.join(BASE_DIR, os.pardir))
+     os.chdir(os.path.join(BASE_DIR, os.pardir))
+
+ from learning3d.models import DeepGMR
+ from learning3d.data_utils import RegistrationData, ModelNet40Data
+
+ def display_open3d(template, source, transformed_source):
+     template_ = o3d.geometry.PointCloud()
+     source_ = o3d.geometry.PointCloud()
+     transformed_source_ = o3d.geometry.PointCloud()
+     template_.points = o3d.utility.Vector3dVector(template)
+     source_.points = o3d.utility.Vector3dVector(source + np.array([0, 0, 0]))  # optional display offset
+     transformed_source_.points = o3d.utility.Vector3dVector(transformed_source)
+     template_.paint_uniform_color([1, 0, 0])
+     source_.paint_uniform_color([0, 1, 0])
+     transformed_source_.paint_uniform_color([0, 0, 1])
+     o3d.visualization.draw_geometries([template_, source_, transformed_source_])
+
+ def rotation_error(R, R_gt):
+     cos_theta = (torch.einsum('bij,bij->b', R, R_gt) - 1) / 2
+     cos_theta = torch.clamp(cos_theta, -1, 1)
+     return torch.acos(cos_theta) * 180 / math.pi
+
+ def translation_error(t, t_gt):
+     return torch.norm(t - t_gt, dim=1)
+
+ def rmse(pts, T, T_gt):
+     pts_pred = pts @ T[:, :3, :3].transpose(1, 2) + T[:, :3, 3].unsqueeze(1)
+     pts_gt = pts @ T_gt[:, :3, :3].transpose(1, 2) + T_gt[:, :3, 3].unsqueeze(1)
+     return torch.norm(pts_pred - pts_gt, dim=2).mean(dim=1)
+
+ def test_one_epoch(device, model, test_loader):
+     model.eval()
+     test_loss = 0.0
+     pred = 0.0
+     count = 0
+     rotation_errors, translation_errors, rmses = [], [], []
+
+     for i, data in enumerate(tqdm(test_loader)):
+         template, source, igt = data
+
+         template = template.to(device)
+         source = source.to(device)
+         igt = igt.to(device)
+
+         output = model(template, source)
+         display_open3d(template.detach().cpu().numpy()[0, :, :3], source.detach().cpu().numpy()[0, :, :3], output['transformed_source'].detach().cpu().numpy()[0])
+
+         eye = torch.eye(4).expand_as(igt).to(igt.device)
+         mse1 = F.mse_loss(output['est_T_inverse'] @ torch.inverse(igt), eye)
+         mse2 = F.mse_loss(output['est_T'] @ igt, eye)
+         loss = mse1 + mse2
+
+         r_err = rotation_error(output['est_T_inverse'][:, :3, :3], igt[:, :3, :3])
+         t_err = translation_error(output['est_T_inverse'][:, :3, 3], igt[:, :3, 3])
+         rmse_val = rmse(template[:, :100], output['est_T_inverse'], igt)
+         rotation_errors.append(r_err.detach().cpu().numpy())
+         translation_errors.append(t_err.detach().cpu().numpy())
+         rmses.append(rmse_val.detach().cpu().numpy())
+
+         test_loss += loss.item()
+         count += 1
+
+     test_loss = float(test_loss) / count
+     print("Mean rotation error: {}, Mean translation error: {} and Mean RMSE: {}".format(np.mean(np.concatenate(rotation_errors)), np.mean(np.concatenate(translation_errors)), np.mean(np.concatenate(rmses))))
+     return test_loss
+
+ def test(args, model, test_loader):
+     test_loss = test_one_epoch(args.device, model, test_loader)
+
+ def options():
+     parser = argparse.ArgumentParser(description='Point Cloud Registration')
+     parser.add_argument('--exp_name', type=str, default='exp_deepgmr', metavar='N',
+                         help='Name of the experiment')
+     parser.add_argument('--dataset_path', type=str, default='ModelNet40',
+                         metavar='PATH', help='path to the input dataset')  # like '/path/to/ModelNet40'
+     parser.add_argument('--eval', type=bool, default=False, help='Train or evaluate the network.')
+
+     # settings for input data
+     parser.add_argument('--dataset_type', default='modelnet', choices=['modelnet', 'shapenet2'],
+                         metavar='DATASET', help='dataset type (default: modelnet)')
+     parser.add_argument('--num_points', default=1024, type=int,
+                         metavar='N', help='points in point-cloud (default: 1024)')
+
+     parser.add_argument('--nearest_neighbors', default=20, type=int,
+                         metavar='K', help='number of nearest neighbors to be estimated')
+     parser.add_argument('--use_rri', default=True, type=bool,
+                         help='use nearest neighbors to build rotation-invariant (RRI) PointNet input features')
+
+     # settings for training
+     parser.add_argument('-j', '--workers', default=4, type=int,
+                         metavar='N', help='number of data loading workers (default: 4)')
+     parser.add_argument('-b', '--batch_size', default=2, type=int,
+                         metavar='N', help='mini-batch size (default: 2)')
+     parser.add_argument('--pretrained', default='learning3d/pretrained/exp_deepgmr/models/best_model.pth', type=str,
+                         metavar='PATH', help='path to pretrained model file (default: null (no-use))')
+     parser.add_argument('--device', default='cuda:0', type=str,
+                         metavar='DEVICE', help='use CUDA if available')
+
+     args = parser.parse_args()
+     return args
+
+ def main():
+     args = options()
+     torch.backends.cudnn.deterministic = True
+
+     trainset = RegistrationData('DeepGMR', ModelNet40Data(train=True))
+     testset = RegistrationData('DeepGMR', ModelNet40Data(train=False))
+     train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=args.workers)
+     test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=args.workers)
+
+     if not torch.cuda.is_available():
+         args.device = 'cpu'
+     args.device = torch.device(args.device)
+
+     model = DeepGMR(use_rri=args.use_rri, nearest_neighbors=args.nearest_neighbors)
+     model = model.to(args.device)
+
+     if args.pretrained:
+         assert os.path.isfile(args.pretrained)
+         model.load_state_dict(torch.load(args.pretrained, map_location='cpu'), strict=False)
+         model.to(args.device)
+
+     test(args, model, test_loader)
+
+ if __name__ == '__main__':
+     main()
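`rotation_error` above is the geodesic angle between two rotations: for R_rel = RᵀR_gt, tr(R_rel) = 1 + 2cos θ, and `einsum('bij,bij->b', R, R_gt)` is exactly the batched tr(RᵀR_gt), since Σᵢⱼ Rᵢⱼ(R_gt)ᵢⱼ = tr(RᵀR_gt). A quick sanity check (the `rot_z` helper is hypothetical, used only for the check):

# Sketch: a rotation of 0.3 rad about z against the identity should give ~17.19 degrees.
import math
import torch

def rot_z(theta):
    c, s = math.cos(theta), math.sin(theta)
    return torch.tensor([[c, -s, 0.], [s, c, 0.], [0., 0., 1.]])

R_gt = torch.eye(3).unsqueeze(0)
R = rot_z(0.3).unsqueeze(0)

cos_theta = (torch.einsum('bij,bij->b', R, R_gt) - 1) / 2
angle_deg = torch.acos(torch.clamp(cos_theta, -1, 1)) * 180 / math.pi
print(angle_deg)  # tensor([17.1887]) == 0.3 rad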
learning3d/examples/test_flownet.py
@@ -0,0 +1,113 @@
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*-
+
+
+ import open3d as o3d
+ import os
+ import gc
+ import argparse
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import torch.optim as optim
+ from torch.optim.lr_scheduler import MultiStepLR
+ from learning3d.models import FlowNet3D
+ from learning3d.data_utils import SceneflowDataset
+ import numpy as np
+ from torch.utils.data import DataLoader
+ from tensorboardX import SummaryWriter
+ from tqdm import tqdm
+
+ def display_open3d(template, source, transformed_source):
+     template_ = o3d.geometry.PointCloud()
+     source_ = o3d.geometry.PointCloud()
+     transformed_source_ = o3d.geometry.PointCloud()
+     template_.points = o3d.utility.Vector3dVector(template)
+     source_.points = o3d.utility.Vector3dVector(source + np.array([0, 0.5, 0.5]))  # offset for display
+     transformed_source_.points = o3d.utility.Vector3dVector(transformed_source)
+     template_.paint_uniform_color([1, 0, 0])
+     source_.paint_uniform_color([0, 1, 0])
+     transformed_source_.paint_uniform_color([0, 0, 1])
+     o3d.visualization.draw_geometries([template_, source_, transformed_source_])
+
+ def test_one_epoch(args, net, test_loader):
+     net.eval()
+
+     total_loss = 0
+     num_examples = 0
+     for i, data in enumerate(tqdm(test_loader)):
+         data = [d.to(args.device) for d in data]
+         pc1, pc2, color1, color2, flow, mask1 = data
+         pc1 = pc1.transpose(2, 1).contiguous()
+         pc2 = pc2.transpose(2, 1).contiguous()
+         color1 = color1.transpose(2, 1).contiguous()
+         color2 = color2.transpose(2, 1).contiguous()
+         flow = flow  # flow stays [B, N, 3]
+         mask1 = mask1.float()
+
+         batch_size = pc1.size(0)
+         num_examples += batch_size
+         flow_pred = net(pc1, pc2, color1, color2).permute(0, 2, 1)
+         loss_1 = torch.mean(mask1 * torch.sum((flow_pred - flow) * (flow_pred - flow), -1) / 2.0)
+
+         pc1, pc2 = pc1.permute(0, 2, 1), pc2.permute(0, 2, 1)
+         pc1_ = pc1 - flow_pred
+         print("Loss: ", loss_1.item())
+         display_open3d(pc1.detach().cpu().numpy()[0], pc2.detach().cpu().numpy()[0], pc1_.detach().cpu().numpy()[0])
+         total_loss += loss_1.item() * batch_size
+
+     return total_loss * 1.0 / num_examples
+
+
+ def test(args, net, test_loader):
+     test_loss = test_one_epoch(args, net, test_loader)
+
+ def main():
+     parser = argparse.ArgumentParser(description='Scene Flow Estimation')
+     parser.add_argument('--model', type=str, default='flownet', metavar='N',
+                         choices=['flownet'], help='Model to use, [flownet]')
+     parser.add_argument('--emb_dims', type=int, default=512, metavar='N',
+                         help='Dimension of embeddings')
+     parser.add_argument('--num_points', type=int, default=2048,
+                         help='Point Number [default: 2048]')
+     parser.add_argument('--test_batch_size', type=int, default=1, metavar='batch_size',
+                         help='Size of batch')
+
+     parser.add_argument('--gaussian_noise', type=bool, default=False, metavar='N',
+                         help='Whether to add Gaussian noise')
+     parser.add_argument('--unseen', type=bool, default=False, metavar='N',
+                         help='Whether to test on unseen categories')
+     parser.add_argument('--dataset', type=str, default='SceneflowDataset',
+                         choices=['SceneflowDataset'], metavar='N',
+                         help='dataset to use')
+     parser.add_argument('--dataset_path', type=str, default='data_processed_maxcut_35_20k_2k_8192', metavar='N',
+                         help='path to the dataset')
+     parser.add_argument('--pretrained', type=str, default='learning3d/pretrained/exp_flownet/models/model.best.t7', metavar='N',
+                         help='Pretrained model path')
+     parser.add_argument('--device', default='cuda:0', type=str,
+                         metavar='DEVICE', help='use CUDA if available')
+
+     args = parser.parse_args()
+     if not torch.cuda.is_available():
+         args.device = torch.device('cpu')
+     else:
+         args.device = torch.device(args.device)
+
+     if args.dataset == 'SceneflowDataset':
+         test_loader = DataLoader(
+             SceneflowDataset(npoints=args.num_points, partition='test'),
+             batch_size=args.test_batch_size, shuffle=False, drop_last=False)
+     else:
+         raise Exception("not implemented")
+
+     net = FlowNet3D()
+     assert os.path.exists(args.pretrained), "Pretrained model doesn't exist!"
+     net.load_state_dict(torch.load(args.pretrained, map_location='cpu'))
+     net = net.to(args.device)
+
+     test(args, net, test_loader)
+     print('FINISH')
+
+
+ if __name__ == '__main__':
+     main()
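The script above reports only the masked squared-error loss the network was trained with. The standard evaluation metric for scene flow is the end-point error (EPE), the mean Euclidean distance between predicted and ground-truth flow vectors. It is not computed by the packaged script; a sketch of how it could be added (the `end_point_error` helper is hypothetical; shapes follow the script, `[B, N, 3]` flows and a `[B, N]` validity mask):

import torch

def end_point_error(flow_pred, flow, mask):
    # flow_pred, flow: [B, N, 3]; mask: [B, N] with 1 for valid points.
    # Sketch only: EPE is the standard scene-flow metric, but this helper
    # is not part of the packaged script.
    err = torch.norm(flow_pred - flow, dim=2)            # per-point distance, [B, N]
    return (err * mask).sum() / mask.sum().clamp(min=1)  # masked mean

# e.g. inside the test loop, after flow_pred is permuted to [B, N, 3]:
# epe = end_point_error(flow_pred, flow, mask1)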
learning3d/examples/test_masknet.py
@@ -0,0 +1,159 @@
+ import open3d as o3d
+ import argparse
+ import os
+ import sys
+ import logging
+ import numpy
+ import numpy as np
+ import torch
+ import torch.utils.data
+ import torchvision
+ from torch.utils.data import DataLoader
+ from tensorboardX import SummaryWriter
+ from tqdm import tqdm
+
+ # Only needed if this file is run from inside the examples folder.
+ BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+ if BASE_DIR[-8:] == 'examples':
+     sys.path.append(os.path.join(BASE_DIR, os.pardir))
+     os.chdir(os.path.join(BASE_DIR, os.pardir))
+
+ from learning3d.models import MaskNet
+ from learning3d.data_utils import RegistrationData, ModelNet40Data
+
+ def pc2open3d(data):
+     if torch.is_tensor(data): data = data.detach().cpu().numpy()
+     if len(data.shape) == 2:
+         pc = o3d.geometry.PointCloud()
+         pc.points = o3d.utility.Vector3dVector(data)
+         return pc
+     else:
+         print("Error in the shape of data given to Open3D! Shape is ", data.shape)
+
+ def display_results(template, source, masked_template):
+     template = pc2open3d(template)
+     source = pc2open3d(source)
+     masked_template = pc2open3d(masked_template)
+
+     template.paint_uniform_color([1, 0, 0])
+     source.paint_uniform_color([0, 1, 0])
+     masked_template.paint_uniform_color([0, 0, 1])
+
+     o3d.visualization.draw_geometries([template, source])
+     o3d.visualization.draw_geometries([masked_template, source])
+
+ def evaluate_metrics(TP, FP, FN, TN, gt_mask):
+     # TP, FP, FN, TN: true positive, false positive, false negative, true negative counts
+     # gt_mask: ground-truth mask [Nt, 1]
+
+     accuracy = (TP + TN) / gt_mask.shape[1]
+     misclassification_rate = (FN + FP) / gt_mask.shape[1]
+     # Precision: what portion of positive identifications is actually correct?
+     precision = TP / (TP + FP)
+     # Recall: what portion of actual positives is identified correctly?
+     recall = TP / (TP + FN)
+
+     fscore = (2 * precision * recall) / (precision + recall)
+     return accuracy, precision, recall, fscore
+
+ # Function used to evaluate the predicted mask against the ground-truth mask.
+ def evaluate_mask(gt_mask, predicted_mask, predicted_mask_idx):
+     # gt_mask: ground-truth mask [Nt, 1]
+     # predicted_mask: mask predicted by the network [Nt, 1]
+     # predicted_mask_idx: point indices chosen by the network [Ns, 1]
+
+     if torch.is_tensor(gt_mask): gt_mask = gt_mask.detach().cpu().numpy()
+     if torch.is_tensor(predicted_mask): predicted_mask = predicted_mask.detach().cpu().numpy()
+     if torch.is_tensor(predicted_mask_idx): predicted_mask_idx = predicted_mask_idx.detach().cpu().numpy()
+     gt_mask, predicted_mask, predicted_mask_idx = gt_mask.reshape(1, -1), predicted_mask.reshape(1, -1), predicted_mask_idx.reshape(1, -1)
+
+     gt_idx = np.where(gt_mask == 1)[1].reshape(1, -1)  # indices of points that are actually in the source
+
+     # TP + FP = number of source points.
+     TP = np.intersect1d(predicted_mask_idx[0], gt_idx[0]).shape[0]  # inlier predicted as inlier (true positive): common indices of predicted_mask_idx and gt_idx
+     FP = len([x for x in predicted_mask_idx[0] if x not in gt_idx[0]])  # outlier predicted as inlier (false positive)
+     FN = FP  # inlier predicted as outlier (false negative); equals FP because the mask selects exactly Ns points
+     TN = gt_mask.shape[1] - gt_idx.shape[1] - FN  # outlier predicted as outlier (true negative)
+     return evaluate_metrics(TP, FP, FN, TN, gt_mask)
+
+ def test_one_epoch(args, model, test_loader):
+     model.eval()
+     test_loss = 0.0
+     pred = 0.0
+     count = 0
+     precision_list = []
+
+     for i, data in enumerate(tqdm(test_loader)):
+         template, source, igt, gt_mask = data
+
+         template = template.to(args.device)
+         source = source.to(args.device)
+         igt = igt.to(args.device)  # [source] = [igt]*[template]
+         gt_mask = gt_mask.to(args.device)
+
+         masked_template, predicted_mask = model(template, source)
+
+         # Evaluate the mask with classification metrics.
+         accuracy, precision, recall, fscore = evaluate_mask(gt_mask, predicted_mask, predicted_mask_idx=model.mask_idx)
+         precision_list.append(precision)
+
+         # Different ways to visualize results.
+         display_results(template.detach().cpu().numpy()[0], source.detach().cpu().numpy()[0], masked_template.detach().cpu().numpy()[0])
+
+     print("Mean Precision: ", np.mean(precision_list))
+
+ def test(args, model, test_loader):
+     test_one_epoch(args, model, test_loader)
+
+ def options():
+     parser = argparse.ArgumentParser(description='MaskNet: A Fully-Convolutional Network For Inlier Estimation (Testing)')
+
+     # settings for input data
+     parser.add_argument('--num_points', default=1024, type=int,
+                         metavar='N', help='points in point-cloud (default: 1024)')
+     parser.add_argument('--partial_source', default=True, type=bool,
+                         help='create a partial source point cloud in the dataset')
+     parser.add_argument('--noise', default=False, type=bool,
+                         help='add noise to source point clouds')
+     parser.add_argument('--outliers', default=False, type=bool,
+                         help='add outliers to the template point cloud')
+
+     # settings for testing
+     parser.add_argument('-j', '--workers', default=1, type=int,
+                         metavar='N', help='number of data loading workers (default: 1)')
+     parser.add_argument('-b', '--test_batch_size', default=1, type=int,
+                         metavar='N', help='test mini-batch size (default: 1)')
+     parser.add_argument('--pretrained', default='learning3d/pretrained/exp_masknet/models/best_model.t7', type=str,
+                         metavar='PATH', help='path to pretrained model file (default: null (no-use))')
+     parser.add_argument('--device', default='cuda:0', type=str,
+                         metavar='DEVICE', help='use CUDA if available')
+     parser.add_argument('--unseen', default=False, type=bool,
+                         help='use first 20 categories for training and last 20 for testing')
+
+     args = parser.parse_args()
+     return args
+
+ def main():
+     args = options()
+     torch.backends.cudnn.deterministic = True
+
+     testset = RegistrationData('PointNetLK', ModelNet40Data(train=False, num_points=args.num_points),
+                                partial_source=args.partial_source, noise=args.noise,
+                                additional_params={'use_masknet': True})
+     test_loader = DataLoader(testset, batch_size=args.test_batch_size, shuffle=False, drop_last=False, num_workers=args.workers)
+
+     if not torch.cuda.is_available():
+         args.device = 'cpu'
+     args.device = torch.device(args.device)
+
+     # Load pretrained MaskNet.
+     model = MaskNet()
+     if args.pretrained:
+         assert os.path.isfile(args.pretrained)
+         model.load_state_dict(torch.load(args.pretrained, map_location='cpu'))
+     model = model.to(args.device)
+
+     test(args, model, test_loader)
+
+ if __name__ == '__main__':
+     main()
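`evaluate_mask` above leans on a counting identity: the network selects exactly Ns indices and the ground truth also contains exactly Ns inliers, so every false positive displaces one true inlier and FN = FP (the comment "TP + FP = number of source points" states the same constraint). A toy check of the arithmetic (all numbers are made up for the sketch):

import numpy as np

Nt, Ns = 10, 6
gt_mask = np.zeros(Nt); gt_mask[:Ns] = 1          # first 6 points are inliers
predicted_idx = np.array([0, 1, 2, 3, 7, 8])      # 4 correct picks, 2 wrong

gt_idx = np.where(gt_mask == 1)[0]
TP = np.intersect1d(predicted_idx, gt_idx).shape[0]       # 4
FP = len([x for x in predicted_idx if x not in gt_idx])   # 2
FN = Ns - TP                                              # 2 == FP
TN = Nt - Ns - FP                                         # 2

assert FN == FP and TP + FP == Ns and TP + FP + FN + TN == Nt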