learning3d-0.0.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (115)
  1. learning3d/__init__.py +2 -0
  2. learning3d/data_utils/__init__.py +4 -0
  3. learning3d/data_utils/dataloaders.py +454 -0
  4. learning3d/data_utils/user_data.py +119 -0
  5. learning3d/examples/test_dcp.py +139 -0
  6. learning3d/examples/test_deepgmr.py +144 -0
  7. learning3d/examples/test_flownet.py +113 -0
  8. learning3d/examples/test_masknet.py +159 -0
  9. learning3d/examples/test_masknet2.py +162 -0
  10. learning3d/examples/test_pcn.py +118 -0
  11. learning3d/examples/test_pcrnet.py +120 -0
  12. learning3d/examples/test_pnlk.py +121 -0
  13. learning3d/examples/test_pointconv.py +126 -0
  14. learning3d/examples/test_pointnet.py +121 -0
  15. learning3d/examples/test_prnet.py +126 -0
  16. learning3d/examples/test_rpmnet.py +120 -0
  17. learning3d/examples/train_PointNetLK.py +240 -0
  18. learning3d/examples/train_dcp.py +249 -0
  19. learning3d/examples/train_deepgmr.py +244 -0
  20. learning3d/examples/train_flownet.py +259 -0
  21. learning3d/examples/train_masknet.py +239 -0
  22. learning3d/examples/train_pcn.py +216 -0
  23. learning3d/examples/train_pcrnet.py +228 -0
  24. learning3d/examples/train_pointconv.py +245 -0
  25. learning3d/examples/train_pointnet.py +244 -0
  26. learning3d/examples/train_prnet.py +229 -0
  27. learning3d/examples/train_rpmnet.py +228 -0
  28. learning3d/losses/__init__.py +12 -0
  29. learning3d/losses/chamfer_distance.py +51 -0
  30. learning3d/losses/classification.py +14 -0
  31. learning3d/losses/correspondence_loss.py +10 -0
  32. learning3d/losses/cuda/chamfer_distance/__init__.py +1 -0
  33. learning3d/losses/cuda/chamfer_distance/chamfer_distance.cpp +185 -0
  34. learning3d/losses/cuda/chamfer_distance/chamfer_distance.cu +209 -0
  35. learning3d/losses/cuda/chamfer_distance/chamfer_distance.py +66 -0
  36. learning3d/losses/cuda/emd_torch/pkg/emd_loss_layer.py +41 -0
  37. learning3d/losses/cuda/emd_torch/pkg/include/cuda/emd.cuh +347 -0
  38. learning3d/losses/cuda/emd_torch/pkg/include/cuda_helper.h +18 -0
  39. learning3d/losses/cuda/emd_torch/pkg/include/emd.h +54 -0
  40. learning3d/losses/cuda/emd_torch/pkg/layer/__init__.py +1 -0
  41. learning3d/losses/cuda/emd_torch/pkg/layer/emd_loss_layer.py +40 -0
  42. learning3d/losses/cuda/emd_torch/pkg/src/cuda/emd.cu +70 -0
  43. learning3d/losses/cuda/emd_torch/pkg/src/emd.cpp +1 -0
  44. learning3d/losses/cuda/emd_torch/setup.py +29 -0
  45. learning3d/losses/emd.py +16 -0
  46. learning3d/losses/frobenius_norm.py +21 -0
  47. learning3d/losses/rmse_features.py +16 -0
  48. learning3d/models/__init__.py +23 -0
  49. learning3d/models/classifier.py +41 -0
  50. learning3d/models/dcp.py +92 -0
  51. learning3d/models/deepgmr.py +165 -0
  52. learning3d/models/dgcnn.py +92 -0
  53. learning3d/models/flownet3d.py +446 -0
  54. learning3d/models/masknet.py +84 -0
  55. learning3d/models/masknet2.py +264 -0
  56. learning3d/models/pcn.py +164 -0
  57. learning3d/models/pcrnet.py +74 -0
  58. learning3d/models/pointconv.py +108 -0
  59. learning3d/models/pointnet.py +108 -0
  60. learning3d/models/pointnetlk.py +173 -0
  61. learning3d/models/pooling.py +15 -0
  62. learning3d/models/ppfnet.py +102 -0
  63. learning3d/models/prnet.py +431 -0
  64. learning3d/models/rpmnet.py +359 -0
  65. learning3d/models/segmentation.py +38 -0
  66. learning3d/ops/__init__.py +0 -0
  67. learning3d/ops/data_utils.py +45 -0
  68. learning3d/ops/invmat.py +134 -0
  69. learning3d/ops/quaternion.py +218 -0
  70. learning3d/ops/se3.py +157 -0
  71. learning3d/ops/sinc.py +229 -0
  72. learning3d/ops/so3.py +213 -0
  73. learning3d/ops/transform_functions.py +342 -0
  74. learning3d/utils/__init__.py +9 -0
  75. learning3d/utils/lib/build/lib.linux-x86_64-3.5/pointnet2_cuda.cpython-35m-x86_64-linux-gnu.so +0 -0
  76. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/ball_query.o +0 -0
  77. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/ball_query_gpu.o +0 -0
  78. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/group_points.o +0 -0
  79. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/group_points_gpu.o +0 -0
  80. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/interpolate.o +0 -0
  81. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/interpolate_gpu.o +0 -0
  82. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/pointnet2_api.o +0 -0
  83. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/sampling.o +0 -0
  84. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/sampling_gpu.o +0 -0
  85. learning3d/utils/lib/dist/pointnet2-0.0.0-py3.5-linux-x86_64.egg +0 -0
  86. learning3d/utils/lib/pointnet2.egg-info/SOURCES.txt +14 -0
  87. learning3d/utils/lib/pointnet2.egg-info/dependency_links.txt +1 -0
  88. learning3d/utils/lib/pointnet2.egg-info/top_level.txt +1 -0
  89. learning3d/utils/lib/pointnet2_modules.py +160 -0
  90. learning3d/utils/lib/pointnet2_utils.py +318 -0
  91. learning3d/utils/lib/pytorch_utils.py +236 -0
  92. learning3d/utils/lib/setup.py +23 -0
  93. learning3d/utils/lib/src/ball_query.cpp +25 -0
  94. learning3d/utils/lib/src/ball_query_gpu.cu +67 -0
  95. learning3d/utils/lib/src/ball_query_gpu.h +15 -0
  96. learning3d/utils/lib/src/cuda_utils.h +15 -0
  97. learning3d/utils/lib/src/group_points.cpp +36 -0
  98. learning3d/utils/lib/src/group_points_gpu.cu +86 -0
  99. learning3d/utils/lib/src/group_points_gpu.h +22 -0
  100. learning3d/utils/lib/src/interpolate.cpp +65 -0
  101. learning3d/utils/lib/src/interpolate_gpu.cu +233 -0
  102. learning3d/utils/lib/src/interpolate_gpu.h +36 -0
  103. learning3d/utils/lib/src/pointnet2_api.cpp +25 -0
  104. learning3d/utils/lib/src/sampling.cpp +46 -0
  105. learning3d/utils/lib/src/sampling_gpu.cu +253 -0
  106. learning3d/utils/lib/src/sampling_gpu.h +29 -0
  107. learning3d/utils/pointconv_util.py +382 -0
  108. learning3d/utils/ppfnet_util.py +244 -0
  109. learning3d/utils/svd.py +59 -0
  110. learning3d/utils/transformer.py +243 -0
  111. learning3d-0.0.1.dist-info/LICENSE +21 -0
  112. learning3d-0.0.1.dist-info/METADATA +271 -0
  113. learning3d-0.0.1.dist-info/RECORD +115 -0
  114. learning3d-0.0.1.dist-info/WHEEL +5 -0
  115. learning3d-0.0.1.dist-info/top_level.txt +1 -0
learning3d/examples/test_pointnet.py
@@ -0,0 +1,121 @@
+ import open3d as o3d
+ import argparse
+ import os
+ import sys
+ import logging
+ import numpy as np
+ import torch
+ import torch.utils.data
+ import torchvision
+ from torch.utils.data import DataLoader
+ from tensorboardX import SummaryWriter
+ from tqdm import tqdm
+
+ # Only if the files are in the examples folder.
+ BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+ if BASE_DIR[-8:] == 'examples':
+     sys.path.append(os.path.join(BASE_DIR, os.pardir))
+     os.chdir(os.path.join(BASE_DIR, os.pardir))
+
+ from learning3d.models import PointNet
+ from learning3d.models import Classifier
+ from learning3d.data_utils import ClassificationData, ModelNet40Data
+
+ def display_open3d(template):
+     template_ = o3d.geometry.PointCloud()
+     template_.points = o3d.utility.Vector3dVector(template)
+     # template_.paint_uniform_color([1, 0, 0])
+     o3d.visualization.draw_geometries([template_])
+
+ def test_one_epoch(device, model, test_loader, testset):
+     model.eval()
+     test_loss = 0.0
+     pred = 0.0
+     count = 0
+     for i, data in enumerate(tqdm(test_loader)):
+         points, target = data
+         target = target[:, 0]
+
+         points = points.to(device)
+         target = target.to(device)
+
+         output = model(points)
+         loss_val = torch.nn.functional.nll_loss(
+             torch.nn.functional.log_softmax(output, dim=1), target, reduction='sum')
+         print("Ground Truth Label: ", testset.get_shape(target[0].item()))
+         print("Predicted Label: ", testset.get_shape(torch.argmax(output[0]).item()))
+         display_open3d(points.detach().cpu().numpy()[0])
+
+         test_loss += loss_val.item()
+         count += output.size(0)
+
+         _, pred1 = output.max(dim=1)
+         ag = (pred1 == target)
+         am = ag.sum()
+         pred += am.item()
+
+     test_loss = float(test_loss) / count
+     accuracy = float(pred) / count
+     return test_loss, accuracy
+
+ def test(args, model, test_loader, testset):
+     test_loss, test_accuracy = test_one_epoch(args.device, model, test_loader, testset)
+
+ def options():
+     parser = argparse.ArgumentParser(description='Point Cloud Classification')
+     parser.add_argument('--dataset_path', type=str, default='ModelNet40',
+                         metavar='PATH', help='path to the input dataset')  # like '/path/to/ModelNet40'
+     parser.add_argument('--eval', action='store_true', help='Train or Evaluate the network.')
+
+     # settings for input data
+     parser.add_argument('--dataset_type', default='modelnet', choices=['modelnet', 'shapenet2'],
+                         metavar='DATASET', help='dataset type (default: modelnet)')
+     parser.add_argument('--num_points', default=1024, type=int,
+                         metavar='N', help='points in point-cloud (default: 1024)')
+
+     # settings for PointNet
+     parser.add_argument('--pointnet', default='tune', type=str, choices=['fixed', 'tune'],
+                         help='train pointnet (default: tune)')
+     parser.add_argument('-j', '--workers', default=4, type=int,
+                         metavar='N', help='number of data loading workers (default: 4)')
+     parser.add_argument('-b', '--batch_size', default=32, type=int,
+                         metavar='N', help='mini-batch size (default: 32)')
+     parser.add_argument('--emb_dims', default=1024, type=int,
+                         metavar='K', help='dim. of the feature vector (default: 1024)')
+     parser.add_argument('--symfn', default='max', choices=['max', 'avg'],
+                         help='symmetric function (default: max)')
+
+     # settings for training
+     parser.add_argument('--pretrained', default='learning3d/pretrained/exp_classifier/models/best_model.t7', type=str,
+                         metavar='PATH', help='path to pretrained model file (default: null (no-use))')
+     parser.add_argument('--device', default='cuda:0', type=str,
+                         metavar='DEVICE', help='use CUDA if available')
+
+     args = parser.parse_args()
+     return args
+
+ def main():
+     args = options()
+     # Overrides --dataset_path; assumes ModelNet40 lives two directories up.
+     args.dataset_path = os.path.join(os.getcwd(), os.pardir, os.pardir, 'ModelNet40', 'ModelNet40')
+
+     testset = ClassificationData(ModelNet40Data(train=False))
+     test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=args.workers)
+
+     if not torch.cuda.is_available():
+         args.device = 'cpu'
+     args.device = torch.device(args.device)
+
+     # Create PointNet Model.
+     ptnet = PointNet(emb_dims=args.emb_dims, use_bn=True)
+     model = Classifier(feature_model=ptnet)
+
+     if args.pretrained:
+         assert os.path.isfile(args.pretrained)
+         model.load_state_dict(torch.load(args.pretrained, map_location='cpu'))
+     model.to(args.device)
+
+     test(args, model, test_loader, testset)
+
+ if __name__ == '__main__':
+     main()
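For reference, the classifier exercised above can also be driven programmatically. A minimal sketch, assuming a (B, N, 3) float tensor as input (the shape the script's ModelNet40 loader produces) and the default 40 ModelNet classes:

    # Hypothetical standalone use of the PointNet classifier shown above.
    import torch
    from learning3d.models import PointNet, Classifier

    ptnet = PointNet(emb_dims=1024, use_bn=True)  # per-point features pooled to a global descriptor
    model = Classifier(feature_model=ptnet)
    model.eval()

    points = torch.rand(1, 1024, 3)               # one synthetic cloud in the unit cube
    with torch.no_grad():
        logits = model(points)                    # (1, num_classes) class scores
    print(logits.argmax(dim=1).item())            # predicted class index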
learning3d/examples/test_prnet.py
@@ -0,0 +1,126 @@
+ import open3d as o3d
+ import argparse
+ import os
+ import sys
+ import logging
+ import numpy as np
+ import torch
+ import torch.utils.data
+ import torchvision
+ from torch.utils.data import DataLoader
+ from tensorboardX import SummaryWriter
+ from tqdm import tqdm
+
+ # Only if the files are in the examples folder.
+ BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+ if BASE_DIR[-8:] == 'examples':
+     sys.path.append(os.path.join(BASE_DIR, os.pardir))
+     os.chdir(os.path.join(BASE_DIR, os.pardir))
+
+ from learning3d.models import PRNet
+ from learning3d.data_utils import RegistrationData, ModelNet40Data
+
+ def get_transformations(igt):
+     R_ba = igt[:, 0:3, 0:3]                            # rotation part of    Ps = R_ba @ Pt + t_ba
+     translation_ba = igt[:, 0:3, 3].unsqueeze(2)       # translation part of Ps = R_ba @ Pt + t_ba
+     R_ab = R_ba.permute(0, 2, 1)                       # inverse rotation:   Pt = R_ab @ Ps + t_ab
+     translation_ab = -torch.bmm(R_ab, translation_ba)  # inverse translation
+     return R_ab, translation_ab, R_ba, translation_ba
+
+ def display_open3d(template, source, transformed_source):
+     template_ = o3d.geometry.PointCloud()
+     source_ = o3d.geometry.PointCloud()
+     transformed_source_ = o3d.geometry.PointCloud()
+     template_.points = o3d.utility.Vector3dVector(template)
+     source_.points = o3d.utility.Vector3dVector(source + np.array([0, 0, 0]))
+     transformed_source_.points = o3d.utility.Vector3dVector(transformed_source)
+     template_.paint_uniform_color([1, 0, 0])
+     source_.paint_uniform_color([0, 1, 0])
+     transformed_source_.paint_uniform_color([0, 0, 1])
+     o3d.visualization.draw_geometries([template_, source_, transformed_source_])
+
+ def test_one_epoch(device, model, test_loader):
+     model.eval()
+     test_loss = 0.0
+     count = 0
+     for i, data in enumerate(tqdm(test_loader)):
+         template, source, igt = data
+
+         transformations = get_transformations(igt)
+         transformations = [t.to(device) for t in transformations]
+         R_ab, translation_ab, R_ba, translation_ba = transformations
+
+         template = template.to(device)
+         source = source.to(device)
+         igt = igt.to(device)
+
+         output = model(template, source, R_ab, translation_ab.squeeze(2))
+         display_open3d(template.detach().cpu().numpy()[0], source.detach().cpu().numpy()[0], output['transformed_source'].detach().cpu().numpy()[0])
+
+         test_loss += output['loss'].item()
+         count += 1
+
+     test_loss = float(test_loss) / count
+     return test_loss
+
+ def test(args, model, test_loader):
+     test_loss = test_one_epoch(args.device, model, test_loader)
+
+ def options():
+     parser = argparse.ArgumentParser(description='Point Cloud Registration')
+     parser.add_argument('--exp_name', type=str, default='exp_prnet', metavar='N',
+                         help='Name of the experiment')
+     parser.add_argument('--dataset_path', type=str, default='ModelNet40',
+                         metavar='PATH', help='path to the input dataset')  # like '/path/to/ModelNet40'
+     parser.add_argument('--eval', action='store_true', help='Train or Evaluate the network.')
+
+     # settings for input data
+     parser.add_argument('--dataset_type', default='modelnet', choices=['modelnet', 'shapenet2'],
+                         metavar='DATASET', help='dataset type (default: modelnet)')
+
+     # settings for PRNet
+     parser.add_argument('--emb_dims', default=512, type=int,
+                         metavar='K', help='dim. of the feature vector (default: 512)')
+     parser.add_argument('--num_iterations', default=3, type=int,
+                         help='Number of Iterations')
+
+     parser.add_argument('-j', '--workers', default=4, type=int,
+                         metavar='N', help='number of data loading workers (default: 4)')
+     parser.add_argument('-b', '--batch_size', default=1, type=int,
+                         metavar='N', help='mini-batch size (default: 1)')
+     parser.add_argument('--pretrained', default='learning3d/pretrained/exp_prnet/models/best_model.t7', type=str,
+                         metavar='PATH', help='path to pretrained model file (default: null (no-use))')
+     parser.add_argument('--device', default='cuda:0', type=str,
+                         metavar='DEVICE', help='use CUDA if available')
+
+     args = parser.parse_args()
+     return args
+
+ def main():
+     args = options()
+     torch.backends.cudnn.deterministic = True
+
+     trainset = RegistrationData('PRNet', ModelNet40Data(train=True), partial_source=True, partial_template=True)
+     testset = RegistrationData('PRNet', ModelNet40Data(train=False), partial_source=True, partial_template=True)
+     train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=args.workers)
+     test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=args.workers)
+
+     if not torch.cuda.is_available():
+         args.device = 'cpu'
+     args.device = torch.device(args.device)
+
+     # Create PRNet Model.
+     model = PRNet(emb_dims=args.emb_dims, num_iters=args.num_iterations)
+     model = model.to(args.device)
+
+     if args.pretrained:
+         assert os.path.isfile(args.pretrained)
+         model.load_state_dict(torch.load(args.pretrained, map_location='cpu'), strict=False)
+     model.to(args.device)
+
+     test(args, model, test_loader)
+
+ if __name__ == '__main__':
+     main()
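The get_transformations helper above inverts the ground-truth transform analytically: because R_ba is orthonormal, R_ab = R_ba^T and t_ab = -R_ab t_ba. A small self-contained check of that identity, using a hand-built igt rather than the library's data pipeline:

    # Sketch: verify that the (R_ab, t_ab) derived as above undoes (R_ba, t_ba).
    import torch

    igt = torch.eye(4).unsqueeze(0)                # (1, 4, 4) ground-truth transform
    theta = torch.tensor(0.3)                      # rotate 0.3 rad about z, then translate
    igt[0, 0, 0] = torch.cos(theta); igt[0, 0, 1] = -torch.sin(theta)
    igt[0, 1, 0] = torch.sin(theta); igt[0, 1, 1] = torch.cos(theta)
    igt[0, 0:3, 3] = torch.tensor([0.1, -0.2, 0.05])

    R_ba = igt[:, 0:3, 0:3]
    t_ba = igt[:, 0:3, 3].unsqueeze(2)
    R_ab = R_ba.permute(0, 2, 1)                   # transpose = inverse for a rotation
    t_ab = -torch.bmm(R_ab, t_ba)

    Pt = torch.rand(1, 3, 5)                       # five template points, (B, 3, N) layout
    Ps = torch.bmm(R_ba, Pt) + t_ba                # source cloud = transformed template
    print(torch.allclose(torch.bmm(R_ab, Ps) + t_ab, Pt, atol=1e-6))  # True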
learning3d/examples/test_rpmnet.py
@@ -0,0 +1,120 @@
+ import open3d as o3d
+ import argparse
+ import os
+ import sys
+ import logging
+ import numpy as np
+ import torch
+ import torch.utils.data
+ import torchvision
+ from torch.utils.data import DataLoader
+ from tensorboardX import SummaryWriter
+ from tqdm import tqdm
+
+ # Only if the files are in the examples folder.
+ BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+ if BASE_DIR[-8:] == 'examples':
+     sys.path.append(os.path.join(BASE_DIR, os.pardir))
+     os.chdir(os.path.join(BASE_DIR, os.pardir))
+
+ from learning3d.models import RPMNet, PPFNet
+ from learning3d.losses import FrobeniusNormLoss, RMSEFeaturesLoss
+ from learning3d.data_utils import RegistrationData, ModelNet40Data
+
+ def display_open3d(template, source, transformed_source):
+     template_ = o3d.geometry.PointCloud()
+     source_ = o3d.geometry.PointCloud()
+     transformed_source_ = o3d.geometry.PointCloud()
+     template_.points = o3d.utility.Vector3dVector(template)
+     source_.points = o3d.utility.Vector3dVector(source + np.array([0, 0, 0]))
+     transformed_source_.points = o3d.utility.Vector3dVector(transformed_source)
+     template_.paint_uniform_color([1, 0, 0])
+     source_.paint_uniform_color([0, 1, 0])
+     transformed_source_.paint_uniform_color([0, 0, 1])
+     o3d.visualization.draw_geometries([template_, source_, transformed_source_])
+
+ def test_one_epoch(device, model, test_loader):
+     model.eval()
+     test_loss = 0.0
+     count = 0
+     for i, data in enumerate(tqdm(test_loader)):
+         template, source, igt = data
+
+         template = template.to(device)
+         source = source.to(device)
+         igt = igt.to(device)
+
+         output = model(template, source)
+
+         display_open3d(template.detach().cpu().numpy()[0, :, :3], source.detach().cpu().numpy()[0, :, :3], output['transformed_source'].detach().cpu().numpy()[0])
+         loss_val = FrobeniusNormLoss()(output['est_T'], igt)
+
+         test_loss += loss_val.item()
+         count += 1
+
+     test_loss = float(test_loss) / count
+     return test_loss
+
+ def test(args, model, test_loader):
+     test_loss = test_one_epoch(args.device, model, test_loader)
+
+ def options():
+     parser = argparse.ArgumentParser(description='Point Cloud Registration')
+     parser.add_argument('--exp_name', type=str, default='exp_rpmnet', metavar='N',
+                         help='Name of the experiment')
+     parser.add_argument('--dataset_path', type=str, default='ModelNet40',
+                         metavar='PATH', help='path to the input dataset')  # like '/path/to/ModelNet40'
+     parser.add_argument('--eval', action='store_true', help='Train or Evaluate the network.')
+
+     # settings for input data
+     parser.add_argument('--dataset_type', default='modelnet', choices=['modelnet', 'shapenet2'],
+                         metavar='DATASET', help='dataset type (default: modelnet)')
+     parser.add_argument('--num_points', default=1024, type=int,
+                         metavar='N', help='points in point-cloud (default: 1024)')
+
+     # settings for PointNet
+     parser.add_argument('--emb_dims', default=1024, type=int,
+                         metavar='K', help='dim. of the feature vector (default: 1024)')
+     parser.add_argument('--symfn', default='max', choices=['max', 'avg'],
+                         help='symmetric function (default: max)')
+
+     # settings for training
+     parser.add_argument('--seed', type=int, default=1234)
+     parser.add_argument('-j', '--workers', default=4, type=int,
+                         metavar='N', help='number of data loading workers (default: 4)')
+     parser.add_argument('-b', '--batch_size', default=10, type=int,
+                         metavar='N', help='mini-batch size (default: 10)')
+     parser.add_argument('--pretrained', default='learning3d/pretrained/exp_rpmnet/models/partial-trained.pth', type=str,
+                         metavar='PATH', help='path to pretrained model file (default: null (no-use))')
+     parser.add_argument('--device', default='cuda:0', type=str,
+                         metavar='DEVICE', help='use CUDA if available')
+
+     args = parser.parse_args()
+     return args
+
+ def main():
+     args = options()
+
+     testset = RegistrationData('RPMNet', ModelNet40Data(train=False, num_points=args.num_points, use_normals=True), partial_source=True, partial_template=False)
+     test_loader = DataLoader(testset, batch_size=1, shuffle=False, drop_last=False, num_workers=args.workers)
+
+     if not torch.cuda.is_available():
+         args.device = 'cpu'
+     args.device = torch.device(args.device)
+
+     # Create RPMNet Model.
+     model = RPMNet(feature_model=PPFNet())
+     model = model.to(args.device)
+
+     if args.pretrained:
+         assert os.path.isfile(args.pretrained)
+         model.load_state_dict(torch.load(args.pretrained, map_location='cpu')['state_dict'])
+     model.to(args.device)
+
+     test(args, model, test_loader)
+
+ if __name__ == '__main__':
+     main()
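FrobeniusNormLoss compares the estimated transform output['est_T'] against the ground truth igt. Assuming est_T follows the usual homogeneous [R | t; 0 0 0 1] layout and maps source onto template (which is what 'transformed_source' suggests), such a transform can be applied to a cloud by hand, as in this sketch:

    # Sketch: apply a (B, 4, 4) homogeneous transform to a (B, N, 3) cloud.
    import torch

    def apply_transform(T, points):
        R = T[:, 0:3, 0:3]                         # (B, 3, 3) rotation block
        t = T[:, 0:3, 3].unsqueeze(1)              # (B, 1, 3) translation
        return torch.matmul(points, R.transpose(1, 2)) + t

    T = torch.eye(4).repeat(2, 1, 1)               # identity transforms for two clouds
    cloud = torch.rand(2, 1024, 3)
    print(torch.allclose(apply_transform(T, cloud), cloud))  # True for the identity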
learning3d/examples/train_PointNetLK.py
@@ -0,0 +1,240 @@
+ import argparse
+ import os
+ import sys
+ import logging
+ import numpy as np
+ import torch
+ import torch.utils.data
+ import torchvision
+ from torch.utils.data import DataLoader
+ from tensorboardX import SummaryWriter
+ from tqdm import tqdm
+
+ # Only if the files are in the examples folder.
+ BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+ if BASE_DIR[-8:] == 'examples':
+     sys.path.append(os.path.join(BASE_DIR, os.pardir))
+     os.chdir(os.path.join(BASE_DIR, os.pardir))
+
+ from learning3d.models import PointNet
+ from learning3d.models import PointNetLK
+ from learning3d.losses import FrobeniusNormLoss, RMSEFeaturesLoss
+ from learning3d.data_utils import RegistrationData, ModelNet40Data
+
+ def _init_(args):
+     if not os.path.exists('checkpoints'):
+         os.makedirs('checkpoints')
+     if not os.path.exists('checkpoints/' + args.exp_name):
+         os.makedirs('checkpoints/' + args.exp_name)
+     if not os.path.exists('checkpoints/' + args.exp_name + '/' + 'models'):
+         os.makedirs('checkpoints/' + args.exp_name + '/' + 'models')
+     os.system('cp main.py checkpoints' + '/' + args.exp_name + '/' + 'main.py.backup')
+     os.system('cp model.py checkpoints' + '/' + args.exp_name + '/' + 'model.py.backup')
+
+ class IOStream:
+     def __init__(self, path):
+         self.f = open(path, 'a')
+
+     def cprint(self, text):
+         print(text)
+         self.f.write(text + '\n')
+         self.f.flush()
+
+     def close(self):
+         self.f.close()
+
+ def test_one_epoch(device, model, test_loader):
+     model.eval()
+     test_loss = 0.0
+     count = 0
+     for i, data in enumerate(tqdm(test_loader)):
+         template, source, igt = data
+
+         template = template.to(device)
+         source = source.to(device)
+         igt = igt.to(device)
+
+         output = model(template, source)
+         loss_val = FrobeniusNormLoss()(output['est_T'], igt) + RMSEFeaturesLoss()(output['r'])
+
+         test_loss += loss_val.item()
+         count += 1
+
+     test_loss = float(test_loss) / count
+     return test_loss
+
+ def test(args, model, test_loader, textio):
+     test_loss = test_one_epoch(args.device, model, test_loader)
+     textio.cprint('Validation Loss: %f' % (test_loss))
+
+ def train_one_epoch(device, model, train_loader, optimizer):
+     model.train()
+     train_loss = 0.0
+     count = 0
+     for i, data in enumerate(tqdm(train_loader)):
+         template, source, igt = data
+
+         template = template.to(device)
+         source = source.to(device)
+         igt = igt.to(device)
+
+         output = model(template, source)
+         loss_val = FrobeniusNormLoss()(output['est_T'], igt) + RMSEFeaturesLoss()(output['r'])
+
+         # forward + backward + optimize
+         optimizer.zero_grad()
+         loss_val.backward()
+         optimizer.step()
+
+         train_loss += loss_val.item()
+         count += 1
+
+     train_loss = float(train_loss) / count
+     return train_loss
+
+ def train(args, model, train_loader, test_loader, boardio, textio, checkpoint):
+     learnable_params = filter(lambda p: p.requires_grad, model.parameters())
+     if args.optimizer == 'Adam':
+         optimizer = torch.optim.Adam(learnable_params)
+     else:
+         optimizer = torch.optim.SGD(learnable_params, lr=0.1)
+
+     best_test_loss = np.inf
+     if checkpoint is not None:
+         best_test_loss = checkpoint['min_loss']
+         optimizer.load_state_dict(checkpoint['optimizer'])
+
+     for epoch in range(args.start_epoch, args.epochs):
+         train_loss = train_one_epoch(args.device, model, train_loader, optimizer)
+         test_loss = test_one_epoch(args.device, model, test_loader)
+
+         is_best = test_loss < best_test_loss
+         if is_best:
+             best_test_loss = test_loss
+
+         snap = {'epoch': epoch + 1,
+                 'model': model.state_dict(),
+                 'min_loss': best_test_loss,
+                 'optimizer': optimizer.state_dict()}
+         if is_best:
+             torch.save(snap, 'checkpoints/%s/models/best_model_snap.t7' % (args.exp_name))
+             torch.save(model.state_dict(), 'checkpoints/%s/models/best_model.t7' % (args.exp_name))
+             torch.save(model.feature_model.state_dict(), 'checkpoints/%s/models/best_ptnet_model.t7' % (args.exp_name))
+
+         torch.save(snap, 'checkpoints/%s/models/model_snap.t7' % (args.exp_name))
+         torch.save(model.state_dict(), 'checkpoints/%s/models/model.t7' % (args.exp_name))
+         torch.save(model.feature_model.state_dict(), 'checkpoints/%s/models/ptnet_model.t7' % (args.exp_name))
+
+         boardio.add_scalar('Train Loss', train_loss, epoch + 1)
+         boardio.add_scalar('Test Loss', test_loss, epoch + 1)
+         boardio.add_scalar('Best Test Loss', best_test_loss, epoch + 1)
+
+         textio.cprint('EPOCH:: %d, Training Loss: %f, Testing Loss: %f, Best Loss: %f' % (epoch + 1, train_loss, test_loss, best_test_loss))
+
+ def options():
+     parser = argparse.ArgumentParser(description='Point Cloud Registration')
+     parser.add_argument('--exp_name', type=str, default='exp_pnlk', metavar='N',
+                         help='Name of the experiment')
+     parser.add_argument('--dataset_path', type=str, default='ModelNet40',
+                         metavar='PATH', help='path to the input dataset')  # like '/path/to/ModelNet40'
+     parser.add_argument('--eval', action='store_true', help='Train or Evaluate the network.')
+
+     # settings for input data
+     parser.add_argument('--dataset_type', default='modelnet', choices=['modelnet', 'shapenet2'],
+                         metavar='DATASET', help='dataset type (default: modelnet)')
+     parser.add_argument('--num_points', default=1024, type=int,
+                         metavar='N', help='points in point-cloud (default: 1024)')
+
+     # settings for PointNet
+     parser.add_argument('--fine_tune_pointnet', default='tune', type=str, choices=['fixed', 'tune'],
+                         help='train pointnet (default: tune)')
+     parser.add_argument('--transfer_ptnet_weights', default='./checkpoints/exp_classifier/models/best_ptnet_model.t7', type=str,
+                         metavar='PATH', help='path to pointnet features file')
+     parser.add_argument('--emb_dims', default=1024, type=int,
+                         metavar='K', help='dim. of the feature vector (default: 1024)')
+     parser.add_argument('--symfn', default='max', choices=['max', 'avg'],
+                         help='symmetric function (default: max)')
+
+     # settings for training
+     parser.add_argument('--seed', type=int, default=1234)
+     parser.add_argument('-j', '--workers', default=4, type=int,
+                         metavar='N', help='number of data loading workers (default: 4)')
+     parser.add_argument('-b', '--batch_size', default=10, type=int,
+                         metavar='N', help='mini-batch size (default: 10)')
+     parser.add_argument('--epochs', default=200, type=int,
+                         metavar='N', help='number of total epochs to run')
+     parser.add_argument('--start_epoch', default=0, type=int,
+                         metavar='N', help='manual epoch number (useful on restarts)')
+     parser.add_argument('--optimizer', default='Adam', choices=['Adam', 'SGD'],
+                         metavar='METHOD', help='name of an optimizer (default: Adam)')
+     parser.add_argument('--resume', default='', type=str,
+                         metavar='PATH', help='path to latest checkpoint (default: null (no-use))')
+     parser.add_argument('--pretrained', default='', type=str,
+                         metavar='PATH', help='path to pretrained model file (default: null (no-use))')
+     parser.add_argument('--device', default='cuda:0', type=str,
+                         metavar='DEVICE', help='use CUDA if available')
+
+     args = parser.parse_args()
+     return args
+
+ def main():
+     args = options()
+
+     torch.backends.cudnn.deterministic = True
+     torch.manual_seed(args.seed)
+     torch.cuda.manual_seed_all(args.seed)
+     np.random.seed(args.seed)
+
+     boardio = SummaryWriter(log_dir='checkpoints/' + args.exp_name)
+     _init_(args)
+
+     textio = IOStream('checkpoints/' + args.exp_name + '/run.log')
+     textio.cprint(str(args))
+
+     trainset = RegistrationData('PointNetLK', ModelNet40Data(train=True))
+     testset = RegistrationData('PointNetLK', ModelNet40Data(train=False))
+     train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=args.workers)
+     test_loader = DataLoader(testset, batch_size=8, shuffle=False, drop_last=False, num_workers=args.workers)
+
+     if not torch.cuda.is_available():
+         args.device = 'cpu'
+     args.device = torch.device(args.device)
+
+     # Create PointNet Model.
+     ptnet = PointNet(emb_dims=args.emb_dims, use_bn=True)
+
+     if args.transfer_ptnet_weights and os.path.isfile(args.transfer_ptnet_weights):
+         ptnet.load_state_dict(torch.load(args.transfer_ptnet_weights, map_location='cpu'))
+
+     if args.fine_tune_pointnet == 'tune':
+         pass
+     elif args.fine_tune_pointnet == 'fixed':
+         for param in ptnet.parameters():
+             param.requires_grad_(False)
+
+     model = PointNetLK(feature_model=ptnet)
+     model = model.to(args.device)
+
+     checkpoint = None
+     if args.resume:
+         assert os.path.isfile(args.resume)
+         checkpoint = torch.load(args.resume)
+         args.start_epoch = checkpoint['epoch']
+         model.load_state_dict(checkpoint['model'])
+
+     if args.pretrained:
+         assert os.path.isfile(args.pretrained)
+         model.load_state_dict(torch.load(args.pretrained, map_location='cpu'))
+     model.to(args.device)
+
+     if args.eval:
+         test(args, model, test_loader, textio)
+     else:
+         train(args, model, train_loader, test_loader, boardio, textio, checkpoint)
+
+ if __name__ == '__main__':
+     main()
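The snapshots written by train() above are plain dictionaries with the keys 'epoch', 'model', 'min_loss' and 'optimizer'. A sketch of inspecting one and resuming from it (the path assumes the default --exp_name of exp_pnlk):

    # Sketch: read back a training snapshot saved by this script.
    import torch

    snap = torch.load('checkpoints/exp_pnlk/models/model_snap.t7', map_location='cpu')
    print(snap['epoch'], snap['min_loss'])          # where training left off
    # model.load_state_dict(snap['model'])          # restore network weights
    # optimizer.load_state_dict(snap['optimizer'])  # restore optimizer state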