learning3d 0.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (115)
  1. learning3d/__init__.py +2 -0
  2. learning3d/data_utils/__init__.py +4 -0
  3. learning3d/data_utils/dataloaders.py +454 -0
  4. learning3d/data_utils/user_data.py +119 -0
  5. learning3d/examples/test_dcp.py +139 -0
  6. learning3d/examples/test_deepgmr.py +144 -0
  7. learning3d/examples/test_flownet.py +113 -0
  8. learning3d/examples/test_masknet.py +159 -0
  9. learning3d/examples/test_masknet2.py +162 -0
  10. learning3d/examples/test_pcn.py +118 -0
  11. learning3d/examples/test_pcrnet.py +120 -0
  12. learning3d/examples/test_pnlk.py +121 -0
  13. learning3d/examples/test_pointconv.py +126 -0
  14. learning3d/examples/test_pointnet.py +121 -0
  15. learning3d/examples/test_prnet.py +126 -0
  16. learning3d/examples/test_rpmnet.py +120 -0
  17. learning3d/examples/train_PointNetLK.py +240 -0
  18. learning3d/examples/train_dcp.py +249 -0
  19. learning3d/examples/train_deepgmr.py +244 -0
  20. learning3d/examples/train_flownet.py +259 -0
  21. learning3d/examples/train_masknet.py +239 -0
  22. learning3d/examples/train_pcn.py +216 -0
  23. learning3d/examples/train_pcrnet.py +228 -0
  24. learning3d/examples/train_pointconv.py +245 -0
  25. learning3d/examples/train_pointnet.py +244 -0
  26. learning3d/examples/train_prnet.py +229 -0
  27. learning3d/examples/train_rpmnet.py +228 -0
  28. learning3d/losses/__init__.py +12 -0
  29. learning3d/losses/chamfer_distance.py +51 -0
  30. learning3d/losses/classification.py +14 -0
  31. learning3d/losses/correspondence_loss.py +10 -0
  32. learning3d/losses/cuda/chamfer_distance/__init__.py +1 -0
  33. learning3d/losses/cuda/chamfer_distance/chamfer_distance.cpp +185 -0
  34. learning3d/losses/cuda/chamfer_distance/chamfer_distance.cu +209 -0
  35. learning3d/losses/cuda/chamfer_distance/chamfer_distance.py +66 -0
  36. learning3d/losses/cuda/emd_torch/pkg/emd_loss_layer.py +41 -0
  37. learning3d/losses/cuda/emd_torch/pkg/include/cuda/emd.cuh +347 -0
  38. learning3d/losses/cuda/emd_torch/pkg/include/cuda_helper.h +18 -0
  39. learning3d/losses/cuda/emd_torch/pkg/include/emd.h +54 -0
  40. learning3d/losses/cuda/emd_torch/pkg/layer/__init__.py +1 -0
  41. learning3d/losses/cuda/emd_torch/pkg/layer/emd_loss_layer.py +40 -0
  42. learning3d/losses/cuda/emd_torch/pkg/src/cuda/emd.cu +70 -0
  43. learning3d/losses/cuda/emd_torch/pkg/src/emd.cpp +1 -0
  44. learning3d/losses/cuda/emd_torch/setup.py +29 -0
  45. learning3d/losses/emd.py +16 -0
  46. learning3d/losses/frobenius_norm.py +21 -0
  47. learning3d/losses/rmse_features.py +16 -0
  48. learning3d/models/__init__.py +23 -0
  49. learning3d/models/classifier.py +41 -0
  50. learning3d/models/dcp.py +92 -0
  51. learning3d/models/deepgmr.py +165 -0
  52. learning3d/models/dgcnn.py +92 -0
  53. learning3d/models/flownet3d.py +446 -0
  54. learning3d/models/masknet.py +84 -0
  55. learning3d/models/masknet2.py +264 -0
  56. learning3d/models/pcn.py +164 -0
  57. learning3d/models/pcrnet.py +74 -0
  58. learning3d/models/pointconv.py +108 -0
  59. learning3d/models/pointnet.py +108 -0
  60. learning3d/models/pointnetlk.py +173 -0
  61. learning3d/models/pooling.py +15 -0
  62. learning3d/models/ppfnet.py +102 -0
  63. learning3d/models/prnet.py +431 -0
  64. learning3d/models/rpmnet.py +359 -0
  65. learning3d/models/segmentation.py +38 -0
  66. learning3d/ops/__init__.py +0 -0
  67. learning3d/ops/data_utils.py +45 -0
  68. learning3d/ops/invmat.py +134 -0
  69. learning3d/ops/quaternion.py +218 -0
  70. learning3d/ops/se3.py +157 -0
  71. learning3d/ops/sinc.py +229 -0
  72. learning3d/ops/so3.py +213 -0
  73. learning3d/ops/transform_functions.py +342 -0
  74. learning3d/utils/__init__.py +9 -0
  75. learning3d/utils/lib/build/lib.linux-x86_64-3.5/pointnet2_cuda.cpython-35m-x86_64-linux-gnu.so +0 -0
  76. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/ball_query.o +0 -0
  77. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/ball_query_gpu.o +0 -0
  78. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/group_points.o +0 -0
  79. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/group_points_gpu.o +0 -0
  80. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/interpolate.o +0 -0
  81. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/interpolate_gpu.o +0 -0
  82. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/pointnet2_api.o +0 -0
  83. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/sampling.o +0 -0
  84. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/sampling_gpu.o +0 -0
  85. learning3d/utils/lib/dist/pointnet2-0.0.0-py3.5-linux-x86_64.egg +0 -0
  86. learning3d/utils/lib/pointnet2.egg-info/SOURCES.txt +14 -0
  87. learning3d/utils/lib/pointnet2.egg-info/dependency_links.txt +1 -0
  88. learning3d/utils/lib/pointnet2.egg-info/top_level.txt +1 -0
  89. learning3d/utils/lib/pointnet2_modules.py +160 -0
  90. learning3d/utils/lib/pointnet2_utils.py +318 -0
  91. learning3d/utils/lib/pytorch_utils.py +236 -0
  92. learning3d/utils/lib/setup.py +23 -0
  93. learning3d/utils/lib/src/ball_query.cpp +25 -0
  94. learning3d/utils/lib/src/ball_query_gpu.cu +67 -0
  95. learning3d/utils/lib/src/ball_query_gpu.h +15 -0
  96. learning3d/utils/lib/src/cuda_utils.h +15 -0
  97. learning3d/utils/lib/src/group_points.cpp +36 -0
  98. learning3d/utils/lib/src/group_points_gpu.cu +86 -0
  99. learning3d/utils/lib/src/group_points_gpu.h +22 -0
  100. learning3d/utils/lib/src/interpolate.cpp +65 -0
  101. learning3d/utils/lib/src/interpolate_gpu.cu +233 -0
  102. learning3d/utils/lib/src/interpolate_gpu.h +36 -0
  103. learning3d/utils/lib/src/pointnet2_api.cpp +25 -0
  104. learning3d/utils/lib/src/sampling.cpp +46 -0
  105. learning3d/utils/lib/src/sampling_gpu.cu +253 -0
  106. learning3d/utils/lib/src/sampling_gpu.h +29 -0
  107. learning3d/utils/pointconv_util.py +382 -0
  108. learning3d/utils/ppfnet_util.py +244 -0
  109. learning3d/utils/svd.py +59 -0
  110. learning3d/utils/transformer.py +243 -0
  111. learning3d-0.0.1.dist-info/LICENSE +21 -0
  112. learning3d-0.0.1.dist-info/METADATA +271 -0
  113. learning3d-0.0.1.dist-info/RECORD +115 -0
  114. learning3d-0.0.1.dist-info/WHEEL +5 -0
  115. learning3d-0.0.1.dist-info/top_level.txt +1 -0
learning3d/losses/cuda/emd_torch/pkg/include/emd.h
@@ -0,0 +1,54 @@
+ #ifndef EMD_H_
+ #define EMD_H_
+
+ #include <torch/extension.h>
+ #include <vector>
+
+ #include "cuda_helper.h"
+
+
+ std::vector<at::Tensor> emd_forward_cuda(
+     at::Tensor xyz1,
+     at::Tensor xyz2);
+
+ std::vector<at::Tensor> emd_backward_cuda(
+     at::Tensor xyz1,
+     at::Tensor xyz2,
+     at::Tensor match);
+
+ // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ // CALL FUNCTION IMPLEMENTATIONS
+ // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+
+ std::vector<at::Tensor> emd_forward(
+     at::Tensor xyz1,
+     at::Tensor xyz2)
+ {
+     CHECK_INPUT(xyz1);
+     CHECK_INPUT(xyz2);
+
+     return emd_forward_cuda(xyz1, xyz2);
+ }
+
+ std::vector<at::Tensor> emd_backward(
+     at::Tensor xyz1,
+     at::Tensor xyz2,
+     at::Tensor match)
+ {
+     CHECK_INPUT(xyz1);
+     CHECK_INPUT(xyz2);
+     CHECK_INPUT(match);
+
+     return emd_backward_cuda(xyz1, xyz2, match);
+ }
+
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+     m.def("emd_forward", &emd_forward, "Compute Earth Mover's Distance");
+     m.def("emd_backward", &emd_backward, "Compute Gradients for Earth Mover's Distance");
+ }
+
+ #endif
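Once compiled, the PYBIND11_MODULE above is importable from Python as _emd_ext._emd (the ext_package/name pair set in the setup script further below). A minimal sketch of calling the raw bindings directly, assuming a CUDA build and an available CUDA device:

    import torch
    import _emd_ext._emd as emd

    # CHECK_INPUT expects contiguous CUDA tensors; shapes follow B x N x D.
    xyz1 = torch.rand(2, 128, 3, device='cuda')
    xyz2 = torch.rand(2, 128, 3, device='cuda')

    cost, match = emd.emd_forward(xyz1, xyz2)        # cost: (2,), match: (2, 128, 128)
    grad1, grad2 = emd.emd_backward(xyz1, xyz2, match)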
learning3d/losses/cuda/emd_torch/pkg/layer/__init__.py
@@ -0,0 +1 @@
+ from .emd_loss_layer import EMDLoss
learning3d/losses/cuda/emd_torch/pkg/layer/emd_loss_layer.py
@@ -0,0 +1,40 @@
+ import torch
+ import torch.nn as nn
+
+ import _emd_ext._emd as emd
+
+
+ class EMDFunction(torch.autograd.Function):
+     @staticmethod
+     def forward(ctx, xyz1, xyz2):
+         cost, match = emd.emd_forward(xyz1, xyz2)
+         ctx.save_for_backward(xyz1, xyz2, match)
+         return cost
+
+     @staticmethod
+     def backward(ctx, grad_output):
+         xyz1, xyz2, match = ctx.saved_tensors
+         grad_xyz1, grad_xyz2 = emd.emd_backward(xyz1, xyz2, match)
+         return grad_xyz1, grad_xyz2
+
+
+ class EMDLoss(nn.Module):
+     '''
+     Computes the (approximate) Earth Mover's Distance between two point sets.
+
+     IMPLEMENTATION LIMITATIONS:
+     - Double tensors must have <= 11 dimensions
+     - Float tensors must have <= 23 dimensions
+     This is due to the use of CUDA shared memory in the computation;
+     shared memory is limited by the hardware to 48 kB.
+     '''
+
+     def __init__(self):
+         super(EMDLoss, self).__init__()
+
+     def forward(self, xyz1, xyz2):
+         assert xyz1.shape[-1] == xyz2.shape[-1], 'Both point sets must have the same dimensionality'
+         return EMDFunction.apply(xyz1, xyz2)
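For reference, a minimal usage sketch of this autograd wrapper (hypothetical shapes; assumes the extension is built and a CUDA device is available):

    import torch
    from emd import EMDLoss

    emd_loss = EMDLoss()
    xyz1 = torch.rand(8, 1024, 3, device='cuda', requires_grad=True)
    xyz2 = torch.rand(8, 1024, 3, device='cuda')

    cost = emd_loss(xyz1, xyz2)   # per-cloud matching cost, shape (8,)
    cost.mean().backward()        # gradients reach xyz1 via emd_backward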
learning3d/losses/cuda/emd_torch/pkg/src/cuda/emd.cu
@@ -0,0 +1,70 @@
+ #include <ATen/ATen.h>
+
+ #include <vector>
+
+ #include "cuda/emd.cuh"
+
+
+ std::vector<at::Tensor> emd_forward_cuda(
+     at::Tensor xyz1,  // B x N1 x D
+     at::Tensor xyz2)  // B x N2 x D
+ {
+     // Some useful values
+     const int batch_size = xyz1.size(0);
+     const int num_pts_1 = xyz1.size(1);
+     const int num_pts_2 = xyz2.size(1);
+
+     // Allocate necessary data structures
+     at::Tensor match = at::zeros({batch_size, num_pts_1, num_pts_2},
+                                  xyz1.options());
+     at::Tensor cost = at::zeros({batch_size}, xyz1.options());
+     at::Tensor temp = at::zeros({batch_size, 2 * (num_pts_1 + num_pts_2)},
+                                 xyz1.options());
+
+     // Find the approximate matching
+     approxmatchLauncher(
+         batch_size, num_pts_1, num_pts_2,
+         xyz1,
+         xyz2,
+         match,
+         temp
+     );
+
+     // Compute the matching cost
+     matchcostLauncher(
+         batch_size, num_pts_1, num_pts_2,
+         xyz1,
+         xyz2,
+         match,
+         cost
+     );
+
+     return {cost, match};
+ }
+
+ std::vector<at::Tensor> emd_backward_cuda(
+     at::Tensor xyz1,
+     at::Tensor xyz2,
+     at::Tensor match)
+ {
+     // Some useful values
+     const int batch_size = xyz1.size(0);
+     const int num_pts_1 = xyz1.size(1);
+     const int num_pts_2 = xyz2.size(1);
+
+     // Allocate necessary data structures
+     at::Tensor grad_xyz1 = at::zeros_like(xyz1);
+     at::Tensor grad_xyz2 = at::zeros_like(xyz2);
+
+     // Compute the gradient with respect to the two inputs (xyz1 and xyz2)
+     matchcostgradLauncher(
+         batch_size, num_pts_1, num_pts_2,
+         xyz1,
+         xyz2,
+         match,
+         grad_xyz1,
+         grad_xyz2
+     );
+
+     return {grad_xyz1, grad_xyz2};
+ }
learning3d/losses/cuda/emd_torch/pkg/src/emd.cpp
@@ -0,0 +1 @@
+ #include "emd.h"
learning3d/losses/cuda/emd_torch/setup.py
@@ -0,0 +1,29 @@
+ from setuptools import setup
+ from torch.utils.cpp_extension import BuildExtension, CUDAExtension
+
+
+ setup(
+     name='PyTorch EMD',
+     version='0.0',
+     author='Vinit Sarode',
+     author_email='vinitsarode5@gmail.com',
+     description='A PyTorch module for the earth mover\'s distance loss',
+     ext_package='_emd_ext',
+     ext_modules=[
+         CUDAExtension(
+             name='_emd',
+             sources=[
+                 'pkg/src/emd.cpp',
+                 'pkg/src/cuda/emd.cu',
+             ],
+             include_dirs=['pkg/include'],
+         ),
+     ],
+     packages=[
+         'emd',
+     ],
+     package_dir={
+         'emd': 'pkg/layer'
+     },
+     cmdclass={'build_ext': BuildExtension},
+ )
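The extension is built with the standard setuptools workflow (python setup.py install, run from the emd_torch directory). A quick post-install smoke test, assuming the build succeeded:

    import torch
    import _emd_ext._emd            # the compiled CUDA extension
    from emd import EMDLoss         # the Python wrapper from pkg/layer

    loss = EMDLoss()
    print(isinstance(loss, torch.nn.Module))  # True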
learning3d/losses/emd.py
@@ -0,0 +1,16 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ def emd(template: torch.Tensor, source: torch.Tensor):
+     # Imported locally so the rest of the package works without the CUDA extension.
+     from emd import EMDLoss as CudaEMDLoss
+     # Average the per-cloud matching cost over the batch, normalized by point count.
+     # (The original called self.emd inside this free function, which is undefined.)
+     emd_loss = torch.mean(CudaEMDLoss()(template, source)) / template.size()[1]
+     return emd_loss
+
+
+ class EMDLoss(nn.Module):
+     def __init__(self):
+         super(EMDLoss, self).__init__()
+
+     def forward(self, template, source):
+         return emd(template, source)
learning3d/losses/frobenius_norm.py
@@ -0,0 +1,21 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ def frobeniusNormLoss(predicted, igt):
+     """ |predicted*igt - I| (should be 0) """
+     assert predicted.size(0) == igt.size(0)
+     assert predicted.size(1) == igt.size(1) and predicted.size(1) == 4
+     assert predicted.size(2) == igt.size(2) and predicted.size(2) == 4
+
+     error = predicted.matmul(igt)
+     I = torch.eye(4).to(error).view(1, 4, 4).expand(error.size(0), 4, 4)
+     # reduction='mean' replaces the deprecated size_average=True; the factor
+     # of 16 restores the sum over the 4x4 matrix entries.
+     return F.mse_loss(error, I, reduction='mean') * 16
+
+
+ class FrobeniusNormLoss(nn.Module):
+     def __init__(self):
+         super(FrobeniusNormLoss, self).__init__()
+
+     def forward(self, predicted, igt):
+         return frobeniusNormLoss(predicted, igt)
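A minimal sanity check (hypothetical values; predicted is the exact inverse of igt, so the loss should be near zero; assumes losses/__init__.py re-exports the class):

    import torch
    from learning3d.losses import FrobeniusNormLoss  # assuming the package re-export

    igt = torch.eye(4).unsqueeze(0).repeat(5, 1, 1)  # B x 4 x 4 ground-truth transforms
    igt[:, :3, 3] = torch.rand(5, 3)                 # random translations
    predicted = torch.inverse(igt)                   # predicted.matmul(igt) = I

    print(FrobeniusNormLoss()(predicted, igt).item())  # ~0.0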
learning3d/losses/rmse_features.py
@@ -0,0 +1,16 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ def rmseOnFeatures(feature_difference):
+     # |feature_difference| should be 0
+     gt = torch.zeros_like(feature_difference)
+     # reduction='sum' replaces the deprecated size_average=False.
+     return F.mse_loss(feature_difference, gt, reduction='sum')
+
+
+ class RMSEFeaturesLoss(nn.Module):
+     def __init__(self):
+         super(RMSEFeaturesLoss, self).__init__()
+
+     def forward(self, feature_difference):
+         return rmseOnFeatures(feature_difference)
learning3d/models/__init__.py
@@ -0,0 +1,23 @@
+ from .pointnet import PointNet
+ from .pointconv import create_pointconv
+ from .dgcnn import DGCNN
+ from .ppfnet import PPFNet
+ from .pooling import Pooling
+
+ from .classifier import Classifier
+ from .segmentation import Segmentation
+
+ from .dcp import DCP
+ from .prnet import PRNet
+ from .pcrnet import iPCRNet
+ from .pointnetlk import PointNetLK
+ from .rpmnet import RPMNet
+ from .pcn import PCN
+ from .deepgmr import DeepGMR
+ from .masknet import MaskNet
+ from .masknet2 import MaskNet2
+
+ try:
+     from .flownet3d import FlowNet3D
+ except Exception:
+     print("Error raised in pointnet2 module for FlowNet3D Network!\nEither don't use pointnet2_utils or retry its setup.")
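Because FlowNet3D depends on the compiled pointnet2 CUDA ops, downstream code may want the same defensive pattern. A sketch of hypothetical consumer code:

    from learning3d.models import PointNet, DCP, MaskNet  # always importable

    try:
        from learning3d.models import FlowNet3D  # requires the pointnet2 extension
    except ImportError:
        FlowNet3D = None  # fall back / skip scene-flow estimation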
learning3d/models/classifier.py
@@ -0,0 +1,41 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from .pooling import Pooling
+
+ class Classifier(nn.Module):
+     def __init__(self, feature_model, num_classes=40):
+         super(Classifier, self).__init__()
+         self.feature_model = feature_model
+         self.num_classes = num_classes
+
+         self.linear1 = torch.nn.Linear(self.feature_model.emb_dims, 512)
+         self.bn1 = torch.nn.BatchNorm1d(512)
+         self.dropout1 = torch.nn.Dropout(p=0.7)
+         self.linear2 = torch.nn.Linear(512, 256)
+         self.bn2 = torch.nn.BatchNorm1d(256)
+         self.dropout2 = torch.nn.Dropout(p=0.7)
+         self.linear3 = torch.nn.Linear(256, self.num_classes)
+
+         self.pooling = Pooling('max')
+
+     def forward(self, input_data):
+         output = self.pooling(self.feature_model(input_data))
+         output = F.relu(self.bn1(self.linear1(output)))
+         output = self.dropout1(output)
+         output = F.relu(self.bn2(self.linear2(output)))
+         output = self.dropout2(output)
+         output = self.linear3(output)
+         return output
+
+
+ if __name__ == '__main__':
+     from pointnet import PointNet
+     x = torch.rand(10, 1024, 3)
+
+     pn = PointNet()
+     classifier = Classifier(pn)
+     classes = classifier(x)
+
+     print('Input Shape: {}\nClassification Output Shape: {}'
+           .format(x.shape, classes.shape))
learning3d/models/dcp.py
@@ -0,0 +1,92 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from .dgcnn import DGCNN
+ from .pointnet import PointNet
+ from .. ops import transform_functions as transform
+ from .. utils import Transformer, SVDHead, Identity
+
+
+ def quat2mat(quat):
+     # Helper added during editing: the original file called quat2mat without
+     # defining or importing it. Standard unit-quaternion (w, x, y, z) to
+     # rotation-matrix conversion; the ordering convention is an assumption.
+     w, x, y, z = quat[:, 0], quat[:, 1], quat[:, 2], quat[:, 3]
+     B = quat.size(0)
+     w2, x2, y2, z2 = w * w, x * x, y * y, z * z
+     wx, wy, wz = w * x, w * y, w * z
+     xy, xz, yz = x * y, x * z, y * z
+     return torch.stack([w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz,
+                         2 * wz + 2 * xy, w2 - x2 + y2 - z2, 2 * yz - 2 * wx,
+                         2 * xz - 2 * wy, 2 * wx + 2 * yz, w2 - x2 - y2 + z2],
+                        dim=1).reshape(B, 3, 3)
+
+
+ class DCP(nn.Module):
+     def __init__(self, feature_model=DGCNN(), cycle=False, pointer_='transformer', head='svd'):
+         super(DCP, self).__init__()
+         self.cycle = cycle
+         self.emb_nn = feature_model
+
+         if pointer_ == 'identity':
+             self.pointer = Identity()
+         elif pointer_ == 'transformer':
+             self.pointer = Transformer(self.emb_nn.emb_dims, n_blocks=1, dropout=0.0, ff_dims=1024, n_heads=4)
+         else:
+             raise Exception("Not implemented")
+
+         if head == 'mlp':
+             self.head = MLPHead(self.emb_nn.emb_dims)
+         elif head == 'svd':
+             self.head = SVDHead(self.emb_nn.emb_dims)
+         else:
+             raise Exception('Not implemented')
+
+     def forward(self, template, source):
+         source_features = self.emb_nn(source)
+         template_features = self.emb_nn(template)
+
+         source_features_p, template_features_p = self.pointer(source_features, template_features)
+
+         source_features = source_features + source_features_p
+         template_features = template_features + template_features_p
+
+         rotation_ab, translation_ab = self.head(source_features, template_features, source, template)
+         if self.cycle:
+             rotation_ba, translation_ba = self.head(template_features, source_features, template, source)
+         else:
+             rotation_ba = rotation_ab.transpose(2, 1).contiguous()
+             translation_ba = -torch.matmul(rotation_ba, translation_ab.unsqueeze(2)).squeeze(2)
+
+         transformed_source = transform.transform_point_cloud(source, rotation_ab, translation_ab)
+
+         result = {'est_R': rotation_ab,
+                   'est_t': translation_ab,
+                   'est_R_': rotation_ba,
+                   'est_t_': translation_ba,
+                   'est_T': transform.convert2transformation(rotation_ab, translation_ab),
+                   'r': template_features - source_features,
+                   'transformed_source': transformed_source}
+         return result
+
+
+ class MLPHead(nn.Module):
+     def __init__(self, emb_dims):
+         super(MLPHead, self).__init__()
+         self.emb_dims = emb_dims
+         self.nn = nn.Sequential(nn.Linear(emb_dims * 2, emb_dims // 2),
+                                 nn.BatchNorm1d(emb_dims // 2),
+                                 nn.ReLU(),
+                                 nn.Linear(emb_dims // 2, emb_dims // 4),
+                                 nn.BatchNorm1d(emb_dims // 4),
+                                 nn.ReLU(),
+                                 nn.Linear(emb_dims // 4, emb_dims // 8),
+                                 nn.BatchNorm1d(emb_dims // 8),
+                                 nn.ReLU())
+         self.proj_rot = nn.Linear(emb_dims // 8, 4)
+         self.proj_trans = nn.Linear(emb_dims // 8, 3)
+
+     def forward(self, *input):
+         src_embedding = input[0]
+         tgt_embedding = input[1]
+         embedding = torch.cat((src_embedding, tgt_embedding), dim=1)
+         embedding = self.nn(embedding.max(dim=-1)[0])
+         rotation = self.proj_rot(embedding)
+         rotation = rotation / torch.norm(rotation, p=2, dim=1, keepdim=True)
+         translation = self.proj_trans(embedding)
+         return quat2mat(rotation), translation
+
+
+ if __name__ == '__main__':
+     template, source = torch.rand(10, 1024, 3), torch.rand(10, 1024, 3)
+     pn = PointNet()
+
+     # Not tested yet.
+     net = DCP(pn)
+     result = net(template, source)
+     import ipdb; ipdb.set_trace()
learning3d/models/deepgmr.py
@@ -0,0 +1,165 @@
+ '''
+ We thank the authors of the DeepGMR paper for open-sourcing their code.
+ Modified by Vinit Sarode.
+ '''
+
+ import math
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from .. ops import transform_functions as transform
+
+
+ def gmm_params(gamma, pts):
+     '''
+     Inputs:
+         gamma: B x N x J
+         pts: B x N x 3
+     '''
+     # pi: B x J
+     pi = gamma.mean(dim=1)
+     Npi = pi * gamma.shape[1]
+     # mu: B x J x 3
+     mu = gamma.transpose(1, 2) @ pts / Npi.unsqueeze(2)
+     # diff: B x N x J x 3
+     diff = pts.unsqueeze(2) - mu.unsqueeze(1)
+     # sigma: B x J x 3 x 3 (isotropic); squeeze only the trailing singleton
+     # dims so a batch size of 1 is not collapsed accidentally.
+     eye = torch.eye(3).unsqueeze(0).unsqueeze(1).to(gamma.device)
+     sigma = (
+         ((diff.unsqueeze(3) @ diff.unsqueeze(4)).squeeze(-1).squeeze(-1) * gamma).sum(dim=1) / Npi
+     ).unsqueeze(2).unsqueeze(3) * eye
+     return pi, mu, sigma
+
+
+ def gmm_register(pi_s, mu_s, mu_t, sigma_t):
+     '''
+     Inputs:
+         pi_s: B x J
+         mu_s, mu_t: B x J x 3
+         sigma_t: B x J x 3 x 3
+     '''
+     c_s = pi_s.unsqueeze(1) @ mu_s
+     c_t = pi_s.unsqueeze(1) @ mu_t
+     Ms = torch.sum((pi_s.unsqueeze(2) * (mu_s - c_s)).unsqueeze(3) @
+                    (mu_t - c_t).unsqueeze(2) @ sigma_t.inverse(), dim=1)
+     U, _, V = torch.svd(Ms.cpu())
+     # Move the SVD factors back to the input's device rather than
+     # unconditionally to CUDA, so CPU-only inputs keep working.
+     U = U.to(Ms.device)
+     V = V.to(Ms.device)
+     S = torch.eye(3).unsqueeze(0).repeat(U.shape[0], 1, 1).to(U.device)
+     S[:, 2, 2] = torch.det(V @ U.transpose(1, 2))
+     R = V @ S @ U.transpose(1, 2)
+     t = c_t.transpose(1, 2) - R @ c_s.transpose(1, 2)
+     bot_row = torch.Tensor([[[0, 0, 0, 1]]]).repeat(R.shape[0], 1, 1).to(R.device)
+     T = torch.cat([torch.cat([R, t], dim=2), bot_row], dim=1)
+     return T
+
+
+ class Conv1dBNReLU(nn.Sequential):
+     def __init__(self, in_planes, out_planes):
+         super(Conv1dBNReLU, self).__init__(
+             nn.Conv1d(in_planes, out_planes, kernel_size=1, bias=False),
+             nn.BatchNorm1d(out_planes),
+             nn.ReLU(inplace=True))
+
+
+ class FCBNReLU(nn.Sequential):
+     def __init__(self, in_planes, out_planes):
+         super(FCBNReLU, self).__init__(
+             nn.Linear(in_planes, out_planes, bias=False),
+             nn.BatchNorm1d(out_planes),
+             nn.ReLU(inplace=True))
+
+
+ class TNet(nn.Module):
+     def __init__(self):
+         super(TNet, self).__init__()
+         self.encoder = nn.Sequential(
+             Conv1dBNReLU(3, 64),
+             Conv1dBNReLU(64, 128),
+             Conv1dBNReLU(128, 256))
+         self.decoder = nn.Sequential(
+             FCBNReLU(256, 128),
+             FCBNReLU(128, 64),
+             nn.Linear(64, 6))
+
+     @staticmethod
+     def f2R(f):
+         r1 = F.normalize(f[:, :3])
+         proj = (r1.unsqueeze(1) @ f[:, 3:].unsqueeze(2)).squeeze(2)
+         r2 = F.normalize(f[:, 3:] - proj * r1)
+         r3 = r1.cross(r2, dim=1)
+         return torch.stack([r1, r2, r3], dim=2)
+
+     def forward(self, pts):
+         f = self.encoder(pts)
+         f, _ = f.max(dim=2)
+         f = self.decoder(f)
+         R = self.f2R(f)
+         return R @ pts
+
+
+ class PointNet(nn.Module):
+     def __init__(self, use_rri, use_tnet=False, nearest_neighbors=20,
+                  d_model=1024, n_clusters=16):
+         # d_model and n_clusters were read from an undefined global `args` in
+         # the original source; they are exposed here as parameters (the
+         # default values are assumptions).
+         super(PointNet, self).__init__()
+         self.use_tnet = use_tnet
+         self.tnet = TNet() if self.use_tnet else None
+         d_input = nearest_neighbors * 4 if use_rri else 3
+         self.encoder = nn.Sequential(
+             Conv1dBNReLU(d_input, 64),
+             Conv1dBNReLU(64, 128),
+             Conv1dBNReLU(128, 256),
+             Conv1dBNReLU(256, d_model))
+         self.decoder = nn.Sequential(
+             Conv1dBNReLU(d_model * 2, 512),
+             Conv1dBNReLU(512, 256),
+             Conv1dBNReLU(256, 128),
+             nn.Conv1d(128, n_clusters, kernel_size=1))
+
+     def forward(self, pts):
+         pts = self.tnet(pts) if self.use_tnet else pts
+         f_loc = self.encoder(pts)
+         f_glob, _ = f_loc.max(dim=2)
+         f_glob = f_glob.unsqueeze(2).expand_as(f_loc)
+         y = self.decoder(torch.cat([f_loc, f_glob], dim=1))
+         return y.transpose(1, 2)
+
+
+ class DeepGMR(nn.Module):
+     def __init__(self, use_rri=True, feature_model=None, nearest_neighbors=20):
+         super(DeepGMR, self).__init__()
+         # The original `feature_model if not None else ...` always chose
+         # feature_model (even when it was None); test it explicitly instead.
+         self.backbone = feature_model if feature_model is not None else \
+             PointNet(use_rri=use_rri, nearest_neighbors=nearest_neighbors)
+         self.use_rri = use_rri
+
+     def forward(self, template, source):
+         if self.use_rri:
+             self.template = template[..., :3]
+             self.source = source[..., :3]
+             template_features = template[..., 3:].transpose(1, 2)
+             source_features = source[..., 3:].transpose(1, 2)
+         else:
+             self.template = template
+             self.source = source
+             template_features = (template - template.mean(dim=2, keepdim=True)).transpose(1, 2)
+             source_features = (source - source.mean(dim=2, keepdim=True)).transpose(1, 2)
+
+         self.template_gamma = F.softmax(self.backbone(template_features), dim=2)
+         self.template_pi, self.template_mu, self.template_sigma = gmm_params(self.template_gamma, self.template)
+         self.source_gamma = F.softmax(self.backbone(source_features), dim=2)
+         self.source_pi, self.source_mu, self.source_sigma = gmm_params(self.source_gamma, self.source)
+
+         self.est_T_inverse = gmm_register(self.template_pi, self.template_mu, self.source_mu, self.source_sigma)
+         self.est_T = gmm_register(self.source_pi, self.source_mu, self.template_mu, self.template_sigma)  # [template = source * est_T]
+         # Ground truth follows the opposite convention [source = template * igt];
+         # igt is not an input of forward, so it is not stored here.
+
+         # Transform only the xyz part of the source cloud with the estimated pose.
+         transformed_source = transform.transform_point_cloud(self.source, self.est_T[:, :3, :3], self.est_T[:, :3, 3])
+
+         result = {'est_R': self.est_T[:, :3, :3],
+                   'est_t': self.est_T[:, :3, 3],
+                   'est_R_inverse': self.est_T_inverse[:, :3, :3],
+                   'est_t_inverse': self.est_T_inverse[:, :3, 3],
+                   'est_T': self.est_T,
+                   'est_T_inverse': self.est_T_inverse,
+                   'r': template_features - source_features,
+                   'transformed_source': transformed_source}
+
+         return result
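A forward-pass sketch for this module (hypothetical shapes; with use_rri=True the channels beyond xyz are assumed to hold precomputed RRI features, 4 per neighbor):

    import torch
    from learning3d.models import DeepGMR

    k = 20
    net = DeepGMR(use_rri=True, nearest_neighbors=k).eval()
    template = torch.rand(4, 1024, 3 + 4 * k)  # B x N x (3 + 4k)
    source = torch.rand(4, 1024, 3 + 4 * k)

    with torch.no_grad():
        result = net(template, source)
    print(result['est_T'].shape)  # torch.Size([4, 4, 4]) homogeneous transforms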
learning3d/models/dgcnn.py
@@ -0,0 +1,92 @@
+ import torch
+ import torch.nn.functional as F
+
+ def knn(x, k):
+     inner = -2 * torch.matmul(x.transpose(2, 1).contiguous(), x)
+     xx = torch.sum(x ** 2, dim=1, keepdim=True)
+     pairwise_distance = -xx - inner - xx.transpose(2, 1).contiguous()
+
+     idx = pairwise_distance.topk(k=k, dim=-1)[1]  # (batch_size, num_points, k)
+     return idx
+
+
+ def get_graph_feature(x, k=20):
+     # x: (batch_size, num_dims, num_points)
+     idx = knn(x, k=k)  # (batch_size, num_points, k)
+     batch_size, num_points, _ = idx.size()
+
+     # Use the input's device rather than assuming CUDA is available.
+     device = x.device
+
+     # Offset per-batch indices into a flat (batch_size*num_points) index space.
+     idx_base = torch.arange(0, batch_size, device=device).view(-1, 1, 1) * num_points
+     idx = idx + idx_base
+     idx = idx.view(-1)
+
+     _, num_dims, _ = x.size()
+
+     # (batch_size, num_dims, num_points) -> (batch_size, num_points, num_dims)
+     x = x.transpose(2, 1).contiguous()
+
+     # Gather the k neighbor features of every point, then pair each neighbor
+     # feature with its center point's feature.
+     feature = x.view(batch_size * num_points, -1)[idx, :]
+     feature = feature.view(batch_size, num_points, k, num_dims)
+     x = x.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1)
+
+     feature = torch.cat((feature, x), dim=3).permute(0, 3, 1, 2)
+
+     return feature
+
+
+ class DGCNN(torch.nn.Module):
+     def __init__(self, emb_dims=1024, input_shape="bnc"):
+         super(DGCNN, self).__init__()
+         if input_shape not in ["bcn", "bnc"]:
+             raise ValueError("Allowed shapes are 'bcn' (batch * channels * num_in_points) and 'bnc'")
+         self.input_shape = input_shape
+         self.emb_dims = emb_dims
+
+         self.conv1 = torch.nn.Conv2d(6, 64, kernel_size=1, bias=False)
+         self.conv2 = torch.nn.Conv2d(64, 64, kernel_size=1, bias=False)
+         self.conv3 = torch.nn.Conv2d(64, 128, kernel_size=1, bias=False)
+         self.conv4 = torch.nn.Conv2d(128, 256, kernel_size=1, bias=False)
+         self.conv5 = torch.nn.Conv2d(512, emb_dims, kernel_size=1, bias=False)
+         self.bn1 = torch.nn.BatchNorm2d(64)
+         self.bn2 = torch.nn.BatchNorm2d(64)
+         self.bn3 = torch.nn.BatchNorm2d(128)
+         self.bn4 = torch.nn.BatchNorm2d(256)
+         self.bn5 = torch.nn.BatchNorm2d(emb_dims)
+
+     def forward(self, input_data):
+         if self.input_shape == "bnc":
+             input_data = input_data.permute(0, 2, 1)
+         if input_data.shape[1] != 3:
+             raise RuntimeError("shape of x must be of [Batch x 3 x NumInPoints]")
+
+         batch_size, num_dims, num_points = input_data.size()
+         output = get_graph_feature(input_data)
+
+         output = F.relu(self.bn1(self.conv1(output)))
+         output1 = output.max(dim=-1, keepdim=True)[0]
+
+         output = F.relu(self.bn2(self.conv2(output)))
+         output2 = output.max(dim=-1, keepdim=True)[0]
+
+         output = F.relu(self.bn3(self.conv3(output)))
+         output3 = output.max(dim=-1, keepdim=True)[0]
+
+         output = F.relu(self.bn4(self.conv4(output)))
+         output4 = output.max(dim=-1, keepdim=True)[0]
+
+         output = torch.cat((output1, output2, output3, output4), dim=1)
+
+         output = F.relu(self.bn5(self.conv5(output))).view(batch_size, -1, num_points)
+         return output
+
+
+ if __name__ == '__main__':
+     # Test the code.
+     x = torch.rand((10, 1024, 3))
+
+     dgcnn = DGCNN()
+     y = dgcnn(x)
+     print("\nInput Shape of DGCNN: ", x.shape, "\nOutput Shape of DGCNN: ", y.shape)