learning3d 0.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (115) hide show
  1. learning3d/__init__.py +2 -0
  2. learning3d/data_utils/__init__.py +4 -0
  3. learning3d/data_utils/dataloaders.py +454 -0
  4. learning3d/data_utils/user_data.py +119 -0
  5. learning3d/examples/test_dcp.py +139 -0
  6. learning3d/examples/test_deepgmr.py +144 -0
  7. learning3d/examples/test_flownet.py +113 -0
  8. learning3d/examples/test_masknet.py +159 -0
  9. learning3d/examples/test_masknet2.py +162 -0
  10. learning3d/examples/test_pcn.py +118 -0
  11. learning3d/examples/test_pcrnet.py +120 -0
  12. learning3d/examples/test_pnlk.py +121 -0
  13. learning3d/examples/test_pointconv.py +126 -0
  14. learning3d/examples/test_pointnet.py +121 -0
  15. learning3d/examples/test_prnet.py +126 -0
  16. learning3d/examples/test_rpmnet.py +120 -0
  17. learning3d/examples/train_PointNetLK.py +240 -0
  18. learning3d/examples/train_dcp.py +249 -0
  19. learning3d/examples/train_deepgmr.py +244 -0
  20. learning3d/examples/train_flownet.py +259 -0
  21. learning3d/examples/train_masknet.py +239 -0
  22. learning3d/examples/train_pcn.py +216 -0
  23. learning3d/examples/train_pcrnet.py +228 -0
  24. learning3d/examples/train_pointconv.py +245 -0
  25. learning3d/examples/train_pointnet.py +244 -0
  26. learning3d/examples/train_prnet.py +229 -0
  27. learning3d/examples/train_rpmnet.py +228 -0
  28. learning3d/losses/__init__.py +12 -0
  29. learning3d/losses/chamfer_distance.py +51 -0
  30. learning3d/losses/classification.py +14 -0
  31. learning3d/losses/correspondence_loss.py +10 -0
  32. learning3d/losses/cuda/chamfer_distance/__init__.py +1 -0
  33. learning3d/losses/cuda/chamfer_distance/chamfer_distance.cpp +185 -0
  34. learning3d/losses/cuda/chamfer_distance/chamfer_distance.cu +209 -0
  35. learning3d/losses/cuda/chamfer_distance/chamfer_distance.py +66 -0
  36. learning3d/losses/cuda/emd_torch/pkg/emd_loss_layer.py +41 -0
  37. learning3d/losses/cuda/emd_torch/pkg/include/cuda/emd.cuh +347 -0
  38. learning3d/losses/cuda/emd_torch/pkg/include/cuda_helper.h +18 -0
  39. learning3d/losses/cuda/emd_torch/pkg/include/emd.h +54 -0
  40. learning3d/losses/cuda/emd_torch/pkg/layer/__init__.py +1 -0
  41. learning3d/losses/cuda/emd_torch/pkg/layer/emd_loss_layer.py +40 -0
  42. learning3d/losses/cuda/emd_torch/pkg/src/cuda/emd.cu +70 -0
  43. learning3d/losses/cuda/emd_torch/pkg/src/emd.cpp +1 -0
  44. learning3d/losses/cuda/emd_torch/setup.py +29 -0
  45. learning3d/losses/emd.py +16 -0
  46. learning3d/losses/frobenius_norm.py +21 -0
  47. learning3d/losses/rmse_features.py +16 -0
  48. learning3d/models/__init__.py +23 -0
  49. learning3d/models/classifier.py +41 -0
  50. learning3d/models/dcp.py +92 -0
  51. learning3d/models/deepgmr.py +165 -0
  52. learning3d/models/dgcnn.py +92 -0
  53. learning3d/models/flownet3d.py +446 -0
  54. learning3d/models/masknet.py +84 -0
  55. learning3d/models/masknet2.py +264 -0
  56. learning3d/models/pcn.py +164 -0
  57. learning3d/models/pcrnet.py +74 -0
  58. learning3d/models/pointconv.py +108 -0
  59. learning3d/models/pointnet.py +108 -0
  60. learning3d/models/pointnetlk.py +173 -0
  61. learning3d/models/pooling.py +15 -0
  62. learning3d/models/ppfnet.py +102 -0
  63. learning3d/models/prnet.py +431 -0
  64. learning3d/models/rpmnet.py +359 -0
  65. learning3d/models/segmentation.py +38 -0
  66. learning3d/ops/__init__.py +0 -0
  67. learning3d/ops/data_utils.py +45 -0
  68. learning3d/ops/invmat.py +134 -0
  69. learning3d/ops/quaternion.py +218 -0
  70. learning3d/ops/se3.py +157 -0
  71. learning3d/ops/sinc.py +229 -0
  72. learning3d/ops/so3.py +213 -0
  73. learning3d/ops/transform_functions.py +342 -0
  74. learning3d/utils/__init__.py +9 -0
  75. learning3d/utils/lib/build/lib.linux-x86_64-3.5/pointnet2_cuda.cpython-35m-x86_64-linux-gnu.so +0 -0
  76. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/ball_query.o +0 -0
  77. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/ball_query_gpu.o +0 -0
  78. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/group_points.o +0 -0
  79. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/group_points_gpu.o +0 -0
  80. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/interpolate.o +0 -0
  81. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/interpolate_gpu.o +0 -0
  82. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/pointnet2_api.o +0 -0
  83. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/sampling.o +0 -0
  84. learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/sampling_gpu.o +0 -0
  85. learning3d/utils/lib/dist/pointnet2-0.0.0-py3.5-linux-x86_64.egg +0 -0
  86. learning3d/utils/lib/pointnet2.egg-info/SOURCES.txt +14 -0
  87. learning3d/utils/lib/pointnet2.egg-info/dependency_links.txt +1 -0
  88. learning3d/utils/lib/pointnet2.egg-info/top_level.txt +1 -0
  89. learning3d/utils/lib/pointnet2_modules.py +160 -0
  90. learning3d/utils/lib/pointnet2_utils.py +318 -0
  91. learning3d/utils/lib/pytorch_utils.py +236 -0
  92. learning3d/utils/lib/setup.py +23 -0
  93. learning3d/utils/lib/src/ball_query.cpp +25 -0
  94. learning3d/utils/lib/src/ball_query_gpu.cu +67 -0
  95. learning3d/utils/lib/src/ball_query_gpu.h +15 -0
  96. learning3d/utils/lib/src/cuda_utils.h +15 -0
  97. learning3d/utils/lib/src/group_points.cpp +36 -0
  98. learning3d/utils/lib/src/group_points_gpu.cu +86 -0
  99. learning3d/utils/lib/src/group_points_gpu.h +22 -0
  100. learning3d/utils/lib/src/interpolate.cpp +65 -0
  101. learning3d/utils/lib/src/interpolate_gpu.cu +233 -0
  102. learning3d/utils/lib/src/interpolate_gpu.h +36 -0
  103. learning3d/utils/lib/src/pointnet2_api.cpp +25 -0
  104. learning3d/utils/lib/src/sampling.cpp +46 -0
  105. learning3d/utils/lib/src/sampling_gpu.cu +253 -0
  106. learning3d/utils/lib/src/sampling_gpu.h +29 -0
  107. learning3d/utils/pointconv_util.py +382 -0
  108. learning3d/utils/ppfnet_util.py +244 -0
  109. learning3d/utils/svd.py +59 -0
  110. learning3d/utils/transformer.py +243 -0
  111. learning3d-0.0.1.dist-info/LICENSE +21 -0
  112. learning3d-0.0.1.dist-info/METADATA +271 -0
  113. learning3d-0.0.1.dist-info/RECORD +115 -0
  114. learning3d-0.0.1.dist-info/WHEEL +5 -0
  115. learning3d-0.0.1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,243 @@
1
+ import os
2
+ import sys
3
+ import glob
4
+ import h5py
5
+ import copy
6
+ import math
7
+ import numpy as np
8
+ import torch
9
+ import torch.nn as nn
10
+ import torch.nn.functional as F
11
+
12
+ # Part of the code is referred from: http://nlp.seas.harvard.edu/2018/04/03/attention.html#positional-encoding
13
+
14
def clones(module, N):
    """Return a ModuleList holding N independent deep copies of *module*."""
    copies = (copy.deepcopy(module) for _ in range(N))
    return nn.ModuleList(copies)
16
+
17
def attention(query, key, value, mask=None, dropout=None):
    """Scaled dot-product attention.

    Returns (weighted values, attention weights).  The *dropout* argument is
    accepted for API compatibility but is never applied here.
    """
    dim = query.size(-1)
    logits = torch.matmul(query, key.transpose(-2, -1).contiguous())
    logits = logits / math.sqrt(dim)
    if mask is not None:
        # Positions where mask == 0 are pushed to -1e9 so softmax ignores them.
        logits = logits.masked_fill(mask == 0, -1e9)
    weights = F.softmax(logits, dim=-1)
    return torch.matmul(weights, value), weights
24
+
25
def nearest_neighbor(src, dst):
    """For each column (point) of *src*, find the nearest column of *dst*.

    Both inputs are laid out as (num_dims, num_points).  Uses the identity
    -||s - d||^2 = -||s||^2 + 2 s.d - ||d||^2, so topk(k=1) on the result
    selects the nearest neighbour; the returned "distances" are therefore
    negative squared distances.
    """
    cross = -2 * torch.matmul(src.transpose(1, 0).contiguous(), dst)
    src_sq = torch.sum(src ** 2, dim=0, keepdim=True).transpose(1, 0).contiguous()
    dst_sq = torch.sum(dst ** 2, dim=0, keepdim=True)
    neg_sq_dist = -src_sq - cross - dst_sq
    distances, indices = neg_sq_dist.topk(k=1, dim=-1)
    return distances, indices
32
+
33
+
34
class EncoderDecoder(nn.Module):
    """A standard Encoder-Decoder architecture.

    Base for this and many other models.  ``src_embed``/``tgt_embed`` map raw
    inputs to embeddings, and ``generator`` post-processes the decoder output.
    """

    def __init__(self, encoder, decoder, src_embed, tgt_embed, generator):
        super(EncoderDecoder, self).__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.src_embed = src_embed
        self.tgt_embed = tgt_embed
        self.generator = generator

    def forward(self, src, tgt, src_mask, tgt_mask):
        """Take in and process masked src and target sequences."""
        memory = self.encode(src, src_mask)
        return self.decode(memory, src_mask, tgt, tgt_mask)

    def encode(self, src, src_mask):
        """Embed the source and run it through the encoder."""
        return self.encoder(self.src_embed(src), src_mask)

    def decode(self, memory, src_mask, tgt, tgt_mask):
        """Embed the target, decode against memory, then apply the generator."""
        decoded = self.decoder(self.tgt_embed(tgt), memory, src_mask, tgt_mask)
        return self.generator(decoded)
58
+
59
+
60
class Generator(nn.Module):
    """Regress a rigid transform (unit quaternion + translation) from
    per-point embeddings of shape (B, N, emb_dims)."""

    def __init__(self, emb_dims):
        super(Generator, self).__init__()
        half, quarter, eighth = emb_dims // 2, emb_dims // 4, emb_dims // 8
        # Shared trunk that progressively shrinks the pooled global feature.
        self.nn = nn.Sequential(nn.Linear(emb_dims, half),
                                nn.BatchNorm1d(half),
                                nn.ReLU(),
                                nn.Linear(half, quarter),
                                nn.BatchNorm1d(quarter),
                                nn.ReLU(),
                                nn.Linear(quarter, eighth),
                                nn.BatchNorm1d(eighth),
                                nn.ReLU())
        self.proj_rot = nn.Linear(eighth, 4)    # quaternion head
        self.proj_trans = nn.Linear(eighth, 3)  # translation head

    def forward(self, x):
        # Max-pool over the point dimension to get one feature per cloud.
        pooled = x.max(dim=1)[0]
        feat = self.nn(pooled)
        rotation = self.proj_rot(feat)
        translation = self.proj_trans(feat)
        # Normalise so the predicted quaternion has unit length.
        rotation = rotation / torch.norm(rotation, p=2, dim=1, keepdim=True)
        return rotation, translation
81
+
82
+
83
class Encoder(nn.Module):
    """Stack of N identical encoder layers followed by a final LayerNorm."""

    def __init__(self, layer, N):
        super(Encoder, self).__init__()
        self.layers = clones(layer, N)
        self.norm = LayerNorm(layer.size)

    def forward(self, x, mask):
        """Pass the input (and mask) through each layer in turn, then norm."""
        for sub in self.layers:
            x = sub(x, mask)
        return self.norm(x)
93
+
94
+
95
class Decoder(nn.Module):
    """Generic N layer decoder with masking."""

    def __init__(self, layer, N):
        super(Decoder, self).__init__()
        self.layers = clones(layer, N)
        self.norm = LayerNorm(layer.size)

    def forward(self, x, memory, src_mask, tgt_mask):
        """Run every decoder layer over x against the encoder memory."""
        for sub in self.layers:
            x = sub(x, memory, src_mask, tgt_mask)
        return self.norm(x)
107
+
108
+
109
class LayerNorm(nn.Module):
    """Layer normalisation over the last dimension with learnable gain/bias.

    NOTE: uses ``Tensor.std`` (unbiased estimator) and adds ``eps`` to the
    std rather than to the variance, matching the reference implementation.
    """

    def __init__(self, features, eps=1e-6):
        super(LayerNorm, self).__init__()
        self.a_2 = nn.Parameter(torch.ones(features))   # gain
        self.b_2 = nn.Parameter(torch.zeros(features))  # bias
        self.eps = eps

    def forward(self, x):
        mu = x.mean(-1, keepdim=True)
        sigma = x.std(-1, keepdim=True)
        return self.a_2 * (x - mu) / (sigma + self.eps) + self.b_2
120
+
121
+
122
class SublayerConnection(nn.Module):
    """Residual connection wrapped around a pre-norm sublayer.

    The *dropout* argument is accepted but never used, kept only for API
    compatibility with the reference implementation.
    """

    def __init__(self, size, dropout=None):
        super(SublayerConnection, self).__init__()
        self.norm = LayerNorm(size)

    def forward(self, x, sublayer):
        """Return x + sublayer(norm(x))."""
        residual = sublayer(self.norm(x))
        return x + residual
129
+
130
+
131
class EncoderLayer(nn.Module):
    """One encoder block: self-attention then a feed-forward net, each
    wrapped in a residual SublayerConnection."""

    def __init__(self, size, self_attn, feed_forward, dropout):
        super(EncoderLayer, self).__init__()
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        self.sublayer = clones(SublayerConnection(size, dropout), 2)
        self.size = size

    def forward(self, x, mask):
        """Self-attention sublayer followed by the feed-forward sublayer."""
        x = self.sublayer[0](x, lambda t: self.self_attn(t, t, t, mask))
        return self.sublayer[1](x, self.feed_forward)
142
+
143
+
144
class DecoderLayer(nn.Module):
    """Decoder is made of self-attn, src-attn, and feed forward."""

    def __init__(self, size, self_attn, src_attn, feed_forward, dropout):
        super(DecoderLayer, self).__init__()
        self.size = size
        self.self_attn = self_attn
        self.src_attn = src_attn
        self.feed_forward = feed_forward
        self.sublayer = clones(SublayerConnection(size, dropout), 3)

    def forward(self, x, memory, src_mask, tgt_mask):
        """Self-attention over the target, then attention over the encoder
        memory, then the position-wise feed-forward net."""
        m = memory
        x = self.sublayer[0](x, lambda t: self.self_attn(t, t, t, tgt_mask))
        x = self.sublayer[1](x, lambda t: self.src_attn(t, m, m, src_mask))
        return self.sublayer[2](x, self.feed_forward)
161
+
162
+
163
class MultiHeadedAttention(nn.Module):
    """Multi-head attention ("Attention Is All You Need", Figure 2).

    The *dropout* constructor argument is accepted but not applied —
    ``self.dropout`` is fixed to None, matching the original code.
    """

    def __init__(self, h, d_model, dropout=0.1):
        super(MultiHeadedAttention, self).__init__()
        assert d_model % h == 0
        # We assume d_v always equals d_k.
        self.d_k = d_model // h
        self.h = h
        # Three input projections (q, k, v) plus the final output projection.
        self.linears = clones(nn.Linear(d_model, d_model), 4)
        self.attn = None
        self.dropout = None

    def forward(self, query, key, value, mask=None):
        if mask is not None:
            # Same mask applied to every head.
            mask = mask.unsqueeze(1)
        nbatches = query.size(0)

        # 1) Project inputs and split into h heads: (B, h, T, d_k).
        projected = []
        for proj, tensor in zip(self.linears, (query, key, value)):
            heads = proj(tensor).view(nbatches, -1, self.h, self.d_k)
            projected.append(heads.transpose(1, 2).contiguous())
        query, key, value = projected

        # 2) Run scaled dot-product attention on all heads in one batch.
        x, self.attn = attention(query, key, value, mask=mask,
                                 dropout=self.dropout)

        # 3) "Concat" the heads back together and apply the final linear.
        x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)
        return self.linears[-1](x)
195
+
196
+
197
class PositionwiseFeedForward(nn.Module):
    """Implements the FFN equation: w_2(relu(w_1(x))).

    ``self.norm`` is an empty Sequential (identity); the surrounding
    transposes are kept so an nn.BatchNorm1d(d_ff) could be dropped in
    without further changes.  The *dropout* argument is accepted but unused.
    """

    def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.norm = nn.Sequential()  # placeholder for nn.BatchNorm1d(d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.dropout = None

    def forward(self, x):
        hidden = F.relu(self.w_1(x)).transpose(2, 1).contiguous()
        hidden = self.norm(hidden).transpose(2, 1).contiguous()
        return self.w_2(hidden)
209
+
210
+
211
class Identity(nn.Module):
    """No-op module: returns whatever it is given, as a tuple."""

    def __init__(self):
        super(Identity, self).__init__()

    def forward(self, *args):
        # nn.Module.__call__ forwards positional args straight through.
        return args
217
+
218
+
219
class Transformer(nn.Module):
    """Transformer that cross-embeds two point clouds (DCP-style).

    Each input of shape (B, C, N) is transposed to (B, N, C), pushed through
    a shared EncoderDecoder in both directions, and the resulting embeddings
    are returned transposed back to (B, C, N).
    """

    def __init__(self, emb_dims, n_blocks, dropout, ff_dims, n_heads):
        super(Transformer, self).__init__()
        self.emb_dims = emb_dims
        self.N = n_blocks
        self.dropout = dropout
        self.ff_dims = ff_dims
        self.n_heads = n_heads
        c = copy.deepcopy
        attn = MultiHeadedAttention(self.n_heads, self.emb_dims)
        ff = PositionwiseFeedForward(self.emb_dims, self.ff_dims, self.dropout)
        enc_layer = EncoderLayer(self.emb_dims, c(attn), c(ff), self.dropout)
        dec_layer = DecoderLayer(self.emb_dims, c(attn), c(attn), c(ff),
                                 self.dropout)
        # Embeddings and generator are empty Sequentials (identity): raw
        # features go in, decoder output comes straight out.
        self.model = EncoderDecoder(Encoder(enc_layer, self.N),
                                    Decoder(dec_layer, self.N),
                                    nn.Sequential(),
                                    nn.Sequential(),
                                    nn.Sequential())

    def forward(self, *input):
        src = input[0]
        tgt = input[1]
        src = src.transpose(2, 1).contiguous()
        tgt = tgt.transpose(2, 1).contiguous()
        # Decode tgt conditioned on src, and vice versa, with no masks.
        tgt_embedding = self.model(src, tgt, None, None).transpose(2, 1).contiguous()
        src_embedding = self.model(tgt, src, None, None).transpose(2, 1).contiguous()
        return src_embedding, tgt_embedding
@@ -0,0 +1,21 @@
1
+ The MIT License
2
+
3
+ Copyright (c) 2010-2019 Google, Inc. http://angularjs.org
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in
13
+ all copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21
+ THE SOFTWARE.
@@ -0,0 +1,271 @@
1
+ Metadata-Version: 2.1
2
+ Name: learning3d
3
+ Version: 0.0.1
4
+ Summary: Learning3D: A Modern Library for Deep Learning on 3D Point Clouds Data
5
+ Author-email: Vinit Sarode <vinitsarode5@gmail.com>
6
+ Project-URL: Homepage, https://github.com/vinits5/learning3d
7
+ Project-URL: Issues, https://github.com/vinits5/learning3d/issues
8
+ Classifier: Programming Language :: Python :: 3
9
+ Classifier: License :: OSI Approved :: MIT License
10
+ Classifier: Operating System :: OS Independent
11
+ Requires-Python: >=3.8
12
+ Description-Content-Type: text/markdown
13
+ License-File: LICENSE
14
+ Requires-Dist: torch ==2.0.1
15
+ Requires-Dist: torchvision ==0.15.2
16
+ Requires-Dist: h5py ==3.8.0
17
+ Requires-Dist: ninja ==1.11.1
18
+ Requires-Dist: open3d ==0.17.0
19
+ Requires-Dist: tensorboardX ==2.6
20
+ Requires-Dist: tqdm ==4.65.0
21
+ Requires-Dist: scikit-learn ==1.2.2
22
+ Requires-Dist: scipy ==1.10.1
23
+ Requires-Dist: numpy ==1.24.3
24
+ Requires-Dist: transforms3d ==0.4.1
25
+ Requires-Dist: pycuda
26
+
27
+ <p align="center">
28
+ <img src="https://github.com/vinits5/learning3d/blob/master/images/logo.png" height="170">
29
+ </p>
30
+
31
+ # Learning3D: A Modern Library for Deep Learning on 3D Point Clouds Data.
32
+
33
+ **[Documentation](https://github.com/vinits5/learning3d#documentation) | [Blog](https://medium.com/@vinitsarode5/learning3d-a-modern-library-for-deep-learning-on-3d-point-clouds-data-48adc1fd3e0?sk=0beb59651e5ce980243bcdfbf0859b7a) | [Demo](https://github.com/vinits5/learning3d/blob/master/examples/test_pointnet.py)**
34
+
35
+ Learning3D is an open-source library that supports the development of deep learning algorithms that deal with 3D data. Learning3D exposes a set of state-of-the-art deep neural networks in Python. A modular code has been provided for further development. We welcome contributions from the open-source community.
36
+
37
+ ## Latest News:
38
+ 1. \[24 Oct, 2023\]: [MaskNet++](https://github.com/zhouruqin/MaskNet2) is now a part of learning3d library.
39
+ 2. \[12 May, 2022\]: [ChamferDistance](https://github.com/fwilliams/fml) loss function is incorporated in learning3d. This is a purely pytorch based loss function.
40
+ 3. \[24 Dec. 2020\]: [MaskNet](https://arxiv.org/pdf/2010.09185.pdf) is now ready to enhance the performance of registration algorithms in learning3d for occluded point clouds.
41
+ 4. \[24 Dec. 2020\]: Loss based on the predicted and ground truth correspondences is added in learning3d after consideration of [Correspondence Matrices are Underrated](https://arxiv.org/pdf/2010.16085.pdf) paper.
42
+ 5. \[24 Dec. 2020\]: [PointConv](https://arxiv.org/abs/1811.07246), latent feature estimation using convolutions on point clouds is now available in learning3d.
43
+ 6. \[16 Oct. 2020\]: [DeepGMR](https://wentaoyuan.github.io/deepgmr/), registration using gaussian mixture models is now available in learning3d
44
+ 7. \[14 Oct. 2020\]: Now, use your own data in learning3d. (Check out [UserData](https://github.com/vinits5/learning3d#use-your-own-data) functionality!)
45
+
46
+ ## Available Computer Vision Algorithms in Learning3D
47
+
48
+ | Sr. No. | Tasks | Algorithms |
49
+ |:-------------:|:----------:|:-----|
50
+ | 1 | [Classification](https://github.com/vinits5/learning3d#use-of-classification--segmentation-network) | PointNet, DGCNN, PPFNet, [PointConv](https://github.com/vinits5/learning3d#use-of-pointconv) |
51
+ | 2 | [Segmentation](https://github.com/vinits5/learning3d#use-of-classification--segmentation-network) | PointNet, DGCNN |
52
+ | 3 | [Reconstruction](https://github.com/vinits5/learning3d#use-of-point-completion-network) | Point Completion Network (PCN) |
53
+ | 4 | [Registration](https://github.com/vinits5/learning3d#use-of-registration-networks) | PointNetLK, PCRNet, DCP, PRNet, RPM-Net, DeepGMR |
54
+ | 5 | [Flow Estimation](https://github.com/vinits5/learning3d#use-of-flow-estimation-network) | FlowNet3D |
55
+ | 6 | [Inlier Estimation](https://github.com/vinits5/learning3d#use-of-inlier-estimation-network-masknet) | MaskNet, MaskNet++ |
56
+
57
+ ## Available Pretrained Models
58
+ 1. PointNet
59
+ 2. PCN
60
+ 3. PointNetLK
61
+ 4. PCRNet
62
+ 5. DCP
63
+ 6. PRNet
64
+ 7. FlowNet3D
65
+ 8. RPM-Net (clean-trained.pth, noisy-trained.pth, partial-pretrained.pth)
66
+ 9. DeepGMR
67
+ 10. PointConv (Download from this [link](https://github.com/DylanWusee/pointconv_pytorch/blob/master/checkpoints/checkpoint.pth))
68
+ 11. MaskNet
69
+ 12. MaskNet++ / MaskNet2
70
+
71
+ ## Available Datasets
72
+ 1. ModelNet40
73
+
74
+ ## Available Loss Functions
75
+ 1. Classification Loss (Cross Entropy)
76
+ 2. Registration Losses (FrobeniusNormLoss, RMSEFeaturesLoss)
77
+ 3. Distance Losses (Chamfer Distance, Earth Mover's Distance)
78
+ 4. Correspondence Loss (based on this [paper](https://arxiv.org/pdf/2010.16085.pdf))
79
+
80
+ ## Technical Details
81
+ ### Supported OS
82
+ 1. Ubuntu 16.04
83
+ 2. Ubuntu 18.04
84
+ 3. Ubuntu 20.04.6
85
+ 4. Linux Mint
86
+
87
+ ### Requirements
88
+ 1. CUDA 10.0 or higher
89
+ 2. Pytorch 1.3 or higher
90
+ 3. Python 3.8
91
+
92
+ ## How to use this library?
93
+ **Important Note: Clone this repository in your project. Please don't add your codes in "learning3d" folder.**
94
+
95
+ 1. All networks are defined in the module "models".
96
+ 2. All loss functions are defined in the module "losses".
97
+ 3. Data loaders are pre-defined in data_utils/dataloaders.py file.
98
+ 4. All pretrained models are provided in learning3d/pretrained folder.
99
+
100
+ ## Documentation
101
+ B: Batch Size, N: No. of points and C: Channels.
102
+ #### Use of Point Embedding Networks:
103
+ > from learning3d.models import PointNet, DGCNN, PPFNet\
104
+ > pn = PointNet(emb_dims=1024, input_shape='bnc', use_bn=False)\
105
+ > dgcnn = DGCNN(emb_dims=1024, input_shape='bnc')\
106
+ > ppf = PPFNet(features=['ppf', 'dxyz', 'xyz'], emb_dims=96, radius='0.3', num_neighbours=64)
107
+
108
+ | Sr. No. | Variable | Data type | Shape | Choices | Use |
109
+ |:---:|:---:|:---:|:---:|:---:|:---:|
110
+ | 1. | emb_dims | Integer | Scalar | 1024, 512 | Size of feature vector for the each point|
111
+ | 2. | input_shape | String | - | 'bnc', 'bcn' | Shape of input point cloud|
112
+ | 3. | output | tensor | BxCxN | - | High dimensional embeddings for each point|
113
+ | 4. | features | List of Strings | - | ['ppf', 'dxyz', 'xyz'] | Use of various features |
114
+ | 5. | radius | Float | Scalar | 0.3 | Radius of cluster for computing local features |
115
+ | 6. | num_neighbours | Integer | Scalar | 64 | Maximum number of points to consider per cluster |
116
+
117
+ #### Use of Classification / Segmentation Network:
118
+ > from learning3d.models import Classifier, PointNet, Segmentation\
119
+ > classifier = Classifier(feature_model=PointNet(), num_classes=40)\
120
+ > seg = Segmentation(feature_model=PointNet(), num_classes=40)
121
+
122
+ | Sr. No. | Variable | Data type | Shape | Choices | Use |
123
+ |:---:|:---:|:---:|:---:|:---:|:---:|
124
+ | 1. | feature_model | Object | - | PointNet / DGCNN | Point cloud embedding network |
125
+ | 2. | num_classes | Integer | Scalar | 10, 40 | Number of object categories to be classified |
126
+ | 3. | output | tensor | Classification: Bx40, Segmentation: BxNx40 | 10, 40 | Probabilities of each category or each point |
127
+
128
+ #### Use of Registration Networks:
129
+ > from learning3d.models import PointNet, PointNetLK, DCP, iPCRNet, PRNet, PPFNet, RPMNet\
130
+ > pnlk = PointNetLK(feature_model=PointNet(), delta=1e-02, xtol=1e-07, p0_zero_mean=True, p1_zero_mean=True, pooling='max')\
131
+ > dcp = DCP(feature_model=PointNet(), pointer_='transformer', head='svd')\
132
+ > pcrnet = iPCRNet(feature_model=PointNet(), pooling='max')\
133
+ > rpmnet = RPMNet(feature_model=PPFNet())\
134
+ > deepgmr = DeepGMR(use_rri=True, feature_model=PointNet(), nearest_neighbors=20)
135
+
136
+ | Sr. No. | Variable | Data type | Choices | Use | Algorithm |
137
+ |:---:|:---:|:---:|:---:|:---:|:---:|
138
+ | 1. | feature_model | Object | PointNet / DGCNN | Point cloud embedding network | PointNetLK |
139
+ | 2. | delta | Float | Scalar | Parameter to calculate approximate jacobian | PointNetLK |
140
+ | 3. | xtol | Float | Scalar | Check tolerance to stop iterations | PointNetLK |
141
+ | 4. | p0_zero_mean | Boolean | True/False | Subtract mean from template point cloud | PointNetLK |
142
+ | 5. | p1_zero_mean | Boolean | True/False | Subtract mean from source point cloud | PointNetLK |
143
+ | 6. | pooling | String | 'max' / 'avg' | Type of pooling used to get global feature vector | PointNetLK |
144
+ | 7. | pointer_ | String | 'transformer' / 'identity' | Choice for Transformer/Attention network | DCP |
145
+ | 8. | head | String | 'svd' / 'mlp' | Choice of module to estimate registration params | DCP |
146
+ | 9. | use_rri | Boolean | True/False | Use nearest neighbors to estimate point cloud features. | DeepGMR |
147
+ | 10. | nearest_neighbors | Integer | 20/any integer | Number of nearest neighbors used to estimate features | DeepGMR |
148
+
149
+ #### Use of Inlier Estimation Network (MaskNet):
150
+ > from learning3d.models import MaskNet, PointNet, MaskNet2\
151
+ > masknet = MaskNet(feature_model=PointNet(), is_training=True)
152
+ > masknet2 = MaskNet2(feature_model=PointNet(), is_training=True)
153
+
154
+ | Sr. No. | Variable | Data type | Choices | Use |
155
+ |:---:|:---:|:---:|:---:|:---:|
156
+ | 1. | feature_model | Object | PointNet / DGCNN | Point cloud embedding network |
157
+ | 2. | is_training | Boolean | True / False | Specify if the network will undergo training or testing |
158
+
159
+ #### Use of Point Completion Network:
160
+ > from learning3d.models import PCN\
161
+ > pcn = PCN(emb_dims=1024, input_shape='bnc', num_coarse=1024, grid_size=4, detailed_output=True)
162
+
163
+ | Sr. No. | Variable | Data type | Choices | Use |
164
+ |:---:|:---:|:---:|:---:|:---:|
165
+ | 1. | emb_dims | Integer | 1024, 512 | Size of feature vector for each point |
166
+ | 2. | input_shape | String | 'bnc' / 'bcn' | Shape of input point cloud |
167
+ | 3. | num_coarse | Integer | 1024 | Shape of output point cloud |
168
+ | 4. | grid_size | Integer | 4, 8, 16 | Size of grid used to produce detailed output |
169
+ | 5. | detailed_output | Boolean | True / False | Choice for additional module to create detailed output point cloud|
170
+
171
+ #### Use of PointConv:
172
+ Use the following to create pretrained model provided by authors.
173
+ > from learning3d.models import create_pointconv\
174
+ > PointConv = create_pointconv(classifier=True, pretrained='path of checkpoint')\
175
+ > ptconv = PointConv(emb_dims=1024, input_shape='bnc', input_channel_dim=6, classifier=True)
176
+
177
+ **OR**\
178
+ Use the following to create your own PointConv model.
179
+
180
+ > PointConv = create_pointconv(classifier=False, pretrained=None)\
181
+ > ptconv = PointConv(emb_dims=1024, input_shape='bnc', input_channel_dim=3, classifier=True)
182
+
183
+ PointConv variable is a class. Users can use it to create a sub-class to override *create_classifier* and *create_structure* methods in order to change PointConv's network architecture.
184
+
185
+ | Sr. No. | Variable | Data type | Choices | Use |
186
+ |:---:|:---:|:---:|:---:|:---:|
187
+ | 1. | emb_dims | Integer | 1024, 512 | Size of feature vector for each point |
188
+ | 2. | input_shape | String | 'bnc' / 'bcn' | Shape of input point cloud |
189
+ | 3. | input_channel_dim | Integer | 3/6 | Define if point cloud contains only xyz co-ordinates or normals and colors as well |
190
+ | 4. | classifier | Boolean | True / False | Choose if you want to use a classifier with PointConv |
191
+ | 5. | pretrained | Boolean | String | Give path of the pretrained classifier model (only use it for weights given by authors) |
192
+
193
+ #### Use of Flow Estimation Network:
194
+ > from learning3d.models import FlowNet3D\
195
+ > flownet = FlowNet3D()
196
+
197
+ #### Use of Data Loaders:
198
+ > from learning3d.data_utils import ModelNet40Data, ClassificationData, RegistrationData, FlowData\
199
+ > modelnet40 = ModelNet40Data(train=True, num_points=1024, download=True)\
200
+ > classification_data = ClassificationData(data_class=ModelNet40Data())\
201
+ > registration_data = RegistrationData(algorithm='PointNetLK', data_class=ModelNet40Data(), partial_source=False, partial_template=False, noise=False)\
202
+ > flow_data = FlowData()
203
+
204
+ | Sr. No. | Variable | Data type | Choices | Use |
205
+ |:---:|:---:|:---:|:---:|:---:|
206
+ | 1. | train | Boolean | True / False | Split data as train/test set |
207
+ | 2. | num_points | Integer | 1024 | Number of points in each point cloud |
208
+ | 3. | download | Boolean | True / False | If data not available then download it |
209
+ | 4. | data_class | Object | - | Specify which dataset to use |
210
+ | 5. | algorithm | String | 'PointNetLK', 'PCRNet', 'DCP', 'iPCRNet' | Algorithm used for registration |
211
+ | 6. | partial_source | Boolean | True / False | Create partial source point cloud |
212
+ | 7. | partial_template | Boolean | True / False | Create partial template point cloud |
213
+ | 8. | noise | Boolean | True / False | Add noise in source point cloud |
214
+
215
+ #### Use Your Own Data:
216
+ > from learning3d.data_utils import UserData\
217
+ > dataset = UserData(application, data_dict)
218
+
219
+ |Sr. No. | Application | Required Key | Respective Value |
220
+ |:---:|:---:|:---:|:---:|
221
+ | 1. | 'classification' | 'pcs' | Point Clouds (BxNx3) |
222
+ | | | 'labels' | Ground Truth Class Labels (BxN) |
223
+ | 2. | 'registration' | 'template' | Template Point Clouds (BxNx3) |
224
+ | | | 'source' | Source Point Clouds (BxNx3) |
225
+ | | | 'transformation' | Ground Truth Transformation (Bx4x4)|
226
+ | 3. | 'flow_estimation' | 'frame1' | Point Clouds (BxNx3) |
227
+ | | | 'frame2' | Point Clouds (BxNx3) |
228
+ | | | 'flow' | Ground Truth Flow Vector (BxNx3)|
229
+
230
+ #### Use of Loss Functions:
231
+ > from learning3d.losses import RMSEFeaturesLoss, FrobeniusNormLoss, ClassificationLoss, EMDLoss, ChamferDistanceLoss, CorrespondenceLoss\
232
+ > rmse = RMSEFeaturesLoss()\
233
+ > fn_loss = FrobeniusNormLoss()\
234
+ > classification_loss = ClassificationLoss()\
235
+ > emd = EMDLoss()\
236
+ > cd = ChamferDistanceLoss()\
237
+ > corr = CorrespondenceLoss()
238
+
239
+ | Sr. No. | Loss Type | Use |
240
+ |:---:|:---:|:---:|
241
+ | 1. | RMSEFeaturesLoss | Used to find root mean square value between two global feature vectors of point clouds |
242
+ | 2. | FrobeniusNormLoss | Used to find the Frobenius norm between two transformation matrices |
243
+ | 3. | ClassificationLoss | Used to calculate cross-entropy loss |
244
+ | 4. | EMDLoss | Earth Mover's distance between two given point clouds |
245
+ | 5. | ChamferDistanceLoss | Chamfer's distance between two given point clouds |
246
+ | 6. | CorrespondenceLoss | Computes cross entropy loss using the predicted correspondence and ground truth correspondence for each source point |
247
+
248
+ ### To run codes from examples:
249
+ 1. Copy the file from "examples" folder outside of the directory "learning3d"
250
+ 2. Now, run the file. (ex. python test_pointnet.py)
251
+ - Your Directory/Location
252
+ - learning3d
253
+ - test_pointnet.py
254
+
255
+ ### References:
256
+ 1. [PointNet:](https://arxiv.org/abs/1612.00593) Deep Learning on Point Sets for 3D Classification and Segmentation
257
+ 2. [Dynamic Graph CNN](https://arxiv.org/abs/1801.07829) for Learning on Point Clouds
258
+ 3. [PPFNet:](https://arxiv.org/pdf/1802.02669.pdf) Global Context Aware Local Features for Robust 3D Point Matching
259
+ 4. [PointConv:](https://arxiv.org/abs/1811.07246) Deep Convolutional Networks on 3D Point Clouds
260
+ 5. [PointNetLK:](https://arxiv.org/abs/1903.05711) Robust & Efficient Point Cloud Registration using PointNet
261
+ 6. [PCRNet:](https://arxiv.org/abs/1908.07906) Point Cloud Registration Network using PointNet Encoding
262
+ 7. [Deep Closest Point:](https://arxiv.org/abs/1905.03304) Learning Representations for Point Cloud Registration
263
+ 8. [PRNet:](https://arxiv.org/abs/1910.12240) Self-Supervised Learning for Partial-to-Partial Registration
264
+ 9. [FlowNet3D:](https://arxiv.org/abs/1806.01411) Learning Scene Flow in 3D Point Clouds
265
+ 10. [PCN:](https://arxiv.org/pdf/1808.00671.pdf) Point Completion Network
266
+ 11. [RPM-Net:](https://arxiv.org/pdf/2003.13479.pdf) Robust Point Matching using Learned Features
267
+ 12. [3D ShapeNets:](https://people.csail.mit.edu/khosla/papers/cvpr2015_wu.pdf) A Deep Representation for Volumetric Shapes
268
+ 13. [DeepGMR:](https://arxiv.org/abs/2008.09088) Learning Latent Gaussian Mixture Models for Registration
269
+ 14. [CMU:](https://arxiv.org/pdf/2010.16085.pdf) Correspondence Matrices are Underrated
270
+ 15. [MaskNet:](https://arxiv.org/pdf/2010.09185.pdf) A Fully-Convolutional Network to Estimate Inlier Points
271
+ 16. [MaskNet++:](https://www.sciencedirect.com/science/article/abs/pii/S0097849322000085) Inlier/outlier identification for two point clouds
@@ -0,0 +1,115 @@
1
+ learning3d/__init__.py,sha256=WsLbk69HZ7OXJwTpNFcEphgtNp4sqEVGbZ1HGeNdYZ0,189
2
+ learning3d/data_utils/__init__.py,sha256=iYAVh0FThnVlG42QIgmDYrC3NGVYuzKX8s1oRqAI1YU,261
3
+ learning3d/data_utils/dataloaders.py,sha256=kb0wsLlMN7sB-CS_4BGSprSaZBwkUNYxS5iwUdD6JJM,14871
4
+ learning3d/data_utils/user_data.py,sha256=ADDGeCUCr6TcXhcxvAFncIeLO71xoRHYi4H418ktvQs,4828
5
+ learning3d/examples/test_dcp.py,sha256=o1hgl22b2xhqqYe4_k7NlNo3-rIMmSzzssCI74DCfoE,5606
6
+ learning3d/examples/test_deepgmr.py,sha256=pb-gRE6YLLaPFmiBXQjaE-B-J314QY4RkNwLHx65bcU,5597
7
+ learning3d/examples/test_flownet.py,sha256=52i9UOnSpCxZXrCgYJ-HNxsxRXqr_K4PydYMF90Lxv8,4603
8
+ learning3d/examples/test_masknet.py,sha256=dkqUui9sv2SzHtvtzUzL_PxJxMcBMqCSDPAYg0BWAVU,6405
9
+ learning3d/examples/test_masknet2.py,sha256=3_XWBQOwQjK3BCQ__BmPhCvYI_0hZMK3X4C-P2Krw6w,6859
10
+ learning3d/examples/test_pcn.py,sha256=4eaosjJVqiFxlqnaUWu-O2Jawt4uU16UJEzitIjP2us,4342
11
+ learning3d/examples/test_pcrnet.py,sha256=_x9l55sMBACXUfQHLaH7GJMfz6PWdYWSxjrRxVoY-As,4366
12
+ learning3d/examples/test_pnlk.py,sha256=9u7B--PuCpl6TAmObmIKfDvYW6bMj3Jcc3_djTDO-D4,4456
13
+ learning3d/examples/test_pointconv.py,sha256=NUcLjkkNJsGZYaUZHug6QGybm8NshZfg2tc8rfksNU8,4673
14
+ learning3d/examples/test_pointnet.py,sha256=VKfB5DE8fh2G1iIoY02GvjSgWUJk2jQBmmJs3HC5rVU,4324
15
+ learning3d/examples/test_prnet.py,sha256=ls1wWul26UHhquXt-PEn4O2-vCzCdxJ-aNIZDnHfvgg,4901
16
+ learning3d/examples/test_rpmnet.py,sha256=oy-z7I26IQxr4TD_p0qCRnOn6H8VbQFyiWO83ZSFcDk,4476
17
+ learning3d/examples/train_PointNetLK.py,sha256=0GgT2NYKNZl8o02rvW-nYBO_1tlfDNuakuAXtm1V16c,8773
18
+ learning3d/examples/train_dcp.py,sha256=SQVrwnZqGmFCZv_X2tzMysBmv-HI9sllZMWw5zsW3NM,9511
19
+ learning3d/examples/train_deepgmr.py,sha256=vxdkgfQZPtwuYryR0chegTiLuXOQag8r_ccGJ6qtw7o,9397
20
+ learning3d/examples/train_flownet.py,sha256=V3uG7EaqsQO0BtmAFFN_aHb5bsyYoLv3JKb1_XhYKNw,10369
21
+ learning3d/examples/train_masknet.py,sha256=XzgWsmVAm5Lk21mH9qhvNN0um4pI1fYVfsBAV4deSOM,8889
22
+ learning3d/examples/train_pcn.py,sha256=X7MSYVXwgIMExplua1M9pG20eNhZ_0p83yTADSYrAlA,7542
23
+ learning3d/examples/train_pcrnet.py,sha256=KQ8MiDUiR46qS9t7tc5POJ3NjMyZFBEPOVQY-7Vszpk,8198
24
+ learning3d/examples/train_pointconv.py,sha256=noGT2yGWHAuecObz1X9cEiWl0xjh7NhmRneP88jR8uI,8939
25
+ learning3d/examples/train_pointnet.py,sha256=SXheDRP_GHZQQw4BEYS7bfL481D8GcbTekON-GVwOsk,8840
26
+ learning3d/examples/train_prnet.py,sha256=2zvd-3cYzZP8L92XJmFL5rTzxpApUhetiEc4u4V0X5g,8373
27
+ learning3d/examples/train_rpmnet.py,sha256=PEdFgPXyeME0axvLEd--VbpbqWV6P5i6NnjQnJ_X3Oo,8530
28
+ learning3d/losses/__init__.py,sha256=zjjZeA_NvAhZlxiYBbtgjEsvMyLFhFWXlZioitrlGWw,425
29
+ learning3d/losses/chamfer_distance.py,sha256=UTZ6x5cGwL3L5hJZOWoC35gTzcKh1S0yCg8vGuGXU1w,2121
30
+ learning3d/losses/classification.py,sha256=QgDHC5VgSga6BiuD4Ee70t6vvchlE97BY0KExevhdgk,374
31
+ learning3d/losses/correspondence_loss.py,sha256=Tcq2o5eLY7j50pibAuH0vBcUTjwZ-wHNzGZD4H6mAe0,583
32
+ learning3d/losses/emd.py,sha256=DqP77dN6lPkpGGgBz10oO6YNYxt889weYbVYj6bZFUM,389
33
+ learning3d/losses/frobenius_norm.py,sha256=IuKr0DT9aPBlc5fjIy6lJ082yOh9F8xiNoXF6FvWZtY,682
34
+ learning3d/losses/rmse_features.py,sha256=_KMqIWqH9-lH2P6YSeGfSOIbP7plUAwWWBh2Cu7cpXA,453
35
+ learning3d/losses/cuda/chamfer_distance/__init__.py,sha256=5nz6Ui4PVKru4HLGovFue3M-eumQlovMYP6Qfk3O6SE,46
36
+ learning3d/losses/cuda/chamfer_distance/chamfer_distance.cpp,sha256=rcknAz6oYPhlTVjGnAga2c4bMz2rz_UU9ch7zrcmWoc,6260
37
+ learning3d/losses/cuda/chamfer_distance/chamfer_distance.cu,sha256=7-UQ8QVcSQ1tbN7xwDNQQfpbG-S5b7KuSv0nCBYlhI8,5070
38
+ learning3d/losses/cuda/chamfer_distance/chamfer_distance.py,sha256=vzF4GdkgdD5Rr3F6pk1u1kF1LpiFKpLTZcd-H1IVu3s,1926
39
+ learning3d/losses/cuda/emd_torch/setup.py,sha256=AUvCT6EI2fOcCbyzrGscvCKu5gLhjofmZ-PfNBY2dNw,574
40
+ learning3d/losses/cuda/emd_torch/pkg/emd_loss_layer.py,sha256=FMfOoT8VF05Ns6_BaF6tquT5B9T7man0WtKJ9lPRfk8,1116
41
+ learning3d/losses/cuda/emd_torch/pkg/include/cuda_helper.h,sha256=8uVFZEr7YWVMPqHqfu_5lLUHCDjgwE0bJc-MUhmbLRk,469
42
+ learning3d/losses/cuda/emd_torch/pkg/include/emd.h,sha256=5TF1_V_OFywM8ftegQGw0YpdgPAzg6UolJ4OOFG5rVM,1215
43
+ learning3d/losses/cuda/emd_torch/pkg/include/cuda/emd.cuh,sha256=XliqHIePtL73Kdh0eDlRLalNmP3KqzwAEcgWysAOnNs,9826
44
+ learning3d/losses/cuda/emd_torch/pkg/layer/__init__.py,sha256=aDpfP0iZyg3Uu-ppa33u2imCsmlvKSf5S8QhLnGfwyI,35
45
+ learning3d/losses/cuda/emd_torch/pkg/layer/emd_loss_layer.py,sha256=yCEEfafLZ1Ia_BCrE7mcnDRDaaEj6je3Rj8cnQ_Zrts,1019
46
+ learning3d/losses/cuda/emd_torch/pkg/src/emd.cpp,sha256=lQ4q2XO5K2CldYmnRJnGhKTR9hVRFTwO305bHT3Cauo,17
47
+ learning3d/losses/cuda/emd_torch/pkg/src/cuda/emd.cu,sha256=DJXALRWyiVDzaKWsD2tQnEXrIT9GpIldkvIJ9fZMGbI,1462
48
+ learning3d/models/__init__.py,sha256=r5upYkq5M4pu7rxuFxFI5AshVGQI-gMMsQRm_iF3TuE,642
49
+ learning3d/models/classifier.py,sha256=_LUNXbLrpKNXmCkO2R1mz64dbwfrze7f_4SYT1Z6SYo,1205
50
+ learning3d/models/dcp.py,sha256=LZFgtk9f9f9s3QvX65nFXGgC33yGIZuy4XjviwH8OGE,3377
51
+ learning3d/models/deepgmr.py,sha256=vIxOQrZjvOCHLElJCjZ8EcZ-vm0-v71IKsPGuSF-elE,5298
52
+ learning3d/models/dgcnn.py,sha256=Bt-dP2NwpOy4AcWrspXfVV1EKL-ZQElYUp2uaWNvE_Q,3057
53
+ learning3d/models/flownet3d.py,sha256=2cPqzwXyw5uBNWIpHLTwRf0exSHYcW2Lyd94wOHgXZ0,17667
54
+ learning3d/models/masknet.py,sha256=ElMF3b-JgYmgwSEf1taGQvhA7Xy7_MiHEofzc03VCd8,2705
55
+ learning3d/models/masknet2.py,sha256=6lgukurfzUOY-6xdCpMljOYFtvADLSczAXJzRC3Jkh4,9063
56
+ learning3d/models/pcn.py,sha256=FvpjLR6t3kFQ1I4Fhpbsaj_P8Ml6S912x36JAZ1dUKs,5346
57
+ learning3d/models/pcrnet.py,sha256=6C6iM3XkDNdgihtPIdy09RgFD2KKDCnDzLvFfp6X-Tg,2755
58
+ learning3d/models/pointconv.py,sha256=lJ3_3uslE29lO3roZiE5vxr5971AWV5ExeVTzbEl858,5151
59
+ learning3d/models/pointnet.py,sha256=qgSWLJ4N5Y7ObAwKiJH29Pcl67jm3sfqbXqi3tQbUQg,3238
60
+ learning3d/models/pointnetlk.py,sha256=Zl66LjDX1vLdZRgCdY2oQJnpWpqPEx6BH8GbcVCsw68,5805
61
+ learning3d/models/pooling.py,sha256=vOzJMambeG7kf1pnox5p5FE2CVH2iMDGU_DgWRw15WQ,412
62
+ learning3d/models/ppfnet.py,sha256=aBzWvtNHFo-eu1kWoZmPI9xJOFHyxYHjdapb6cN2Aso,2894
63
+ learning3d/models/prnet.py,sha256=KSTTMEUgEvE2PX59Mqas48LQUYyh8O6PwOHvP8zEYHU,18499
64
+ learning3d/models/rpmnet.py,sha256=eMVqJ6BalY96TSB8VFXjCJIA15J0XAB3BEpMB-6CMdM,11517
65
+ learning3d/models/segmentation.py,sha256=CjlINj5M0Y6C-CejrieIu9ZkuwEoCFNjq_hr5SX9umU,1166
66
+ learning3d/ops/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
67
+ learning3d/ops/data_utils.py,sha256=qW8FOQWgcHjuuiSaAayJ3nKJnDW_GDv3ujE-uFvWsPE,1764
68
+ learning3d/ops/invmat.py,sha256=m1Mm2mQNn4KgQv54w-ek0xLkZuKnfDlF2ooylXeXvAw,4154
69
+ learning3d/ops/quaternion.py,sha256=D00IL6VHitYy4CINFAilD0t0kyBjc_JHfKuMrJGJ1Cw,6793
70
+ learning3d/ops/se3.py,sha256=x6oLbQzLOXiM0xDJsVUCUE1liZ_TaJzkkHQvIyjqCqI,3957
71
+ learning3d/ops/sinc.py,sha256=A_Ffu07RXGx3daZn4zOGdnW10_l06cmhFdAiU4NKhcw,5228
72
+ learning3d/ops/so3.py,sha256=b0tX5nHyF2Qtp8V0ejGKaPaHJQ_G38ifQ7gSJzRU1ts,5166
73
+ learning3d/ops/transform_functions.py,sha256=hvNjZO-uJodsGYtQwtAtDxtQ6uBpA7Lv9t-_yAg6wxo,12806
74
+ learning3d/utils/__init__.py,sha256=iuYyToRcZ9YZMNn-ngJMjzU8p4FkaO2m8YyXSE14hN4,442
75
+ learning3d/utils/pointconv_util.py,sha256=kJxGztai7X15YsGuorMOc50SPtj_k1yfkP4XCTzIWdM,14331
76
+ learning3d/utils/ppfnet_util.py,sha256=HEoxkgUBlawKZLWspfQm3caWUyAMIrW-ECtStNYbe2Y,7989
77
+ learning3d/utils/svd.py,sha256=yCYQt2SKqeIzCBnBEr_8xFR79m4fIoNVFnp77epn1dM,1936
78
+ learning3d/utils/transformer.py,sha256=UDgJvnh7ekWyijaAn-a3ckeFeMxlK_chXzWlhAGDiPM,8974
79
+ learning3d/utils/lib/pointnet2_modules.py,sha256=Tqiz32BT-fc2gHmv87xbbNUctE2mEpYpnJ3jfVmqw0w,6339
80
+ learning3d/utils/lib/pointnet2_utils.py,sha256=TQOVhi22raBffgtKWsVf-reoEqPkN-zvv6GOh2TqHxY,10637
81
+ learning3d/utils/lib/pytorch_utils.py,sha256=-kJfrswEu0gnnrYM1yIrj6HFW2YVyK2P9To4EyYAqNw,6173
82
+ learning3d/utils/lib/setup.py,sha256=AF3MiatsAYYEIl26Vt5JlsmZEI19lV86k6b4fw1uxgU,679
83
+ learning3d/utils/lib/build/lib.linux-x86_64-3.5/pointnet2_cuda.cpython-35m-x86_64-linux-gnu.so,sha256=HjZKllvXRMLn1ONxFPA-RF2athcY06clU-D91CLS4Dw,7222568
84
+ learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/ball_query.o,sha256=_jT_UVj61Yi2eDgpn1gn595TTqbQ8hAERAazE9kcuwA,2730016
85
+ learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/ball_query_gpu.o,sha256=GFosEyxTG6gPBUZJ7niNojLxJvt12v2cnO0lqtnO3mI,13912
86
+ learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/group_points.o,sha256=kNMDj14OwT70OtmCf63mdtnf58tXpnKXe95DOHu63Ow,2490896
87
+ learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/group_points_gpu.o,sha256=CCX8QD24Kp4hJqUFG34e88Co5poWc6XvkARQYpBgs88,17048
88
+ learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/interpolate.o,sha256=xyOxGf0zrgX8k6Muvn4WgGuZWk0N1-E7r4Fnjpao-sg,2503232
89
+ learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/interpolate_gpu.o,sha256=4kAHyU-Ho3_Sr77NWJINzVndHu8PdirIUqm0tQ7yVY4,32712
90
+ learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/pointnet2_api.o,sha256=L8v-YcL3J53kKxFw50frNlsKl1n4HowU5krlscSZ-5E,7242784
91
+ learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/sampling.o,sha256=lOw0IlJMXjCbTw9LONz7_TEzjWl484WFzilyzW742Io,2495440
92
+ learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/sampling_gpu.o,sha256=ULYz2ppU_n0KU8ka0Q4FE9wdPUuZDYcmvxRWN8UgUnU,95544
93
+ learning3d/utils/lib/dist/pointnet2-0.0.0-py3.5-linux-x86_64.egg,sha256=zwF5_pf6Id5lVow5MGkdug_upjMpbEwW7hUTIHEub0I,2241340
94
+ learning3d/utils/lib/pointnet2.egg-info/SOURCES.txt,sha256=XtaY4SQXYOT8Hhdjw4iworVxoDjoWdmdBe5fpkwK_-c,328
95
+ learning3d/utils/lib/pointnet2.egg-info/dependency_links.txt,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
96
+ learning3d/utils/lib/pointnet2.egg-info/top_level.txt,sha256=XVeKz4zj8Ixbw0jjiXuOqdON71dDXExnItEZVUaOdVE,15
97
+ learning3d/utils/lib/src/ball_query.cpp,sha256=Ysx8OAn4G-dSrlPgbZS2WCDfxcV54HQTZu5FbL2QlKs,933
98
+ learning3d/utils/lib/src/ball_query_gpu.cu,sha256=vj-Y6gUh2oVLLYTuz8JE8tcHxGrJBTC4pTRsNcWNa1o,2050
99
+ learning3d/utils/lib/src/ball_query_gpu.h,sha256=47MXh8U46wcpvQ7wcC05QyAsE1N3GB-TExsT0hoApxE,476
100
+ learning3d/utils/lib/src/cuda_utils.h,sha256=es4AGc6gYXileyddOxCGsmzV0Mg9ZG6LiucMUw6ee8o,353
101
+ learning3d/utils/lib/src/group_points.cpp,sha256=koGBIklYmbwHDpCrHPURJl_f1AtsqpNB2DbgRRb4zuc,1171
102
+ learning3d/utils/lib/src/group_points_gpu.cu,sha256=OIa_C0egafVIvJTcfu7UUI-E4ylxktcSG-fUjJQbNow,3307
103
+ learning3d/utils/lib/src/group_points_gpu.h,sha256=lNX0VSo6xFCfWQh-ZPUIBobu2kfmFz8PF8l2zZe1NyQ,836
104
+ learning3d/utils/lib/src/interpolate.cpp,sha256=VKMC4dveIgdfTiqpjrhe5CMqW097Hgmza5PkZVKVUfc,2521
105
+ learning3d/utils/lib/src/interpolate_gpu.cu,sha256=yOHn2D5MFonqrfLD7K7-DFozXqSDqdetdaFHJzsPMKs,7470
106
+ learning3d/utils/lib/src/interpolate_gpu.h,sha256=SbxS1oY6Qrp3f-r_kDtjq5_EzRpdIZpJIGVfmhhmnsI,1477
107
+ learning3d/utils/lib/src/pointnet2_api.cpp,sha256=LRvlZoDLpaZHg2Tvb2ad5j8qVNCZ7TX9XEsNfdlSpT8,1213
108
+ learning3d/utils/lib/src/sampling.cpp,sha256=kH75rE1jxri4v8wIksKAjaUbJlXB3FLEXYQTmb0kmII,1549
109
+ learning3d/utils/lib/src/sampling_gpu.cu,sha256=V9dTFaYksQ-jNnoUvQgRNuFW9MmaBdZpbmXQbnvShJc,7934
110
+ learning3d/utils/lib/src/sampling_gpu.h,sha256=STr6hTB9A8D0skTTR6TiU79j1eSc-FRqik_j0PPWDmM,1045
111
+ learning3d-0.0.1.dist-info/LICENSE,sha256=3qY3_NeQIvalbLlsHFtOfuUKjs_U2k6u7rf6YVx6ac0,1098
112
+ learning3d-0.0.1.dist-info/METADATA,sha256=S7VpnwJuti1vz84GQLua3OME7NEVDczBTWrAQwXStEg,15835
113
+ learning3d-0.0.1.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
114
+ learning3d-0.0.1.dist-info/top_level.txt,sha256=nTmYW8NhbNV1_15DGNpl_OvvSFtQP98sy3qrrHr0eLo,11
115
+ learning3d-0.0.1.dist-info/RECORD,,
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.43.0)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+