learning3d 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- learning3d/__init__.py +2 -0
- learning3d/data_utils/__init__.py +4 -0
- learning3d/data_utils/dataloaders.py +454 -0
- learning3d/data_utils/user_data.py +119 -0
- learning3d/examples/test_dcp.py +139 -0
- learning3d/examples/test_deepgmr.py +144 -0
- learning3d/examples/test_flownet.py +113 -0
- learning3d/examples/test_masknet.py +159 -0
- learning3d/examples/test_masknet2.py +162 -0
- learning3d/examples/test_pcn.py +118 -0
- learning3d/examples/test_pcrnet.py +120 -0
- learning3d/examples/test_pnlk.py +121 -0
- learning3d/examples/test_pointconv.py +126 -0
- learning3d/examples/test_pointnet.py +121 -0
- learning3d/examples/test_prnet.py +126 -0
- learning3d/examples/test_rpmnet.py +120 -0
- learning3d/examples/train_PointNetLK.py +240 -0
- learning3d/examples/train_dcp.py +249 -0
- learning3d/examples/train_deepgmr.py +244 -0
- learning3d/examples/train_flownet.py +259 -0
- learning3d/examples/train_masknet.py +239 -0
- learning3d/examples/train_pcn.py +216 -0
- learning3d/examples/train_pcrnet.py +228 -0
- learning3d/examples/train_pointconv.py +245 -0
- learning3d/examples/train_pointnet.py +244 -0
- learning3d/examples/train_prnet.py +229 -0
- learning3d/examples/train_rpmnet.py +228 -0
- learning3d/losses/__init__.py +12 -0
- learning3d/losses/chamfer_distance.py +51 -0
- learning3d/losses/classification.py +14 -0
- learning3d/losses/correspondence_loss.py +10 -0
- learning3d/losses/cuda/chamfer_distance/__init__.py +1 -0
- learning3d/losses/cuda/chamfer_distance/chamfer_distance.cpp +185 -0
- learning3d/losses/cuda/chamfer_distance/chamfer_distance.cu +209 -0
- learning3d/losses/cuda/chamfer_distance/chamfer_distance.py +66 -0
- learning3d/losses/cuda/emd_torch/pkg/emd_loss_layer.py +41 -0
- learning3d/losses/cuda/emd_torch/pkg/include/cuda/emd.cuh +347 -0
- learning3d/losses/cuda/emd_torch/pkg/include/cuda_helper.h +18 -0
- learning3d/losses/cuda/emd_torch/pkg/include/emd.h +54 -0
- learning3d/losses/cuda/emd_torch/pkg/layer/__init__.py +1 -0
- learning3d/losses/cuda/emd_torch/pkg/layer/emd_loss_layer.py +40 -0
- learning3d/losses/cuda/emd_torch/pkg/src/cuda/emd.cu +70 -0
- learning3d/losses/cuda/emd_torch/pkg/src/emd.cpp +1 -0
- learning3d/losses/cuda/emd_torch/setup.py +29 -0
- learning3d/losses/emd.py +16 -0
- learning3d/losses/frobenius_norm.py +21 -0
- learning3d/losses/rmse_features.py +16 -0
- learning3d/models/__init__.py +23 -0
- learning3d/models/classifier.py +41 -0
- learning3d/models/dcp.py +92 -0
- learning3d/models/deepgmr.py +165 -0
- learning3d/models/dgcnn.py +92 -0
- learning3d/models/flownet3d.py +446 -0
- learning3d/models/masknet.py +84 -0
- learning3d/models/masknet2.py +264 -0
- learning3d/models/pcn.py +164 -0
- learning3d/models/pcrnet.py +74 -0
- learning3d/models/pointconv.py +108 -0
- learning3d/models/pointnet.py +108 -0
- learning3d/models/pointnetlk.py +173 -0
- learning3d/models/pooling.py +15 -0
- learning3d/models/ppfnet.py +102 -0
- learning3d/models/prnet.py +431 -0
- learning3d/models/rpmnet.py +359 -0
- learning3d/models/segmentation.py +38 -0
- learning3d/ops/__init__.py +0 -0
- learning3d/ops/data_utils.py +45 -0
- learning3d/ops/invmat.py +134 -0
- learning3d/ops/quaternion.py +218 -0
- learning3d/ops/se3.py +157 -0
- learning3d/ops/sinc.py +229 -0
- learning3d/ops/so3.py +213 -0
- learning3d/ops/transform_functions.py +342 -0
- learning3d/utils/__init__.py +9 -0
- learning3d/utils/lib/build/lib.linux-x86_64-3.5/pointnet2_cuda.cpython-35m-x86_64-linux-gnu.so +0 -0
- learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/ball_query.o +0 -0
- learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/ball_query_gpu.o +0 -0
- learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/group_points.o +0 -0
- learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/group_points_gpu.o +0 -0
- learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/interpolate.o +0 -0
- learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/interpolate_gpu.o +0 -0
- learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/pointnet2_api.o +0 -0
- learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/sampling.o +0 -0
- learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/sampling_gpu.o +0 -0
- learning3d/utils/lib/dist/pointnet2-0.0.0-py3.5-linux-x86_64.egg +0 -0
- learning3d/utils/lib/pointnet2.egg-info/SOURCES.txt +14 -0
- learning3d/utils/lib/pointnet2.egg-info/dependency_links.txt +1 -0
- learning3d/utils/lib/pointnet2.egg-info/top_level.txt +1 -0
- learning3d/utils/lib/pointnet2_modules.py +160 -0
- learning3d/utils/lib/pointnet2_utils.py +318 -0
- learning3d/utils/lib/pytorch_utils.py +236 -0
- learning3d/utils/lib/setup.py +23 -0
- learning3d/utils/lib/src/ball_query.cpp +25 -0
- learning3d/utils/lib/src/ball_query_gpu.cu +67 -0
- learning3d/utils/lib/src/ball_query_gpu.h +15 -0
- learning3d/utils/lib/src/cuda_utils.h +15 -0
- learning3d/utils/lib/src/group_points.cpp +36 -0
- learning3d/utils/lib/src/group_points_gpu.cu +86 -0
- learning3d/utils/lib/src/group_points_gpu.h +22 -0
- learning3d/utils/lib/src/interpolate.cpp +65 -0
- learning3d/utils/lib/src/interpolate_gpu.cu +233 -0
- learning3d/utils/lib/src/interpolate_gpu.h +36 -0
- learning3d/utils/lib/src/pointnet2_api.cpp +25 -0
- learning3d/utils/lib/src/sampling.cpp +46 -0
- learning3d/utils/lib/src/sampling_gpu.cu +253 -0
- learning3d/utils/lib/src/sampling_gpu.h +29 -0
- learning3d/utils/pointconv_util.py +382 -0
- learning3d/utils/ppfnet_util.py +244 -0
- learning3d/utils/svd.py +59 -0
- learning3d/utils/transformer.py +243 -0
- learning3d-0.0.1.dist-info/LICENSE +21 -0
- learning3d-0.0.1.dist-info/METADATA +271 -0
- learning3d-0.0.1.dist-info/RECORD +115 -0
- learning3d-0.0.1.dist-info/WHEEL +5 -0
- learning3d-0.0.1.dist-info/top_level.txt +1 -0
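For orientation, a minimal sketch of how a consumer might import from this layout (editorial; the re-exported class names and constructor arguments are assumptions inferred from the file names, not confirmed by this listing):

    # Hypothetical usage of the packaged modules; PointNet/Classifier exports are assumed.
    from learning3d.models import PointNet, Classifier
    from learning3d.ops import so3, se3, transform_functions

    encoder = PointNet()                        # point-cloud feature extractor (assumed API)
    model = Classifier(feature_model=encoder)   # classification head on top (assumed API)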
learning3d/ops/so3.py
ADDED
@@ -0,0 +1,213 @@
""" 3-d rotation group and corresponding Lie algebra """
import torch
from . import sinc
from .sinc import sinc1, sinc2, sinc3


def cross_prod(x, y):
    z = torch.cross(x.view(-1, 3), y.view(-1, 3), dim=1).view_as(x)
    return z

def liebracket(x, y):
    return cross_prod(x, y)

def mat(x):
    # size: [*, 3] -> [*, 3, 3]
    x_ = x.view(-1, 3)
    x1, x2, x3 = x_[:, 0], x_[:, 1], x_[:, 2]
    O = torch.zeros_like(x1)

    X = torch.stack((
        torch.stack((O, -x3, x2), dim=1),
        torch.stack((x3, O, -x1), dim=1),
        torch.stack((-x2, x1, O), dim=1)), dim=1)
    return X.view(*(x.size()[0:-1]), 3, 3)

def vec(X):
    X_ = X.view(-1, 3, 3)
    x1, x2, x3 = X_[:, 2, 1], X_[:, 0, 2], X_[:, 1, 0]
    x = torch.stack((x1, x2, x3), dim=1)
    return x.view(*X.size()[0:-2], 3)

def genvec():
    return torch.eye(3)

def genmat():
    return mat(genvec())

def RodriguesRotation(x):
    # for autograd
    w = x.view(-1, 3)
    t = w.norm(p=2, dim=1).view(-1, 1, 1)
    W = mat(w)
    S = W.bmm(W)
    I = torch.eye(3).to(w)

    # Rodrigues' rotation formula.
    #R = cos(t)*eye(3) + sinc1(t)*W + sinc2(t)*(w*w');
    #R = eye(3) + sinc1(t)*W + sinc2(t)*S

    R = I + sinc.Sinc1(t)*W + sinc.Sinc2(t)*S

    return R.view(*(x.size()[0:-1]), 3, 3)

def exp(x):
    w = x.view(-1, 3)
    t = w.norm(p=2, dim=1).view(-1, 1, 1)
    W = mat(w)
    S = W.bmm(W)
    I = torch.eye(3).to(w)

    # Rodrigues' rotation formula.
    #R = cos(t)*eye(3) + sinc1(t)*W + sinc2(t)*(w*w');
    #R = eye(3) + sinc1(t)*W + sinc2(t)*S

    R = I + sinc1(t)*W + sinc2(t)*S

    return R.view(*(x.size()[0:-1]), 3, 3)

def inverse(g):
    R = g.view(-1, 3, 3)
    Rt = R.transpose(1, 2)
    return Rt.view_as(g)

def btrace(X):
    # batch-trace: [B, N, N] -> [B]
    n = X.size(-1)
    X_ = X.view(-1, n, n)
    tr = torch.zeros(X_.size(0)).to(X)
    for i in range(tr.size(0)):
        m = X_[i, :, :]
        tr[i] = torch.trace(m)
    return tr.view(*(X.size()[0:-2]))

def log(g):
    eps = 1.0e-7
    R = g.view(-1, 3, 3)
    tr = btrace(R)
    c = (tr - 1) / 2
    t = torch.acos(c)
    sc = sinc1(t)
    idx0 = (torch.abs(sc) <= eps)
    idx1 = (torch.abs(sc) > eps)
    sc = sc.view(-1, 1, 1)

    X = torch.zeros_like(R)
    if idx1.any():
        X[idx1] = (R[idx1] - R[idx1].transpose(1, 2)) / (2*sc[idx1])

    if idx0.any():
        # t[idx0] == math.pi
        t2 = t[idx0] ** 2
        A = (R[idx0] + torch.eye(3).type_as(R).unsqueeze(0)) * t2.view(-1, 1, 1) / 2
        aw1 = torch.sqrt(A[:, 0, 0])
        aw2 = torch.sqrt(A[:, 1, 1])
        aw3 = torch.sqrt(A[:, 2, 2])
        sgn_3 = torch.sign(A[:, 0, 2])
        sgn_3[sgn_3 == 0] = 1
        sgn_23 = torch.sign(A[:, 1, 2])
        sgn_23[sgn_23 == 0] = 1
        sgn_2 = sgn_23 * sgn_3
        w1 = aw1
        w2 = aw2 * sgn_2
        w3 = aw3 * sgn_3
        w = torch.stack((w1, w2, w3), dim=-1)
        W = mat(w)
        X[idx0] = W

    x = vec(X.view_as(g))
    return x

def transform(g, a):
    # g in SO(3):  * x 3 x 3
    # a in R^3:    * x 3[x N]
    if len(g.size()) == len(a.size()):
        b = g.matmul(a)
    else:
        b = g.matmul(a.unsqueeze(-1)).squeeze(-1)
    return b

def group_prod(g, h):
    # g, h : SO(3)
    g1 = g.matmul(h)
    return g1


def vecs_Xg_ig(x):
    """ Vi = vec(dg/dxi * inv(g)), where g = exp(x)
        (== [Ad(exp(x))] * vecs_ig_Xg(x))
    """
    t = x.view(-1, 3).norm(p=2, dim=1).view(-1, 1, 1)
    X = mat(x)
    S = X.bmm(X)
    #B = x.view(-1,3,1).bmm(x.view(-1,1,3))  # B = x*x'
    I = torch.eye(3).to(X)

    #V = sinc1(t)*eye(3) + sinc2(t)*X + sinc3(t)*B
    #V = eye(3) + sinc2(t)*X + sinc3(t)*S

    V = I + sinc2(t)*X + sinc3(t)*S

    return V.view(*(x.size()[0:-1]), 3, 3)

def inv_vecs_Xg_ig(x):
    """ H = inv(vecs_Xg_ig(x)) """
    t = x.view(-1, 3).norm(p=2, dim=1).view(-1, 1, 1)
    X = mat(x)
    S = X.bmm(X)
    I = torch.eye(3).to(x)

    e = 0.01
    eta = torch.zeros_like(t)
    s = (t < e)
    c = (s == 0)
    t2 = t[s] ** 2
    eta[s] = ((t2/40 + 1)*t2/42 + 1)*t2/720 + 1/12  # O(t**8)
    eta[c] = (1 - (t[c]/2) / torch.tan(t[c]/2)) / (t[c]**2)

    H = I - 1/2*X + eta*S
    return H.view(*(x.size()[0:-1]), 3, 3)


class ExpMap(torch.autograd.Function):
    """ Exp: so(3) -> SO(3)
    """
    @staticmethod
    def forward(ctx, x):
        """ Exp: R^3 -> M(3),
            size: [B, 3] -> [B, 3, 3],
              or  [B, 1, 3] -> [B, 1, 3, 3]
        """
        ctx.save_for_backward(x)
        g = exp(x)
        return g

    @staticmethod
    def backward(ctx, grad_output):
        x, = ctx.saved_tensors
        g = exp(x)
        gen_k = genmat().to(x)
        #gen_1 = gen_k[0, :, :]
        #gen_2 = gen_k[1, :, :]
        #gen_3 = gen_k[2, :, :]

        # Let z = f(g) = f(exp(x))
        # dz = df/dgij * dgij/dxk * dxk
        #    = df/dgij * (d/dxk)[exp(x)]_ij * dxk
        #    = df/dgij * [gen_k*g]_ij * dxk

        dg = gen_k.matmul(g.view(-1, 1, 3, 3))
        # (k, i, j)
        dg = dg.to(grad_output)

        go = grad_output.contiguous().view(-1, 1, 3, 3)
        dd = go * dg
        grad_input = dd.sum(-1).sum(-1)

        return grad_input

Exp = ExpMap.apply


#EOF
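As a quick illustration of how these helpers compose (an editorial sketch, not part of the released file): exp maps a batch of axis-angle vectors onto rotation matrices, log approximately inverts it away from the pi-rotation branch, and Exp is the autograd-aware variant built on ExpMap.

    import torch
    from learning3d.ops import so3

    w = 0.1 * torch.randn(4, 3)          # batch of so(3) axis-angle vectors
    R = so3.exp(w)                       # [4, 3, 3] rotation matrices
    w_back = so3.log(R)                  # recovers w for small rotations
    print(torch.allclose(w, w_back, atol=1e-4))

    w.requires_grad_(True)
    so3.Exp(w).sum().backward()          # gradients flow through ExpMap.backward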
learning3d/ops/transform_functions.py
ADDED
@@ -0,0 +1,342 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from . import quaternion    # works with (w, x, y, z) quaternions
from scipy.spatial.transform import Rotation
from . import se3


def quat2mat(quat):
    x, y, z, w = quat[:, 0], quat[:, 1], quat[:, 2], quat[:, 3]

    B = quat.size(0)

    w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
    wx, wy, wz = w*x, w*y, w*z
    xy, xz, yz = x*y, x*z, y*z

    rotMat = torch.stack([w2 + x2 - y2 - z2, 2*xy - 2*wz, 2*wy + 2*xz,
                          2*wz + 2*xy, w2 - x2 + y2 - z2, 2*yz - 2*wx,
                          2*xz - 2*wy, 2*wx + 2*yz, w2 - x2 - y2 + z2], dim=1).reshape(B, 3, 3)
    return rotMat

def transform_point_cloud(point_cloud: torch.Tensor, rotation: torch.Tensor, translation: torch.Tensor):
    if len(rotation.size()) == 2:
        rot_mat = quat2mat(rotation)
    else:
        rot_mat = rotation
    return (torch.matmul(rot_mat, point_cloud.permute(0, 2, 1)) + translation.unsqueeze(2)).permute(0, 2, 1)

def convert2transformation(rotation_matrix: torch.Tensor, translation_vector: torch.Tensor):
    one_ = torch.tensor([[[0.0, 0.0, 0.0, 1.0]]]).repeat(rotation_matrix.shape[0], 1, 1).to(rotation_matrix)  # (Bx1x4)
    transformation_matrix = torch.cat([rotation_matrix, translation_vector.unsqueeze(-1)], dim=2)             # (Bx3x4)
    transformation_matrix = torch.cat([transformation_matrix, one_], dim=1)                                   # (Bx4x4)
    return transformation_matrix

def qmul(q, r):
    """
    Multiply quaternion(s) q with quaternion(s) r.
    Expects two equally-sized tensors of shape (*, 4), where * denotes any number of dimensions.
    Returns q*r as a tensor of shape (*, 4).
    """
    assert q.shape[-1] == 4
    assert r.shape[-1] == 4

    original_shape = q.shape

    # Compute outer product
    terms = torch.bmm(r.view(-1, 4, 1), q.view(-1, 1, 4))

    w = terms[:, 0, 0] - terms[:, 1, 1] - terms[:, 2, 2] - terms[:, 3, 3]
    x = terms[:, 0, 1] + terms[:, 1, 0] - terms[:, 2, 3] + terms[:, 3, 2]
    y = terms[:, 0, 2] + terms[:, 1, 3] + terms[:, 2, 0] - terms[:, 3, 1]
    z = terms[:, 0, 3] - terms[:, 1, 2] + terms[:, 2, 1] + terms[:, 3, 0]
    return torch.stack((w, x, y, z), dim=1).view(original_shape)

def qmul_np(q, r):
    q = torch.from_numpy(q).contiguous()
    r = torch.from_numpy(r).contiguous()
    return qmul(q, r).numpy()

def euler_to_quaternion(e, order):
    """
    Convert Euler angles to quaternions.
    """
    assert e.shape[-1] == 3

    original_shape = list(e.shape)
    original_shape[-1] = 4

    e = e.reshape(-1, 3)

    x = e[:, 0]
    y = e[:, 1]
    z = e[:, 2]

    rx = np.stack(
        (np.cos(x / 2), np.sin(x / 2), np.zeros_like(x), np.zeros_like(x)), axis=1
    )
    ry = np.stack(
        (np.cos(y / 2), np.zeros_like(y), np.sin(y / 2), np.zeros_like(y)), axis=1
    )
    rz = np.stack(
        (np.cos(z / 2), np.zeros_like(z), np.zeros_like(z), np.sin(z / 2)), axis=1
    )

    result = None
    for coord in order:
        if coord == "x":
            r = rx
        elif coord == "y":
            r = ry
        elif coord == "z":
            r = rz
        else:
            raise
        if result is None:
            result = r
        else:
            result = qmul_np(result, r)

    # Reverse antipodal representation to have a non-negative "w"
    if order in ["xyz", "yzx", "zxy"]:
        result *= -1

    return result.reshape(original_shape)


class PNLKTransform:
    """ rigid motion """
    def __init__(self, mag=1, mag_randomly=False):
        self.mag = mag
        self.randomly = mag_randomly

        self.gt = None
        self.igt = None
        self.index = 0

    def generate_transform(self):
        # return: a twist-vector
        amp = self.mag
        if self.randomly:
            amp = torch.rand(1, 1) * self.mag
        x = torch.randn(1, 6)
        x = x / x.norm(p=2, dim=1, keepdim=True) * amp

        return x  # [1, 6]

    def apply_transform(self, p0, x):
        # p0: [N, 3]
        # x: [1, 6]
        g = se3.exp(x).to(p0)    # [1, 4, 4]
        gt = se3.exp(-x).to(p0)  # [1, 4, 4]

        p1 = se3.transform(g, p0)
        self.gt = gt.squeeze(0)  # gt: p1 -> p0
        self.igt = g.squeeze(0)  # igt: p0 -> p1
        return p1

    def transform(self, tensor):
        x = self.generate_transform()
        return self.apply_transform(tensor, x)

    def __call__(self, tensor):
        return self.transform(tensor)


class RPMNetTransform:
    """ rigid motion """
    def __init__(self, mag=1, mag_randomly=False):
        self.mag = mag
        self.randomly = mag_randomly

        self.gt = None
        self.igt = None
        self.index = 0

    def generate_transform(self):
        # return: a twist-vector
        amp = self.mag
        if self.randomly:
            amp = torch.rand(1, 1) * self.mag
        x = torch.randn(1, 6)
        x = x / x.norm(p=2, dim=1, keepdim=True) * amp

        return x  # [1, 6]

    def apply_transform(self, p0, x):
        # p0: [N, 3]
        # x: [1, 6]
        g = se3.exp(x).to(p0)    # [1, 4, 4]
        gt = se3.exp(-x).to(p0)  # [1, 4, 4]

        p1 = se3.transform(g, p0[:, :3])

        if p0.shape[1] == 6:  # Need to rotate normals also
            g_n = g.clone()
            g_n[:, :3, 3] = 0.0
            n1 = se3.transform(g_n, p0[:, 3:6])
            p1 = torch.cat([p1, n1], axis=-1)

        self.gt = gt.squeeze(0)  # gt: p1 -> p0
        self.igt = g.squeeze(0)  # igt: p0 -> p1
        return p1

    def transform(self, tensor):
        x = self.generate_transform()
        return self.apply_transform(tensor, x)

    def __call__(self, tensor):
        return self.transform(tensor)


class PCRNetTransform:
    def __init__(self, data_size, angle_range=45, translation_range=1):
        self.angle_range = angle_range
        self.translation_range = translation_range
        self.dtype = torch.float32
        self.transformations = [self.create_random_transform(torch.float32, self.angle_range, self.translation_range) for _ in range(data_size)]
        self.index = 0

    @staticmethod
    def deg_to_rad(deg):
        return np.pi / 180 * deg

    def create_random_transform(self, dtype, max_rotation_deg, max_translation):
        max_rotation = self.deg_to_rad(max_rotation_deg)
        rot = np.random.uniform(-max_rotation, max_rotation, [1, 3])
        trans = np.random.uniform(-max_translation, max_translation, [1, 3])
        quat = euler_to_quaternion(rot, "xyz")

        vec = np.concatenate([quat, trans], axis=1)
        vec = torch.tensor(vec, dtype=dtype)
        return vec

    @staticmethod
    def create_pose_7d(vector: torch.Tensor):
        # Normalize the quaternion.
        pre_normalized_quaternion = vector[:, 0:4]
        normalized_quaternion = F.normalize(pre_normalized_quaternion, dim=1)

        # B x 7 vector of 4 quaternions and 3 translation parameters
        translation = vector[:, 4:]
        vector = torch.cat([normalized_quaternion, translation], dim=1)
        return vector.view([-1, 7])

    @staticmethod
    def get_quaternion(pose_7d: torch.Tensor):
        return pose_7d[:, 0:4]

    @staticmethod
    def get_translation(pose_7d: torch.Tensor):
        return pose_7d[:, 4:]

    @staticmethod
    def quaternion_rotate(point_cloud: torch.Tensor, pose_7d: torch.Tensor):
        ndim = point_cloud.dim()
        if ndim == 2:
            N, _ = point_cloud.shape
            assert pose_7d.shape[0] == 1
            # repeat transformation vector for each point in shape
            quat = PCRNetTransform.get_quaternion(pose_7d).expand([N, -1])
            rotated_point_cloud = quaternion.qrot(quat, point_cloud)

        elif ndim == 3:
            B, N, _ = point_cloud.shape
            quat = PCRNetTransform.get_quaternion(pose_7d).unsqueeze(1).expand([-1, N, -1]).contiguous()
            rotated_point_cloud = quaternion.qrot(quat, point_cloud)

        return rotated_point_cloud

    @staticmethod
    def quaternion_transform(point_cloud: torch.Tensor, pose_7d: torch.Tensor):
        transformed_point_cloud = PCRNetTransform.quaternion_rotate(point_cloud, pose_7d) + PCRNetTransform.get_translation(pose_7d).view(-1, 1, 3).repeat(1, point_cloud.shape[1], 1)  # Ps' = R*Ps + t
        return transformed_point_cloud

    @staticmethod
    def convert2transformation(rotation_matrix: torch.Tensor, translation_vector: torch.Tensor):
        one_ = torch.tensor([[[0.0, 0.0, 0.0, 1.0]]]).repeat(rotation_matrix.shape[0], 1, 1).to(rotation_matrix)  # (Bx1x4)
        transformation_matrix = torch.cat([rotation_matrix, translation_vector[:, 0, :].unsqueeze(-1)], dim=2)    # (Bx3x4)
        transformation_matrix = torch.cat([transformation_matrix, one_], dim=1)                                   # (Bx4x4)
        return transformation_matrix

    def __call__(self, template):
        self.igt = self.transformations[self.index]
        gt = self.create_pose_7d(self.igt)
        source = self.quaternion_rotate(template, gt) + self.get_translation(gt)
        return source


class DCPTransform:
    def __init__(self, angle_range=45, translation_range=1):
        self.angle_range = angle_range*(np.pi/180)
        self.translation_range = translation_range
        self.index = 0

    def generate_transform(self):
        self.anglex = np.random.uniform() * self.angle_range
        self.angley = np.random.uniform() * self.angle_range
        self.anglez = np.random.uniform() * self.angle_range
        self.translation = np.array([np.random.uniform(-self.translation_range, self.translation_range),
                                     np.random.uniform(-self.translation_range, self.translation_range),
                                     np.random.uniform(-self.translation_range, self.translation_range)])
        # cosx = np.cos(self.anglex)
        # cosy = np.cos(self.angley)
        # cosz = np.cos(self.anglez)
        # sinx = np.sin(self.anglex)
        # siny = np.sin(self.angley)
        # sinz = np.sin(self.anglez)
        # Rx = np.array([[1, 0, 0],
        #                [0, cosx, -sinx],
        #                [0, sinx, cosx]])
        # Ry = np.array([[cosy, 0, siny],
        #                [0, 1, 0],
        #                [-siny, 0, cosy]])
        # Rz = np.array([[cosz, -sinz, 0],
        #                [sinz, cosz, 0],
        #                [0, 0, 1]])
        # self.R_ab = Rx.dot(Ry).dot(Rz)
        # last_row = np.array([[0., 0., 0., 1.]])
        # self.igt = np.concatenate([self.R_ab, self.translation_ab.reshape(-1,1)], axis=1)
        # self.igt = np.concatenate([self.igt, last_row], axis=0)

    def apply_transformation(self, template):
        rotation = Rotation.from_euler('zyx', [self.anglez, self.angley, self.anglex])
        self.igt = rotation.apply(np.eye(3))
        self.igt = np.concatenate([self.igt, self.translation.reshape(-1, 1)], axis=1)
        self.igt = torch.from_numpy(np.concatenate([self.igt, np.array([[0., 0., 0., 1.]])], axis=0)).float()
        source = rotation.apply(template) + np.expand_dims(self.translation, axis=0)
        return source

    def __call__(self, template):
        template = template.numpy()
        self.generate_transform()
        return torch.from_numpy(self.apply_transformation(template)).float()


class DeepGMRTransform:
    def __init__(self, angle_range=45, translation_range=1):
        self.angle_range = angle_range*(np.pi/180)
        self.translation_range = translation_range
        self.index = 0

    def generate_transform(self):
        self.anglex = np.random.uniform() * self.angle_range
        self.angley = np.random.uniform() * self.angle_range
        self.anglez = np.random.uniform() * self.angle_range
        self.translation = np.array([np.random.uniform(-self.translation_range, self.translation_range),
                                     np.random.uniform(-self.translation_range, self.translation_range),
                                     np.random.uniform(-self.translation_range, self.translation_range)])

    def apply_transformation(self, template):
        rotation = Rotation.from_euler('zyx', [self.anglez, self.angley, self.anglex])
        self.igt = rotation.apply(np.eye(3))
        self.igt = np.concatenate([self.igt, self.translation.reshape(-1, 1)], axis=1)
        self.igt = torch.from_numpy(np.concatenate([self.igt, np.array([[0., 0., 0., 1.]])], axis=0)).float()
        source = rotation.apply(template) + np.expand_dims(self.translation, axis=0)
        return source

    def __call__(self, template):
        template = template.numpy()
        self.generate_transform()
        return torch.from_numpy(self.apply_transformation(template)).float()
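To make the intended call pattern concrete, a minimal usage sketch (editorial, not taken from the package's own examples); shapes follow the in-code comments:

    import torch
    from learning3d.ops.transform_functions import DCPTransform, PCRNetTransform

    template = torch.rand(1024, 3)                             # [N, 3] point cloud

    dcp = DCPTransform(angle_range=45, translation_range=0.5)
    source = dcp(template)                                     # randomly rotated/translated copy
    igt = dcp.igt                                              # 4x4 ground-truth transform (template -> source)

    pcr = PCRNetTransform(data_size=1, angle_range=45, translation_range=0.5)
    source_pcr = pcr(template)                                 # applies the pre-sampled pose at pcr.index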
learning3d/utils/__init__.py
ADDED
@@ -0,0 +1,9 @@
from .svd import SVDHead
from .transformer import Transformer, Identity
from .ppfnet_util import angle_difference, square_distance, index_points, farthest_point_sample, query_ball_point, sample_and_group, sample_and_group_multi
from .pointconv_util import PointConvDensitySetAbstraction

try:
    from .lib import pointnet2_utils
except:
    print("Error raised in pointnet2 module in utils!\nEither don't use pointnet2_utils or retry it's setup.")
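Because the pointnet2_utils import sits in a bare try/except, a missing or un-built CUDA extension only prints a warning instead of raising at import time. A small sketch of how a caller could probe for it before relying on the CUDA kernels (the attribute check is an editorial assumption about this fallback, not documented package API):

    from learning3d import utils

    if hasattr(utils, "pointnet2_utils"):
        print("pointnet2 CUDA extension is available")
    else:
        print("pointnet2 CUDA extension failed to import; avoid the CUDA-backed utilities")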
learning3d/utils/lib/build/lib.linux-x86_64-3.5/pointnet2_cuda.cpython-35m-x86_64-linux-gnu.so
ADDED
Binary file
The remaining binary artifacts — the compiled .o object files under learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/ and the built egg under learning3d/utils/lib/dist/ — are likewise ADDED as binary files with no textual diff.
learning3d/utils/lib/pointnet2.egg-info/SOURCES.txt
ADDED
@@ -0,0 +1,14 @@
setup.py
pointnet2.egg-info/PKG-INFO
pointnet2.egg-info/SOURCES.txt
pointnet2.egg-info/dependency_links.txt
pointnet2.egg-info/top_level.txt
src/ball_query.cpp
src/ball_query_gpu.cu
src/group_points.cpp
src/group_points_gpu.cu
src/interpolate.cpp
src/interpolate_gpu.cu
src/pointnet2_api.cpp
src/sampling.cpp
src/sampling_gpu.cu
learning3d/utils/lib/pointnet2.egg-info/dependency_links.txt
ADDED
@@ -0,0 +1 @@
(a single blank line)
learning3d/utils/lib/pointnet2.egg-info/top_level.txt
ADDED
@@ -0,0 +1 @@
pointnet2_cuda