learning3d 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- learning3d/__init__.py +2 -0
- learning3d/data_utils/__init__.py +4 -0
- learning3d/data_utils/dataloaders.py +454 -0
- learning3d/data_utils/user_data.py +119 -0
- learning3d/examples/test_dcp.py +139 -0
- learning3d/examples/test_deepgmr.py +144 -0
- learning3d/examples/test_flownet.py +113 -0
- learning3d/examples/test_masknet.py +159 -0
- learning3d/examples/test_masknet2.py +162 -0
- learning3d/examples/test_pcn.py +118 -0
- learning3d/examples/test_pcrnet.py +120 -0
- learning3d/examples/test_pnlk.py +121 -0
- learning3d/examples/test_pointconv.py +126 -0
- learning3d/examples/test_pointnet.py +121 -0
- learning3d/examples/test_prnet.py +126 -0
- learning3d/examples/test_rpmnet.py +120 -0
- learning3d/examples/train_PointNetLK.py +240 -0
- learning3d/examples/train_dcp.py +249 -0
- learning3d/examples/train_deepgmr.py +244 -0
- learning3d/examples/train_flownet.py +259 -0
- learning3d/examples/train_masknet.py +239 -0
- learning3d/examples/train_pcn.py +216 -0
- learning3d/examples/train_pcrnet.py +228 -0
- learning3d/examples/train_pointconv.py +245 -0
- learning3d/examples/train_pointnet.py +244 -0
- learning3d/examples/train_prnet.py +229 -0
- learning3d/examples/train_rpmnet.py +228 -0
- learning3d/losses/__init__.py +12 -0
- learning3d/losses/chamfer_distance.py +51 -0
- learning3d/losses/classification.py +14 -0
- learning3d/losses/correspondence_loss.py +10 -0
- learning3d/losses/cuda/chamfer_distance/__init__.py +1 -0
- learning3d/losses/cuda/chamfer_distance/chamfer_distance.cpp +185 -0
- learning3d/losses/cuda/chamfer_distance/chamfer_distance.cu +209 -0
- learning3d/losses/cuda/chamfer_distance/chamfer_distance.py +66 -0
- learning3d/losses/cuda/emd_torch/pkg/emd_loss_layer.py +41 -0
- learning3d/losses/cuda/emd_torch/pkg/include/cuda/emd.cuh +347 -0
- learning3d/losses/cuda/emd_torch/pkg/include/cuda_helper.h +18 -0
- learning3d/losses/cuda/emd_torch/pkg/include/emd.h +54 -0
- learning3d/losses/cuda/emd_torch/pkg/layer/__init__.py +1 -0
- learning3d/losses/cuda/emd_torch/pkg/layer/emd_loss_layer.py +40 -0
- learning3d/losses/cuda/emd_torch/pkg/src/cuda/emd.cu +70 -0
- learning3d/losses/cuda/emd_torch/pkg/src/emd.cpp +1 -0
- learning3d/losses/cuda/emd_torch/setup.py +29 -0
- learning3d/losses/emd.py +16 -0
- learning3d/losses/frobenius_norm.py +21 -0
- learning3d/losses/rmse_features.py +16 -0
- learning3d/models/__init__.py +23 -0
- learning3d/models/classifier.py +41 -0
- learning3d/models/dcp.py +92 -0
- learning3d/models/deepgmr.py +165 -0
- learning3d/models/dgcnn.py +92 -0
- learning3d/models/flownet3d.py +446 -0
- learning3d/models/masknet.py +84 -0
- learning3d/models/masknet2.py +264 -0
- learning3d/models/pcn.py +164 -0
- learning3d/models/pcrnet.py +74 -0
- learning3d/models/pointconv.py +108 -0
- learning3d/models/pointnet.py +108 -0
- learning3d/models/pointnetlk.py +173 -0
- learning3d/models/pooling.py +15 -0
- learning3d/models/ppfnet.py +102 -0
- learning3d/models/prnet.py +431 -0
- learning3d/models/rpmnet.py +359 -0
- learning3d/models/segmentation.py +38 -0
- learning3d/ops/__init__.py +0 -0
- learning3d/ops/data_utils.py +45 -0
- learning3d/ops/invmat.py +134 -0
- learning3d/ops/quaternion.py +218 -0
- learning3d/ops/se3.py +157 -0
- learning3d/ops/sinc.py +229 -0
- learning3d/ops/so3.py +213 -0
- learning3d/ops/transform_functions.py +342 -0
- learning3d/utils/__init__.py +9 -0
- learning3d/utils/lib/build/lib.linux-x86_64-3.5/pointnet2_cuda.cpython-35m-x86_64-linux-gnu.so +0 -0
- learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/ball_query.o +0 -0
- learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/ball_query_gpu.o +0 -0
- learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/group_points.o +0 -0
- learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/group_points_gpu.o +0 -0
- learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/interpolate.o +0 -0
- learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/interpolate_gpu.o +0 -0
- learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/pointnet2_api.o +0 -0
- learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/sampling.o +0 -0
- learning3d/utils/lib/build/temp.linux-x86_64-3.5/src/sampling_gpu.o +0 -0
- learning3d/utils/lib/dist/pointnet2-0.0.0-py3.5-linux-x86_64.egg +0 -0
- learning3d/utils/lib/pointnet2.egg-info/SOURCES.txt +14 -0
- learning3d/utils/lib/pointnet2.egg-info/dependency_links.txt +1 -0
- learning3d/utils/lib/pointnet2.egg-info/top_level.txt +1 -0
- learning3d/utils/lib/pointnet2_modules.py +160 -0
- learning3d/utils/lib/pointnet2_utils.py +318 -0
- learning3d/utils/lib/pytorch_utils.py +236 -0
- learning3d/utils/lib/setup.py +23 -0
- learning3d/utils/lib/src/ball_query.cpp +25 -0
- learning3d/utils/lib/src/ball_query_gpu.cu +67 -0
- learning3d/utils/lib/src/ball_query_gpu.h +15 -0
- learning3d/utils/lib/src/cuda_utils.h +15 -0
- learning3d/utils/lib/src/group_points.cpp +36 -0
- learning3d/utils/lib/src/group_points_gpu.cu +86 -0
- learning3d/utils/lib/src/group_points_gpu.h +22 -0
- learning3d/utils/lib/src/interpolate.cpp +65 -0
- learning3d/utils/lib/src/interpolate_gpu.cu +233 -0
- learning3d/utils/lib/src/interpolate_gpu.h +36 -0
- learning3d/utils/lib/src/pointnet2_api.cpp +25 -0
- learning3d/utils/lib/src/sampling.cpp +46 -0
- learning3d/utils/lib/src/sampling_gpu.cu +253 -0
- learning3d/utils/lib/src/sampling_gpu.h +29 -0
- learning3d/utils/pointconv_util.py +382 -0
- learning3d/utils/ppfnet_util.py +244 -0
- learning3d/utils/svd.py +59 -0
- learning3d/utils/transformer.py +243 -0
- learning3d-0.0.1.dist-info/LICENSE +21 -0
- learning3d-0.0.1.dist-info/METADATA +271 -0
- learning3d-0.0.1.dist-info/RECORD +115 -0
- learning3d-0.0.1.dist-info/WHEEL +5 -0
- learning3d-0.0.1.dist-info/top_level.txt +1 -0
learning3d/ops/quaternion.py
ADDED
@@ -0,0 +1,218 @@
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

import torch
import numpy as np

# PyTorch-backed implementations


def qmul(q, r):
    """
    Multiply quaternion(s) q with quaternion(s) r.
    Expects two equally-sized tensors of shape (*, 4), where * denotes any number of dimensions.
    Returns q*r as a tensor of shape (*, 4).
    """
    assert q.shape[-1] == 4
    assert r.shape[-1] == 4

    original_shape = q.shape

    # Compute outer product
    terms = torch.bmm(r.view(-1, 4, 1), q.view(-1, 1, 4))

    w = terms[:, 0, 0] - terms[:, 1, 1] - terms[:, 2, 2] - terms[:, 3, 3]
    x = terms[:, 0, 1] + terms[:, 1, 0] - terms[:, 2, 3] + terms[:, 3, 2]
    y = terms[:, 0, 2] + terms[:, 1, 3] + terms[:, 2, 0] - terms[:, 3, 1]
    z = terms[:, 0, 3] - terms[:, 1, 2] + terms[:, 2, 1] + terms[:, 3, 0]
    return torch.stack((w, x, y, z), dim=1).view(original_shape)


def qrot(q, v):
    """
    Rotate vector(s) v about the rotation described by quaternion(s) q.
    Expects a tensor of shape (*, 4) for q and a tensor of shape (*, 3) for v,
    where * denotes any number of dimensions.
    Returns a tensor of shape (*, 3).
    """
    assert q.shape[-1] == 4
    assert v.shape[-1] == 3
    assert q.shape[:-1] == v.shape[:-1]

    original_shape = list(v.shape)
    q = q.view(-1, 4)
    v = v.view(-1, 3)

    qvec = q[:, 1:]
    uv = torch.cross(qvec, v, dim=1)
    uuv = torch.cross(qvec, uv, dim=1)
    return (v + 2 * (q[:, :1] * uv + uuv)).view(original_shape)


def qeuler(q, order, epsilon=0):
    """
    Convert quaternion(s) q to Euler angles.
    Expects a tensor of shape (*, 4), where * denotes any number of dimensions.
    Returns a tensor of shape (*, 3).
    """
    assert q.shape[-1] == 4

    original_shape = list(q.shape)
    original_shape[-1] = 3
    q = q.view(-1, 4)

    q0 = q[:, 0]
    q1 = q[:, 1]
    q2 = q[:, 2]
    q3 = q[:, 3]

    if order == "xyz":
        x = torch.atan2(2 * (q0 * q1 - q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2))
        y = torch.asin(torch.clamp(2 * (q1 * q3 + q0 * q2), -1 + epsilon, 1 - epsilon))
        z = torch.atan2(2 * (q0 * q3 - q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3))
    elif order == "yzx":
        x = torch.atan2(2 * (q0 * q1 - q2 * q3), 1 - 2 * (q1 * q1 + q3 * q3))
        y = torch.atan2(2 * (q0 * q2 - q1 * q3), 1 - 2 * (q2 * q2 + q3 * q3))
        z = torch.asin(torch.clamp(2 * (q1 * q2 + q0 * q3), -1 + epsilon, 1 - epsilon))
    elif order == "zxy":
        x = torch.asin(torch.clamp(2 * (q0 * q1 + q2 * q3), -1 + epsilon, 1 - epsilon))
        y = torch.atan2(2 * (q0 * q2 - q1 * q3), 1 - 2 * (q1 * q1 + q2 * q2))
        z = torch.atan2(2 * (q0 * q3 - q1 * q2), 1 - 2 * (q1 * q1 + q3 * q3))
    elif order == "xzy":
        x = torch.atan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q3 * q3))
        y = torch.atan2(2 * (q0 * q2 + q1 * q3), 1 - 2 * (q2 * q2 + q3 * q3))
        z = torch.asin(torch.clamp(2 * (q0 * q3 - q1 * q2), -1 + epsilon, 1 - epsilon))
    elif order == "yxz":
        x = torch.asin(torch.clamp(2 * (q0 * q1 - q2 * q3), -1 + epsilon, 1 - epsilon))
        y = torch.atan2(2 * (q1 * q3 + q0 * q2), 1 - 2 * (q1 * q1 + q2 * q2))
        z = torch.atan2(2 * (q1 * q2 + q0 * q3), 1 - 2 * (q1 * q1 + q3 * q3))
    elif order == "zyx":
        x = torch.atan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2))
        y = torch.asin(torch.clamp(2 * (q0 * q2 - q1 * q3), -1 + epsilon, 1 - epsilon))
        z = torch.atan2(2 * (q0 * q3 + q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3))
    else:
        raise ValueError("Invalid Euler angle order: %s" % order)

    return torch.stack((x, y, z), dim=1).view(original_shape)


# Numpy-backed implementations


def qmul_np(q, r):
    q = torch.from_numpy(q).contiguous()
    r = torch.from_numpy(r).contiguous()
    return qmul(q, r).numpy()


def qrot_np(q, v):
    q = torch.from_numpy(q).contiguous()
    v = torch.from_numpy(v).contiguous()
    return qrot(q, v).numpy()


def qeuler_np(q, order, epsilon=0, use_gpu=False):
    if use_gpu:
        q = torch.from_numpy(q).cuda()
        return qeuler(q, order, epsilon).cpu().numpy()
    else:
        q = torch.from_numpy(q).contiguous()
        return qeuler(q, order, epsilon).numpy()


def qfix(q):
    """
    Enforce quaternion continuity across the time dimension by selecting
    the representation (q or -q) with minimal distance (or, equivalently, maximal dot product)
    between two consecutive frames.

    Expects a tensor of shape (L, J, 4), where L is the sequence length and J is the number of joints.
    Returns a tensor of the same shape.
    """
    assert len(q.shape) == 3
    assert q.shape[-1] == 4

    result = q.copy()
    dot_products = np.sum(q[1:] * q[:-1], axis=2)
    mask = dot_products < 0
    mask = (np.cumsum(mask, axis=0) % 2).astype(bool)
    result[1:][mask] *= -1
    return result


def expmap_to_quaternion(e):
    """
    Convert axis-angle rotations (aka exponential maps) to quaternions.
    Stable formula from "Practical Parameterization of Rotations Using the Exponential Map".
    Expects a tensor of shape (*, 3), where * denotes any number of dimensions.
    Returns a tensor of shape (*, 4).
    """
    assert e.shape[-1] == 3

    original_shape = list(e.shape)
    original_shape[-1] = 4
    e = e.reshape(-1, 3)

    theta = np.linalg.norm(e, axis=1).reshape(-1, 1)
    w = np.cos(0.5 * theta).reshape(-1, 1)
    xyz = 0.5 * np.sinc(0.5 * theta / np.pi) * e
    return np.concatenate((w, xyz), axis=1).reshape(original_shape)


def euler_to_quaternion(e, order):
    """
    Convert Euler angles to quaternions.
    """
    assert e.shape[-1] == 3

    original_shape = list(e.shape)
    original_shape[-1] = 4

    e = e.reshape(-1, 3)

    x = e[:, 0]
    y = e[:, 1]
    z = e[:, 2]

    rx = np.stack(
        (np.cos(x / 2), np.sin(x / 2), np.zeros_like(x), np.zeros_like(x)), axis=1
    )
    ry = np.stack(
        (np.cos(y / 2), np.zeros_like(y), np.sin(y / 2), np.zeros_like(y)), axis=1
    )
    rz = np.stack(
        (np.cos(z / 2), np.zeros_like(z), np.zeros_like(z), np.sin(z / 2)), axis=1
    )

    result = None
    for coord in order:
        if coord == "x":
            r = rx
        elif coord == "y":
            r = ry
        elif coord == "z":
            r = rz
        else:
            raise ValueError("Invalid Euler angle order: %s" % order)
        if result is None:
            result = r
        else:
            result = qmul_np(result, r)

    # Reverse antipodal representation to have a non-negative "w"
    if order in ["xyz", "yzx", "zxy"]:
        result *= -1

    return result.reshape(original_shape)


def qinv(q):
    # expects q in (w, x, y, z) format
    w = q[:, 0:1]
    v = q[:, 1:]
    inv = torch.cat([w, -v], dim=1)
    return inv
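A quick usage sketch for the quaternion helpers above (illustrative only; it assumes the installed wheel exposes this module as learning3d.ops.quaternion, and the printed values follow from the formulas above):

import numpy as np
import torch
from learning3d.ops.quaternion import qrot, qeuler, qfix

# 45-degree rotation about z, in the (w, x, y, z) convention
q = torch.tensor([[np.cos(np.pi / 8), 0.0, 0.0, np.sin(np.pi / 8)]])
v = torch.tensor([[1.0, 0.0, 0.0]])
print(qrot(q, v))        # ~[[0.7071, 0.7071, 0.0]]
print(qeuler(q, "zyx"))  # (x, y, z) angles; z ~ pi/4

# qfix flips antipodal frames so a sequence stays sign-continuous
seq = np.stack([q.numpy(), -q.numpy()])  # shape (L=2, J=1, 4)
print(qfix(seq)[1])      # second frame flipped back to +q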
learning3d/ops/se3.py
ADDED
@@ -0,0 +1,157 @@
""" 3-d rigid body transformation group and corresponding Lie algebra. """
import torch
from .sinc import sinc1, sinc2, sinc3
from . import so3

def twist_prod(x, y):
    x_ = x.view(-1, 6)
    y_ = y.view(-1, 6)

    xw, xv = x_[:, 0:3], x_[:, 3:6]
    yw, yv = y_[:, 0:3], y_[:, 3:6]

    zw = so3.cross_prod(xw, yw)
    zv = so3.cross_prod(xw, yv) + so3.cross_prod(xv, yw)

    z = torch.cat((zw, zv), dim=1)

    return z.view_as(x)

def liebracket(x, y):
    return twist_prod(x, y)


def mat(x):
    # size: [*, 6] -> [*, 4, 4]
    x_ = x.view(-1, 6)
    w1, w2, w3 = x_[:, 0], x_[:, 1], x_[:, 2]
    v1, v2, v3 = x_[:, 3], x_[:, 4], x_[:, 5]
    O = torch.zeros_like(w1)

    X = torch.stack((
        torch.stack(( O, -w3,  w2, v1), dim=1),
        torch.stack(( w3,  O, -w1, v2), dim=1),
        torch.stack((-w2,  w1,  O, v3), dim=1),
        torch.stack((  O,   O,  O,  O), dim=1)), dim=1)
    return X.view(*(x.size()[0:-1]), 4, 4)

def vec(X):
    X_ = X.view(-1, 4, 4)
    w1, w2, w3 = X_[:, 2, 1], X_[:, 0, 2], X_[:, 1, 0]
    v1, v2, v3 = X_[:, 0, 3], X_[:, 1, 3], X_[:, 2, 3]
    x = torch.stack((w1, w2, w3, v1, v2, v3), dim=1)
    return x.view(*X.size()[0:-2], 6)

def genvec():
    return torch.eye(6)

def genmat():
    return mat(genvec())

def exp(x):
    x_ = x.view(-1, 6)
    w, v = x_[:, 0:3], x_[:, 3:6]
    t = w.norm(p=2, dim=1).view(-1, 1, 1)
    W = so3.mat(w)
    S = W.bmm(W)
    I = torch.eye(3).to(w)

    # Rodrigues' rotation formula.
    #R = cos(t)*eye(3) + sinc1(t)*W + sinc2(t)*(w*w');
    #  = eye(3) + sinc1(t)*W + sinc2(t)*S
    R = I + sinc1(t)*W + sinc2(t)*S

    #V = sinc1(t)*eye(3) + sinc2(t)*W + sinc3(t)*(w*w')
    #  = eye(3) + sinc2(t)*W + sinc3(t)*S
    V = I + sinc2(t)*W + sinc3(t)*S

    p = V.bmm(v.contiguous().view(-1, 3, 1))

    z = torch.Tensor([0, 0, 0, 1]).view(1, 1, 4).repeat(x_.size(0), 1, 1).to(x)
    Rp = torch.cat((R, p), dim=2)
    g = torch.cat((Rp, z), dim=1)

    return g.view(*(x.size()[0:-1]), 4, 4)

def inverse(g):
    g_ = g.view(-1, 4, 4)
    R = g_[:, 0:3, 0:3]
    p = g_[:, 0:3, 3]
    Q = R.transpose(1, 2)
    q = -Q.matmul(p.unsqueeze(-1))

    z = torch.Tensor([0, 0, 0, 1]).view(1, 1, 4).repeat(g_.size(0), 1, 1).to(g)
    Qq = torch.cat((Q, q), dim=2)
    ig = torch.cat((Qq, z), dim=1)

    return ig.view(*(g.size()[0:-2]), 4, 4)


def log(g):
    g_ = g.view(-1, 4, 4)
    R = g_[:, 0:3, 0:3]
    p = g_[:, 0:3, 3]

    w = so3.log(R)
    H = so3.inv_vecs_Xg_ig(w)
    v = H.bmm(p.contiguous().view(-1, 3, 1)).view(-1, 3)

    x = torch.cat((w, v), dim=1)
    return x.view(*(g.size()[0:-2]), 6)

def transform(g, a):
    # g : SE(3),  * x 4 x 4
    # a : R^3,    * x 3[x N]
    g_ = g.view(-1, 4, 4)
    R = g_[:, 0:3, 0:3].contiguous().view(*(g.size()[0:-2]), 3, 3)
    p = g_[:, 0:3, 3].contiguous().view(*(g.size()[0:-2]), 3)
    if len(g.size()) == len(a.size()):
        b = R.matmul(a) + p.unsqueeze(-1)
    else:
        b = R.matmul(a.unsqueeze(-1)).squeeze(-1) + p
    return b

def group_prod(g, h):
    # g, h : SE(3)
    g1 = g.matmul(h)
    return g1


class ExpMap(torch.autograd.Function):
    """ Exp: se(3) -> SE(3)
    """
    @staticmethod
    def forward(ctx, x):
        """ Exp: R^6 -> M(4),
            size: [B, 6] -> [B, 4, 4],
              or  [B, 1, 6] -> [B, 1, 4, 4]
        """
        ctx.save_for_backward(x)
        g = exp(x)
        return g

    @staticmethod
    def backward(ctx, grad_output):
        x, = ctx.saved_tensors
        g = exp(x)
        gen_k = genmat().to(x)

        # Let z = f(g) = f(exp(x))
        # dz = df/dgij * dgij/dxk * dxk
        #    = df/dgij * (d/dxk)[exp(x)]_ij * dxk
        #    = df/dgij * [gen_k*g]_ij * dxk

        dg = gen_k.matmul(g.view(-1, 1, 4, 4))
        # (k, i, j)
        dg = dg.to(grad_output)

        go = grad_output.contiguous().view(-1, 1, 4, 4)
        dd = go * dg
        grad_input = dd.sum(-1).sum(-1)

        return grad_input

Exp = ExpMap.apply


#EOF
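A round-trip sketch for the exp/inverse/transform helpers above (illustrative; se3.exp relies on the sibling so3 module listed in this wheel for the skew-matrix helper):

import torch
from learning3d.ops import se3

x = torch.tensor([[0.1, -0.2, 0.3, 1.0, 0.0, -1.0]])  # twist (w, v)
g = se3.exp(x)                             # [1, 4, 4] rigid transform
pts = torch.rand(1, 3, 10)                 # 10 points as columns
out = se3.transform(g, pts)                # R @ pts + p
back = se3.transform(se3.inverse(g), out)
print(torch.allclose(back, pts, atol=1e-5))  # expect True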
learning3d/ops/sinc.py
ADDED
@@ -0,0 +1,229 @@
""" sinc(t) := sin(t) / t """
import torch
from torch import sin, cos

def sinc1(t):
    """ sinc1: t -> sin(t)/t """
    e = 0.01
    r = torch.zeros_like(t)
    a = torch.abs(t)

    s = a < e
    c = (s == 0)
    t2 = t[s] ** 2
    r[s] = 1 - t2/6*(1 - t2/20*(1 - t2/42))  # Taylor series O(t^8)
    r[c] = sin(t[c]) / t[c]

    return r

def sinc1_dt(t):
    """ d/dt(sinc1) """
    e = 0.01
    r = torch.zeros_like(t)
    a = torch.abs(t)

    s = a < e
    c = (s == 0)
    t2 = t ** 2
    r[s] = -t[s]/3*(1 - t2[s]/10*(1 - t2[s]/28*(1 - t2[s]/54)))  # Taylor series O(t^8)
    r[c] = cos(t[c])/t[c] - sin(t[c])/t2[c]

    return r

def sinc1_dt_rt(t):
    """ d/dt(sinc1) / t """
    e = 0.01
    r = torch.zeros_like(t)
    a = torch.abs(t)

    s = a < e
    c = (s == 0)
    t2 = t ** 2
    r[s] = -1/3*(1 - t2[s]/10*(1 - t2[s]/28*(1 - t2[s]/54)))  # Taylor series O(t^8)
    r[c] = (cos(t[c]) / t[c] - sin(t[c]) / t2[c]) / t[c]

    return r


def rsinc1(t):
    """ rsinc1: t -> t/sinc1(t) """
    e = 0.01
    r = torch.zeros_like(t)
    a = torch.abs(t)

    s = a < e
    c = (s == 0)
    t2 = t[s] ** 2
    r[s] = (((31*t2)/42 + 7)*t2/60 + 1)*t2/6 + 1  # Taylor series O(t^8)
    r[c] = t[c] / sin(t[c])

    return r

def rsinc1_dt(t):
    """ d/dt(rsinc1) """
    e = 0.01
    r = torch.zeros_like(t)
    a = torch.abs(t)

    s = a < e
    c = (s == 0)
    t2 = t[s] ** 2
    r[s] = ((((127*t2)/30 + 31)*t2/28 + 7)*t2/30 + 1)*t[s]/3  # Taylor series O(t^8)
    r[c] = 1/sin(t[c]) - (t[c]*cos(t[c]))/(sin(t[c])*sin(t[c]))

    return r

def rsinc1_dt_csc(t):
    """ d/dt(rsinc1) / sin(t) """
    e = 0.01
    r = torch.zeros_like(t)
    a = torch.abs(t)

    s = a < e
    c = (s == 0)
    t2 = t[s] ** 2
    r[s] = t2*(t2*((4*t2)/675 + 2/63) + 2/15) + 1/3  # Taylor series O(t^8)
    r[c] = (1/sin(t[c]) - (t[c]*cos(t[c]))/(sin(t[c])*sin(t[c]))) / sin(t[c])

    return r


def sinc2(t):
    """ sinc2: t -> (1 - cos(t)) / (t**2) """
    e = 0.01
    r = torch.zeros_like(t)
    a = torch.abs(t)

    s = a < e
    c = (s == 0)
    t2 = t ** 2
    r[s] = 1/2*(1-t2[s]/12*(1-t2[s]/30*(1-t2[s]/56)))  # Taylor series O(t^8)
    r[c] = (1-cos(t[c]))/t2[c]

    return r

def sinc2_dt(t):
    """ d/dt(sinc2) """
    e = 0.01
    r = torch.zeros_like(t)
    a = torch.abs(t)

    s = a < e
    c = (s == 0)
    t2 = t ** 2
    r[s] = -t[s]/12*(1 - t2[s]/5*(1.0/3 - t2[s]/56*(1.0/2 - t2[s]/135)))  # Taylor series O(t^8)
    r[c] = sin(t[c])/t2[c] - 2*(1-cos(t[c]))/(t2[c]*t[c])

    return r


def sinc3(t):
    """ sinc3: t -> (t - sin(t)) / (t**3) """
    e = 0.01
    r = torch.zeros_like(t)
    a = torch.abs(t)

    s = a < e
    c = (s == 0)
    t2 = t[s] ** 2
    r[s] = 1/6*(1-t2/20*(1-t2/42*(1-t2/72)))  # Taylor series O(t^8)
    r[c] = (t[c]-sin(t[c]))/(t[c]**3)

    return r

def sinc3_dt(t):
    """ d/dt(sinc3) """
    e = 0.01
    r = torch.zeros_like(t)
    a = torch.abs(t)

    s = a < e
    c = (s == 0)
    t2 = t[s] ** 2
    r[s] = -t[s]/60*(1 - t2/21*(1 - t2/24*(1.0/2 - t2/165)))  # Taylor series O(t^8)
    r[c] = (3*sin(t[c]) - t[c]*(cos(t[c]) + 2))/(t[c]**4)

    return r


def sinc4(t):
    """ sinc4: t -> 1/t^2 * (1/2 - sinc2(t))
              = 1/t^2 * (1/2 - (1 - cos(t))/t^2)
    """
    e = 0.01
    r = torch.zeros_like(t)
    a = torch.abs(t)

    s = a < e
    c = (s == 0)
    t2 = t ** 2
    r[s] = 1/24*(1-t2[s]/30*(1-t2[s]/56*(1-t2[s]/90)))  # Taylor series O(t^8)
    r[c] = (0.5 - (1 - cos(t[c]))/t2[c]) / t2[c]

    return r


class Sinc1_autograd(torch.autograd.Function):
    @staticmethod
    def forward(ctx, theta):
        ctx.save_for_backward(theta)
        return sinc1(theta)

    @staticmethod
    def backward(ctx, grad_output):
        theta, = ctx.saved_tensors
        grad_theta = None
        if ctx.needs_input_grad[0]:
            grad_theta = grad_output * sinc1_dt(theta).to(grad_output)
        return grad_theta

Sinc1 = Sinc1_autograd.apply

class RSinc1_autograd(torch.autograd.Function):
    @staticmethod
    def forward(ctx, theta):
        ctx.save_for_backward(theta)
        return rsinc1(theta)

    @staticmethod
    def backward(ctx, grad_output):
        theta, = ctx.saved_tensors
        grad_theta = None
        if ctx.needs_input_grad[0]:
            grad_theta = grad_output * rsinc1_dt(theta).to(grad_output)
        return grad_theta

RSinc1 = RSinc1_autograd.apply

class Sinc2_autograd(torch.autograd.Function):
    @staticmethod
    def forward(ctx, theta):
        ctx.save_for_backward(theta)
        return sinc2(theta)

    @staticmethod
    def backward(ctx, grad_output):
        theta, = ctx.saved_tensors
        grad_theta = None
        if ctx.needs_input_grad[0]:
            grad_theta = grad_output * sinc2_dt(theta).to(grad_output)
        return grad_theta

Sinc2 = Sinc2_autograd.apply

class Sinc3_autograd(torch.autograd.Function):
    @staticmethod
    def forward(ctx, theta):
        ctx.save_for_backward(theta)
        return sinc3(theta)

    @staticmethod
    def backward(ctx, grad_output):
        theta, = ctx.saved_tensors
        grad_theta = None
        if ctx.needs_input_grad[0]:
            grad_theta = grad_output * sinc3_dt(theta).to(grad_output)
        return grad_theta

Sinc3 = Sinc3_autograd.apply


#EOF
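The guarded branches above matter only near t = 0, where the naive quotients lose precision or divide by zero; a small comparison sketch (illustrative import path):

import torch
from learning3d.ops.sinc import sinc1, sinc2, sinc3, Sinc1

t = torch.tensor([0.0, 1e-4, 0.5, 2.0])
print(sinc1(t))          # Taylor branch gives 1.0 at t = 0
print(torch.sin(t) / t)  # naive version produces nan at t = 0
print(sinc2(t), sinc3(t))

theta = torch.tensor([1e-3], requires_grad=True)
Sinc1(theta).backward()  # gradient routed through the guarded sinc1_dt
print(theta.grad)        # ~ -theta/3 near zero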