warp-lang 1.0.2-py3-none-manylinux2014_x86_64.whl → 1.1.0-py3-none-manylinux2014_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of warp-lang might be problematic.
Files changed (346)
  1. warp/__init__.py +108 -97
  2. warp/__init__.pyi +1 -1
  3. warp/bin/warp-clang.so +0 -0
  4. warp/bin/warp.so +0 -0
  5. warp/build.py +115 -113
  6. warp/build_dll.py +383 -375
  7. warp/builtins.py +3425 -3354
  8. warp/codegen.py +2878 -2792
  9. warp/config.py +40 -36
  10. warp/constants.py +45 -45
  11. warp/context.py +5194 -5102
  12. warp/dlpack.py +442 -442
  13. warp/examples/__init__.py +16 -16
  14. warp/examples/assets/bear.usd +0 -0
  15. warp/examples/assets/bunny.usd +0 -0
  16. warp/examples/assets/cartpole.urdf +110 -110
  17. warp/examples/assets/crazyflie.usd +0 -0
  18. warp/examples/assets/cube.usd +0 -0
  19. warp/examples/assets/nv_ant.xml +92 -92
  20. warp/examples/assets/nv_humanoid.xml +183 -183
  21. warp/examples/assets/quadruped.urdf +267 -267
  22. warp/examples/assets/rocks.nvdb +0 -0
  23. warp/examples/assets/rocks.usd +0 -0
  24. warp/examples/assets/sphere.usd +0 -0
  25. warp/examples/benchmarks/benchmark_api.py +383 -383
  26. warp/examples/benchmarks/benchmark_cloth.py +278 -277
  27. warp/examples/benchmarks/benchmark_cloth_cupy.py +88 -88
  28. warp/examples/benchmarks/benchmark_cloth_jax.py +97 -100
  29. warp/examples/benchmarks/benchmark_cloth_numba.py +146 -142
  30. warp/examples/benchmarks/benchmark_cloth_numpy.py +77 -77
  31. warp/examples/benchmarks/benchmark_cloth_pytorch.py +86 -86
  32. warp/examples/benchmarks/benchmark_cloth_taichi.py +112 -112
  33. warp/examples/benchmarks/benchmark_cloth_warp.py +146 -146
  34. warp/examples/benchmarks/benchmark_launches.py +295 -295
  35. warp/examples/browse.py +29 -29
  36. warp/examples/core/example_dem.py +234 -219
  37. warp/examples/core/example_fluid.py +293 -267
  38. warp/examples/core/example_graph_capture.py +144 -126
  39. warp/examples/core/example_marching_cubes.py +188 -174
  40. warp/examples/core/example_mesh.py +174 -155
  41. warp/examples/core/example_mesh_intersect.py +205 -193
  42. warp/examples/core/example_nvdb.py +176 -170
  43. warp/examples/core/example_raycast.py +105 -90
  44. warp/examples/core/example_raymarch.py +199 -178
  45. warp/examples/core/example_render_opengl.py +185 -141
  46. warp/examples/core/example_sph.py +405 -387
  47. warp/examples/core/example_torch.py +222 -181
  48. warp/examples/core/example_wave.py +263 -248
  49. warp/examples/fem/bsr_utils.py +378 -380
  50. warp/examples/fem/example_apic_fluid.py +407 -389
  51. warp/examples/fem/example_convection_diffusion.py +182 -168
  52. warp/examples/fem/example_convection_diffusion_dg.py +219 -209
  53. warp/examples/fem/example_convection_diffusion_dg0.py +204 -194
  54. warp/examples/fem/example_deformed_geometry.py +177 -159
  55. warp/examples/fem/example_diffusion.py +201 -173
  56. warp/examples/fem/example_diffusion_3d.py +177 -152
  57. warp/examples/fem/example_diffusion_mgpu.py +221 -214
  58. warp/examples/fem/example_mixed_elasticity.py +244 -222
  59. warp/examples/fem/example_navier_stokes.py +259 -243
  60. warp/examples/fem/example_stokes.py +220 -192
  61. warp/examples/fem/example_stokes_transfer.py +265 -249
  62. warp/examples/fem/mesh_utils.py +133 -109
  63. warp/examples/fem/plot_utils.py +292 -287
  64. warp/examples/optim/example_bounce.py +260 -246
  65. warp/examples/optim/example_cloth_throw.py +222 -209
  66. warp/examples/optim/example_diffray.py +566 -536
  67. warp/examples/optim/example_drone.py +864 -835
  68. warp/examples/optim/example_inverse_kinematics.py +176 -168
  69. warp/examples/optim/example_inverse_kinematics_torch.py +185 -169
  70. warp/examples/optim/example_spring_cage.py +239 -231
  71. warp/examples/optim/example_trajectory.py +223 -199
  72. warp/examples/optim/example_walker.py +306 -293
  73. warp/examples/sim/example_cartpole.py +139 -129
  74. warp/examples/sim/example_cloth.py +196 -186
  75. warp/examples/sim/example_granular.py +124 -111
  76. warp/examples/sim/example_granular_collision_sdf.py +197 -186
  77. warp/examples/sim/example_jacobian_ik.py +236 -214
  78. warp/examples/sim/example_particle_chain.py +118 -105
  79. warp/examples/sim/example_quadruped.py +193 -180
  80. warp/examples/sim/example_rigid_chain.py +197 -187
  81. warp/examples/sim/example_rigid_contact.py +189 -177
  82. warp/examples/sim/example_rigid_force.py +127 -125
  83. warp/examples/sim/example_rigid_gyroscopic.py +109 -95
  84. warp/examples/sim/example_rigid_soft_contact.py +134 -122
  85. warp/examples/sim/example_soft_body.py +190 -177
  86. warp/fabric.py +337 -335
  87. warp/fem/__init__.py +60 -27
  88. warp/fem/cache.py +401 -388
  89. warp/fem/dirichlet.py +178 -179
  90. warp/fem/domain.py +262 -263
  91. warp/fem/field/__init__.py +100 -101
  92. warp/fem/field/field.py +148 -149
  93. warp/fem/field/nodal_field.py +298 -299
  94. warp/fem/field/restriction.py +22 -21
  95. warp/fem/field/test.py +180 -181
  96. warp/fem/field/trial.py +183 -183
  97. warp/fem/geometry/__init__.py +15 -19
  98. warp/fem/geometry/closest_point.py +69 -70
  99. warp/fem/geometry/deformed_geometry.py +270 -271
  100. warp/fem/geometry/element.py +744 -744
  101. warp/fem/geometry/geometry.py +184 -186
  102. warp/fem/geometry/grid_2d.py +380 -373
  103. warp/fem/geometry/grid_3d.py +441 -435
  104. warp/fem/geometry/hexmesh.py +953 -953
  105. warp/fem/geometry/partition.py +374 -376
  106. warp/fem/geometry/quadmesh_2d.py +532 -532
  107. warp/fem/geometry/tetmesh.py +840 -840
  108. warp/fem/geometry/trimesh_2d.py +577 -577
  109. warp/fem/integrate.py +1630 -1615
  110. warp/fem/operator.py +190 -191
  111. warp/fem/polynomial.py +214 -213
  112. warp/fem/quadrature/__init__.py +2 -2
  113. warp/fem/quadrature/pic_quadrature.py +243 -245
  114. warp/fem/quadrature/quadrature.py +295 -294
  115. warp/fem/space/__init__.py +294 -292
  116. warp/fem/space/basis_space.py +488 -489
  117. warp/fem/space/collocated_function_space.py +100 -105
  118. warp/fem/space/dof_mapper.py +236 -236
  119. warp/fem/space/function_space.py +148 -145
  120. warp/fem/space/grid_2d_function_space.py +267 -267
  121. warp/fem/space/grid_3d_function_space.py +305 -306
  122. warp/fem/space/hexmesh_function_space.py +350 -352
  123. warp/fem/space/partition.py +350 -350
  124. warp/fem/space/quadmesh_2d_function_space.py +368 -369
  125. warp/fem/space/restriction.py +158 -160
  126. warp/fem/space/shape/__init__.py +13 -15
  127. warp/fem/space/shape/cube_shape_function.py +738 -738
  128. warp/fem/space/shape/shape_function.py +102 -103
  129. warp/fem/space/shape/square_shape_function.py +611 -611
  130. warp/fem/space/shape/tet_shape_function.py +565 -567
  131. warp/fem/space/shape/triangle_shape_function.py +429 -429
  132. warp/fem/space/tetmesh_function_space.py +294 -292
  133. warp/fem/space/topology.py +297 -295
  134. warp/fem/space/trimesh_2d_function_space.py +223 -221
  135. warp/fem/types.py +77 -77
  136. warp/fem/utils.py +495 -495
  137. warp/jax.py +166 -141
  138. warp/jax_experimental.py +341 -339
  139. warp/native/array.h +1072 -1025
  140. warp/native/builtin.h +1560 -1560
  141. warp/native/bvh.cpp +398 -398
  142. warp/native/bvh.cu +525 -525
  143. warp/native/bvh.h +429 -429
  144. warp/native/clang/clang.cpp +495 -464
  145. warp/native/crt.cpp +31 -31
  146. warp/native/crt.h +334 -334
  147. warp/native/cuda_crt.h +1049 -1049
  148. warp/native/cuda_util.cpp +549 -540
  149. warp/native/cuda_util.h +288 -203
  150. warp/native/cutlass_gemm.cpp +34 -34
  151. warp/native/cutlass_gemm.cu +372 -372
  152. warp/native/error.cpp +66 -66
  153. warp/native/error.h +27 -27
  154. warp/native/fabric.h +228 -228
  155. warp/native/hashgrid.cpp +301 -278
  156. warp/native/hashgrid.cu +78 -77
  157. warp/native/hashgrid.h +227 -227
  158. warp/native/initializer_array.h +32 -32
  159. warp/native/intersect.h +1204 -1204
  160. warp/native/intersect_adj.h +365 -365
  161. warp/native/intersect_tri.h +322 -322
  162. warp/native/marching.cpp +2 -2
  163. warp/native/marching.cu +497 -497
  164. warp/native/marching.h +2 -2
  165. warp/native/mat.h +1498 -1498
  166. warp/native/matnn.h +333 -333
  167. warp/native/mesh.cpp +203 -203
  168. warp/native/mesh.cu +293 -293
  169. warp/native/mesh.h +1887 -1887
  170. warp/native/nanovdb/NanoVDB.h +4782 -4782
  171. warp/native/nanovdb/PNanoVDB.h +2553 -2553
  172. warp/native/nanovdb/PNanoVDBWrite.h +294 -294
  173. warp/native/noise.h +850 -850
  174. warp/native/quat.h +1084 -1084
  175. warp/native/rand.h +299 -299
  176. warp/native/range.h +108 -108
  177. warp/native/reduce.cpp +156 -156
  178. warp/native/reduce.cu +348 -348
  179. warp/native/runlength_encode.cpp +61 -61
  180. warp/native/runlength_encode.cu +46 -46
  181. warp/native/scan.cpp +30 -30
  182. warp/native/scan.cu +36 -36
  183. warp/native/scan.h +7 -7
  184. warp/native/solid_angle.h +442 -442
  185. warp/native/sort.cpp +94 -94
  186. warp/native/sort.cu +97 -97
  187. warp/native/sort.h +14 -14
  188. warp/native/sparse.cpp +337 -337
  189. warp/native/sparse.cu +544 -544
  190. warp/native/spatial.h +630 -630
  191. warp/native/svd.h +562 -562
  192. warp/native/temp_buffer.h +30 -30
  193. warp/native/vec.h +1132 -1132
  194. warp/native/volume.cpp +297 -297
  195. warp/native/volume.cu +32 -32
  196. warp/native/volume.h +538 -538
  197. warp/native/volume_builder.cu +425 -425
  198. warp/native/volume_builder.h +19 -19
  199. warp/native/warp.cpp +1057 -1052
  200. warp/native/warp.cu +2943 -2828
  201. warp/native/warp.h +313 -305
  202. warp/optim/__init__.py +9 -9
  203. warp/optim/adam.py +120 -120
  204. warp/optim/linear.py +1104 -939
  205. warp/optim/sgd.py +104 -92
  206. warp/render/__init__.py +10 -10
  207. warp/render/render_opengl.py +3217 -3204
  208. warp/render/render_usd.py +768 -749
  209. warp/render/utils.py +152 -150
  210. warp/sim/__init__.py +52 -59
  211. warp/sim/articulation.py +685 -685
  212. warp/sim/collide.py +1594 -1590
  213. warp/sim/import_mjcf.py +489 -481
  214. warp/sim/import_snu.py +220 -221
  215. warp/sim/import_urdf.py +536 -516
  216. warp/sim/import_usd.py +887 -881
  217. warp/sim/inertia.py +316 -317
  218. warp/sim/integrator.py +234 -233
  219. warp/sim/integrator_euler.py +1956 -1956
  220. warp/sim/integrator_featherstone.py +1910 -1991
  221. warp/sim/integrator_xpbd.py +3294 -3312
  222. warp/sim/model.py +4473 -4314
  223. warp/sim/particles.py +113 -112
  224. warp/sim/render.py +417 -403
  225. warp/sim/utils.py +413 -410
  226. warp/sparse.py +1227 -1227
  227. warp/stubs.py +2109 -2469
  228. warp/tape.py +1162 -225
  229. warp/tests/__init__.py +1 -1
  230. warp/tests/__main__.py +4 -4
  231. warp/tests/assets/torus.usda +105 -105
  232. warp/tests/aux_test_class_kernel.py +26 -26
  233. warp/tests/aux_test_compile_consts_dummy.py +10 -10
  234. warp/tests/aux_test_conditional_unequal_types_kernels.py +21 -21
  235. warp/tests/aux_test_dependent.py +22 -22
  236. warp/tests/aux_test_grad_customs.py +23 -23
  237. warp/tests/aux_test_reference.py +11 -11
  238. warp/tests/aux_test_reference_reference.py +10 -10
  239. warp/tests/aux_test_square.py +17 -17
  240. warp/tests/aux_test_unresolved_func.py +14 -14
  241. warp/tests/aux_test_unresolved_symbol.py +14 -14
  242. warp/tests/disabled_kinematics.py +239 -239
  243. warp/tests/run_coverage_serial.py +31 -31
  244. warp/tests/test_adam.py +157 -157
  245. warp/tests/test_arithmetic.py +1124 -1124
  246. warp/tests/test_array.py +2417 -2326
  247. warp/tests/test_array_reduce.py +150 -150
  248. warp/tests/test_async.py +668 -656
  249. warp/tests/test_atomic.py +141 -141
  250. warp/tests/test_bool.py +204 -149
  251. warp/tests/test_builtins_resolution.py +1292 -1292
  252. warp/tests/test_bvh.py +164 -171
  253. warp/tests/test_closest_point_edge_edge.py +228 -228
  254. warp/tests/test_codegen.py +566 -553
  255. warp/tests/test_compile_consts.py +97 -101
  256. warp/tests/test_conditional.py +246 -246
  257. warp/tests/test_copy.py +232 -215
  258. warp/tests/test_ctypes.py +632 -632
  259. warp/tests/test_dense.py +67 -67
  260. warp/tests/test_devices.py +91 -98
  261. warp/tests/test_dlpack.py +530 -529
  262. warp/tests/test_examples.py +400 -378
  263. warp/tests/test_fabricarray.py +955 -955
  264. warp/tests/test_fast_math.py +62 -54
  265. warp/tests/test_fem.py +1277 -1278
  266. warp/tests/test_fp16.py +130 -130
  267. warp/tests/test_func.py +338 -337
  268. warp/tests/test_generics.py +571 -571
  269. warp/tests/test_grad.py +746 -640
  270. warp/tests/test_grad_customs.py +333 -336
  271. warp/tests/test_hash_grid.py +210 -164
  272. warp/tests/test_import.py +39 -39
  273. warp/tests/test_indexedarray.py +1134 -1134
  274. warp/tests/test_intersect.py +67 -67
  275. warp/tests/test_jax.py +307 -307
  276. warp/tests/test_large.py +167 -164
  277. warp/tests/test_launch.py +354 -354
  278. warp/tests/test_lerp.py +261 -261
  279. warp/tests/test_linear_solvers.py +191 -171
  280. warp/tests/test_lvalue.py +421 -493
  281. warp/tests/test_marching_cubes.py +65 -65
  282. warp/tests/test_mat.py +1801 -1827
  283. warp/tests/test_mat_lite.py +115 -115
  284. warp/tests/test_mat_scalar_ops.py +2907 -2889
  285. warp/tests/test_math.py +126 -193
  286. warp/tests/test_matmul.py +500 -499
  287. warp/tests/test_matmul_lite.py +410 -410
  288. warp/tests/test_mempool.py +188 -190
  289. warp/tests/test_mesh.py +284 -324
  290. warp/tests/test_mesh_query_aabb.py +228 -241
  291. warp/tests/test_mesh_query_point.py +692 -702
  292. warp/tests/test_mesh_query_ray.py +292 -303
  293. warp/tests/test_mlp.py +276 -276
  294. warp/tests/test_model.py +110 -110
  295. warp/tests/test_modules_lite.py +39 -39
  296. warp/tests/test_multigpu.py +163 -163
  297. warp/tests/test_noise.py +248 -248
  298. warp/tests/test_operators.py +250 -250
  299. warp/tests/test_options.py +123 -125
  300. warp/tests/test_peer.py +133 -137
  301. warp/tests/test_pinned.py +78 -78
  302. warp/tests/test_print.py +54 -54
  303. warp/tests/test_quat.py +2086 -2086
  304. warp/tests/test_rand.py +288 -288
  305. warp/tests/test_reload.py +217 -217
  306. warp/tests/test_rounding.py +179 -179
  307. warp/tests/test_runlength_encode.py +190 -190
  308. warp/tests/test_sim_grad.py +243 -0
  309. warp/tests/test_sim_kinematics.py +91 -97
  310. warp/tests/test_smoothstep.py +168 -168
  311. warp/tests/test_snippet.py +305 -266
  312. warp/tests/test_sparse.py +468 -460
  313. warp/tests/test_spatial.py +2148 -2148
  314. warp/tests/test_streams.py +486 -473
  315. warp/tests/test_struct.py +710 -675
  316. warp/tests/test_tape.py +173 -148
  317. warp/tests/test_torch.py +743 -743
  318. warp/tests/test_transient_module.py +87 -87
  319. warp/tests/test_types.py +556 -659
  320. warp/tests/test_utils.py +490 -499
  321. warp/tests/test_vec.py +1264 -1268
  322. warp/tests/test_vec_lite.py +73 -73
  323. warp/tests/test_vec_scalar_ops.py +2099 -2099
  324. warp/tests/test_verify_fp.py +94 -94
  325. warp/tests/test_volume.py +737 -736
  326. warp/tests/test_volume_write.py +255 -265
  327. warp/tests/unittest_serial.py +37 -37
  328. warp/tests/unittest_suites.py +363 -359
  329. warp/tests/unittest_utils.py +603 -578
  330. warp/tests/unused_test_misc.py +71 -71
  331. warp/tests/walkthrough_debug.py +85 -85
  332. warp/thirdparty/appdirs.py +598 -598
  333. warp/thirdparty/dlpack.py +143 -143
  334. warp/thirdparty/unittest_parallel.py +566 -561
  335. warp/torch.py +321 -295
  336. warp/types.py +4504 -4450
  337. warp/utils.py +1008 -821
  338. {warp_lang-1.0.2.dist-info → warp_lang-1.1.0.dist-info}/LICENSE.md +126 -126
  339. {warp_lang-1.0.2.dist-info → warp_lang-1.1.0.dist-info}/METADATA +338 -400
  340. warp_lang-1.1.0.dist-info/RECORD +352 -0
  341. warp/examples/assets/cube.usda +0 -42
  342. warp/examples/assets/sphere.usda +0 -56
  343. warp/examples/assets/torus.usda +0 -105
  344. warp_lang-1.0.2.dist-info/RECORD +0 -352
  345. {warp_lang-1.0.2.dist-info → warp_lang-1.1.0.dist-info}/WHEEL +0 -0
  346. {warp_lang-1.0.2.dist-info → warp_lang-1.1.0.dist-info}/top_level.txt +0 -0
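
The per-file diff shown below covers warp/native/quat.h, Warp's templated quaternion header. As a rough standalone sketch — not part of the package diff, and using hypothetical `Quat`/`Vec3` helper structs rather than `wp::quat_t` — the following C++ snippet mirrors the `quat_from_axis_angle` and `quat_rotate` formulas that appear in that header, which can be useful for sanity-checking the math:

```cpp
// Standalone sketch (assumption: plain float structs in place of Warp's
// templated wp::quat_t / wp::vec_t). It restates the axis-angle and
// rotation formulas visible in the quat.h diff below.
#include <cmath>
#include <cstdio>

struct Quat { float x, y, z, w; };   // imaginary x, y, z; real w (same layout as quat_t)
struct Vec3 { float x, y, z; };

Quat quat_from_axis_angle(const Vec3& axis, float angle)
{
    // w = cos(angle/2), (x, y, z) = axis * sin(angle/2)
    float half = 0.5f * angle;
    float s = std::sin(half);
    return { axis.x * s, axis.y * s, axis.z * s, std::cos(half) };
}

Vec3 quat_rotate(const Quat& q, const Vec3& v)
{
    // Same expansion as the header: c = 2*w*w - 1, d = 2 * (q_xyz . v)
    float c = 2.0f * q.w * q.w - 1.0f;
    float d = 2.0f * (q.x * v.x + q.y * v.y + q.z * v.z);
    return {
        v.x * c + q.x * d + (q.y * v.z - q.z * v.y) * q.w * 2.0f,
        v.y * c + q.y * d + (q.z * v.x - q.x * v.z) * q.w * 2.0f,
        v.z * c + q.z * d + (q.x * v.y - q.y * v.x) * q.w * 2.0f,
    };
}

int main()
{
    // Rotate the x-axis by 90 degrees about z; expect approximately (0, 1, 0).
    Quat q = quat_from_axis_angle({0.0f, 0.0f, 1.0f}, 1.57079632679f);
    Vec3 r = quat_rotate(q, {1.0f, 0.0f, 0.0f});
    std::printf("%f %f %f\n", r.x, r.y, r.z);
    return 0;
}
```

This sketch only restates the forward formulas visible in the diff; the header itself additionally defines adjoint (`adj_*`) versions of each operation used for Warp's automatic differentiation.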
warp/native/quat.h CHANGED
@@ -1,1085 +1,1085 @@
1
- /** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
2
- * NVIDIA CORPORATION and its licensors retain all intellectual property
3
- * and proprietary rights in and to this software, related documentation
4
- * and any modifications thereto. Any use, reproduction, disclosure or
5
- * distribution of this software and related documentation without an express
6
- * license agreement from NVIDIA CORPORATION is strictly prohibited.
7
- */
8
-
9
- #pragma once
10
-
11
- #include "mat.h"
12
-
13
- namespace wp
14
- {
15
-
16
- template<typename Type>
17
- struct quat_t
18
- {
19
- // zero constructor for adjoint variable initialization
20
- inline CUDA_CALLABLE quat_t(Type x=Type(0), Type y=Type(0), Type z=Type(0), Type w=Type(0)) : x(x), y(y), z(z), w(w) {}
21
- explicit inline CUDA_CALLABLE quat_t(const vec_t<3,Type>& v, Type w=Type(0)) : x(v[0]), y(v[1]), z(v[2]), w(w) {}
22
-
23
- template<typename OtherType>
24
- explicit inline CUDA_CALLABLE quat_t(const quat_t<OtherType>& other)
25
- {
26
- x = static_cast<Type>(other.x);
27
- y = static_cast<Type>(other.y);
28
- z = static_cast<Type>(other.z);
29
- w = static_cast<Type>(other.w);
30
- }
31
-
32
- // imaginary part
33
- Type x;
34
- Type y;
35
- Type z;
36
-
37
- // real part
38
- Type w;
39
- };
40
-
41
- using quat = quat_t<float>;
42
- using quath = quat_t<half>;
43
- using quatf = quat_t<float>;
44
- using quatd = quat_t<double>;
45
-
46
-
47
- template<typename Type>
48
- inline CUDA_CALLABLE bool operator==(const quat_t<Type>& a, const quat_t<Type>& b)
49
- {
50
- return a.x == b.x && a.y == b.y && a.z == b.z && a.w == b.w;
51
- }
52
-
53
- template<typename Type>
54
- inline bool CUDA_CALLABLE isfinite(const quat_t<Type>& q)
55
- {
56
- return isfinite(q.x) && isfinite(q.y) && isfinite(q.z) && isfinite(q.w);
57
- }
58
-
59
- template<typename Type>
60
- inline CUDA_CALLABLE quat_t<Type> atomic_add(quat_t<Type> * addr, quat_t<Type> value)
61
- {
62
- Type x = atomic_add(&(addr -> x), value.x);
63
- Type y = atomic_add(&(addr -> y), value.y);
64
- Type z = atomic_add(&(addr -> z), value.z);
65
- Type w = atomic_add(&(addr -> w), value.w);
66
-
67
- return quat_t<Type>(x, y, z, w);
68
- }
69
-
70
- template<typename Type>
71
- inline CUDA_CALLABLE void adj_quat_t(Type x, Type y, Type z, Type w, Type& adj_x, Type& adj_y, Type& adj_z, Type& adj_w, quat_t<Type> adj_ret)
72
- {
73
- adj_x += adj_ret.x;
74
- adj_y += adj_ret.y;
75
- adj_z += adj_ret.z;
76
- adj_w += adj_ret.w;
77
- }
78
-
79
- template<typename Type>
80
- inline CUDA_CALLABLE void adj_quat_t(const vec_t<3,Type>& v, Type w, vec_t<3,Type>& adj_v, Type& adj_w, quat_t<Type> adj_ret)
81
- {
82
- adj_v[0] += adj_ret.x;
83
- adj_v[1] += adj_ret.y;
84
- adj_v[2] += adj_ret.z;
85
- adj_w += adj_ret.w;
86
- }
87
-
88
- // casting constructor adjoint
89
- template<typename Type, typename OtherType>
90
- inline CUDA_CALLABLE void adj_quat_t(const quat_t<OtherType>& other, quat_t<OtherType>& adj_other, const quat_t<Type>& adj_ret)
91
- {
92
- adj_other.x += static_cast<OtherType>(adj_ret.x);
93
- adj_other.y += static_cast<OtherType>(adj_ret.y);
94
- adj_other.z += static_cast<OtherType>(adj_ret.z);
95
- adj_other.w += static_cast<OtherType>(adj_ret.w);
96
- }
97
-
98
- // forward methods
99
-
100
- template<typename Type>
101
- inline CUDA_CALLABLE quat_t<Type> quat_from_axis_angle(const vec_t<3,Type>& axis, Type angle)
102
- {
103
- Type half = angle*Type(Type(0.5));
104
- Type w = cos(half);
105
-
106
- Type sin_theta_over_two = sin(half);
107
- vec_t<3,Type> v = axis*sin_theta_over_two;
108
-
109
- return quat_t<Type>(v[0], v[1], v[2], w);
110
- }
111
-
112
- template<typename Type>
113
- inline CUDA_CALLABLE void quat_to_axis_angle(const quat_t<Type>& q, vec_t<3,Type>& axis, Type& angle)
114
- {
115
- vec_t<3,Type> v = vec_t<3,Type>(q.x, q.y, q.z);
116
- axis = q.w < Type(0) ? -normalize(v) : normalize(v);
117
- angle = Type(2) * atan2(length(v), abs(q.w));
118
- }
119
-
120
- template<typename Type>
121
- inline CUDA_CALLABLE quat_t<Type> quat_rpy(Type roll, Type pitch, Type yaw)
122
- {
123
- Type cy = cos(yaw * Type(0.5));
124
- Type sy = sin(yaw * Type(0.5));
125
- Type cr = cos(roll * Type(0.5));
126
- Type sr = sin(roll * Type(0.5));
127
- Type cp = cos(pitch * Type(0.5));
128
- Type sp = sin(pitch * Type(0.5));
129
-
130
- Type w = (cy * cr * cp + sy * sr * sp);
131
- Type x = (cy * sr * cp - sy * cr * sp);
132
- Type y = (cy * cr * sp + sy * sr * cp);
133
- Type z = (sy * cr * cp - cy * sr * sp);
134
-
135
- return quat_t<Type>(x, y, z, w);
136
- }
137
-
138
-
139
-
140
- template<typename Type>
141
- inline CUDA_CALLABLE quat_t<Type> quat_inverse(const quat_t<Type>& q)
142
- {
143
- return quat_t<Type>(-q.x, -q.y, -q.z, q.w);
144
- }
145
-
146
-
147
- template<typename Type>
148
- inline CUDA_CALLABLE Type dot(const quat_t<Type>& a, const quat_t<Type>& b)
149
- {
150
- return a.x*b.x + a.y*b.y + a.z*b.z + a.w*b.w;
151
- }
152
-
153
- template<typename Type>
154
- inline CUDA_CALLABLE Type tensordot(const quat_t<Type>& a, const quat_t<Type>& b)
155
- {
156
- // corresponds to `np.tensordot()` with all axes being contracted
157
- return dot(a, b);
158
- }
159
-
160
- template<typename Type>
161
- inline CUDA_CALLABLE Type length(const quat_t<Type>& q)
162
- {
163
- return sqrt(dot(q, q));
164
- }
165
-
166
- template<typename Type>
167
- inline CUDA_CALLABLE Type length_sq(const quat_t<Type>& q)
168
- {
169
- return dot(q, q);
170
- }
171
-
172
- template<typename Type>
173
- inline CUDA_CALLABLE quat_t<Type> normalize(const quat_t<Type>& q)
174
- {
175
- Type l = length(q);
176
- if (l > Type(kEps))
177
- {
178
- Type inv_l = Type(1)/l;
179
-
180
- return quat_t<Type>(q.x*inv_l, q.y*inv_l, q.z*inv_l, q.w*inv_l);
181
- }
182
- else
183
- {
184
- return quat_t<Type>(Type(0), Type(0), Type(0), Type(1));
185
- }
186
- }
187
-
188
- template<typename Type>
189
- inline CUDA_CALLABLE quat_t<Type> add(const quat_t<Type>& a, const quat_t<Type>& b)
190
- {
191
- return quat_t<Type>(a.x+b.x, a.y+b.y, a.z+b.z, a.w+b.w);
192
- }
193
-
194
- template<typename Type>
195
- inline CUDA_CALLABLE quat_t<Type> sub(const quat_t<Type>& a, const quat_t<Type>& b)
196
- {
197
- return quat_t<Type>(a.x-b.x, a.y-b.y, a.z-b.z, a.w-b.w);}
198
-
199
-
200
- template<typename Type>
201
- inline CUDA_CALLABLE quat_t<Type> mul(const quat_t<Type>& a, const quat_t<Type>& b)
202
- {
203
- return quat_t<Type>(a.w*b.x + b.w*a.x + a.y*b.z - b.y*a.z,
204
- a.w*b.y + b.w*a.y + a.z*b.x - b.z*a.x,
205
- a.w*b.z + b.w*a.z + a.x*b.y - b.x*a.y,
206
- a.w*b.w - a.x*b.x - a.y*b.y - a.z*b.z);
207
- }
208
-
209
- template<typename Type>
210
- inline CUDA_CALLABLE quat_t<Type> mul(const quat_t<Type>& a, Type s)
211
- {
212
- return quat_t<Type>(a.x*s, a.y*s, a.z*s, a.w*s);
213
- }
214
-
215
- template<typename Type>
216
- inline CUDA_CALLABLE quat_t<Type> mul(Type s, const quat_t<Type>& a)
217
- {
218
- return mul(a, s);
219
- }
220
-
221
- // division
222
- template<typename Type>
223
- inline CUDA_CALLABLE quat_t<Type> div(quat_t<Type> q, Type s)
224
- {
225
- return quat_t<Type>(q.x/s, q.y/s, q.z/s, q.w/s);
226
- }
227
-
228
- template<typename Type>
229
- inline CUDA_CALLABLE quat_t<Type> div(Type s, quat_t<Type> q)
230
- {
231
- return quat_t<Type>(s/q.x, s/q.y, s/q.z, s/q.w);
232
- }
233
-
234
- template<typename Type>
235
- inline CUDA_CALLABLE quat_t<Type> operator / (quat_t<Type> a, Type s)
236
- {
237
- return div(a,s);
238
- }
239
-
240
- template<typename Type>
241
- inline CUDA_CALLABLE quat_t<Type> operator / (Type s, quat_t<Type> a)
242
- {
243
- return div(s,a);
244
- }
245
-
246
- template<typename Type>
247
- inline CUDA_CALLABLE quat_t<Type> operator*(Type s, const quat_t<Type>& a)
248
- {
249
- return mul(a, s);
250
- }
251
-
252
- template<typename Type>
253
- inline CUDA_CALLABLE quat_t<Type> operator*(const quat_t<Type>& a, Type s)
254
- {
255
- return mul(a, s);
256
- }
257
-
258
- template<typename Type>
259
- inline CUDA_CALLABLE vec_t<3,Type> quat_rotate(const quat_t<Type>& q, const vec_t<3,Type>& x)
260
- {
261
- Type c = (Type(2)*q.w*q.w-Type(1));
262
- Type d = Type(2)*(q.x*x.c[0] + q.y*x.c[1] + q.z*x.c[2]);
263
- return vec_t<3,Type>(
264
- x.c[0]*c + q.x*d + (q.y * x[2] - q.z * x[1])*q.w*Type(2),
265
- x.c[1]*c + q.y*d + (q.z * x[0] - q.x * x[2])*q.w*Type(2),
266
- x.c[2]*c + q.z*d + (q.x * x[1] - q.y * x[0])*q.w*Type(2)
267
- );
268
- }
269
-
270
- template<typename Type>
271
- inline CUDA_CALLABLE vec_t<3,Type> quat_rotate_inv(const quat_t<Type>& q, const vec_t<3,Type>& x)
272
- {
273
- Type c = (Type(2)*q.w*q.w-Type(1));
274
- Type d = Type(2)*(q.x*x.c[0] + q.y*x.c[1] + q.z*x.c[2]);
275
- return vec_t<3,Type>(
276
- x.c[0]*c + q.x*d - (q.y * x[2] - q.z * x[1])*q.w*Type(2),
277
- x.c[1]*c + q.y*d - (q.z * x[0] - q.x * x[2])*q.w*Type(2),
278
- x.c[2]*c + q.z*d - (q.x * x[1] - q.y * x[0])*q.w*Type(2)
279
- );
280
- }
281
-
282
- template<typename Type>
283
- inline CUDA_CALLABLE quat_t<Type> quat_slerp(const quat_t<Type>& q0, const quat_t<Type>& q1, Type t)
284
- {
285
- vec_t<3,Type> axis;
286
- Type angle;
287
- quat_to_axis_angle(mul(quat_inverse(q0), q1), axis, angle);
288
- return mul(q0, quat_from_axis_angle(axis, t * angle));
289
- }
290
-
291
- template<typename Type>
292
- inline CUDA_CALLABLE mat_t<3,3,Type> quat_to_matrix(const quat_t<Type>& q)
293
- {
294
- vec_t<3,Type> c1 = quat_rotate(q, vec_t<3,Type>(1.0, 0.0, 0.0));
295
- vec_t<3,Type> c2 = quat_rotate(q, vec_t<3,Type>(0.0, 1.0, 0.0));
296
- vec_t<3,Type> c3 = quat_rotate(q, vec_t<3,Type>(0.0, 0.0, 1.0));
297
-
298
- return mat_t<3,3,Type>(c1, c2, c3);
299
- }
300
-
301
- template<typename Type>
302
- inline CUDA_CALLABLE quat_t<Type> quat_from_matrix(const mat_t<3,3,Type>& m)
303
- {
304
- const Type tr = m.data[0][0] + m.data[1][1] + m.data[2][2];
305
- Type x, y, z, w, h = Type(0);
306
-
307
- if (tr >= Type(0)) {
308
- h = sqrt(tr + Type(1));
309
- w = Type(0.5) * h;
310
- h = Type(0.5) / h;
311
-
312
- x = (m.data[2][1] - m.data[1][2]) * h;
313
- y = (m.data[0][2] - m.data[2][0]) * h;
314
- z = (m.data[1][0] - m.data[0][1]) * h;
315
- } else {
316
- size_t max_diag = 0;
317
- if (m.data[1][1] > m.data[0][0]) {
318
- max_diag = 1;
319
- }
320
- if (m.data[2][2] > m.data[max_diag][max_diag]) {
321
- max_diag = 2;
322
- }
323
-
324
- if (max_diag == 0) {
325
- h = sqrt((m.data[0][0] - (m.data[1][1] + m.data[2][2])) + Type(1));
326
- x = Type(0.5) * h;
327
- h = Type(0.5) / h;
328
-
329
- y = (m.data[0][1] + m.data[1][0]) * h;
330
- z = (m.data[2][0] + m.data[0][2]) * h;
331
- w = (m.data[2][1] - m.data[1][2]) * h;
332
- } else if (max_diag == 1) {
333
- h = sqrt((m.data[1][1] - (m.data[2][2] + m.data[0][0])) + Type(1));
334
- y = Type(0.5) * h;
335
- h = Type(0.5) / h;
336
-
337
- z = (m.data[1][2] + m.data[2][1]) * h;
338
- x = (m.data[0][1] + m.data[1][0]) * h;
339
- w = (m.data[0][2] - m.data[2][0]) * h;
340
- } if (max_diag == 2) {
341
- h = sqrt((m.data[2][2] - (m.data[0][0] + m.data[1][1])) + Type(1));
342
- z = Type(0.5) * h;
343
- h = Type(0.5) / h;
344
-
345
- x = (m.data[2][0] + m.data[0][2]) * h;
346
- y = (m.data[1][2] + m.data[2][1]) * h;
347
- w = (m.data[1][0] - m.data[0][1]) * h;
348
- }
349
- }
350
-
351
- return normalize(quat_t<Type>(x, y, z, w));
352
- }
353
-
354
- template<typename Type>
355
- inline CUDA_CALLABLE Type extract(const quat_t<Type>& a, int idx)
356
- {
357
- #if FP_CHECK
358
- if (idx < 0 || idx > 3)
359
- {
360
- printf("quat_t index %d out of bounds at %s %d", idx, __FILE__, __LINE__);
361
- assert(0);
362
- }
363
- #endif
364
-
365
- /*
366
- * Because quat data is not stored in an array, we index the quaternion by checking all possible idx values.
367
- * (&a.x)[idx] would be the preferred access strategy, but this results in undefined behavior in the clang compiler
368
- * at optimization level 3.
369
- */
370
- if (idx == 0) {return a.x;}
371
- else if (idx == 1) {return a.y;}
372
- else if (idx == 2) {return a.z;}
373
- else {return a.w;}
374
- }
375
-
376
- template<typename Type>
377
- CUDA_CALLABLE inline quat_t<Type> lerp(const quat_t<Type>& a, const quat_t<Type>& b, Type t)
378
- {
379
- return a*(Type(1)-t) + b*t;
380
- }
381
-
382
- template<typename Type>
383
- CUDA_CALLABLE inline void adj_lerp(const quat_t<Type>& a, const quat_t<Type>& b, Type t, quat_t<Type>& adj_a, quat_t<Type>& adj_b, Type& adj_t, const quat_t<Type>& adj_ret)
384
- {
385
- adj_a += adj_ret*(Type(1)-t);
386
- adj_b += adj_ret*t;
387
- adj_t += tensordot(b, adj_ret) - tensordot(a, adj_ret);
388
- }
389
-
390
- template<typename Type>
391
- inline CUDA_CALLABLE void adj_extract(const quat_t<Type>& a, int idx, quat_t<Type>& adj_a, int & adj_idx, Type & adj_ret)
392
- {
393
- #if FP_CHECK
394
- if (idx < 0 || idx > 3)
395
- {
396
- printf("quat_t index %d out of bounds at %s %d", idx, __FILE__, __LINE__);
397
- assert(0);
398
- }
399
- #endif
400
-
401
- // See wp::extract(const quat_t<Type>& a, int idx) note
402
- if (idx == 0) {adj_a.x += adj_ret;}
403
- else if (idx == 1) {adj_a.y += adj_ret;}
404
- else if (idx == 2) {adj_a.z += adj_ret;}
405
- else {adj_a.w += adj_ret;}
406
- }
407
-
408
-
409
- // backward methods
410
- template<typename Type>
411
- inline CUDA_CALLABLE void adj_quat_from_axis_angle(const vec_t<3,Type>& axis, Type angle, vec_t<3,Type>& adj_axis, Type& adj_angle, const quat_t<Type>& adj_ret)
412
- {
413
- vec_t<3,Type> v = vec_t<3,Type>(adj_ret.x, adj_ret.y, adj_ret.z);
414
-
415
- Type s = sin(angle*Type(0.5));
416
- Type c = cos(angle*Type(0.5));
417
-
418
- quat_t<Type> dqda = quat_t<Type>(axis[0]*c, axis[1]*c, axis[2]*c, -s)*Type(0.5);
419
-
420
- adj_axis += v*s;
421
- adj_angle += dot(dqda, adj_ret);
422
- }
423
-
424
- template<typename Type>
425
- inline CUDA_CALLABLE void adj_quat_to_axis_angle(const quat_t<Type>& q, vec_t<3,Type>& axis, Type& angle, quat_t<Type>& adj_q, const vec_t<3,Type>& adj_axis, const Type& adj_angle)
426
- {
427
- Type l = length(vec_t<3,Type>(q.x, q.y, q.z));
428
-
429
- Type ax_qx = Type(0);
430
- Type ax_qy = Type(0);
431
- Type ax_qz = Type(0);
432
- Type ay_qx = Type(0);
433
- Type ay_qy = Type(0);
434
- Type ay_qz = Type(0);
435
- Type az_qx = Type(0);
436
- Type az_qy = Type(0);
437
- Type az_qz = Type(0);
438
-
439
- Type t_qx = Type(0);
440
- Type t_qy = Type(0);
441
- Type t_qz = Type(0);
442
- Type t_qw = Type(0);
443
-
444
- Type flip = q.w < Type(0) ? -1.0 : 1.0;
445
-
446
- if (l > Type(0))
447
- {
448
- Type l_sq = l*l;
449
- Type l_inv = Type(1) / l;
450
- Type l_inv_sq = l_inv * l_inv;
451
- Type l_inv_cu = l_inv_sq * l_inv;
452
-
453
- Type C = flip * l_inv_cu;
454
- ax_qx = C * (q.y*q.y + q.z*q.z);
455
- ax_qy = -C * q.x*q.y;
456
- ax_qz = -C * q.x*q.z;
457
- ay_qx = -C * q.y*q.x;
458
- ay_qy = C * (q.x*q.x + q.z*q.z);
459
- ay_qz = -C * q.y*q.z;
460
- az_qx = -C * q.z*q.x;
461
- az_qy = -C * q.z*q.y;
462
- az_qz = C * (q.x*q.x + q.y*q.y);
463
-
464
- Type D = Type(2) * flip / (l_sq + q.w*q.w);
465
- t_qx = D * l_inv * q.x * q.w;
466
- t_qy = D * l_inv * q.y * q.w;
467
- t_qz = D * l_inv * q.z * q.w;
468
- t_qw = -D * l;
469
- }
470
- else
471
- {
472
- if (abs(q.w) > Type(kEps))
473
- {
474
- Type t_qx = Type(2) / (sqrt(Type(3)) * abs(q.w));
475
- Type t_qy = Type(2) / (sqrt(Type(3)) * abs(q.w));
476
- Type t_qz = Type(2) / (sqrt(Type(3)) * abs(q.w));
477
- }
478
- // o/w we have a null quat_t which cannot backpropagate
479
- }
480
-
481
- adj_q.x += ax_qx * adj_axis[0] + ay_qx * adj_axis[1] + az_qx * adj_axis[2] + t_qx * adj_angle;
482
- adj_q.y += ax_qy * adj_axis[0] + ay_qy * adj_axis[1] + az_qy * adj_axis[2] + t_qy * adj_angle;
483
- adj_q.z += ax_qz * adj_axis[0] + ay_qz * adj_axis[1] + az_qz * adj_axis[2] + t_qz * adj_angle;
484
- adj_q.w += t_qw * adj_angle;
485
- }
486
-
487
- template<typename Type>
488
- inline CUDA_CALLABLE void adj_quat_rpy(Type roll, Type pitch, Type yaw, Type& adj_roll, Type& adj_pitch, Type& adj_yaw, const quat_t<Type>& adj_ret)
489
- {
490
- Type cy = cos(yaw * Type(0.5));
491
- Type sy = sin(yaw * Type(0.5));
492
- Type cr = cos(roll * Type(0.5));
493
- Type sr = sin(roll * Type(0.5));
494
- Type cp = cos(pitch * Type(0.5));
495
- Type sp = sin(pitch * Type(0.5));
496
-
497
- Type w = (cy * cr * cp + sy * sr * sp);
498
- Type x = (cy * sr * cp - sy * cr * sp);
499
- Type y = (cy * cr * sp + sy * sr * cp);
500
- Type z = (sy * cr * cp - cy * sr * sp);
501
-
502
- Type dx_dr = Type(0.5) * w;
503
- Type dx_dp = -Type(0.5) * cy * sr * sp - Type(0.5) * sy * cr * cp;
504
- Type dx_dy = -Type(0.5) * y;
505
-
506
- Type dy_dr = Type(0.5) * z;
507
- Type dy_dp = Type(0.5) * cy * cr * cp - Type(0.5) * sy * sr * sp;
508
- Type dy_dy = Type(0.5) * x;
509
-
510
- Type dz_dr = -Type(0.5) * y;
511
- Type dz_dp = -Type(0.5) * sy * cr * sp - Type(0.5) * cy * sr * cp;
512
- Type dz_dy = Type(0.5) * w;
513
-
514
- Type dw_dr = -Type(0.5) * x;
515
- Type dw_dp = -Type(0.5) * cy * cr * sp + Type(0.5) * sy * sr * cp;
516
- Type dw_dy = -Type(0.5) * z;
517
-
518
- adj_roll += dot(quat_t<Type>(dx_dr, dy_dr, dz_dr, dw_dr), adj_ret);
519
- adj_pitch += dot(quat_t<Type>(dx_dp, dy_dp, dz_dp, dw_dp), adj_ret);
520
- adj_yaw += dot(quat_t<Type>(dx_dy, dy_dy, dz_dy, dw_dy), adj_ret);
521
- }
522
-
523
-
524
- template<typename Type>
525
- inline CUDA_CALLABLE void adj_dot(const quat_t<Type>& a, const quat_t<Type>& b, quat_t<Type>& adj_a, quat_t<Type>& adj_b, const Type adj_ret)
526
- {
527
- adj_a += b*adj_ret;
528
- adj_b += a*adj_ret;
529
- }
530
-
531
- template<typename Type>
532
- inline CUDA_CALLABLE void tensordot(const quat_t<Type>& a, const quat_t<Type>& b, quat_t<Type>& adj_a, quat_t<Type>& adj_b, const Type adj_ret)
533
- {
534
- adj_dot(a, b, adj_a, adj_b, adj_ret);
535
- }
536
-
537
- template<typename Type>
538
- inline CUDA_CALLABLE void adj_length(const quat_t<Type>& a, Type ret, quat_t<Type>& adj_a, const Type adj_ret)
539
- {
540
- if (ret > Type(kEps))
541
- {
542
- Type inv_l = Type(1)/ret;
543
-
544
- adj_a += quat_t<Type>(a.x*inv_l, a.y*inv_l, a.z*inv_l, a.w*inv_l) * adj_ret;
545
- }
546
- }
547
-
548
- template<typename Type>
549
- inline CUDA_CALLABLE void adj_length_sq(const quat_t<Type>& a, quat_t<Type>& adj_a, const Type adj_ret)
550
- {
551
- adj_a += Type(2)*a*adj_ret;
552
- }
553
-
554
- template<typename Type>
555
- inline CUDA_CALLABLE void adj_normalize(const quat_t<Type>& q, quat_t<Type>& adj_q, const quat_t<Type>& adj_ret)
556
- {
557
- Type l = length(q);
558
-
559
- if (l > Type(kEps))
560
- {
561
- Type l_inv = Type(1)/l;
562
-
563
- adj_q += adj_ret*l_inv - q*(l_inv*l_inv*l_inv*dot(q, adj_ret));
564
- }
565
- }
566
-
567
- template<typename Type>
568
- inline CUDA_CALLABLE void adj_quat_inverse(const quat_t<Type>& q, quat_t<Type>& adj_q, const quat_t<Type>& adj_ret)
569
- {
570
- adj_q.x -= adj_ret.x;
571
- adj_q.y -= adj_ret.y;
572
- adj_q.z -= adj_ret.z;
573
- adj_q.w += adj_ret.w;
574
- }
575
-
576
- template<typename Type>
577
- inline CUDA_CALLABLE void adj_add(const quat_t<Type>& a, const quat_t<Type>& b, quat_t<Type>& adj_a, quat_t<Type>& adj_b, const quat_t<Type>& adj_ret)
578
- {
579
- adj_a += adj_ret;
580
- adj_b += adj_ret;
581
- }
582
-
583
- template<typename Type>
584
- inline CUDA_CALLABLE void adj_sub(const quat_t<Type>& a, const quat_t<Type>& b, quat_t<Type>& adj_a, quat_t<Type>& adj_b, const quat_t<Type>& adj_ret)
585
- {
586
- adj_a += adj_ret;
587
- adj_b -= adj_ret;
588
- }
589
-
590
- template<typename Type>
591
- inline CUDA_CALLABLE void adj_mul(const quat_t<Type>& a, const quat_t<Type>& b, quat_t<Type>& adj_a, quat_t<Type>& adj_b, const quat_t<Type>& adj_ret)
592
- {
593
- // shorthand
594
- const quat_t<Type>& r = adj_ret;
595
-
596
- adj_a += quat_t<Type>(b.w*r.x - b.x*r.w + b.y*r.z - b.z*r.y,
597
- b.w*r.y - b.y*r.w - b.x*r.z + b.z*r.x,
598
- b.w*r.z + b.x*r.y - b.y*r.x - b.z*r.w,
599
- b.w*r.w + b.x*r.x + b.y*r.y + b.z*r.z);
600
-
601
- adj_b += quat_t<Type>(a.w*r.x - a.x*r.w - a.y*r.z + a.z*r.y,
602
- a.w*r.y - a.y*r.w + a.x*r.z - a.z*r.x,
603
- a.w*r.z - a.x*r.y + a.y*r.x - a.z*r.w,
604
- a.w*r.w + a.x*r.x + a.y*r.y + a.z*r.z);
605
-
606
- }
607
-
608
- template<typename Type>
609
- inline CUDA_CALLABLE void adj_mul(const quat_t<Type>& a, Type s, quat_t<Type>& adj_a, Type& adj_s, const quat_t<Type>& adj_ret)
610
- {
611
- adj_a += adj_ret*s;
612
- adj_s += dot(a, adj_ret);
613
- }
614
-
615
- template<typename Type>
616
- inline CUDA_CALLABLE void adj_mul(Type s, const quat_t<Type>& a, Type& adj_s, quat_t<Type>& adj_a, const quat_t<Type>& adj_ret)
617
- {
618
- adj_mul(a, s, adj_a, adj_s, adj_ret);
619
- }
620
-
621
- template<typename Type>
622
- inline CUDA_CALLABLE void adj_div(quat_t<Type> a, Type s, quat_t<Type>& adj_a, Type& adj_s, const quat_t<Type>& adj_ret)
623
- {
624
- adj_s -= dot(a, adj_ret)/ (s * s); // - a / s^2
625
- adj_a += adj_ret / s;
626
- }
627
-
628
- template<typename Type>
629
- inline CUDA_CALLABLE void adj_div(Type s, quat_t<Type> a, Type& adj_s, quat_t<Type>& adj_a, const quat_t<Type>& adj_ret)
630
- {
631
- adj_s -= dot(a, adj_ret)/ (s * s); // - a / s^2
632
- adj_a += s / adj_ret;
633
- }
634
-
635
- template<typename Type>
636
- inline CUDA_CALLABLE void adj_quat_rotate(const quat_t<Type>& q, const vec_t<3,Type>& p, quat_t<Type>& adj_q, vec_t<3,Type>& adj_p, const vec_t<3,Type>& adj_ret)
637
- {
638
-
639
- {
640
- Type t2 = p[2]*q.z*Type(2);
641
- Type t3 = p[1]*q.w*Type(2);
642
- Type t4 = p[0]*q.w*Type(2);
643
- Type t5 = p[0]*q.x*Type(2);
644
- Type t6 = p[1]*q.y*Type(2);
645
- Type t7 = p[2]*q.y*Type(2);
646
- Type t8 = p[0]*q.z*Type(2);
647
- Type t9 = p[0]*q.y*Type(2);
648
- Type t10 = p[1]*q.x*Type(2);
649
- adj_q.x += adj_ret[2]*(t3+t8)+adj_ret[0]*(t2+t6+p[0]*q.x*Type(4))+adj_ret[1]*(t9-p[2]*q.w*Type(2));
650
- adj_q.y += adj_ret[1]*(t2+t5+p[1]*q.y*Type(4))+adj_ret[0]*(t10+p[2]*q.w*Type(2))-adj_ret[2]*(t4-p[1]*q.z*Type(2));
651
- adj_q.z += adj_ret[1]*(t4+t7)+adj_ret[2]*(t5+t6+p[2]*q.z*Type(4))-adj_ret[0]*(t3-p[2]*q.x*Type(2));
652
- adj_q.w += adj_ret[0]*(t7+p[0]*q.w*Type(4)-p[1]*q.z*Type(2))+adj_ret[1]*(t8+p[1]*q.w*Type(4)-p[2]*q.x*Type(2))+adj_ret[2]*(-t9+t10+p[2]*q.w*Type(4));
653
- }
654
-
655
- {
656
- Type t2 = q.w*q.w;
657
- Type t3 = t2*Type(2);
658
- Type t4 = q.w*q.z*Type(2);
659
- Type t5 = q.x*q.y*Type(2);
660
- Type t6 = q.w*q.y*Type(2);
661
- Type t7 = q.w*q.x*Type(2);
662
- Type t8 = q.y*q.z*Type(2);
663
- adj_p[0] += adj_ret[1]*(t4+t5)+adj_ret[0]*(t3+(q.x*q.x)*Type(2)-Type(1))-adj_ret[2]*(t6-q.x*q.z*Type(2));
664
- adj_p[1] += adj_ret[2]*(t7+t8)-adj_ret[0]*(t4-t5)+adj_ret[1]*(t3+(q.y*q.y)*Type(2)-Type(1));
665
- adj_p[2] += -adj_ret[1]*(t7-t8)+adj_ret[2]*(t3+(q.z*q.z)*Type(2)-Type(1))+adj_ret[0]*(t6+q.x*q.z*Type(2));
666
- }
667
- }
668
-
669
- template<typename Type>
670
- inline CUDA_CALLABLE void adj_quat_rotate_inv(const quat_t<Type>& q, const vec_t<3,Type>& p, quat_t<Type>& adj_q, vec_t<3,Type>& adj_p, const vec_t<3,Type>& adj_ret)
671
- {
672
- const vec_t<3,Type>& r = adj_ret;
673
-
674
- {
675
- Type t2 = p[2]*q.w*Type(2);
676
- Type t3 = p[2]*q.z*Type(2);
677
- Type t4 = p[1]*q.w*Type(2);
678
- Type t5 = p[0]*q.w*Type(2);
679
- Type t6 = p[0]*q.x*Type(2);
680
- Type t7 = p[1]*q.y*Type(2);
681
- Type t8 = p[1]*q.z*Type(2);
682
- Type t9 = p[2]*q.x*Type(2);
683
- Type t10 = p[0]*q.y*Type(2);
684
- adj_q.x += r[1]*(t2+t10)+r[0]*(t3+t7+p[0]*q.x*Type(4))-r[2]*(t4-p[0]*q.z*Type(2));
685
- adj_q.y += r[2]*(t5+t8)+r[1]*(t3+t6+p[1]*q.y*Type(4))-r[0]*(t2-p[1]*q.x*Type(2));
686
- adj_q.z += r[0]*(t4+t9)+r[2]*(t6+t7+p[2]*q.z*Type(4))-r[1]*(t5-p[2]*q.y*Type(2));
687
- adj_q.w += r[0]*(t8+p[0]*q.w*Type(4)-p[2]*q.y*Type(2))+r[1]*(t9+p[1]*q.w*Type(4)-p[0]*q.z*Type(2))+r[2]*(t10-p[1]*q.x*Type(2)+p[2]*q.w*Type(4));
688
- }
689
-
690
- {
691
- Type t2 = q.w*q.w;
692
- Type t3 = t2*Type(2);
693
- Type t4 = q.w*q.z*Type(2);
694
- Type t5 = q.w*q.y*Type(2);
695
- Type t6 = q.x*q.z*Type(2);
696
- Type t7 = q.w*q.x*Type(2);
697
- adj_p[0] += r[2]*(t5+t6)+r[0]*(t3+(q.x*q.x)*Type(2)-Type(1))-r[1]*(t4-q.x*q.y*Type(2));
698
- adj_p[1] += r[1]*(t3+(q.y*q.y)*Type(2)-Type(1))+r[0]*(t4+q.x*q.y*Type(2))-r[2]*(t7-q.y*q.z*Type(2));
699
- adj_p[2] += -r[0]*(t5-t6)+r[2]*(t3+(q.z*q.z)*Type(2)-Type(1))+r[1]*(t7+q.y*q.z*Type(2));
700
- }
701
- }
702
-
703
- template<typename Type>
704
- inline CUDA_CALLABLE void adj_quat_slerp(const quat_t<Type>& q0, const quat_t<Type>& q1, Type t, quat_t<Type>& ret, quat_t<Type>& adj_q0, quat_t<Type>& adj_q1, Type& adj_t, const quat_t<Type>& adj_ret)
705
- {
706
- vec_t<3,Type> axis;
707
- Type angle;
708
- quat_t<Type> q0_inv = quat_inverse(q0);
709
- quat_t<Type> q_inc = mul(q0_inv, q1);
710
- quat_to_axis_angle(q_inc, axis, angle);
711
- quat_t<Type> qt = quat_from_axis_angle(axis, angle * t);
712
- angle = angle * 0.5;
713
-
714
- // adj_t
715
- adj_t += dot(mul(ret, quat_t<Type>(angle*axis[0], angle*axis[1], angle*axis[2], Type(0))), adj_ret);
716
-
717
- // adj_q0
718
- quat_t<Type> q_inc_x_q0;
719
- quat_t<Type> q_inc_y_q0;
720
- quat_t<Type> q_inc_z_q0;
721
- quat_t<Type> q_inc_w_q0;
722
-
723
- quat_t<Type> q_inc_x_q1;
724
- quat_t<Type> q_inc_y_q1;
725
- quat_t<Type> q_inc_z_q1;
726
- quat_t<Type> q_inc_w_q1;
727
-
728
- adj_mul(q0_inv, q1, q_inc_x_q0, q_inc_x_q1, quat_t<Type>(1.f, Type(0), Type(0), Type(0)));
729
- adj_mul(q0_inv, q1, q_inc_y_q0, q_inc_y_q1, quat_t<Type>(Type(0), 1.f, Type(0), Type(0)));
730
- adj_mul(q0_inv, q1, q_inc_z_q0, q_inc_z_q1, quat_t<Type>(Type(0), Type(0), 1.f, Type(0)));
731
- adj_mul(q0_inv, q1, q_inc_w_q0, q_inc_w_q1, quat_t<Type>(Type(0), Type(0), Type(0), 1.f));
732
-
733
- quat_t<Type> a_x_q_inc;
734
- quat_t<Type> a_y_q_inc;
735
- quat_t<Type> a_z_q_inc;
736
- quat_t<Type> t_q_inc;
737
-
738
- adj_quat_to_axis_angle(q_inc, axis, angle, a_x_q_inc, vec_t<3,Type>(1.f, Type(0), Type(0)), Type(0));
739
- adj_quat_to_axis_angle(q_inc, axis, angle, a_y_q_inc, vec_t<3,Type>(Type(0), 1.f, Type(0)), Type(0));
740
- adj_quat_to_axis_angle(q_inc, axis, angle, a_z_q_inc, vec_t<3,Type>(Type(0), Type(0), 1.f), Type(0));
741
- adj_quat_to_axis_angle(q_inc, axis, angle, t_q_inc, vec_t<3,Type>(Type(0), Type(0), Type(0)), Type(1));
742
-
743
- Type cs = cos(angle*t);
744
- Type sn = sin(angle*t);
745
-
746
- quat_t<Type> q_inc_q0_x = quat_t<Type>(-q_inc_x_q0.x, -q_inc_y_q0.x, -q_inc_z_q0.x, -q_inc_w_q0.x);
747
- quat_t<Type> q_inc_q0_y = quat_t<Type>(-q_inc_x_q0.y, -q_inc_y_q0.y, -q_inc_z_q0.y, -q_inc_w_q0.y);
748
- quat_t<Type> q_inc_q0_z = quat_t<Type>(-q_inc_x_q0.z, -q_inc_y_q0.z, -q_inc_z_q0.z, -q_inc_w_q0.z);
749
- quat_t<Type> q_inc_q0_w = quat_t<Type>(q_inc_x_q0.w, q_inc_y_q0.w, q_inc_z_q0.w, q_inc_w_q0.w);
750
-
751
- Type a_x_q0_x = dot(a_x_q_inc, q_inc_q0_x);
752
- Type a_x_q0_y = dot(a_x_q_inc, q_inc_q0_y);
753
- Type a_x_q0_z = dot(a_x_q_inc, q_inc_q0_z);
754
- Type a_x_q0_w = dot(a_x_q_inc, q_inc_q0_w);
755
- Type a_y_q0_x = dot(a_y_q_inc, q_inc_q0_x);
756
- Type a_y_q0_y = dot(a_y_q_inc, q_inc_q0_y);
757
- Type a_y_q0_z = dot(a_y_q_inc, q_inc_q0_z);
758
- Type a_y_q0_w = dot(a_y_q_inc, q_inc_q0_w);
759
- Type a_z_q0_x = dot(a_z_q_inc, q_inc_q0_x);
760
- Type a_z_q0_y = dot(a_z_q_inc, q_inc_q0_y);
761
- Type a_z_q0_z = dot(a_z_q_inc, q_inc_q0_z);
762
- Type a_z_q0_w = dot(a_z_q_inc, q_inc_q0_w);
763
- Type t_q0_x = dot(t_q_inc, q_inc_q0_x);
764
- Type t_q0_y = dot(t_q_inc, q_inc_q0_y);
765
- Type t_q0_z = dot(t_q_inc, q_inc_q0_z);
766
- Type t_q0_w = dot(t_q_inc, q_inc_q0_w);
767
-
768
- quat_t<Type> q_s_q0_x = mul(quat_t<Type>(1.f, Type(0), Type(0), Type(0)), qt) + mul(q0, quat_t<Type>(
769
- 0.5 * t * axis[0] * t_q0_x * cs + a_x_q0_x * sn,
770
- 0.5 * t * axis[1] * t_q0_x * cs + a_y_q0_x * sn,
771
- 0.5 * t * axis[2] * t_q0_x * cs + a_z_q0_x * sn,
772
- -0.5 * t * t_q0_x * sn));
773
-
774
- quat_t<Type> q_s_q0_y = mul(quat_t<Type>(Type(0), 1.f, Type(0), Type(0)), qt) + mul(q0, quat_t<Type>(
775
- 0.5 * t * axis[0] * t_q0_y * cs + a_x_q0_y * sn,
776
- 0.5 * t * axis[1] * t_q0_y * cs + a_y_q0_y * sn,
777
- 0.5 * t * axis[2] * t_q0_y * cs + a_z_q0_y * sn,
778
- -0.5 * t * t_q0_y * sn));
779
-
780
- quat_t<Type> q_s_q0_z = mul(quat_t<Type>(Type(0), Type(0), 1.f, Type(0)), qt) + mul(q0, quat_t<Type>(
781
- 0.5 * t * axis[0] * t_q0_z * cs + a_x_q0_z * sn,
782
- 0.5 * t * axis[1] * t_q0_z * cs + a_y_q0_z * sn,
783
- 0.5 * t * axis[2] * t_q0_z * cs + a_z_q0_z * sn,
784
- -0.5 * t * t_q0_z * sn));
785
-
786
- quat_t<Type> q_s_q0_w = mul(quat_t<Type>(Type(0), Type(0), Type(0), 1.f), qt) + mul(q0, quat_t<Type>(
787
- 0.5 * t * axis[0] * t_q0_w * cs + a_x_q0_w * sn,
788
- 0.5 * t * axis[1] * t_q0_w * cs + a_y_q0_w * sn,
789
- 0.5 * t * axis[2] * t_q0_w * cs + a_z_q0_w * sn,
790
- -0.5 * t * t_q0_w * sn));
791
-
792
- adj_q0.x += dot(q_s_q0_x, adj_ret);
793
- adj_q0.y += dot(q_s_q0_y, adj_ret);
794
- adj_q0.z += dot(q_s_q0_z, adj_ret);
795
- adj_q0.w += dot(q_s_q0_w, adj_ret);
796
-
797
- // adj_q1
798
- quat_t<Type> q_inc_q1_x = quat_t<Type>(q_inc_x_q1.x, q_inc_y_q1.x, q_inc_z_q1.x, q_inc_w_q1.x);
799
- quat_t<Type> q_inc_q1_y = quat_t<Type>(q_inc_x_q1.y, q_inc_y_q1.y, q_inc_z_q1.y, q_inc_w_q1.y);
800
- quat_t<Type> q_inc_q1_z = quat_t<Type>(q_inc_x_q1.z, q_inc_y_q1.z, q_inc_z_q1.z, q_inc_w_q1.z);
801
- quat_t<Type> q_inc_q1_w = quat_t<Type>(q_inc_x_q1.w, q_inc_y_q1.w, q_inc_z_q1.w, q_inc_w_q1.w);
802
-
803
- Type a_x_q1_x = dot(a_x_q_inc, q_inc_q1_x);
804
- Type a_x_q1_y = dot(a_x_q_inc, q_inc_q1_y);
805
- Type a_x_q1_z = dot(a_x_q_inc, q_inc_q1_z);
806
- Type a_x_q1_w = dot(a_x_q_inc, q_inc_q1_w);
807
- Type a_y_q1_x = dot(a_y_q_inc, q_inc_q1_x);
808
- Type a_y_q1_y = dot(a_y_q_inc, q_inc_q1_y);
809
- Type a_y_q1_z = dot(a_y_q_inc, q_inc_q1_z);
810
- Type a_y_q1_w = dot(a_y_q_inc, q_inc_q1_w);
811
- Type a_z_q1_x = dot(a_z_q_inc, q_inc_q1_x);
812
- Type a_z_q1_y = dot(a_z_q_inc, q_inc_q1_y);
813
- Type a_z_q1_z = dot(a_z_q_inc, q_inc_q1_z);
814
- Type a_z_q1_w = dot(a_z_q_inc, q_inc_q1_w);
815
- Type t_q1_x = dot(t_q_inc, q_inc_q1_x);
816
- Type t_q1_y = dot(t_q_inc, q_inc_q1_y);
817
- Type t_q1_z = dot(t_q_inc, q_inc_q1_z);
818
- Type t_q1_w = dot(t_q_inc, q_inc_q1_w);
819
-
820
- quat_t<Type> q_s_q1_x = mul(q0, quat_t<Type>(
821
- 0.5 * t * axis[0] * t_q1_x * cs + a_x_q1_x * sn,
822
- 0.5 * t * axis[1] * t_q1_x * cs + a_y_q1_x * sn,
823
- 0.5 * t * axis[2] * t_q1_x * cs + a_z_q1_x * sn,
824
- -0.5 * t * t_q1_x * sn));
825
-
826
- quat_t<Type> q_s_q1_y = mul(q0, quat_t<Type>(
827
- 0.5 * t * axis[0] * t_q1_y * cs + a_x_q1_y * sn,
828
- 0.5 * t * axis[1] * t_q1_y * cs + a_y_q1_y * sn,
829
- 0.5 * t * axis[2] * t_q1_y * cs + a_z_q1_y * sn,
830
- -0.5 * t * t_q1_y * sn));
831
-
832
- quat_t<Type> q_s_q1_z = mul(q0, quat_t<Type>(
833
- 0.5 * t * axis[0] * t_q1_z * cs + a_x_q1_z * sn,
834
- 0.5 * t * axis[1] * t_q1_z * cs + a_y_q1_z * sn,
835
- 0.5 * t * axis[2] * t_q1_z * cs + a_z_q1_z * sn,
836
- -0.5 * t * t_q1_z * sn));
837
-
838
- quat_t<Type> q_s_q1_w = mul(q0, quat_t<Type>(
839
- 0.5 * t * axis[0] * t_q1_w * cs + a_x_q1_w * sn,
840
- 0.5 * t * axis[1] * t_q1_w * cs + a_y_q1_w * sn,
841
- 0.5 * t * axis[2] * t_q1_w * cs + a_z_q1_w * sn,
842
- -0.5 * t * t_q1_w * sn));
843
-
844
- adj_q1.x += dot(q_s_q1_x, adj_ret);
845
- adj_q1.y += dot(q_s_q1_y, adj_ret);
846
- adj_q1.z += dot(q_s_q1_z, adj_ret);
847
- adj_q1.w += dot(q_s_q1_w, adj_ret);
848
-
849
- }
850
-
851
- template<typename Type>
852
- inline CUDA_CALLABLE void adj_quat_to_matrix(const quat_t<Type>& q, quat_t<Type>& adj_q, mat_t<3,3,Type>& adj_ret)
853
- {
854
- // we don't care about adjoint w.r.t. constant identity matrix
855
- vec_t<3,Type> t;
856
-
857
- adj_quat_rotate(q, vec_t<3,Type>(1.0, 0.0, 0.0), adj_q, t, adj_ret.get_col(0));
858
- adj_quat_rotate(q, vec_t<3,Type>(0.0, 1.0, 0.0), adj_q, t, adj_ret.get_col(1));
859
- adj_quat_rotate(q, vec_t<3,Type>(0.0, 0.0, 1.0), adj_q, t, adj_ret.get_col(2));
860
- }
861
-
862
- template<typename Type>
863
- inline CUDA_CALLABLE void adj_quat_from_matrix(const mat_t<3,3,Type>& m, mat_t<3,3,Type>& adj_m, const quat_t<Type>& adj_ret)
864
- {
865
- const Type tr = m.data[0][0] + m.data[1][1] + m.data[2][2];
866
- Type x, y, z, w, h = Type(0);
867
-
868
- Type dx_dm00 = Type(0), dx_dm01 = Type(0), dx_dm02 = Type(0);
869
- Type dx_dm10 = Type(0), dx_dm11 = Type(0), dx_dm12 = Type(0);
870
- Type dx_dm20 = Type(0), dx_dm21 = Type(0), dx_dm22 = Type(0);
871
- Type dy_dm00 = Type(0), dy_dm01 = Type(0), dy_dm02 = Type(0);
872
- Type dy_dm10 = Type(0), dy_dm11 = Type(0), dy_dm12 = Type(0);
873
- Type dy_dm20 = Type(0), dy_dm21 = Type(0), dy_dm22 = Type(0);
874
- Type dz_dm00 = Type(0), dz_dm01 = Type(0), dz_dm02 = Type(0);
875
- Type dz_dm10 = Type(0), dz_dm11 = Type(0), dz_dm12 = Type(0);
876
- Type dz_dm20 = Type(0), dz_dm21 = Type(0), dz_dm22 = Type(0);
877
- Type dw_dm00 = Type(0), dw_dm01 = Type(0), dw_dm02 = Type(0);
878
- Type dw_dm10 = Type(0), dw_dm11 = Type(0), dw_dm12 = Type(0);
879
- Type dw_dm20 = Type(0), dw_dm21 = Type(0), dw_dm22 = Type(0);
880
-
881
- if (tr >= Type(0)) {
882
- h = sqrt(tr + Type(1));
883
- w = Type(0.5) * h;
884
- h = Type(0.5) / h;
885
-
886
- x = (m.data[2][1] - m.data[1][2]) * h;
887
- y = (m.data[0][2] - m.data[2][0]) * h;
888
- z = (m.data[1][0] - m.data[0][1]) * h;
889
-
890
- dw_dm00 = Type(0.5) * h;
891
- dw_dm11 = Type(0.5) * h;
892
- dw_dm22 = Type(0.5) * h;
893
- dx_dm21 = h;
894
- dx_dm12 = -h;
895
- dx_dm00 = Type(2) * h*h*h * (m.data[1][2] - m.data[2][1]);
896
- dx_dm11 = Type(2) * h*h*h * (m.data[1][2] - m.data[2][1]);
897
- dx_dm22 = Type(2) * h*h*h * (m.data[1][2] - m.data[2][1]);
898
- dy_dm02 = h;
899
- dy_dm20 = -h;
900
- dy_dm00 = Type(2) * h*h*h * (m.data[2][0] - m.data[0][2]);
901
- dy_dm11 = Type(2) * h*h*h * (m.data[2][0] - m.data[0][2]);
902
- dy_dm22 = Type(2) * h*h*h * (m.data[2][0] - m.data[0][2]);
903
- dz_dm10 = h;
904
- dz_dm01 = -h;
905
- dz_dm00 = Type(2) * h*h*h * (m.data[0][1] - m.data[1][0]);
906
- dz_dm11 = Type(2) * h*h*h * (m.data[0][1] - m.data[1][0]);
907
- dz_dm22 = Type(2) * h*h*h * (m.data[0][1] - m.data[1][0]);
908
- } else {
909
- size_t max_diag = 0;
910
- if (m.data[1][1] > m.data[0][0]) {
911
- max_diag = 1;
912
- }
913
- if (m.data[2][2] > m.data[max_diag][max_diag]) {
914
- max_diag = 2;
915
- }
916
-
917
- if (max_diag == 0) {
918
- h = sqrt((m.data[0][0] - (m.data[1][1] + m.data[2][2])) + Type(1));
919
- x = Type(0.5) * h;
920
- h = Type(0.5) / h;
921
-
922
- y = (m.data[0][1] + m.data[1][0]) * h;
923
- z = (m.data[2][0] + m.data[0][2]) * h;
924
- w = (m.data[2][1] - m.data[1][2]) * h;
925
-
926
- dx_dm00 = Type(0.5) * h;
927
- dx_dm11 = -Type(0.5) * h;
928
- dx_dm22 = -Type(0.5) * h;
929
- dy_dm01 = h;
930
- dy_dm10 = h;
931
- dy_dm00 = -Type(2) * h*h*h * (m.data[0][1] + m.data[1][0]);
932
- dy_dm11 = Type(2) * h*h*h * (m.data[0][1] + m.data[1][0]);
933
- dy_dm22 = Type(2) * h*h*h * (m.data[0][1] + m.data[1][0]);
934
- dz_dm20 = h;
935
- dz_dm02 = h;
936
- dz_dm00 = -Type(2) * h*h*h * (m.data[2][0] + m.data[0][2]);
937
- dz_dm11 = Type(2) * h*h*h * (m.data[2][0] + m.data[0][2]);
938
- dz_dm22 = Type(2) * h*h*h * (m.data[2][0] + m.data[0][2]);
939
- dw_dm21 = h;
940
- dw_dm12 = -h;
941
- dw_dm00 = Type(2) * h*h*h * (m.data[1][2] - m.data[2][1]);
942
- dw_dm11 = Type(2) * h*h*h * (m.data[2][1] - m.data[1][2]);
943
- dw_dm22 = Type(2) * h*h*h * (m.data[2][1] - m.data[1][2]);
944
- } else if (max_diag == 1) {
945
- h = sqrt((m.data[1][1] - (m.data[2][2] + m.data[0][0])) + Type(1));
946
- y = Type(0.5) * h;
947
- h = Type(0.5) / h;
948
-
949
- z = (m.data[1][2] + m.data[2][1]) * h;
950
- x = (m.data[0][1] + m.data[1][0]) * h;
951
- w = (m.data[0][2] - m.data[2][0]) * h;
952
-
953
- dy_dm00 = -Type(0.5) * h;
954
- dy_dm11 = Type(0.5) * h;
955
- dy_dm22 = -Type(0.5) * h;
956
- dz_dm12 = h;
957
- dz_dm21 = h;
958
- dz_dm00 = Type(2) * h*h*h * (m.data[1][2] + m.data[2][1]);
959
- dz_dm11 = -Type(2) * h*h*h * (m.data[1][2] + m.data[2][1]);
960
- dz_dm22 = Type(2) * h*h*h * (m.data[1][2] + m.data[2][1]);
961
- dx_dm01 = h;
962
- dx_dm10 = h;
963
- dx_dm00 = Type(2) * h*h*h * (m.data[0][1] + m.data[1][0]);
964
- dx_dm11 = -Type(2) * h*h*h * (m.data[0][1] + m.data[1][0]);
965
- dx_dm22 = Type(2) * h*h*h * (m.data[0][1] + m.data[1][0]);
966
- dw_dm02 = h;
967
- dw_dm20 = -h;
968
- dw_dm00 = Type(2) * h*h*h * (m.data[0][2] - m.data[2][0]);
969
- dw_dm11 = Type(2) * h*h*h * (m.data[2][0] - m.data[0][2]);
970
- dw_dm22 = Type(2) * h*h*h * (m.data[0][2] - m.data[2][0]);
971
- } if (max_diag == 2) {
972
- h = sqrt((m.data[2][2] - (m.data[0][0] + m.data[1][1])) + Type(1));
973
- z = Type(0.5) * h;
974
- h = Type(0.5) / h;
975
-
976
- x = (m.data[2][0] + m.data[0][2]) * h;
977
- y = (m.data[1][2] + m.data[2][1]) * h;
978
- w = (m.data[1][0] - m.data[0][1]) * h;
979
-
980
- dz_dm00 = -Type(0.5) * h;
981
- dz_dm11 = -Type(0.5) * h;
982
- dz_dm22 = Type(0.5) * h;
983
- dx_dm20 = h;
984
- dx_dm02 = h;
985
- dx_dm00 = Type(2) * h*h*h * (m.data[2][0] + m.data[0][2]);
986
- dx_dm11 = Type(2) * h*h*h * (m.data[2][0] + m.data[0][2]);
987
- dx_dm22 = -Type(2) * h*h*h * (m.data[2][0] + m.data[0][2]);
988
- dy_dm12 = h;
989
- dy_dm21 = h;
990
- dy_dm00 = Type(2) * h*h*h * (m.data[1][2] + m.data[2][1]);
991
- dy_dm11 = Type(2) * h*h*h * (m.data[1][2] + m.data[2][1]);
992
- dy_dm22 = -Type(2) * h*h*h * (m.data[1][2] + m.data[2][1]);
993
- dw_dm10 = h;
994
- dw_dm01 = -h;
995
- dw_dm00 = Type(2) * h*h*h * (m.data[1][0] - m.data[0][1]);
996
- dw_dm11 = Type(2) * h*h*h * (m.data[1][0] - m.data[0][1]);
997
- dw_dm22 = Type(2) * h*h*h * (m.data[0][1] - m.data[1][0]);
998
- }
999
- }
1000
-
1001
- quat_t<Type> dq_dm00 = quat_t<Type>(dx_dm00, dy_dm00, dz_dm00, dw_dm00);
1002
- quat_t<Type> dq_dm01 = quat_t<Type>(dx_dm01, dy_dm01, dz_dm01, dw_dm01);
1003
- quat_t<Type> dq_dm02 = quat_t<Type>(dx_dm02, dy_dm02, dz_dm02, dw_dm02);
1004
- quat_t<Type> dq_dm10 = quat_t<Type>(dx_dm10, dy_dm10, dz_dm10, dw_dm10);
1005
- quat_t<Type> dq_dm11 = quat_t<Type>(dx_dm11, dy_dm11, dz_dm11, dw_dm11);
1006
- quat_t<Type> dq_dm12 = quat_t<Type>(dx_dm12, dy_dm12, dz_dm12, dw_dm12);
1007
- quat_t<Type> dq_dm20 = quat_t<Type>(dx_dm20, dy_dm20, dz_dm20, dw_dm20);
1008
- quat_t<Type> dq_dm21 = quat_t<Type>(dx_dm21, dy_dm21, dz_dm21, dw_dm21);
1009
- quat_t<Type> dq_dm22 = quat_t<Type>(dx_dm22, dy_dm22, dz_dm22, dw_dm22);
1010
-
1011
- quat_t<Type> adj_q;
1012
- adj_normalize(quat_t<Type>(x, y, z, w), adj_q, adj_ret);
1013
-
1014
- adj_m.data[0][0] += dot(dq_dm00, adj_q);
1015
- adj_m.data[0][1] += dot(dq_dm01, adj_q);
1016
- adj_m.data[0][2] += dot(dq_dm02, adj_q);
1017
- adj_m.data[1][0] += dot(dq_dm10, adj_q);
1018
- adj_m.data[1][1] += dot(dq_dm11, adj_q);
1019
- adj_m.data[1][2] += dot(dq_dm12, adj_q);
1020
- adj_m.data[2][0] += dot(dq_dm20, adj_q);
1021
- adj_m.data[2][1] += dot(dq_dm21, adj_q);
1022
- adj_m.data[2][2] += dot(dq_dm22, adj_q);
1023
- }
1024
-
1025
- template<typename Type>
1026
- inline CUDA_CALLABLE void adj_mat_t(const vec_t<3,Type>& pos, const quat_t<Type>& rot, const vec_t<3,Type>& scale,
1027
- vec_t<3,Type>& adj_pos, quat_t<Type>& adj_rot, vec_t<3,Type>& adj_scale, const mat_t<4,4,Type>& adj_ret)
1028
- {
1029
- mat_t<3,3,Type> R = quat_to_matrix(rot);
1030
- mat_t<3,3,Type> adj_R(0);
1031
-
1032
- adj_pos[0] += adj_ret.data[0][3];
1033
- adj_pos[1] += adj_ret.data[1][3];
1034
- adj_pos[2] += adj_ret.data[2][3];
1035
-
1036
- adj_mul(R.data[0][0], scale[0], adj_R.data[0][0], adj_scale[0], adj_ret.data[0][0]);
1037
- adj_mul(R.data[1][0], scale[0], adj_R.data[1][0], adj_scale[0], adj_ret.data[1][0]);
1038
- adj_mul(R.data[2][0], scale[0], adj_R.data[2][0], adj_scale[0], adj_ret.data[2][0]);
1039
-
1040
- adj_mul(R.data[0][1], scale[1], adj_R.data[0][1], adj_scale[1], adj_ret.data[0][1]);
1041
- adj_mul(R.data[1][1], scale[1], adj_R.data[1][1], adj_scale[1], adj_ret.data[1][1]);
1042
- adj_mul(R.data[2][1], scale[1], adj_R.data[2][1], adj_scale[1], adj_ret.data[2][1]);
1043
-
1044
- adj_mul(R.data[0][2], scale[2], adj_R.data[0][2], adj_scale[2], adj_ret.data[0][2]);
1045
- adj_mul(R.data[1][2], scale[2], adj_R.data[1][2], adj_scale[2], adj_ret.data[1][2]);
1046
- adj_mul(R.data[2][2], scale[2], adj_R.data[2][2], adj_scale[2], adj_ret.data[2][2]);
1047
-
1048
- adj_quat_to_matrix(rot, adj_rot, adj_R);
1049
- }
1050
-
1051
- template<unsigned Rows, unsigned Cols, typename Type>
1052
- inline CUDA_CALLABLE mat_t<Rows,Cols,Type>::mat_t(const vec_t<3,Type>& pos, const quat_t<Type>& rot, const vec_t<3,Type>& scale)
1053
- {
1054
- mat_t<3,3,Type> R = quat_to_matrix(rot);
1055
-
1056
- data[0][0] = R.data[0][0]*scale[0];
1057
- data[1][0] = R.data[1][0]*scale[0];
1058
- data[2][0] = R.data[2][0]*scale[0];
1059
- data[3][0] = Type(0);
1060
-
1061
- data[0][1] = R.data[0][1]*scale[1];
1062
- data[1][1] = R.data[1][1]*scale[1];
1063
- data[2][1] = R.data[2][1]*scale[1];
1064
- data[3][1] = Type(0);
1065
-
1066
- data[0][2] = R.data[0][2]*scale[2];
1067
- data[1][2] = R.data[1][2]*scale[2];
1068
- data[2][2] = R.data[2][2]*scale[2];
1069
- data[3][2] = Type(0);
1070
-
1071
- data[0][3] = pos[0];
1072
- data[1][3] = pos[1];
1073
- data[2][3] = pos[2];
1074
- data[3][3] = Type(1);
1075
- }
1076
-
1077
- template<typename Type=float32>
1078
- inline CUDA_CALLABLE quat_t<Type> quat_identity()
1079
- {
1080
- return quat_t<Type>(Type(0), Type(0), Type(0), Type(1));
1081
- }
1082
-
1083
-
1084
-
1
+ /** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
2
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
3
+ * and proprietary rights in and to this software, related documentation
4
+ * and any modifications thereto. Any use, reproduction, disclosure or
5
+ * distribution of this software and related documentation without an express
6
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
7
+ */
8
+
9
+ #pragma once
10
+
11
+ #include "mat.h"
12
+
13
+ namespace wp
14
+ {
15
+
16
+ template<typename Type>
17
+ struct quat_t
18
+ {
19
+ // zero constructor for adjoint variable initialization
20
+ inline CUDA_CALLABLE quat_t(Type x=Type(0), Type y=Type(0), Type z=Type(0), Type w=Type(0)) : x(x), y(y), z(z), w(w) {}
21
+ explicit inline CUDA_CALLABLE quat_t(const vec_t<3,Type>& v, Type w=Type(0)) : x(v[0]), y(v[1]), z(v[2]), w(w) {}
22
+
23
+ template<typename OtherType>
24
+ explicit inline CUDA_CALLABLE quat_t(const quat_t<OtherType>& other)
25
+ {
26
+ x = static_cast<Type>(other.x);
27
+ y = static_cast<Type>(other.y);
28
+ z = static_cast<Type>(other.z);
29
+ w = static_cast<Type>(other.w);
30
+ }
31
+
32
+ // imaginary part
33
+ Type x;
34
+ Type y;
35
+ Type z;
36
+
37
+ // real part
38
+ Type w;
39
+ };
40
+
41
+ using quat = quat_t<float>;
42
+ using quath = quat_t<half>;
43
+ using quatf = quat_t<float>;
44
+ using quatd = quat_t<double>;
45
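Note: the imaginary part is stored in x, y, z and the real part in w, so the identity rotation is (0, 0, 0, 1). A minimal construction sketch (the wp:: qualification at a call site outside the namespace is assumed):

    wp::quat  q = wp::quat_identity<float>();  // (0, 0, 0, 1)
    wp::quatd qd(0.0, 0.0, 0.0, 1.0);          // double-precision identity
    wp::quat  qf(qd);                          // explicit narrowing via the casting constructor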
+
46
+
47
+ template<typename Type>
48
+ inline CUDA_CALLABLE bool operator==(const quat_t<Type>& a, const quat_t<Type>& b)
49
+ {
50
+ return a.x == b.x && a.y == b.y && a.z == b.z && a.w == b.w;
51
+ }
52
+
53
+ template<typename Type>
54
+ inline bool CUDA_CALLABLE isfinite(const quat_t<Type>& q)
55
+ {
56
+ return isfinite(q.x) && isfinite(q.y) && isfinite(q.z) && isfinite(q.w);
57
+ }
58
+
59
+ template<typename Type>
60
+ inline CUDA_CALLABLE quat_t<Type> atomic_add(quat_t<Type> * addr, quat_t<Type> value)
61
+ {
62
+ Type x = atomic_add(&(addr -> x), value.x);
63
+ Type y = atomic_add(&(addr -> y), value.y);
64
+ Type z = atomic_add(&(addr -> z), value.z);
65
+ Type w = atomic_add(&(addr -> w), value.w);
66
+
67
+ return quat_t<Type>(x, y, z, w);
68
+ }
69
+
70
+ template<typename Type>
71
+ inline CUDA_CALLABLE void adj_quat_t(Type x, Type y, Type z, Type w, Type& adj_x, Type& adj_y, Type& adj_z, Type& adj_w, quat_t<Type> adj_ret)
72
+ {
73
+ adj_x += adj_ret.x;
74
+ adj_y += adj_ret.y;
75
+ adj_z += adj_ret.z;
76
+ adj_w += adj_ret.w;
77
+ }
78
+
79
+ template<typename Type>
80
+ inline CUDA_CALLABLE void adj_quat_t(const vec_t<3,Type>& v, Type w, vec_t<3,Type>& adj_v, Type& adj_w, quat_t<Type> adj_ret)
81
+ {
82
+ adj_v[0] += adj_ret.x;
83
+ adj_v[1] += adj_ret.y;
84
+ adj_v[2] += adj_ret.z;
85
+ adj_w += adj_ret.w;
86
+ }
87
+
88
+ // casting constructor adjoint
89
+ template<typename Type, typename OtherType>
90
+ inline CUDA_CALLABLE void adj_quat_t(const quat_t<OtherType>& other, quat_t<OtherType>& adj_other, const quat_t<Type>& adj_ret)
91
+ {
92
+ adj_other.x += static_cast<OtherType>(adj_ret.x);
93
+ adj_other.y += static_cast<OtherType>(adj_ret.y);
94
+ adj_other.z += static_cast<OtherType>(adj_ret.z);
95
+ adj_other.w += static_cast<OtherType>(adj_ret.w);
96
+ }
97
+
98
+ // forward methods
99
+
100
+ template<typename Type>
101
+ inline CUDA_CALLABLE quat_t<Type> quat_from_axis_angle(const vec_t<3,Type>& axis, Type angle)
102
+ {
103
+ Type half = angle*Type(0.5);
104
+ Type w = cos(half);
105
+
106
+ Type sin_theta_over_two = sin(half);
107
+ vec_t<3,Type> v = axis*sin_theta_over_two;
108
+
109
+ return quat_t<Type>(v[0], v[1], v[2], w);
110
+ }
111
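As a quick check of the half-angle form above: a rotation of pi/2 about the z-axis gives q = (0, 0, sin(pi/4), cos(pi/4)) ~ (0, 0, 0.7071, 0.7071). A minimal sketch using only types from this header and vec.h:

    wp::quat q = wp::quat_from_axis_angle(wp::vec_t<3, float>(0.f, 0.f, 1.f), 1.5707963f);
    // q ~= (0, 0, 0.7071, 0.7071); quat_to_axis_angle(q, axis, angle) recovers the inputs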
+
112
+ template<typename Type>
113
+ inline CUDA_CALLABLE void quat_to_axis_angle(const quat_t<Type>& q, vec_t<3,Type>& axis, Type& angle)
114
+ {
115
+ vec_t<3,Type> v = vec_t<3,Type>(q.x, q.y, q.z);
116
+ axis = q.w < Type(0) ? -normalize(v) : normalize(v);
117
+ angle = Type(2) * atan2(length(v), abs(q.w));
118
+ }
119
+
120
+ template<typename Type>
121
+ inline CUDA_CALLABLE quat_t<Type> quat_rpy(Type roll, Type pitch, Type yaw)
122
+ {
123
+ Type cy = cos(yaw * Type(0.5));
124
+ Type sy = sin(yaw * Type(0.5));
125
+ Type cr = cos(roll * Type(0.5));
126
+ Type sr = sin(roll * Type(0.5));
127
+ Type cp = cos(pitch * Type(0.5));
128
+ Type sp = sin(pitch * Type(0.5));
129
+
130
+ Type w = (cy * cr * cp + sy * sr * sp);
131
+ Type x = (cy * sr * cp - sy * cr * sp);
132
+ Type y = (cy * cr * sp + sy * sr * cp);
133
+ Type z = (sy * cr * cp - cy * sr * sp);
134
+
135
+ return quat_t<Type>(x, y, z, w);
136
+ }
137
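The formula above matches the usual ZYX (yaw-pitch-roll) composition: rotate about z by yaw, then about y by pitch, then about x by roll. A hedged equivalence sketch built from the other constructors in this header:

    float roll = 0.1f, pitch = 0.2f, yaw = 0.3f;
    wp::quat qz = wp::quat_from_axis_angle(wp::vec_t<3, float>(0.f, 0.f, 1.f), yaw);
    wp::quat qy = wp::quat_from_axis_angle(wp::vec_t<3, float>(0.f, 1.f, 0.f), pitch);
    wp::quat qx = wp::quat_from_axis_angle(wp::vec_t<3, float>(1.f, 0.f, 0.f), roll);
    wp::quat q  = wp::mul(wp::mul(qz, qy), qx);  // equals wp::quat_rpy(roll, pitch, yaw)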
+
138
+
139
+
140
+ template<typename Type>
141
+ inline CUDA_CALLABLE quat_t<Type> quat_inverse(const quat_t<Type>& q)
142
+ {
143
+ return quat_t<Type>(-q.x, -q.y, -q.z, q.w);
144
+ }
145
+
146
+
147
+ template<typename Type>
148
+ inline CUDA_CALLABLE Type dot(const quat_t<Type>& a, const quat_t<Type>& b)
149
+ {
150
+ return a.x*b.x + a.y*b.y + a.z*b.z + a.w*b.w;
151
+ }
152
+
153
+ template<typename Type>
154
+ inline CUDA_CALLABLE Type tensordot(const quat_t<Type>& a, const quat_t<Type>& b)
155
+ {
156
+ // corresponds to `np.tensordot()` with all axes being contracted
157
+ return dot(a, b);
158
+ }
159
+
160
+ template<typename Type>
161
+ inline CUDA_CALLABLE Type length(const quat_t<Type>& q)
162
+ {
163
+ return sqrt(dot(q, q));
164
+ }
165
+
166
+ template<typename Type>
167
+ inline CUDA_CALLABLE Type length_sq(const quat_t<Type>& q)
168
+ {
169
+ return dot(q, q);
170
+ }
171
+
172
+ template<typename Type>
173
+ inline CUDA_CALLABLE quat_t<Type> normalize(const quat_t<Type>& q)
174
+ {
175
+ Type l = length(q);
176
+ if (l > Type(kEps))
177
+ {
178
+ Type inv_l = Type(1)/l;
179
+
180
+ return quat_t<Type>(q.x*inv_l, q.y*inv_l, q.z*inv_l, q.w*inv_l);
181
+ }
182
+ else
183
+ {
184
+ return quat_t<Type>(Type(0), Type(0), Type(0), Type(1));
185
+ }
186
+ }
187
+
188
+ template<typename Type>
189
+ inline CUDA_CALLABLE quat_t<Type> add(const quat_t<Type>& a, const quat_t<Type>& b)
190
+ {
191
+ return quat_t<Type>(a.x+b.x, a.y+b.y, a.z+b.z, a.w+b.w);
192
+ }
193
+
194
+ template<typename Type>
195
+ inline CUDA_CALLABLE quat_t<Type> sub(const quat_t<Type>& a, const quat_t<Type>& b)
196
+ {
197
+ return quat_t<Type>(a.x-b.x, a.y-b.y, a.z-b.z, a.w-b.w);
+ }
198
+
199
+
200
+ template<typename Type>
201
+ inline CUDA_CALLABLE quat_t<Type> mul(const quat_t<Type>& a, const quat_t<Type>& b)
202
+ {
203
+ return quat_t<Type>(a.w*b.x + b.w*a.x + a.y*b.z - b.y*a.z,
204
+ a.w*b.y + b.w*a.y + a.z*b.x - b.z*a.x,
205
+ a.w*b.z + b.w*a.z + a.x*b.y - b.x*a.y,
206
+ a.w*b.w - a.x*b.x - a.y*b.y - a.z*b.z);
207
+ }
208
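The product uses the Hamilton convention, which can be checked on the basis elements: with i = (1, 0, 0, 0) and j = (0, 1, 0, 0), the expression above yields i*j = k = (0, 0, 1, 0).

    wp::quat i(1.f, 0.f, 0.f, 0.f);
    wp::quat j(0.f, 1.f, 0.f, 0.f);
    wp::quat k = wp::mul(i, j);  // (0, 0, 1, 0)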
+
209
+ template<typename Type>
210
+ inline CUDA_CALLABLE quat_t<Type> mul(const quat_t<Type>& a, Type s)
211
+ {
212
+ return quat_t<Type>(a.x*s, a.y*s, a.z*s, a.w*s);
213
+ }
214
+
215
+ template<typename Type>
216
+ inline CUDA_CALLABLE quat_t<Type> mul(Type s, const quat_t<Type>& a)
217
+ {
218
+ return mul(a, s);
219
+ }
220
+
221
+ // division
222
+ template<typename Type>
223
+ inline CUDA_CALLABLE quat_t<Type> div(quat_t<Type> q, Type s)
224
+ {
225
+ return quat_t<Type>(q.x/s, q.y/s, q.z/s, q.w/s);
226
+ }
227
+
228
+ template<typename Type>
229
+ inline CUDA_CALLABLE quat_t<Type> div(Type s, quat_t<Type> q)
230
+ {
231
+ return quat_t<Type>(s/q.x, s/q.y, s/q.z, s/q.w);
232
+ }
233
+
234
+ template<typename Type>
235
+ inline CUDA_CALLABLE quat_t<Type> operator / (quat_t<Type> a, Type s)
236
+ {
237
+ return div(a,s);
238
+ }
239
+
240
+ template<typename Type>
241
+ inline CUDA_CALLABLE quat_t<Type> operator / (Type s, quat_t<Type> a)
242
+ {
243
+ return div(s,a);
244
+ }
245
+
246
+ template<typename Type>
247
+ inline CUDA_CALLABLE quat_t<Type> operator*(Type s, const quat_t<Type>& a)
248
+ {
249
+ return mul(a, s);
250
+ }
251
+
252
+ template<typename Type>
253
+ inline CUDA_CALLABLE quat_t<Type> operator*(const quat_t<Type>& a, Type s)
254
+ {
255
+ return mul(a, s);
256
+ }
257
+
258
+ template<typename Type>
259
+ inline CUDA_CALLABLE vec_t<3,Type> quat_rotate(const quat_t<Type>& q, const vec_t<3,Type>& x)
260
+ {
261
+ Type c = (Type(2)*q.w*q.w-Type(1));
262
+ Type d = Type(2)*(q.x*x.c[0] + q.y*x.c[1] + q.z*x.c[2]);
263
+ return vec_t<3,Type>(
264
+ x.c[0]*c + q.x*d + (q.y * x[2] - q.z * x[1])*q.w*Type(2),
265
+ x.c[1]*c + q.y*d + (q.z * x[0] - q.x * x[2])*q.w*Type(2),
266
+ x.c[2]*c + q.z*d + (q.x * x[1] - q.y * x[0])*q.w*Type(2)
267
+ );
268
+ }
269
+
270
+ template<typename Type>
271
+ inline CUDA_CALLABLE vec_t<3,Type> quat_rotate_inv(const quat_t<Type>& q, const vec_t<3,Type>& x)
272
+ {
273
+ Type c = (Type(2)*q.w*q.w-Type(1));
274
+ Type d = Type(2)*(q.x*x.c[0] + q.y*x.c[1] + q.z*x.c[2]);
275
+ return vec_t<3,Type>(
276
+ x.c[0]*c + q.x*d - (q.y * x[2] - q.z * x[1])*q.w*Type(2),
277
+ x.c[1]*c + q.y*d - (q.z * x[0] - q.x * x[2])*q.w*Type(2),
278
+ x.c[2]*c + q.z*d - (q.x * x[1] - q.y * x[0])*q.w*Type(2)
279
+ );
280
+ }
281
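quat_rotate() applies the rotation represented by a unit quaternion to a vector and quat_rotate_inv() applies the opposite rotation, so the two compose to the identity. A round-trip sketch:

    wp::quat q = wp::quat_from_axis_angle(wp::vec_t<3, float>(0.f, 1.f, 0.f), 0.5f);
    wp::vec_t<3, float> v(1.f, 2.f, 3.f);
    wp::vec_t<3, float> r = wp::quat_rotate(q, v);
    wp::vec_t<3, float> b = wp::quat_rotate_inv(q, r);  // b ~= v for unit q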
+
282
+ template<typename Type>
283
+ inline CUDA_CALLABLE quat_t<Type> quat_slerp(const quat_t<Type>& q0, const quat_t<Type>& q1, Type t)
284
+ {
285
+ vec_t<3,Type> axis;
286
+ Type angle;
287
+ quat_to_axis_angle(mul(quat_inverse(q0), q1), axis, angle);
288
+ return mul(q0, quat_from_axis_angle(axis, t * angle));
289
+ }
290
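quat_slerp() walks along the relative rotation from q0 to q1, so t = 0 returns q0 and t = 1 returns q1 (for unit inputs). A minimal sketch:

    wp::quat q0 = wp::quat_identity<float>();
    wp::quat q1 = wp::quat_from_axis_angle(wp::vec_t<3, float>(0.f, 0.f, 1.f), 1.0f);
    wp::quat qh = wp::quat_slerp(q0, q1, 0.5f);  // ~0.5 rad rotation about z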
+
291
+ template<typename Type>
292
+ inline CUDA_CALLABLE mat_t<3,3,Type> quat_to_matrix(const quat_t<Type>& q)
293
+ {
294
+ vec_t<3,Type> c1 = quat_rotate(q, vec_t<3,Type>(1.0, 0.0, 0.0));
295
+ vec_t<3,Type> c2 = quat_rotate(q, vec_t<3,Type>(0.0, 1.0, 0.0));
296
+ vec_t<3,Type> c3 = quat_rotate(q, vec_t<3,Type>(0.0, 0.0, 1.0));
297
+
298
+ return mat_t<3,3,Type>(c1, c2, c3);
299
+ }
300
+
301
+ template<typename Type>
302
+ inline CUDA_CALLABLE quat_t<Type> quat_from_matrix(const mat_t<3,3,Type>& m)
303
+ {
304
+ const Type tr = m.data[0][0] + m.data[1][1] + m.data[2][2];
305
+ Type x, y, z, w, h = Type(0);
306
+
307
+ if (tr >= Type(0)) {
308
+ h = sqrt(tr + Type(1));
309
+ w = Type(0.5) * h;
310
+ h = Type(0.5) / h;
311
+
312
+ x = (m.data[2][1] - m.data[1][2]) * h;
313
+ y = (m.data[0][2] - m.data[2][0]) * h;
314
+ z = (m.data[1][0] - m.data[0][1]) * h;
315
+ } else {
316
+ size_t max_diag = 0;
317
+ if (m.data[1][1] > m.data[0][0]) {
318
+ max_diag = 1;
319
+ }
320
+ if (m.data[2][2] > m.data[max_diag][max_diag]) {
321
+ max_diag = 2;
322
+ }
323
+
324
+ if (max_diag == 0) {
325
+ h = sqrt((m.data[0][0] - (m.data[1][1] + m.data[2][2])) + Type(1));
326
+ x = Type(0.5) * h;
327
+ h = Type(0.5) / h;
328
+
329
+ y = (m.data[0][1] + m.data[1][0]) * h;
330
+ z = (m.data[2][0] + m.data[0][2]) * h;
331
+ w = (m.data[2][1] - m.data[1][2]) * h;
332
+ } else if (max_diag == 1) {
333
+ h = sqrt((m.data[1][1] - (m.data[2][2] + m.data[0][0])) + Type(1));
334
+ y = Type(0.5) * h;
335
+ h = Type(0.5) / h;
336
+
337
+ z = (m.data[1][2] + m.data[2][1]) * h;
338
+ x = (m.data[0][1] + m.data[1][0]) * h;
339
+ w = (m.data[0][2] - m.data[2][0]) * h;
340
+ } else if (max_diag == 2) {
341
+ h = sqrt((m.data[2][2] - (m.data[0][0] + m.data[1][1])) + Type(1));
342
+ z = Type(0.5) * h;
343
+ h = Type(0.5) / h;
344
+
345
+ x = (m.data[2][0] + m.data[0][2]) * h;
346
+ y = (m.data[1][2] + m.data[2][1]) * h;
347
+ w = (m.data[1][0] - m.data[0][1]) * h;
348
+ }
349
+ }
350
+
351
+ return normalize(quat_t<Type>(x, y, z, w));
352
+ }
353
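quat_to_matrix() and quat_from_matrix() are inverse up to sign (q and -q encode the same rotation), and quat_from_matrix() always returns a normalized quaternion. A round-trip sketch:

    wp::quat q  = wp::quat_from_axis_angle(wp::vec_t<3, float>(1.f, 0.f, 0.f), 0.3f);
    wp::mat_t<3, 3, float> R = wp::quat_to_matrix(q);
    wp::quat q2 = wp::quat_from_matrix(R);  // q2 ~= q (or -q in general)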
+
354
+ template<typename Type>
355
+ inline CUDA_CALLABLE Type extract(const quat_t<Type>& a, int idx)
356
+ {
357
+ #if FP_CHECK
358
+ if (idx < 0 || idx > 3)
359
+ {
360
+ printf("quat_t index %d out of bounds at %s %d", idx, __FILE__, __LINE__);
361
+ assert(0);
362
+ }
363
+ #endif
364
+
365
+ /*
366
+ * Because quat data is not stored in an array, we index the quaternion by checking all possible idx values.
367
+ * (&a.x)[idx] would be the preferred access strategy, but this results in undefined behavior in the clang compiler
368
+ * at optimization level 3.
369
+ */
370
+ if (idx == 0) {return a.x;}
371
+ else if (idx == 1) {return a.y;}
372
+ else if (idx == 2) {return a.z;}
373
+ else {return a.w;}
374
+ }
375
+
376
+ template<typename Type>
377
+ CUDA_CALLABLE inline quat_t<Type> lerp(const quat_t<Type>& a, const quat_t<Type>& b, Type t)
378
+ {
379
+ return a*(Type(1)-t) + b*t;
380
+ }
381
+
382
+ template<typename Type>
383
+ CUDA_CALLABLE inline void adj_lerp(const quat_t<Type>& a, const quat_t<Type>& b, Type t, quat_t<Type>& adj_a, quat_t<Type>& adj_b, Type& adj_t, const quat_t<Type>& adj_ret)
384
+ {
385
+ adj_a += adj_ret*(Type(1)-t);
386
+ adj_b += adj_ret*t;
387
+ adj_t += tensordot(b, adj_ret) - tensordot(a, adj_ret);
388
+ }
389
+
390
+ template<typename Type>
391
+ inline CUDA_CALLABLE void adj_extract(const quat_t<Type>& a, int idx, quat_t<Type>& adj_a, int & adj_idx, Type & adj_ret)
392
+ {
393
+ #if FP_CHECK
394
+ if (idx < 0 || idx > 3)
395
+ {
396
+ printf("quat_t index %d out of bounds at %s %d", idx, __FILE__, __LINE__);
397
+ assert(0);
398
+ }
399
+ #endif
400
+
401
+ // See wp::extract(const quat_t<Type>& a, int idx) note
402
+ if (idx == 0) {adj_a.x += adj_ret;}
403
+ else if (idx == 1) {adj_a.y += adj_ret;}
404
+ else if (idx == 2) {adj_a.z += adj_ret;}
405
+ else {adj_a.w += adj_ret;}
406
+ }
407
+
408
+
409
+ // backward methods
410
+ template<typename Type>
411
+ inline CUDA_CALLABLE void adj_quat_from_axis_angle(const vec_t<3,Type>& axis, Type angle, vec_t<3,Type>& adj_axis, Type& adj_angle, const quat_t<Type>& adj_ret)
412
+ {
413
+ vec_t<3,Type> v = vec_t<3,Type>(adj_ret.x, adj_ret.y, adj_ret.z);
414
+
415
+ Type s = sin(angle*Type(0.5));
416
+ Type c = cos(angle*Type(0.5));
417
+
418
+ quat_t<Type> dqda = quat_t<Type>(axis[0]*c, axis[1]*c, axis[2]*c, -s)*Type(0.5);
419
+
420
+ adj_axis += v*s;
421
+ adj_angle += dot(dqda, adj_ret);
422
+ }
423
+
424
+ template<typename Type>
425
+ inline CUDA_CALLABLE void adj_quat_to_axis_angle(const quat_t<Type>& q, vec_t<3,Type>& axis, Type& angle, quat_t<Type>& adj_q, const vec_t<3,Type>& adj_axis, const Type& adj_angle)
426
+ {
427
+ Type l = length(vec_t<3,Type>(q.x, q.y, q.z));
428
+
429
+ Type ax_qx = Type(0);
430
+ Type ax_qy = Type(0);
431
+ Type ax_qz = Type(0);
432
+ Type ay_qx = Type(0);
433
+ Type ay_qy = Type(0);
434
+ Type ay_qz = Type(0);
435
+ Type az_qx = Type(0);
436
+ Type az_qy = Type(0);
437
+ Type az_qz = Type(0);
438
+
439
+ Type t_qx = Type(0);
440
+ Type t_qy = Type(0);
441
+ Type t_qz = Type(0);
442
+ Type t_qw = Type(0);
443
+
444
+ Type flip = q.w < Type(0) ? -1.0 : 1.0;
445
+
446
+ if (l > Type(0))
447
+ {
448
+ Type l_sq = l*l;
449
+ Type l_inv = Type(1) / l;
450
+ Type l_inv_sq = l_inv * l_inv;
451
+ Type l_inv_cu = l_inv_sq * l_inv;
452
+
453
+ Type C = flip * l_inv_cu;
454
+ ax_qx = C * (q.y*q.y + q.z*q.z);
455
+ ax_qy = -C * q.x*q.y;
456
+ ax_qz = -C * q.x*q.z;
457
+ ay_qx = -C * q.y*q.x;
458
+ ay_qy = C * (q.x*q.x + q.z*q.z);
459
+ ay_qz = -C * q.y*q.z;
460
+ az_qx = -C * q.z*q.x;
461
+ az_qy = -C * q.z*q.y;
462
+ az_qz = C * (q.x*q.x + q.y*q.y);
463
+
464
+ Type D = Type(2) * flip / (l_sq + q.w*q.w);
465
+ t_qx = D * l_inv * q.x * q.w;
466
+ t_qy = D * l_inv * q.y * q.w;
467
+ t_qz = D * l_inv * q.z * q.w;
468
+ t_qw = -D * l;
469
+ }
470
+ else
471
+ {
472
+ if (abs(q.w) > Type(kEps))
473
+ {
474
+ t_qx = Type(2) / (sqrt(Type(3)) * abs(q.w));
475
+ t_qy = Type(2) / (sqrt(Type(3)) * abs(q.w));
476
+ t_qz = Type(2) / (sqrt(Type(3)) * abs(q.w));
477
+ }
478
+ // o/w we have a null quat_t which cannot backpropagate
479
+ }
480
+
481
+ adj_q.x += ax_qx * adj_axis[0] + ay_qx * adj_axis[1] + az_qx * adj_axis[2] + t_qx * adj_angle;
482
+ adj_q.y += ax_qy * adj_axis[0] + ay_qy * adj_axis[1] + az_qy * adj_axis[2] + t_qy * adj_angle;
483
+ adj_q.z += ax_qz * adj_axis[0] + ay_qz * adj_axis[1] + az_qz * adj_axis[2] + t_qz * adj_angle;
484
+ adj_q.w += t_qw * adj_angle;
485
+ }
486
+
487
+ template<typename Type>
488
+ inline CUDA_CALLABLE void adj_quat_rpy(Type roll, Type pitch, Type yaw, Type& adj_roll, Type& adj_pitch, Type& adj_yaw, const quat_t<Type>& adj_ret)
489
+ {
490
+ Type cy = cos(yaw * Type(0.5));
491
+ Type sy = sin(yaw * Type(0.5));
492
+ Type cr = cos(roll * Type(0.5));
493
+ Type sr = sin(roll * Type(0.5));
494
+ Type cp = cos(pitch * Type(0.5));
495
+ Type sp = sin(pitch * Type(0.5));
496
+
497
+ Type w = (cy * cr * cp + sy * sr * sp);
498
+ Type x = (cy * sr * cp - sy * cr * sp);
499
+ Type y = (cy * cr * sp + sy * sr * cp);
500
+ Type z = (sy * cr * cp - cy * sr * sp);
501
+
502
+ Type dx_dr = Type(0.5) * w;
503
+ Type dx_dp = -Type(0.5) * cy * sr * sp - Type(0.5) * sy * cr * cp;
504
+ Type dx_dy = -Type(0.5) * y;
505
+
506
+ Type dy_dr = Type(0.5) * z;
507
+ Type dy_dp = Type(0.5) * cy * cr * cp - Type(0.5) * sy * sr * sp;
508
+ Type dy_dy = Type(0.5) * x;
509
+
510
+ Type dz_dr = -Type(0.5) * y;
511
+ Type dz_dp = -Type(0.5) * sy * cr * sp - Type(0.5) * cy * sr * cp;
512
+ Type dz_dy = Type(0.5) * w;
513
+
514
+ Type dw_dr = -Type(0.5) * x;
515
+ Type dw_dp = -Type(0.5) * cy * cr * sp + Type(0.5) * sy * sr * cp;
516
+ Type dw_dy = -Type(0.5) * z;
517
+
518
+ adj_roll += dot(quat_t<Type>(dx_dr, dy_dr, dz_dr, dw_dr), adj_ret);
519
+ adj_pitch += dot(quat_t<Type>(dx_dp, dy_dp, dz_dp, dw_dp), adj_ret);
520
+ adj_yaw += dot(quat_t<Type>(dx_dy, dy_dy, dz_dy, dw_dy), adj_ret);
521
+ }
522
+
523
+
524
+ template<typename Type>
525
+ inline CUDA_CALLABLE void adj_dot(const quat_t<Type>& a, const quat_t<Type>& b, quat_t<Type>& adj_a, quat_t<Type>& adj_b, const Type adj_ret)
526
+ {
527
+ adj_a += b*adj_ret;
528
+ adj_b += a*adj_ret;
529
+ }
530
+
531
+ template<typename Type>
532
+ inline CUDA_CALLABLE void adj_tensordot(const quat_t<Type>& a, const quat_t<Type>& b, quat_t<Type>& adj_a, quat_t<Type>& adj_b, const Type adj_ret)
533
+ {
534
+ adj_dot(a, b, adj_a, adj_b, adj_ret);
535
+ }
536
+
537
+ template<typename Type>
538
+ inline CUDA_CALLABLE void adj_length(const quat_t<Type>& a, Type ret, quat_t<Type>& adj_a, const Type adj_ret)
539
+ {
540
+ if (ret > Type(kEps))
541
+ {
542
+ Type inv_l = Type(1)/ret;
543
+
544
+ adj_a += quat_t<Type>(a.x*inv_l, a.y*inv_l, a.z*inv_l, a.w*inv_l) * adj_ret;
545
+ }
546
+ }
547
+
548
+ template<typename Type>
549
+ inline CUDA_CALLABLE void adj_length_sq(const quat_t<Type>& a, quat_t<Type>& adj_a, const Type adj_ret)
550
+ {
551
+ adj_a += Type(2)*a*adj_ret;
552
+ }
553
+
554
+ template<typename Type>
555
+ inline CUDA_CALLABLE void adj_normalize(const quat_t<Type>& q, quat_t<Type>& adj_q, const quat_t<Type>& adj_ret)
556
+ {
557
+ Type l = length(q);
558
+
559
+ if (l > Type(kEps))
560
+ {
561
+ Type l_inv = Type(1)/l;
562
+
563
+ adj_q += adj_ret*l_inv - q*(l_inv*l_inv*l_inv*dot(q, adj_ret));
564
+ }
565
+ }
566
+
567
+ template<typename Type>
568
+ inline CUDA_CALLABLE void adj_quat_inverse(const quat_t<Type>& q, quat_t<Type>& adj_q, const quat_t<Type>& adj_ret)
569
+ {
570
+ adj_q.x -= adj_ret.x;
571
+ adj_q.y -= adj_ret.y;
572
+ adj_q.z -= adj_ret.z;
573
+ adj_q.w += adj_ret.w;
574
+ }
575
+
576
+ template<typename Type>
577
+ inline CUDA_CALLABLE void adj_add(const quat_t<Type>& a, const quat_t<Type>& b, quat_t<Type>& adj_a, quat_t<Type>& adj_b, const quat_t<Type>& adj_ret)
578
+ {
579
+ adj_a += adj_ret;
580
+ adj_b += adj_ret;
581
+ }
582
+
583
+ template<typename Type>
584
+ inline CUDA_CALLABLE void adj_sub(const quat_t<Type>& a, const quat_t<Type>& b, quat_t<Type>& adj_a, quat_t<Type>& adj_b, const quat_t<Type>& adj_ret)
585
+ {
586
+ adj_a += adj_ret;
587
+ adj_b -= adj_ret;
588
+ }
589
+
590
+ template<typename Type>
591
+ inline CUDA_CALLABLE void adj_mul(const quat_t<Type>& a, const quat_t<Type>& b, quat_t<Type>& adj_a, quat_t<Type>& adj_b, const quat_t<Type>& adj_ret)
592
+ {
593
+ // shorthand
594
+ const quat_t<Type>& r = adj_ret;
595
+
596
+ adj_a += quat_t<Type>(b.w*r.x - b.x*r.w + b.y*r.z - b.z*r.y,
597
+ b.w*r.y - b.y*r.w - b.x*r.z + b.z*r.x,
598
+ b.w*r.z + b.x*r.y - b.y*r.x - b.z*r.w,
599
+ b.w*r.w + b.x*r.x + b.y*r.y + b.z*r.z);
600
+
601
+ adj_b += quat_t<Type>(a.w*r.x - a.x*r.w - a.y*r.z + a.z*r.y,
602
+ a.w*r.y - a.y*r.w + a.x*r.z - a.z*r.x,
603
+ a.w*r.z - a.x*r.y + a.y*r.x - a.z*r.w,
604
+ a.w*r.w + a.x*r.x + a.y*r.y + a.z*r.z);
605
+
606
+ }
607
+
608
+ template<typename Type>
609
+ inline CUDA_CALLABLE void adj_mul(const quat_t<Type>& a, Type s, quat_t<Type>& adj_a, Type& adj_s, const quat_t<Type>& adj_ret)
610
+ {
611
+ adj_a += adj_ret*s;
612
+ adj_s += dot(a, adj_ret);
613
+ }
614
+
615
+ template<typename Type>
616
+ inline CUDA_CALLABLE void adj_mul(Type s, const quat_t<Type>& a, Type& adj_s, quat_t<Type>& adj_a, const quat_t<Type>& adj_ret)
617
+ {
618
+ adj_mul(a, s, adj_a, adj_s, adj_ret);
619
+ }
620
+
621
+ template<typename Type>
622
+ inline CUDA_CALLABLE void adj_div(quat_t<Type> a, Type s, quat_t<Type>& adj_a, Type& adj_s, const quat_t<Type>& adj_ret)
623
+ {
624
+ adj_s -= dot(a, adj_ret)/ (s * s); // - a / s^2
625
+ adj_a += adj_ret / s;
626
+ }
627
+
628
+ template<typename Type>
629
+ inline CUDA_CALLABLE void adj_div(Type s, quat_t<Type> a, Type& adj_s, quat_t<Type>& adj_a, const quat_t<Type>& adj_ret)
630
+ {
631
+ adj_s -= dot(a, adj_ret)/ (s * s); // - a / s^2
632
+ adj_a += s / adj_ret;
633
+ }
634
+
635
+ template<typename Type>
636
+ inline CUDA_CALLABLE void adj_quat_rotate(const quat_t<Type>& q, const vec_t<3,Type>& p, quat_t<Type>& adj_q, vec_t<3,Type>& adj_p, const vec_t<3,Type>& adj_ret)
637
+ {
638
+
639
+ {
640
+ Type t2 = p[2]*q.z*Type(2);
641
+ Type t3 = p[1]*q.w*Type(2);
642
+ Type t4 = p[0]*q.w*Type(2);
643
+ Type t5 = p[0]*q.x*Type(2);
644
+ Type t6 = p[1]*q.y*Type(2);
645
+ Type t7 = p[2]*q.y*Type(2);
646
+ Type t8 = p[0]*q.z*Type(2);
647
+ Type t9 = p[0]*q.y*Type(2);
648
+ Type t10 = p[1]*q.x*Type(2);
649
+ adj_q.x += adj_ret[2]*(t3+t8)+adj_ret[0]*(t2+t6+p[0]*q.x*Type(4))+adj_ret[1]*(t9-p[2]*q.w*Type(2));
650
+ adj_q.y += adj_ret[1]*(t2+t5+p[1]*q.y*Type(4))+adj_ret[0]*(t10+p[2]*q.w*Type(2))-adj_ret[2]*(t4-p[1]*q.z*Type(2));
651
+ adj_q.z += adj_ret[1]*(t4+t7)+adj_ret[2]*(t5+t6+p[2]*q.z*Type(4))-adj_ret[0]*(t3-p[2]*q.x*Type(2));
652
+ adj_q.w += adj_ret[0]*(t7+p[0]*q.w*Type(4)-p[1]*q.z*Type(2))+adj_ret[1]*(t8+p[1]*q.w*Type(4)-p[2]*q.x*Type(2))+adj_ret[2]*(-t9+t10+p[2]*q.w*Type(4));
653
+ }
654
+
655
+ {
656
+ Type t2 = q.w*q.w;
657
+ Type t3 = t2*Type(2);
658
+ Type t4 = q.w*q.z*Type(2);
659
+ Type t5 = q.x*q.y*Type(2);
660
+ Type t6 = q.w*q.y*Type(2);
661
+ Type t7 = q.w*q.x*Type(2);
662
+ Type t8 = q.y*q.z*Type(2);
663
+ adj_p[0] += adj_ret[1]*(t4+t5)+adj_ret[0]*(t3+(q.x*q.x)*Type(2)-Type(1))-adj_ret[2]*(t6-q.x*q.z*Type(2));
664
+ adj_p[1] += adj_ret[2]*(t7+t8)-adj_ret[0]*(t4-t5)+adj_ret[1]*(t3+(q.y*q.y)*Type(2)-Type(1));
665
+ adj_p[2] += -adj_ret[1]*(t7-t8)+adj_ret[2]*(t3+(q.z*q.z)*Type(2)-Type(1))+adj_ret[0]*(t6+q.x*q.z*Type(2));
666
+ }
667
+ }
668
+
669
+ template<typename Type>
670
+ inline CUDA_CALLABLE void adj_quat_rotate_inv(const quat_t<Type>& q, const vec_t<3,Type>& p, quat_t<Type>& adj_q, vec_t<3,Type>& adj_p, const vec_t<3,Type>& adj_ret)
671
+ {
672
+ const vec_t<3,Type>& r = adj_ret;
673
+
674
+ {
675
+ Type t2 = p[2]*q.w*Type(2);
676
+ Type t3 = p[2]*q.z*Type(2);
677
+ Type t4 = p[1]*q.w*Type(2);
678
+ Type t5 = p[0]*q.w*Type(2);
679
+ Type t6 = p[0]*q.x*Type(2);
680
+ Type t7 = p[1]*q.y*Type(2);
681
+ Type t8 = p[1]*q.z*Type(2);
682
+ Type t9 = p[2]*q.x*Type(2);
683
+ Type t10 = p[0]*q.y*Type(2);
684
+ adj_q.x += r[1]*(t2+t10)+r[0]*(t3+t7+p[0]*q.x*Type(4))-r[2]*(t4-p[0]*q.z*Type(2));
685
+ adj_q.y += r[2]*(t5+t8)+r[1]*(t3+t6+p[1]*q.y*Type(4))-r[0]*(t2-p[1]*q.x*Type(2));
686
+ adj_q.z += r[0]*(t4+t9)+r[2]*(t6+t7+p[2]*q.z*Type(4))-r[1]*(t5-p[2]*q.y*Type(2));
687
+ adj_q.w += r[0]*(t8+p[0]*q.w*Type(4)-p[2]*q.y*Type(2))+r[1]*(t9+p[1]*q.w*Type(4)-p[0]*q.z*Type(2))+r[2]*(t10-p[1]*q.x*Type(2)+p[2]*q.w*Type(4));
688
+ }
689
+
690
+ {
691
+ Type t2 = q.w*q.w;
692
+ Type t3 = t2*Type(2);
693
+ Type t4 = q.w*q.z*Type(2);
694
+ Type t5 = q.w*q.y*Type(2);
695
+ Type t6 = q.x*q.z*Type(2);
696
+ Type t7 = q.w*q.x*Type(2);
697
+ adj_p[0] += r[2]*(t5+t6)+r[0]*(t3+(q.x*q.x)*Type(2)-Type(1))-r[1]*(t4-q.x*q.y*Type(2));
698
+ adj_p[1] += r[1]*(t3+(q.y*q.y)*Type(2)-Type(1))+r[0]*(t4+q.x*q.y*Type(2))-r[2]*(t7-q.y*q.z*Type(2));
699
+ adj_p[2] += -r[0]*(t5-t6)+r[2]*(t3+(q.z*q.z)*Type(2)-Type(1))+r[1]*(t7+q.y*q.z*Type(2));
700
+ }
701
+ }
702
+
703
+ template<typename Type>
704
+ inline CUDA_CALLABLE void adj_quat_slerp(const quat_t<Type>& q0, const quat_t<Type>& q1, Type t, quat_t<Type>& ret, quat_t<Type>& adj_q0, quat_t<Type>& adj_q1, Type& adj_t, const quat_t<Type>& adj_ret)
705
+ {
706
+ vec_t<3,Type> axis;
707
+ Type angle;
708
+ quat_t<Type> q0_inv = quat_inverse(q0);
709
+ quat_t<Type> q_inc = mul(q0_inv, q1);
710
+ quat_to_axis_angle(q_inc, axis, angle);
711
+ quat_t<Type> qt = quat_from_axis_angle(axis, angle * t);
712
+ angle = angle * 0.5;
713
+
714
+ // adj_t
715
+ adj_t += dot(mul(ret, quat_t<Type>(angle*axis[0], angle*axis[1], angle*axis[2], Type(0))), adj_ret);
716
+
717
+ // adj_q0
718
+ quat_t<Type> q_inc_x_q0;
719
+ quat_t<Type> q_inc_y_q0;
720
+ quat_t<Type> q_inc_z_q0;
721
+ quat_t<Type> q_inc_w_q0;
722
+
723
+ quat_t<Type> q_inc_x_q1;
724
+ quat_t<Type> q_inc_y_q1;
725
+ quat_t<Type> q_inc_z_q1;
726
+ quat_t<Type> q_inc_w_q1;
727
+
728
+ adj_mul(q0_inv, q1, q_inc_x_q0, q_inc_x_q1, quat_t<Type>(1.f, Type(0), Type(0), Type(0)));
729
+ adj_mul(q0_inv, q1, q_inc_y_q0, q_inc_y_q1, quat_t<Type>(Type(0), 1.f, Type(0), Type(0)));
730
+ adj_mul(q0_inv, q1, q_inc_z_q0, q_inc_z_q1, quat_t<Type>(Type(0), Type(0), 1.f, Type(0)));
731
+ adj_mul(q0_inv, q1, q_inc_w_q0, q_inc_w_q1, quat_t<Type>(Type(0), Type(0), Type(0), 1.f));
732
+
733
+ quat_t<Type> a_x_q_inc;
734
+ quat_t<Type> a_y_q_inc;
735
+ quat_t<Type> a_z_q_inc;
736
+ quat_t<Type> t_q_inc;
737
+
738
+ adj_quat_to_axis_angle(q_inc, axis, angle, a_x_q_inc, vec_t<3,Type>(1.f, Type(0), Type(0)), Type(0));
739
+ adj_quat_to_axis_angle(q_inc, axis, angle, a_y_q_inc, vec_t<3,Type>(Type(0), 1.f, Type(0)), Type(0));
740
+ adj_quat_to_axis_angle(q_inc, axis, angle, a_z_q_inc, vec_t<3,Type>(Type(0), Type(0), 1.f), Type(0));
741
+ adj_quat_to_axis_angle(q_inc, axis, angle, t_q_inc, vec_t<3,Type>(Type(0), Type(0), Type(0)), Type(1));
742
+
743
+ Type cs = cos(angle*t);
744
+ Type sn = sin(angle*t);
745
+
746
+ quat_t<Type> q_inc_q0_x = quat_t<Type>(-q_inc_x_q0.x, -q_inc_y_q0.x, -q_inc_z_q0.x, -q_inc_w_q0.x);
747
+ quat_t<Type> q_inc_q0_y = quat_t<Type>(-q_inc_x_q0.y, -q_inc_y_q0.y, -q_inc_z_q0.y, -q_inc_w_q0.y);
748
+ quat_t<Type> q_inc_q0_z = quat_t<Type>(-q_inc_x_q0.z, -q_inc_y_q0.z, -q_inc_z_q0.z, -q_inc_w_q0.z);
749
+ quat_t<Type> q_inc_q0_w = quat_t<Type>(q_inc_x_q0.w, q_inc_y_q0.w, q_inc_z_q0.w, q_inc_w_q0.w);
750
+
751
+ Type a_x_q0_x = dot(a_x_q_inc, q_inc_q0_x);
752
+ Type a_x_q0_y = dot(a_x_q_inc, q_inc_q0_y);
753
+ Type a_x_q0_z = dot(a_x_q_inc, q_inc_q0_z);
754
+ Type a_x_q0_w = dot(a_x_q_inc, q_inc_q0_w);
755
+ Type a_y_q0_x = dot(a_y_q_inc, q_inc_q0_x);
756
+ Type a_y_q0_y = dot(a_y_q_inc, q_inc_q0_y);
757
+ Type a_y_q0_z = dot(a_y_q_inc, q_inc_q0_z);
758
+ Type a_y_q0_w = dot(a_y_q_inc, q_inc_q0_w);
759
+ Type a_z_q0_x = dot(a_z_q_inc, q_inc_q0_x);
760
+ Type a_z_q0_y = dot(a_z_q_inc, q_inc_q0_y);
761
+ Type a_z_q0_z = dot(a_z_q_inc, q_inc_q0_z);
762
+ Type a_z_q0_w = dot(a_z_q_inc, q_inc_q0_w);
763
+ Type t_q0_x = dot(t_q_inc, q_inc_q0_x);
764
+ Type t_q0_y = dot(t_q_inc, q_inc_q0_y);
765
+ Type t_q0_z = dot(t_q_inc, q_inc_q0_z);
766
+ Type t_q0_w = dot(t_q_inc, q_inc_q0_w);
767
+
768
+ quat_t<Type> q_s_q0_x = mul(quat_t<Type>(1.f, Type(0), Type(0), Type(0)), qt) + mul(q0, quat_t<Type>(
769
+ 0.5 * t * axis[0] * t_q0_x * cs + a_x_q0_x * sn,
770
+ 0.5 * t * axis[1] * t_q0_x * cs + a_y_q0_x * sn,
771
+ 0.5 * t * axis[2] * t_q0_x * cs + a_z_q0_x * sn,
772
+ -0.5 * t * t_q0_x * sn));
773
+
774
+ quat_t<Type> q_s_q0_y = mul(quat_t<Type>(Type(0), 1.f, Type(0), Type(0)), qt) + mul(q0, quat_t<Type>(
775
+ 0.5 * t * axis[0] * t_q0_y * cs + a_x_q0_y * sn,
776
+ 0.5 * t * axis[1] * t_q0_y * cs + a_y_q0_y * sn,
777
+ 0.5 * t * axis[2] * t_q0_y * cs + a_z_q0_y * sn,
778
+ -0.5 * t * t_q0_y * sn));
779
+
780
+ quat_t<Type> q_s_q0_z = mul(quat_t<Type>(Type(0), Type(0), 1.f, Type(0)), qt) + mul(q0, quat_t<Type>(
781
+ 0.5 * t * axis[0] * t_q0_z * cs + a_x_q0_z * sn,
782
+ 0.5 * t * axis[1] * t_q0_z * cs + a_y_q0_z * sn,
783
+ 0.5 * t * axis[2] * t_q0_z * cs + a_z_q0_z * sn,
784
+ -0.5 * t * t_q0_z * sn));
785
+
786
+ quat_t<Type> q_s_q0_w = mul(quat_t<Type>(Type(0), Type(0), Type(0), 1.f), qt) + mul(q0, quat_t<Type>(
787
+ 0.5 * t * axis[0] * t_q0_w * cs + a_x_q0_w * sn,
788
+ 0.5 * t * axis[1] * t_q0_w * cs + a_y_q0_w * sn,
789
+ 0.5 * t * axis[2] * t_q0_w * cs + a_z_q0_w * sn,
790
+ -0.5 * t * t_q0_w * sn));
791
+
792
+ adj_q0.x += dot(q_s_q0_x, adj_ret);
793
+ adj_q0.y += dot(q_s_q0_y, adj_ret);
794
+ adj_q0.z += dot(q_s_q0_z, adj_ret);
795
+ adj_q0.w += dot(q_s_q0_w, adj_ret);
796
+
797
+ // adj_q1
798
+ quat_t<Type> q_inc_q1_x = quat_t<Type>(q_inc_x_q1.x, q_inc_y_q1.x, q_inc_z_q1.x, q_inc_w_q1.x);
799
+ quat_t<Type> q_inc_q1_y = quat_t<Type>(q_inc_x_q1.y, q_inc_y_q1.y, q_inc_z_q1.y, q_inc_w_q1.y);
800
+ quat_t<Type> q_inc_q1_z = quat_t<Type>(q_inc_x_q1.z, q_inc_y_q1.z, q_inc_z_q1.z, q_inc_w_q1.z);
801
+ quat_t<Type> q_inc_q1_w = quat_t<Type>(q_inc_x_q1.w, q_inc_y_q1.w, q_inc_z_q1.w, q_inc_w_q1.w);
802
+
803
+ Type a_x_q1_x = dot(a_x_q_inc, q_inc_q1_x);
804
+ Type a_x_q1_y = dot(a_x_q_inc, q_inc_q1_y);
805
+ Type a_x_q1_z = dot(a_x_q_inc, q_inc_q1_z);
806
+ Type a_x_q1_w = dot(a_x_q_inc, q_inc_q1_w);
807
+ Type a_y_q1_x = dot(a_y_q_inc, q_inc_q1_x);
808
+ Type a_y_q1_y = dot(a_y_q_inc, q_inc_q1_y);
809
+ Type a_y_q1_z = dot(a_y_q_inc, q_inc_q1_z);
810
+ Type a_y_q1_w = dot(a_y_q_inc, q_inc_q1_w);
811
+ Type a_z_q1_x = dot(a_z_q_inc, q_inc_q1_x);
812
+ Type a_z_q1_y = dot(a_z_q_inc, q_inc_q1_y);
813
+ Type a_z_q1_z = dot(a_z_q_inc, q_inc_q1_z);
814
+ Type a_z_q1_w = dot(a_z_q_inc, q_inc_q1_w);
815
+ Type t_q1_x = dot(t_q_inc, q_inc_q1_x);
816
+ Type t_q1_y = dot(t_q_inc, q_inc_q1_y);
817
+ Type t_q1_z = dot(t_q_inc, q_inc_q1_z);
818
+ Type t_q1_w = dot(t_q_inc, q_inc_q1_w);
819
+
820
+ quat_t<Type> q_s_q1_x = mul(q0, quat_t<Type>(
821
+ 0.5 * t * axis[0] * t_q1_x * cs + a_x_q1_x * sn,
822
+ 0.5 * t * axis[1] * t_q1_x * cs + a_y_q1_x * sn,
823
+ 0.5 * t * axis[2] * t_q1_x * cs + a_z_q1_x * sn,
824
+ -0.5 * t * t_q1_x * sn));
825
+
826
+ quat_t<Type> q_s_q1_y = mul(q0, quat_t<Type>(
827
+ 0.5 * t * axis[0] * t_q1_y * cs + a_x_q1_y * sn,
828
+ 0.5 * t * axis[1] * t_q1_y * cs + a_y_q1_y * sn,
829
+ 0.5 * t * axis[2] * t_q1_y * cs + a_z_q1_y * sn,
830
+ -0.5 * t * t_q1_y * sn));
831
+
832
+ quat_t<Type> q_s_q1_z = mul(q0, quat_t<Type>(
833
+ 0.5 * t * axis[0] * t_q1_z * cs + a_x_q1_z * sn,
834
+ 0.5 * t * axis[1] * t_q1_z * cs + a_y_q1_z * sn,
835
+ 0.5 * t * axis[2] * t_q1_z * cs + a_z_q1_z * sn,
836
+ -0.5 * t * t_q1_z * sn));
837
+
838
+ quat_t<Type> q_s_q1_w = mul(q0, quat_t<Type>(
839
+ 0.5 * t * axis[0] * t_q1_w * cs + a_x_q1_w * sn,
840
+ 0.5 * t * axis[1] * t_q1_w * cs + a_y_q1_w * sn,
841
+ 0.5 * t * axis[2] * t_q1_w * cs + a_z_q1_w * sn,
842
+ -0.5 * t * t_q1_w * sn));
843
+
844
+ adj_q1.x += dot(q_s_q1_x, adj_ret);
845
+ adj_q1.y += dot(q_s_q1_y, adj_ret);
846
+ adj_q1.z += dot(q_s_q1_z, adj_ret);
847
+ adj_q1.w += dot(q_s_q1_w, adj_ret);
848
+
849
+ }
850
+
851
+ template<typename Type>
852
+ inline CUDA_CALLABLE void adj_quat_to_matrix(const quat_t<Type>& q, quat_t<Type>& adj_q, mat_t<3,3,Type>& adj_ret)
853
+ {
854
+ // we don't care about adjoint w.r.t. constant identity matrix
855
+ vec_t<3,Type> t;
856
+
857
+ adj_quat_rotate(q, vec_t<3,Type>(1.0, 0.0, 0.0), adj_q, t, adj_ret.get_col(0));
858
+ adj_quat_rotate(q, vec_t<3,Type>(0.0, 1.0, 0.0), adj_q, t, adj_ret.get_col(1));
859
+ adj_quat_rotate(q, vec_t<3,Type>(0.0, 0.0, 1.0), adj_q, t, adj_ret.get_col(2));
860
+ }
861
+
862
+ template<typename Type>
863
+ inline CUDA_CALLABLE void adj_quat_from_matrix(const mat_t<3,3,Type>& m, mat_t<3,3,Type>& adj_m, const quat_t<Type>& adj_ret)
864
+ {
865
+ const Type tr = m.data[0][0] + m.data[1][1] + m.data[2][2];
866
+ Type x, y, z, w, h = Type(0);
867
+
868
+ Type dx_dm00 = Type(0), dx_dm01 = Type(0), dx_dm02 = Type(0);
869
+ Type dx_dm10 = Type(0), dx_dm11 = Type(0), dx_dm12 = Type(0);
870
+ Type dx_dm20 = Type(0), dx_dm21 = Type(0), dx_dm22 = Type(0);
871
+ Type dy_dm00 = Type(0), dy_dm01 = Type(0), dy_dm02 = Type(0);
872
+ Type dy_dm10 = Type(0), dy_dm11 = Type(0), dy_dm12 = Type(0);
873
+ Type dy_dm20 = Type(0), dy_dm21 = Type(0), dy_dm22 = Type(0);
874
+ Type dz_dm00 = Type(0), dz_dm01 = Type(0), dz_dm02 = Type(0);
875
+ Type dz_dm10 = Type(0), dz_dm11 = Type(0), dz_dm12 = Type(0);
876
+ Type dz_dm20 = Type(0), dz_dm21 = Type(0), dz_dm22 = Type(0);
877
+ Type dw_dm00 = Type(0), dw_dm01 = Type(0), dw_dm02 = Type(0);
878
+ Type dw_dm10 = Type(0), dw_dm11 = Type(0), dw_dm12 = Type(0);
879
+ Type dw_dm20 = Type(0), dw_dm21 = Type(0), dw_dm22 = Type(0);
880
+
881
+ if (tr >= Type(0)) {
882
+ h = sqrt(tr + Type(1));
883
+ w = Type(0.5) * h;
884
+ h = Type(0.5) / h;
885
+
886
+ x = (m.data[2][1] - m.data[1][2]) * h;
887
+ y = (m.data[0][2] - m.data[2][0]) * h;
888
+ z = (m.data[1][0] - m.data[0][1]) * h;
889
+
890
+ dw_dm00 = Type(0.5) * h;
891
+ dw_dm11 = Type(0.5) * h;
892
+ dw_dm22 = Type(0.5) * h;
893
+ dx_dm21 = h;
894
+ dx_dm12 = -h;
895
+ dx_dm00 = Type(2) * h*h*h * (m.data[1][2] - m.data[2][1]);
896
+ dx_dm11 = Type(2) * h*h*h * (m.data[1][2] - m.data[2][1]);
897
+ dx_dm22 = Type(2) * h*h*h * (m.data[1][2] - m.data[2][1]);
898
+ dy_dm02 = h;
899
+ dy_dm20 = -h;
900
+ dy_dm00 = Type(2) * h*h*h * (m.data[2][0] - m.data[0][2]);
901
+ dy_dm11 = Type(2) * h*h*h * (m.data[2][0] - m.data[0][2]);
902
+ dy_dm22 = Type(2) * h*h*h * (m.data[2][0] - m.data[0][2]);
903
+ dz_dm10 = h;
904
+ dz_dm01 = -h;
905
+ dz_dm00 = Type(2) * h*h*h * (m.data[0][1] - m.data[1][0]);
906
+ dz_dm11 = Type(2) * h*h*h * (m.data[0][1] - m.data[1][0]);
907
+ dz_dm22 = Type(2) * h*h*h * (m.data[0][1] - m.data[1][0]);
908
+ } else {
909
+ size_t max_diag = 0;
910
+ if (m.data[1][1] > m.data[0][0]) {
911
+ max_diag = 1;
912
+ }
913
+ if (m.data[2][2] > m.data[max_diag][max_diag]) {
914
+ max_diag = 2;
915
+ }
916
+
917
+ if (max_diag == 0) {
918
+ h = sqrt((m.data[0][0] - (m.data[1][1] + m.data[2][2])) + Type(1));
919
+ x = Type(0.5) * h;
920
+ h = Type(0.5) / h;
921
+
922
+ y = (m.data[0][1] + m.data[1][0]) * h;
923
+ z = (m.data[2][0] + m.data[0][2]) * h;
924
+ w = (m.data[2][1] - m.data[1][2]) * h;
925
+
926
+ dx_dm00 = Type(0.5) * h;
927
+ dx_dm11 = -Type(0.5) * h;
928
+ dx_dm22 = -Type(0.5) * h;
929
+ dy_dm01 = h;
930
+ dy_dm10 = h;
931
+ dy_dm00 = -Type(2) * h*h*h * (m.data[0][1] + m.data[1][0]);
932
+ dy_dm11 = Type(2) * h*h*h * (m.data[0][1] + m.data[1][0]);
933
+ dy_dm22 = Type(2) * h*h*h * (m.data[0][1] + m.data[1][0]);
934
+ dz_dm20 = h;
935
+ dz_dm02 = h;
936
+ dz_dm00 = -Type(2) * h*h*h * (m.data[2][0] + m.data[0][2]);
937
+ dz_dm11 = Type(2) * h*h*h * (m.data[2][0] + m.data[0][2]);
938
+ dz_dm22 = Type(2) * h*h*h * (m.data[2][0] + m.data[0][2]);
939
+ dw_dm21 = h;
940
+ dw_dm12 = -h;
941
+ dw_dm00 = Type(2) * h*h*h * (m.data[1][2] - m.data[2][1]);
942
+ dw_dm11 = Type(2) * h*h*h * (m.data[2][1] - m.data[1][2]);
943
+ dw_dm22 = Type(2) * h*h*h * (m.data[2][1] - m.data[1][2]);
944
+ } else if (max_diag == 1) {
945
+ h = sqrt((m.data[1][1] - (m.data[2][2] + m.data[0][0])) + Type(1));
946
+ y = Type(0.5) * h;
947
+ h = Type(0.5) / h;
948
+
949
+ z = (m.data[1][2] + m.data[2][1]) * h;
950
+ x = (m.data[0][1] + m.data[1][0]) * h;
951
+ w = (m.data[0][2] - m.data[2][0]) * h;
952
+
953
+ dy_dm00 = -Type(0.5) * h;
954
+ dy_dm11 = Type(0.5) * h;
955
+ dy_dm22 = -Type(0.5) * h;
956
+ dz_dm12 = h;
957
+ dz_dm21 = h;
958
+ dz_dm00 = Type(2) * h*h*h * (m.data[1][2] + m.data[2][1]);
959
+ dz_dm11 = -Type(2) * h*h*h * (m.data[1][2] + m.data[2][1]);
960
+ dz_dm22 = Type(2) * h*h*h * (m.data[1][2] + m.data[2][1]);
961
+ dx_dm01 = h;
962
+ dx_dm10 = h;
963
+ dx_dm00 = Type(2) * h*h*h * (m.data[0][1] + m.data[1][0]);
964
+ dx_dm11 = -Type(2) * h*h*h * (m.data[0][1] + m.data[1][0]);
965
+ dx_dm22 = Type(2) * h*h*h * (m.data[0][1] + m.data[1][0]);
966
+ dw_dm02 = h;
967
+ dw_dm20 = -h;
968
+ dw_dm00 = Type(2) * h*h*h * (m.data[0][2] - m.data[2][0]);
969
+ dw_dm11 = Type(2) * h*h*h * (m.data[2][0] - m.data[0][2]);
970
+ dw_dm22 = Type(2) * h*h*h * (m.data[0][2] - m.data[2][0]);
971
+ } else if (max_diag == 2) {
972
+ h = sqrt((m.data[2][2] - (m.data[0][0] + m.data[1][1])) + Type(1));
973
+ z = Type(0.5) * h;
974
+ h = Type(0.5) / h;
975
+
976
+ x = (m.data[2][0] + m.data[0][2]) * h;
977
+ y = (m.data[1][2] + m.data[2][1]) * h;
978
+ w = (m.data[1][0] - m.data[0][1]) * h;
979
+
980
+ dz_dm00 = -Type(0.5) * h;
981
+ dz_dm11 = -Type(0.5) * h;
982
+ dz_dm22 = Type(0.5) * h;
983
+ dx_dm20 = h;
984
+ dx_dm02 = h;
985
+ dx_dm00 = Type(2) * h*h*h * (m.data[2][0] + m.data[0][2]);
986
+ dx_dm11 = Type(2) * h*h*h * (m.data[2][0] + m.data[0][2]);
987
+ dx_dm22 = -Type(2) * h*h*h * (m.data[2][0] + m.data[0][2]);
988
+ dy_dm12 = h;
989
+ dy_dm21 = h;
990
+ dy_dm00 = Type(2) * h*h*h * (m.data[1][2] + m.data[2][1]);
991
+ dy_dm11 = Type(2) * h*h*h * (m.data[1][2] + m.data[2][1]);
992
+ dy_dm22 = -Type(2) * h*h*h * (m.data[1][2] + m.data[2][1]);
993
+ dw_dm10 = h;
994
+ dw_dm01 = -h;
995
+ dw_dm00 = Type(2) * h*h*h * (m.data[1][0] - m.data[0][1]);
996
+ dw_dm11 = Type(2) * h*h*h * (m.data[1][0] - m.data[0][1]);
997
+ dw_dm22 = Type(2) * h*h*h * (m.data[0][1] - m.data[1][0]);
998
+ }
999
+ }
1000
+
1001
+ quat_t<Type> dq_dm00 = quat_t<Type>(dx_dm00, dy_dm00, dz_dm00, dw_dm00);
1002
+ quat_t<Type> dq_dm01 = quat_t<Type>(dx_dm01, dy_dm01, dz_dm01, dw_dm01);
1003
+ quat_t<Type> dq_dm02 = quat_t<Type>(dx_dm02, dy_dm02, dz_dm02, dw_dm02);
1004
+ quat_t<Type> dq_dm10 = quat_t<Type>(dx_dm10, dy_dm10, dz_dm10, dw_dm10);
1005
+ quat_t<Type> dq_dm11 = quat_t<Type>(dx_dm11, dy_dm11, dz_dm11, dw_dm11);
1006
+ quat_t<Type> dq_dm12 = quat_t<Type>(dx_dm12, dy_dm12, dz_dm12, dw_dm12);
1007
+ quat_t<Type> dq_dm20 = quat_t<Type>(dx_dm20, dy_dm20, dz_dm20, dw_dm20);
1008
+ quat_t<Type> dq_dm21 = quat_t<Type>(dx_dm21, dy_dm21, dz_dm21, dw_dm21);
1009
+ quat_t<Type> dq_dm22 = quat_t<Type>(dx_dm22, dy_dm22, dz_dm22, dw_dm22);
1010
+
1011
+ quat_t<Type> adj_q;
1012
+ adj_normalize(quat_t<Type>(x, y, z, w), adj_q, adj_ret);
1013
+
1014
+ adj_m.data[0][0] += dot(dq_dm00, adj_q);
1015
+ adj_m.data[0][1] += dot(dq_dm01, adj_q);
1016
+ adj_m.data[0][2] += dot(dq_dm02, adj_q);
1017
+ adj_m.data[1][0] += dot(dq_dm10, adj_q);
1018
+ adj_m.data[1][1] += dot(dq_dm11, adj_q);
1019
+ adj_m.data[1][2] += dot(dq_dm12, adj_q);
1020
+ adj_m.data[2][0] += dot(dq_dm20, adj_q);
1021
+ adj_m.data[2][1] += dot(dq_dm21, adj_q);
1022
+ adj_m.data[2][2] += dot(dq_dm22, adj_q);
1023
+ }
1024
+
1025
+ template<typename Type>
1026
+ inline CUDA_CALLABLE void adj_mat_t(const vec_t<3,Type>& pos, const quat_t<Type>& rot, const vec_t<3,Type>& scale,
1027
+ vec_t<3,Type>& adj_pos, quat_t<Type>& adj_rot, vec_t<3,Type>& adj_scale, const mat_t<4,4,Type>& adj_ret)
1028
+ {
1029
+ mat_t<3,3,Type> R = quat_to_matrix(rot);
1030
+ mat_t<3,3,Type> adj_R(0);
1031
+
1032
+ adj_pos[0] += adj_ret.data[0][3];
1033
+ adj_pos[1] += adj_ret.data[1][3];
1034
+ adj_pos[2] += adj_ret.data[2][3];
1035
+
1036
+ adj_mul(R.data[0][0], scale[0], adj_R.data[0][0], adj_scale[0], adj_ret.data[0][0]);
1037
+ adj_mul(R.data[1][0], scale[0], adj_R.data[1][0], adj_scale[0], adj_ret.data[1][0]);
1038
+ adj_mul(R.data[2][0], scale[0], adj_R.data[2][0], adj_scale[0], adj_ret.data[2][0]);
1039
+
1040
+ adj_mul(R.data[0][1], scale[1], adj_R.data[0][1], adj_scale[1], adj_ret.data[0][1]);
1041
+ adj_mul(R.data[1][1], scale[1], adj_R.data[1][1], adj_scale[1], adj_ret.data[1][1]);
1042
+ adj_mul(R.data[2][1], scale[1], adj_R.data[2][1], adj_scale[1], adj_ret.data[2][1]);
1043
+
1044
+ adj_mul(R.data[0][2], scale[2], adj_R.data[0][2], adj_scale[2], adj_ret.data[0][2]);
1045
+ adj_mul(R.data[1][2], scale[2], adj_R.data[1][2], adj_scale[2], adj_ret.data[1][2]);
1046
+ adj_mul(R.data[2][2], scale[2], adj_R.data[2][2], adj_scale[2], adj_ret.data[2][2]);
1047
+
1048
+ adj_quat_to_matrix(rot, adj_rot, adj_R);
1049
+ }
1050
+
1051
+ template<unsigned Rows, unsigned Cols, typename Type>
1052
+ inline CUDA_CALLABLE mat_t<Rows,Cols,Type>::mat_t(const vec_t<3,Type>& pos, const quat_t<Type>& rot, const vec_t<3,Type>& scale)
1053
+ {
1054
+ mat_t<3,3,Type> R = quat_to_matrix(rot);
1055
+
1056
+ data[0][0] = R.data[0][0]*scale[0];
1057
+ data[1][0] = R.data[1][0]*scale[0];
1058
+ data[2][0] = R.data[2][0]*scale[0];
1059
+ data[3][0] = Type(0);
1060
+
1061
+ data[0][1] = R.data[0][1]*scale[1];
1062
+ data[1][1] = R.data[1][1]*scale[1];
1063
+ data[2][1] = R.data[2][1]*scale[1];
1064
+ data[3][1] = Type(0);
1065
+
1066
+ data[0][2] = R.data[0][2]*scale[2];
1067
+ data[1][2] = R.data[1][2]*scale[2];
1068
+ data[2][2] = R.data[2][2]*scale[2];
1069
+ data[3][2] = Type(0);
1070
+
1071
+ data[0][3] = pos[0];
1072
+ data[1][3] = pos[1];
1073
+ data[2][3] = pos[2];
1074
+ data[3][3] = Type(1);
1075
+ }
1076
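This constructor packs translation, rotation, and per-axis scale into a 4x4 transform: the upper-left 3x3 block holds the rotation columns scaled by scale[0..2], the last column holds pos, and the bottom row is (0, 0, 0, 1). A construction sketch (only meaningful for Rows == Cols == 4):

    wp::vec_t<3, float> pos(1.f, 2.f, 3.f);
    wp::vec_t<3, float> scale(1.f, 1.f, 2.f);
    wp::quat rot = wp::quat_identity<float>();
    wp::mat_t<4, 4, float> xform(pos, rot, scale);  // last column = (1, 2, 3, 1)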
+
1077
+ template<typename Type=float32>
1078
+ inline CUDA_CALLABLE quat_t<Type> quat_identity()
1079
+ {
1080
+ return quat_t<Type>(Type(0), Type(0), Type(0), Type(1));
1081
+ }
1082
+
1083
+
1084
+
1085
1085
  } // namespace wp