warp-lang 1.0.2__py3-none-macosx_10_13_universal2.whl → 1.1.0__py3-none-macosx_10_13_universal2.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of warp-lang might be problematic.
- warp/__init__.py +108 -97
- warp/__init__.pyi +1 -1
- warp/bin/libwarp-clang.dylib +0 -0
- warp/bin/libwarp.dylib +0 -0
- warp/build.py +115 -113
- warp/build_dll.py +383 -375
- warp/builtins.py +3425 -3354
- warp/codegen.py +2878 -2792
- warp/config.py +40 -36
- warp/constants.py +45 -45
- warp/context.py +5194 -5102
- warp/dlpack.py +442 -442
- warp/examples/__init__.py +16 -16
- warp/examples/assets/bear.usd +0 -0
- warp/examples/assets/bunny.usd +0 -0
- warp/examples/assets/cartpole.urdf +110 -110
- warp/examples/assets/crazyflie.usd +0 -0
- warp/examples/assets/cube.usd +0 -0
- warp/examples/assets/nv_ant.xml +92 -92
- warp/examples/assets/nv_humanoid.xml +183 -183
- warp/examples/assets/quadruped.urdf +267 -267
- warp/examples/assets/rocks.nvdb +0 -0
- warp/examples/assets/rocks.usd +0 -0
- warp/examples/assets/sphere.usd +0 -0
- warp/examples/benchmarks/benchmark_api.py +383 -383
- warp/examples/benchmarks/benchmark_cloth.py +278 -277
- warp/examples/benchmarks/benchmark_cloth_cupy.py +88 -88
- warp/examples/benchmarks/benchmark_cloth_jax.py +97 -100
- warp/examples/benchmarks/benchmark_cloth_numba.py +146 -142
- warp/examples/benchmarks/benchmark_cloth_numpy.py +77 -77
- warp/examples/benchmarks/benchmark_cloth_pytorch.py +86 -86
- warp/examples/benchmarks/benchmark_cloth_taichi.py +112 -112
- warp/examples/benchmarks/benchmark_cloth_warp.py +146 -146
- warp/examples/benchmarks/benchmark_launches.py +295 -295
- warp/examples/browse.py +29 -29
- warp/examples/core/example_dem.py +234 -219
- warp/examples/core/example_fluid.py +293 -267
- warp/examples/core/example_graph_capture.py +144 -126
- warp/examples/core/example_marching_cubes.py +188 -174
- warp/examples/core/example_mesh.py +174 -155
- warp/examples/core/example_mesh_intersect.py +205 -193
- warp/examples/core/example_nvdb.py +176 -170
- warp/examples/core/example_raycast.py +105 -90
- warp/examples/core/example_raymarch.py +199 -178
- warp/examples/core/example_render_opengl.py +185 -141
- warp/examples/core/example_sph.py +405 -387
- warp/examples/core/example_torch.py +222 -181
- warp/examples/core/example_wave.py +263 -248
- warp/examples/fem/bsr_utils.py +378 -380
- warp/examples/fem/example_apic_fluid.py +407 -389
- warp/examples/fem/example_convection_diffusion.py +182 -168
- warp/examples/fem/example_convection_diffusion_dg.py +219 -209
- warp/examples/fem/example_convection_diffusion_dg0.py +204 -194
- warp/examples/fem/example_deformed_geometry.py +177 -159
- warp/examples/fem/example_diffusion.py +201 -173
- warp/examples/fem/example_diffusion_3d.py +177 -152
- warp/examples/fem/example_diffusion_mgpu.py +221 -214
- warp/examples/fem/example_mixed_elasticity.py +244 -222
- warp/examples/fem/example_navier_stokes.py +259 -243
- warp/examples/fem/example_stokes.py +220 -192
- warp/examples/fem/example_stokes_transfer.py +265 -249
- warp/examples/fem/mesh_utils.py +133 -109
- warp/examples/fem/plot_utils.py +292 -287
- warp/examples/optim/example_bounce.py +260 -246
- warp/examples/optim/example_cloth_throw.py +222 -209
- warp/examples/optim/example_diffray.py +566 -536
- warp/examples/optim/example_drone.py +864 -835
- warp/examples/optim/example_inverse_kinematics.py +176 -168
- warp/examples/optim/example_inverse_kinematics_torch.py +185 -169
- warp/examples/optim/example_spring_cage.py +239 -231
- warp/examples/optim/example_trajectory.py +223 -199
- warp/examples/optim/example_walker.py +306 -293
- warp/examples/sim/example_cartpole.py +139 -129
- warp/examples/sim/example_cloth.py +196 -186
- warp/examples/sim/example_granular.py +124 -111
- warp/examples/sim/example_granular_collision_sdf.py +197 -186
- warp/examples/sim/example_jacobian_ik.py +236 -214
- warp/examples/sim/example_particle_chain.py +118 -105
- warp/examples/sim/example_quadruped.py +193 -180
- warp/examples/sim/example_rigid_chain.py +197 -187
- warp/examples/sim/example_rigid_contact.py +189 -177
- warp/examples/sim/example_rigid_force.py +127 -125
- warp/examples/sim/example_rigid_gyroscopic.py +109 -95
- warp/examples/sim/example_rigid_soft_contact.py +134 -122
- warp/examples/sim/example_soft_body.py +190 -177
- warp/fabric.py +337 -335
- warp/fem/__init__.py +60 -27
- warp/fem/cache.py +401 -388
- warp/fem/dirichlet.py +178 -179
- warp/fem/domain.py +262 -263
- warp/fem/field/__init__.py +100 -101
- warp/fem/field/field.py +148 -149
- warp/fem/field/nodal_field.py +298 -299
- warp/fem/field/restriction.py +22 -21
- warp/fem/field/test.py +180 -181
- warp/fem/field/trial.py +183 -183
- warp/fem/geometry/__init__.py +15 -19
- warp/fem/geometry/closest_point.py +69 -70
- warp/fem/geometry/deformed_geometry.py +270 -271
- warp/fem/geometry/element.py +744 -744
- warp/fem/geometry/geometry.py +184 -186
- warp/fem/geometry/grid_2d.py +380 -373
- warp/fem/geometry/grid_3d.py +441 -435
- warp/fem/geometry/hexmesh.py +953 -953
- warp/fem/geometry/partition.py +374 -376
- warp/fem/geometry/quadmesh_2d.py +532 -532
- warp/fem/geometry/tetmesh.py +840 -840
- warp/fem/geometry/trimesh_2d.py +577 -577
- warp/fem/integrate.py +1630 -1615
- warp/fem/operator.py +190 -191
- warp/fem/polynomial.py +214 -213
- warp/fem/quadrature/__init__.py +2 -2
- warp/fem/quadrature/pic_quadrature.py +243 -245
- warp/fem/quadrature/quadrature.py +295 -294
- warp/fem/space/__init__.py +294 -292
- warp/fem/space/basis_space.py +488 -489
- warp/fem/space/collocated_function_space.py +100 -105
- warp/fem/space/dof_mapper.py +236 -236
- warp/fem/space/function_space.py +148 -145
- warp/fem/space/grid_2d_function_space.py +267 -267
- warp/fem/space/grid_3d_function_space.py +305 -306
- warp/fem/space/hexmesh_function_space.py +350 -352
- warp/fem/space/partition.py +350 -350
- warp/fem/space/quadmesh_2d_function_space.py +368 -369
- warp/fem/space/restriction.py +158 -160
- warp/fem/space/shape/__init__.py +13 -15
- warp/fem/space/shape/cube_shape_function.py +738 -738
- warp/fem/space/shape/shape_function.py +102 -103
- warp/fem/space/shape/square_shape_function.py +611 -611
- warp/fem/space/shape/tet_shape_function.py +565 -567
- warp/fem/space/shape/triangle_shape_function.py +429 -429
- warp/fem/space/tetmesh_function_space.py +294 -292
- warp/fem/space/topology.py +297 -295
- warp/fem/space/trimesh_2d_function_space.py +223 -221
- warp/fem/types.py +77 -77
- warp/fem/utils.py +495 -495
- warp/jax.py +166 -141
- warp/jax_experimental.py +341 -339
- warp/native/array.h +1072 -1025
- warp/native/builtin.h +1560 -1560
- warp/native/bvh.cpp +398 -398
- warp/native/bvh.cu +525 -525
- warp/native/bvh.h +429 -429
- warp/native/clang/clang.cpp +495 -464
- warp/native/crt.cpp +31 -31
- warp/native/crt.h +334 -334
- warp/native/cuda_crt.h +1049 -1049
- warp/native/cuda_util.cpp +549 -540
- warp/native/cuda_util.h +288 -203
- warp/native/cutlass_gemm.cpp +34 -34
- warp/native/cutlass_gemm.cu +372 -372
- warp/native/error.cpp +66 -66
- warp/native/error.h +27 -27
- warp/native/fabric.h +228 -228
- warp/native/hashgrid.cpp +301 -278
- warp/native/hashgrid.cu +78 -77
- warp/native/hashgrid.h +227 -227
- warp/native/initializer_array.h +32 -32
- warp/native/intersect.h +1204 -1204
- warp/native/intersect_adj.h +365 -365
- warp/native/intersect_tri.h +322 -322
- warp/native/marching.cpp +2 -2
- warp/native/marching.cu +497 -497
- warp/native/marching.h +2 -2
- warp/native/mat.h +1498 -1498
- warp/native/matnn.h +333 -333
- warp/native/mesh.cpp +203 -203
- warp/native/mesh.cu +293 -293
- warp/native/mesh.h +1887 -1887
- warp/native/nanovdb/NanoVDB.h +4782 -4782
- warp/native/nanovdb/PNanoVDB.h +2553 -2553
- warp/native/nanovdb/PNanoVDBWrite.h +294 -294
- warp/native/noise.h +850 -850
- warp/native/quat.h +1084 -1084
- warp/native/rand.h +299 -299
- warp/native/range.h +108 -108
- warp/native/reduce.cpp +156 -156
- warp/native/reduce.cu +348 -348
- warp/native/runlength_encode.cpp +61 -61
- warp/native/runlength_encode.cu +46 -46
- warp/native/scan.cpp +30 -30
- warp/native/scan.cu +36 -36
- warp/native/scan.h +7 -7
- warp/native/solid_angle.h +442 -442
- warp/native/sort.cpp +94 -94
- warp/native/sort.cu +97 -97
- warp/native/sort.h +14 -14
- warp/native/sparse.cpp +337 -337
- warp/native/sparse.cu +544 -544
- warp/native/spatial.h +630 -630
- warp/native/svd.h +562 -562
- warp/native/temp_buffer.h +30 -30
- warp/native/vec.h +1132 -1132
- warp/native/volume.cpp +297 -297
- warp/native/volume.cu +32 -32
- warp/native/volume.h +538 -538
- warp/native/volume_builder.cu +425 -425
- warp/native/volume_builder.h +19 -19
- warp/native/warp.cpp +1057 -1052
- warp/native/warp.cu +2943 -2828
- warp/native/warp.h +313 -305
- warp/optim/__init__.py +9 -9
- warp/optim/adam.py +120 -120
- warp/optim/linear.py +1104 -939
- warp/optim/sgd.py +104 -92
- warp/render/__init__.py +10 -10
- warp/render/render_opengl.py +3217 -3204
- warp/render/render_usd.py +768 -749
- warp/render/utils.py +152 -150
- warp/sim/__init__.py +52 -59
- warp/sim/articulation.py +685 -685
- warp/sim/collide.py +1594 -1590
- warp/sim/import_mjcf.py +489 -481
- warp/sim/import_snu.py +220 -221
- warp/sim/import_urdf.py +536 -516
- warp/sim/import_usd.py +887 -881
- warp/sim/inertia.py +316 -317
- warp/sim/integrator.py +234 -233
- warp/sim/integrator_euler.py +1956 -1956
- warp/sim/integrator_featherstone.py +1910 -1991
- warp/sim/integrator_xpbd.py +3294 -3312
- warp/sim/model.py +4473 -4314
- warp/sim/particles.py +113 -112
- warp/sim/render.py +417 -403
- warp/sim/utils.py +413 -410
- warp/sparse.py +1227 -1227
- warp/stubs.py +2109 -2469
- warp/tape.py +1162 -225
- warp/tests/__init__.py +1 -1
- warp/tests/__main__.py +4 -4
- warp/tests/assets/torus.usda +105 -105
- warp/tests/aux_test_class_kernel.py +26 -26
- warp/tests/aux_test_compile_consts_dummy.py +10 -10
- warp/tests/aux_test_conditional_unequal_types_kernels.py +21 -21
- warp/tests/aux_test_dependent.py +22 -22
- warp/tests/aux_test_grad_customs.py +23 -23
- warp/tests/aux_test_reference.py +11 -11
- warp/tests/aux_test_reference_reference.py +10 -10
- warp/tests/aux_test_square.py +17 -17
- warp/tests/aux_test_unresolved_func.py +14 -14
- warp/tests/aux_test_unresolved_symbol.py +14 -14
- warp/tests/disabled_kinematics.py +239 -239
- warp/tests/run_coverage_serial.py +31 -31
- warp/tests/test_adam.py +157 -157
- warp/tests/test_arithmetic.py +1124 -1124
- warp/tests/test_array.py +2417 -2326
- warp/tests/test_array_reduce.py +150 -150
- warp/tests/test_async.py +668 -656
- warp/tests/test_atomic.py +141 -141
- warp/tests/test_bool.py +204 -149
- warp/tests/test_builtins_resolution.py +1292 -1292
- warp/tests/test_bvh.py +164 -171
- warp/tests/test_closest_point_edge_edge.py +228 -228
- warp/tests/test_codegen.py +566 -553
- warp/tests/test_compile_consts.py +97 -101
- warp/tests/test_conditional.py +246 -246
- warp/tests/test_copy.py +232 -215
- warp/tests/test_ctypes.py +632 -632
- warp/tests/test_dense.py +67 -67
- warp/tests/test_devices.py +91 -98
- warp/tests/test_dlpack.py +530 -529
- warp/tests/test_examples.py +400 -378
- warp/tests/test_fabricarray.py +955 -955
- warp/tests/test_fast_math.py +62 -54
- warp/tests/test_fem.py +1277 -1278
- warp/tests/test_fp16.py +130 -130
- warp/tests/test_func.py +338 -337
- warp/tests/test_generics.py +571 -571
- warp/tests/test_grad.py +746 -640
- warp/tests/test_grad_customs.py +333 -336
- warp/tests/test_hash_grid.py +210 -164
- warp/tests/test_import.py +39 -39
- warp/tests/test_indexedarray.py +1134 -1134
- warp/tests/test_intersect.py +67 -67
- warp/tests/test_jax.py +307 -307
- warp/tests/test_large.py +167 -164
- warp/tests/test_launch.py +354 -354
- warp/tests/test_lerp.py +261 -261
- warp/tests/test_linear_solvers.py +191 -171
- warp/tests/test_lvalue.py +421 -493
- warp/tests/test_marching_cubes.py +65 -65
- warp/tests/test_mat.py +1801 -1827
- warp/tests/test_mat_lite.py +115 -115
- warp/tests/test_mat_scalar_ops.py +2907 -2889
- warp/tests/test_math.py +126 -193
- warp/tests/test_matmul.py +500 -499
- warp/tests/test_matmul_lite.py +410 -410
- warp/tests/test_mempool.py +188 -190
- warp/tests/test_mesh.py +284 -324
- warp/tests/test_mesh_query_aabb.py +228 -241
- warp/tests/test_mesh_query_point.py +692 -702
- warp/tests/test_mesh_query_ray.py +292 -303
- warp/tests/test_mlp.py +276 -276
- warp/tests/test_model.py +110 -110
- warp/tests/test_modules_lite.py +39 -39
- warp/tests/test_multigpu.py +163 -163
- warp/tests/test_noise.py +248 -248
- warp/tests/test_operators.py +250 -250
- warp/tests/test_options.py +123 -125
- warp/tests/test_peer.py +133 -137
- warp/tests/test_pinned.py +78 -78
- warp/tests/test_print.py +54 -54
- warp/tests/test_quat.py +2086 -2086
- warp/tests/test_rand.py +288 -288
- warp/tests/test_reload.py +217 -217
- warp/tests/test_rounding.py +179 -179
- warp/tests/test_runlength_encode.py +190 -190
- warp/tests/test_sim_grad.py +243 -0
- warp/tests/test_sim_kinematics.py +91 -97
- warp/tests/test_smoothstep.py +168 -168
- warp/tests/test_snippet.py +305 -266
- warp/tests/test_sparse.py +468 -460
- warp/tests/test_spatial.py +2148 -2148
- warp/tests/test_streams.py +486 -473
- warp/tests/test_struct.py +710 -675
- warp/tests/test_tape.py +173 -148
- warp/tests/test_torch.py +743 -743
- warp/tests/test_transient_module.py +87 -87
- warp/tests/test_types.py +556 -659
- warp/tests/test_utils.py +490 -499
- warp/tests/test_vec.py +1264 -1268
- warp/tests/test_vec_lite.py +73 -73
- warp/tests/test_vec_scalar_ops.py +2099 -2099
- warp/tests/test_verify_fp.py +94 -94
- warp/tests/test_volume.py +737 -736
- warp/tests/test_volume_write.py +255 -265
- warp/tests/unittest_serial.py +37 -37
- warp/tests/unittest_suites.py +363 -359
- warp/tests/unittest_utils.py +603 -578
- warp/tests/unused_test_misc.py +71 -71
- warp/tests/walkthrough_debug.py +85 -85
- warp/thirdparty/appdirs.py +598 -598
- warp/thirdparty/dlpack.py +143 -143
- warp/thirdparty/unittest_parallel.py +566 -561
- warp/torch.py +321 -295
- warp/types.py +4504 -4450
- warp/utils.py +1008 -821
- {warp_lang-1.0.2.dist-info → warp_lang-1.1.0.dist-info}/LICENSE.md +126 -126
- {warp_lang-1.0.2.dist-info → warp_lang-1.1.0.dist-info}/METADATA +338 -400
- warp_lang-1.1.0.dist-info/RECORD +352 -0
- warp/examples/assets/cube.usda +0 -42
- warp/examples/assets/sphere.usda +0 -56
- warp/examples/assets/torus.usda +0 -105
- warp_lang-1.0.2.dist-info/RECORD +0 -352
- {warp_lang-1.0.2.dist-info → warp_lang-1.1.0.dist-info}/WHEEL +0 -0
- {warp_lang-1.0.2.dist-info → warp_lang-1.1.0.dist-info}/top_level.txt +0 -0
warp/native/svd.h
CHANGED
@@ -1,562 +1,562 @@
(The diff viewer marks every line of this file as removed and re-added; the removed and added text is identical, so the file contents are listed once below.)

/** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 */

// The MIT License (MIT)

// Copyright (c) 2014 Eric V. Jang

// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:

// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.

// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.

// Source: https://github.com/ericjang/svd3/blob/master/svd3_cuda/svd3_cuda.h

#pragma once

#include "builtin.h"

namespace wp
{

#define _gamma 5.828427124 // FOUR_GAMMA_SQUARED = sqrt(8)+3;
#define _cstar 0.923879532 // cos(pi/8)
#define _sstar 0.3826834323 // sin(p/8)
#define _EPSILON 1e-6

// TODO: replace sqrt with rsqrt

template<typename Type>
inline CUDA_CALLABLE
Type accurateSqrt(Type x)
{
    return x / sqrt(x);
}

template<typename Type>
inline CUDA_CALLABLE
void condSwap(bool c, Type &X, Type &Y)
{
    // used in step 2
    Type Z = X;
    X = c ? Y : X;
    Y = c ? Z : Y;
}

template<typename Type>
inline CUDA_CALLABLE
void condNegSwap(bool c, Type &X, Type &Y)
{
    // used in step 2 and 3
    Type Z = -X;
    X = c ? Y : X;
    Y = c ? Z : Y;
}

// matrix multiplication M = A * B
template<typename Type>
inline CUDA_CALLABLE
void multAB(Type a11, Type a12, Type a13,
            Type a21, Type a22, Type a23,
            Type a31, Type a32, Type a33,
            //
            Type b11, Type b12, Type b13,
            Type b21, Type b22, Type b23,
            Type b31, Type b32, Type b33,
            //
            Type &m11, Type &m12, Type &m13,
            Type &m21, Type &m22, Type &m23,
            Type &m31, Type &m32, Type &m33)
{
    m11=a11*b11 + a12*b21 + a13*b31; m12=a11*b12 + a12*b22 + a13*b32; m13=a11*b13 + a12*b23 + a13*b33;
    m21=a21*b11 + a22*b21 + a23*b31; m22=a21*b12 + a22*b22 + a23*b32; m23=a21*b13 + a22*b23 + a23*b33;
    m31=a31*b11 + a32*b21 + a33*b31; m32=a31*b12 + a32*b22 + a33*b32; m33=a31*b13 + a32*b23 + a33*b33;
}

// matrix multiplication M = Transpose[A] * B
template<typename Type>
inline CUDA_CALLABLE
void multAtB(Type a11, Type a12, Type a13,
             Type a21, Type a22, Type a23,
             Type a31, Type a32, Type a33,
             //
             Type b11, Type b12, Type b13,
             Type b21, Type b22, Type b23,
             Type b31, Type b32, Type b33,
             //
             Type &m11, Type &m12, Type &m13,
             Type &m21, Type &m22, Type &m23,
             Type &m31, Type &m32, Type &m33)
{
    m11=a11*b11 + a21*b21 + a31*b31; m12=a11*b12 + a21*b22 + a31*b32; m13=a11*b13 + a21*b23 + a31*b33;
    m21=a12*b11 + a22*b21 + a32*b31; m22=a12*b12 + a22*b22 + a32*b32; m23=a12*b13 + a22*b23 + a32*b33;
    m31=a13*b11 + a23*b21 + a33*b31; m32=a13*b12 + a23*b22 + a33*b32; m33=a13*b13 + a23*b23 + a33*b33;
}

template<typename Type>
inline CUDA_CALLABLE
void quatToMat3(const Type * qV,
                Type &m11, Type &m12, Type &m13,
                Type &m21, Type &m22, Type &m23,
                Type &m31, Type &m32, Type &m33)
{
    Type w = qV[3];
    Type x = qV[0];
    Type y = qV[1];
    Type z = qV[2];

    Type qxx = x*x;
    Type qyy = y*y;
    Type qzz = z*z;
    Type qxz = x*z;
    Type qxy = x*y;
    Type qyz = y*z;
    Type qwx = w*x;
    Type qwy = w*y;
    Type qwz = w*z;

    m11=Type(1) - Type(2)*(qyy + qzz); m12=Type(2)*(qxy - qwz); m13=Type(2)*(qxz + qwy);
    m21=Type(2)*(qxy + qwz); m22=Type(1) - Type(2)*(qxx + qzz); m23=Type(2)*(qyz - qwx);
    m31=Type(2)*(qxz - qwy); m32=Type(2)*(qyz + qwx); m33=Type(1) - Type(2)*(qxx + qyy);
}

template<typename Type>
inline CUDA_CALLABLE
void approximateGivensQuaternion(Type a11, Type a12, Type a22, Type &ch, Type &sh)
{
    /*
     * Given givens angle computed by approximateGivensAngles,
     * compute the corresponding rotation quaternion.
     */
    ch = Type(2)*(a11-a22);
    sh = a12;
    bool b = _gamma*sh*sh < ch*ch;
    Type w = Type(1) / sqrt(ch*ch+sh*sh);
    ch=b?w*ch:Type(_cstar);
    sh=b?w*sh:Type(_sstar);
}

template<typename Type>
inline CUDA_CALLABLE
void jacobiConjugation( const int x, const int y, const int z,
                        Type &s11,
                        Type &s21, Type &s22,
                        Type &s31, Type &s32, Type &s33,
                        Type * qV)
{
    Type ch,sh;
    approximateGivensQuaternion(s11,s21,s22,ch,sh);

    Type scale = ch*ch+sh*sh;
    Type a = (ch*ch-sh*sh)/scale;
    Type b = (Type(2)*sh*ch)/scale;

    // make temp copy of S
    Type _s11 = s11;
    Type _s21 = s21; Type _s22 = s22;
    Type _s31 = s31; Type _s32 = s32; Type _s33 = s33;

    // perform conjugation S = Q'*S*Q
    // Q already implicitly solved from a, b
    s11 =a*(a*_s11 + b*_s21) + b*(a*_s21 + b*_s22);
    s21 =a*(-b*_s11 + a*_s21) + b*(-b*_s21 + a*_s22); s22=-b*(-b*_s11 + a*_s21) + a*(-b*_s21 + a*_s22);
    s31 =a*_s31 + b*_s32; s32=-b*_s31 + a*_s32; s33=_s33;

    // update cumulative rotation qV
    Type tmp[3];
    tmp[0]=qV[0]*sh;
    tmp[1]=qV[1]*sh;
    tmp[2]=qV[2]*sh;
    sh *= qV[3];

    qV[0] *= ch;
    qV[1] *= ch;
    qV[2] *= ch;
    qV[3] *= ch;

    // (x,y,z) corresponds to ((0,1,2),(1,2,0),(2,0,1))
    // for (p,q) = ((0,1),(1,2),(0,2))
    qV[z] += sh;
    qV[3] -= tmp[z]; // w
    qV[x] += tmp[y];
    qV[y] -= tmp[x];

    // re-arrange matrix for next iteration
    _s11 = s22;
    _s21 = s32; _s22 = s33;
    _s31 = s21; _s32 = s31; _s33 = s11;
    s11 = _s11;
    s21 = _s21; s22 = _s22;
    s31 = _s31; s32 = _s32; s33 = _s33;
}

template<typename Type>
inline CUDA_CALLABLE
Type dist2(Type x, Type y, Type z)
{
    return x*x+y*y+z*z;
}

// finds transformation that diagonalizes a symmetric matrix
template<typename Type>
inline CUDA_CALLABLE
void jacobiEigenanlysis( // symmetric matrix
                         Type &s11,
                         Type &s21, Type &s22,
                         Type &s31, Type &s32, Type &s33,
                         // quaternion representation of V
                         Type * qV)
{
    qV[3]=1; qV[0]=0;qV[1]=0;qV[2]=0; // follow same indexing convention as GLM
    for (int i=0;i<4;i++)
    {
        // we wish to eliminate the maximum off-diagonal element
        // on every iteration, but cycling over all 3 possible rotations
        // in fixed order (p,q) = (1,2) , (2,3), (1,3) still retains
        // asymptotic convergence
        jacobiConjugation(0,1,2,s11,s21,s22,s31,s32,s33,qV); // p,q = 0,1
        jacobiConjugation(1,2,0,s11,s21,s22,s31,s32,s33,qV); // p,q = 1,2
        jacobiConjugation(2,0,1,s11,s21,s22,s31,s32,s33,qV); // p,q = 0,2
    }
}

template<typename Type>
inline CUDA_CALLABLE
void sortSingularValues(// matrix that we want to decompose
                        Type &b11, Type &b12, Type &b13,
                        Type &b21, Type &b22, Type &b23,
                        Type &b31, Type &b32, Type &b33,
                        // sort V simultaneously
                        Type &v11, Type &v12, Type &v13,
                        Type &v21, Type &v22, Type &v23,
                        Type &v31, Type &v32, Type &v33)
{
    Type rho1 = dist2(b11,b21,b31);
    Type rho2 = dist2(b12,b22,b32);
    Type rho3 = dist2(b13,b23,b33);
    bool c;
    c = rho1 < rho2;
    condNegSwap(c,b11,b12); condNegSwap(c,v11,v12);
    condNegSwap(c,b21,b22); condNegSwap(c,v21,v22);
    condNegSwap(c,b31,b32); condNegSwap(c,v31,v32);
    condSwap(c,rho1,rho2);
    c = rho1 < rho3;
    condNegSwap(c,b11,b13); condNegSwap(c,v11,v13);
    condNegSwap(c,b21,b23); condNegSwap(c,v21,v23);
    condNegSwap(c,b31,b33); condNegSwap(c,v31,v33);
    condSwap(c,rho1,rho3);
    c = rho2 < rho3;
    condNegSwap(c,b12,b13); condNegSwap(c,v12,v13);
    condNegSwap(c,b22,b23); condNegSwap(c,v22,v23);
    condNegSwap(c,b32,b33); condNegSwap(c,v32,v33);
}

template<typename Type>
inline CUDA_CALLABLE
void QRGivensQuaternion(Type a1, Type a2, Type &ch, Type &sh)
{
    // a1 = pivot point on diagonal
    // a2 = lower triangular entry we want to annihilate
    Type epsilon = _EPSILON;
    Type rho = accurateSqrt(a1*a1 + a2*a2);

    sh = rho > epsilon ? a2 : Type(0);
    ch = abs(a1) + max(rho,epsilon);
    bool b = a1 < Type(0);
    condSwap(b,sh,ch);
    Type w = Type(1) / sqrt(ch*ch+sh*sh);
    ch *= w;
    sh *= w;
}

template<typename Type>
inline CUDA_CALLABLE
void QRDecomposition(// matrix that we want to decompose
                     Type b11, Type b12, Type b13,
                     Type b21, Type b22, Type b23,
                     Type b31, Type b32, Type b33,
                     // output Q
                     Type &q11, Type &q12, Type &q13,
                     Type &q21, Type &q22, Type &q23,
                     Type &q31, Type &q32, Type &q33,
                     // output R
                     Type &r11, Type &r12, Type &r13,
                     Type &r21, Type &r22, Type &r23,
                     Type &r31, Type &r32, Type &r33)
{
    Type ch1,sh1,ch2,sh2,ch3,sh3;
    Type a,b;

    // first givens rotation (ch,0,0,sh)
    QRGivensQuaternion(b11,b21,ch1,sh1);
    a=Type(1)-Type(2)*sh1*sh1;
    b=Type(2)*ch1*sh1;
    // apply B = Q' * B
    r11=a*b11+b*b21; r12=a*b12+b*b22; r13=a*b13+b*b23;
    r21=-b*b11+a*b21; r22=-b*b12+a*b22; r23=-b*b13+a*b23;
    r31=b31; r32=b32; r33=b33;

    // second givens rotation (ch,0,-sh,0)
    QRGivensQuaternion(r11,r31,ch2,sh2);
    a=Type(1)-Type(2)*sh2*sh2;
    b=Type(2)*ch2*sh2;
    // apply B = Q' * B;
    b11=a*r11+b*r31; b12=a*r12+b*r32; b13=a*r13+b*r33;
    b21=r21; b22=r22; b23=r23;
    b31=-b*r11+a*r31; b32=-b*r12+a*r32; b33=-b*r13+a*r33;

    // third givens rotation (ch,sh,0,0)
    QRGivensQuaternion(b22,b32,ch3,sh3);
    a=Type(1)-Type(2)*sh3*sh3;
    b=Type(2)*ch3*sh3;
    // R is now set to desired value
    r11=b11; r12=b12; r13=b13;
    r21=a*b21+b*b31; r22=a*b22+b*b32; r23=a*b23+b*b33;
    r31=-b*b21+a*b31; r32=-b*b22+a*b32; r33=-b*b23+a*b33;

    // construct the cumulative rotation Q=Q1 * Q2 * Q3
    // the number of floating point operations for three quaternion multiplications
    // is more or less comparable to the explicit form of the joined matrix.
    // certainly more memory-efficient!
    Type sh12=sh1*sh1;
    Type sh22=sh2*sh2;
    Type sh32=sh3*sh3;

    q11=(Type(-1)+Type(2)*sh12)*(Type(-1)+Type(2)*sh22);
    q12=Type(4)*ch2*ch3*(Type(-1)+Type(2)*sh12)*sh2*sh3+Type(2)*ch1*sh1*(Type(-1)+Type(2)*sh32);
    q13=Type(4)*ch1*ch3*sh1*sh3-Type(2)*ch2*(Type(-1)+Type(2)*sh12)*sh2*(Type(-1)+Type(2)*sh32);

    q21=Type(2)*ch1*sh1*(Type(1)-Type(2)*sh22);
    q22=Type(-8)*ch1*ch2*ch3*sh1*sh2*sh3+(Type(-1)+Type(2)*sh12)*(Type(-1)+Type(2)*sh32);
    q23=Type(-2)*ch3*sh3+Type(4)*sh1*(ch3*sh1*sh3+ch1*ch2*sh2*(Type(-1)+Type(2)*sh32));

    q31=Type(2)*ch2*sh2;
    q32=Type(2)*ch3*(Type(1)-Type(2)*sh22)*sh3;
    q33=(Type(-1)+Type(2)*sh22)*(Type(-1)+Type(2)*sh32);
}

template<typename Type>
inline CUDA_CALLABLE
void _svd(// input A
          Type a11, Type a12, Type a13,
          Type a21, Type a22, Type a23,
          Type a31, Type a32, Type a33,
          // output U
          Type &u11, Type &u12, Type &u13,
          Type &u21, Type &u22, Type &u23,
          Type &u31, Type &u32, Type &u33,
          // output S
          Type &s11, Type &s12, Type &s13,
          Type &s21, Type &s22, Type &s23,
          Type &s31, Type &s32, Type &s33,
          // output V
          Type &v11, Type &v12, Type &v13,
          Type &v21, Type &v22, Type &v23,
          Type &v31, Type &v32, Type &v33)
{
    // normal equations matrix
    Type ATA11, ATA12, ATA13;
    Type ATA21, ATA22, ATA23;
    Type ATA31, ATA32, ATA33;

    multAtB(a11,a12,a13,a21,a22,a23,a31,a32,a33,
            a11,a12,a13,a21,a22,a23,a31,a32,a33,
            ATA11,ATA12,ATA13,ATA21,ATA22,ATA23,ATA31,ATA32,ATA33);

    // symmetric eigenalysis
    Type qV[4];
    jacobiEigenanlysis( ATA11,ATA21,ATA22, ATA31,ATA32,ATA33,qV);
    quatToMat3(qV,v11,v12,v13,v21,v22,v23,v31,v32,v33);

    Type b11, b12, b13;
    Type b21, b22, b23;
    Type b31, b32, b33;
    multAB(a11,a12,a13,a21,a22,a23,a31,a32,a33,
           v11,v12,v13,v21,v22,v23,v31,v32,v33,
           b11, b12, b13, b21, b22, b23, b31, b32, b33);

    // sort singular values and find V
    sortSingularValues(b11, b12, b13, b21, b22, b23, b31, b32, b33,
                       v11,v12,v13,v21,v22,v23,v31,v32,v33);

    // QR decomposition
    QRDecomposition(b11, b12, b13, b21, b22, b23, b31, b32, b33,
                    u11, u12, u13, u21, u22, u23, u31, u32, u33,
                    s11, s12, s13, s21, s22, s23, s31, s32, s33);
}

template<typename Type>
inline CUDA_CALLABLE void svd3(const mat_t<3,3,Type>& A, mat_t<3,3,Type>& U, vec_t<3,Type>& sigma, mat_t<3,3,Type>& V) {
    Type s12, s13, s21, s23, s31, s32;
    _svd(A.data[0][0], A.data[0][1], A.data[0][2],
         A.data[1][0], A.data[1][1], A.data[1][2],
         A.data[2][0], A.data[2][1], A.data[2][2],

         U.data[0][0], U.data[0][1], U.data[0][2],
         U.data[1][0], U.data[1][1], U.data[1][2],
         U.data[2][0], U.data[2][1], U.data[2][2],

         sigma[0], s12, s13,
         s21, sigma[1], s23,
         s31, s32, sigma[2],

         V.data[0][0], V.data[0][1], V.data[0][2],
         V.data[1][0], V.data[1][1], V.data[1][2],
         V.data[2][0], V.data[2][1], V.data[2][2]);
}

template<typename Type>
inline CUDA_CALLABLE void adj_svd3(const mat_t<3,3,Type>& A,
                                   const mat_t<3,3,Type>& U,
                                   const vec_t<3,Type>& sigma,
                                   const mat_t<3,3,Type>& V,
                                   mat_t<3,3,Type>& adj_A,
                                   const mat_t<3,3,Type>& adj_U,
                                   const vec_t<3,Type>& adj_sigma,
                                   const mat_t<3,3,Type>& adj_V) {
    Type sx2 = sigma[0] * sigma[0];
    Type sy2 = sigma[1] * sigma[1];
    Type sz2 = sigma[2] * sigma[2];

    Type F01 = Type(1) / min(sy2 - sx2, Type(-1e-6f));
    Type F02 = Type(1) / min(sz2 - sx2, Type(-1e-6f));
    Type F12 = Type(1) / min(sz2 - sy2, Type(-1e-6f));

    mat_t<3,3,Type> F = mat_t<3,3,Type>(0, F01, F02,
                                        -F01, 0, F12,
                                        -F02, -F12, 0);

    mat_t<3,3,Type> adj_sigma_mat = mat_t<3,3,Type>(adj_sigma[0], 0, 0,
                                                    0, adj_sigma[1], 0,
                                                    0, 0, adj_sigma[2]);
    mat_t<3,3,Type> s_mat = mat_t<3,3,Type>(sigma[0], 0, 0,
                                            0, sigma[1], 0,
                                            0, 0, sigma[2]);

    // https://github.com/pytorch/pytorch/blob/d7ddae8e4fe66fa1330317673438d1eb5aa99ca4/torch/csrc/autograd/FunctionsManual.cpp
    mat_t<3,3,Type> UT = transpose(U);
    mat_t<3,3,Type> VT = transpose(V);

    mat_t<3,3,Type> sigma_term = mul(U, mul(adj_sigma_mat, VT));

    mat_t<3,3,Type> u_term = mul(mul(U, mul(cw_mul(F, (mul(UT, adj_U) - mul(transpose(adj_U), U))), s_mat)), VT);
    mat_t<3,3,Type> v_term = mul(U, mul(s_mat, mul(cw_mul(F, (mul(VT, adj_V) - mul(transpose(adj_V), V))), VT)));

    adj_A = adj_A + (u_term + v_term + sigma_term);
}

template<typename Type>
inline CUDA_CALLABLE void qr3(const mat_t<3,3,Type>& A, mat_t<3,3,Type>& Q, mat_t<3,3,Type>& R) {
    QRDecomposition(A.data[0][0], A.data[0][1], A.data[0][2],
                    A.data[1][0], A.data[1][1], A.data[1][2],
                    A.data[2][0], A.data[2][1], A.data[2][2],

                    Q.data[0][0], Q.data[0][1], Q.data[0][2],
                    Q.data[1][0], Q.data[1][1], Q.data[1][2],
                    Q.data[2][0], Q.data[2][1], Q.data[2][2],

                    R.data[0][0], R.data[0][1], R.data[0][2],
                    R.data[1][0], R.data[1][1], R.data[1][2],
                    R.data[2][0], R.data[2][1], R.data[2][2]);
}

template<typename Type>
inline CUDA_CALLABLE void adj_qr3(const mat_t<3,3,Type>& A,
                                  const mat_t<3,3,Type>& Q,
                                  const mat_t<3,3,Type>& R,
                                  mat_t<3,3,Type>& adj_A,
                                  const mat_t<3,3,Type>& adj_Q,
                                  const mat_t<3,3,Type>& adj_R) {
    // Eq 3 of https://arxiv.org/pdf/2009.10071.pdf
    mat_t<3,3,Type> M = mul(R,transpose(adj_R)) - mul(transpose(adj_Q), Q);
    mat_t<3,3,Type> copyltuM = mat_t<3,3,Type>(M.data[0][0], M.data[1][0], M.data[2][0],
                                               M.data[1][0], M.data[1][1], M.data[2][1],
                                               M.data[2][0], M.data[2][1], M.data[2][2]);
    adj_A = adj_A + mul(adj_Q + mul(Q,copyltuM), inverse(transpose(R)));
}

template<typename Type>
inline CUDA_CALLABLE void eig3(const mat_t<3,3,Type>& A, mat_t<3,3,Type>& Q, vec_t<3,Type>& d) {
    Type qV[4];
    Type s11 = A.data[0][0];
    Type s21 = A.data[1][0];
    Type s22 = A.data[1][1];
    Type s31 = A.data[2][0];
    Type s32 = A.data[2][1];
    Type s33 = A.data[2][2];

    jacobiEigenanlysis(s11, s21, s22, s31, s32, s33, qV);
    quatToMat3(qV, Q.data[0][0], Q.data[0][1], Q.data[0][2], Q.data[1][0], Q.data[1][1], Q.data[1][2], Q.data[2][0], Q.data[2][1], Q.data[2][2]);
    mat_t<3,3,Type> t;
    multAtB(Q.data[0][0], Q.data[0][1], Q.data[0][2], Q.data[1][0], Q.data[1][1], Q.data[1][2], Q.data[2][0], Q.data[2][1], Q.data[2][2],
            A.data[0][0], A.data[0][1], A.data[0][2], A.data[1][0], A.data[1][1], A.data[1][2], A.data[2][0], A.data[2][1], A.data[2][2],
            t.data[0][0], t.data[0][1], t.data[0][2], t.data[1][0], t.data[1][1], t.data[1][2], t.data[2][0], t.data[2][1], t.data[2][2]);

    mat_t<3,3,Type> u;
    multAB(t.data[0][0], t.data[0][1], t.data[0][2], t.data[1][0], t.data[1][1], t.data[1][2], t.data[2][0], t.data[2][1], t.data[2][2],
           Q.data[0][0], Q.data[0][1], Q.data[0][2], Q.data[1][0], Q.data[1][1], Q.data[1][2], Q.data[2][0], Q.data[2][1], Q.data[2][2],
           u.data[0][0], u.data[0][1], u.data[0][2], u.data[1][0], u.data[1][1], u.data[1][2], u.data[2][0], u.data[2][1], u.data[2][2]);
    d = vec_t<3,Type>(u.data[0][0], u.data[1][1], u.data[2][2]);
}

template<typename Type>
inline CUDA_CALLABLE void adj_eig3(const mat_t<3,3,Type>& A, const mat_t<3,3,Type>& Q, const vec_t<3,Type>& d,
                                   mat_t<3,3,Type>& adj_A, const mat_t<3,3,Type>& adj_Q, const vec_t<3,Type>& adj_d) {
    // Page 10 of https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf
    mat_t<3,3,Type> D = mat_t<3,3,Type>(d[0], 0, 0,
                                        0, d[1], 0,
                                        0, 0, d[2]);
    mat_t<3,3,Type> D_bar = mat_t<3,3,Type>(adj_d[0], 0, 0,
                                            0, adj_d[1], 0,
                                            0, 0, adj_d[2]);

    Type dyx = d[1] - d[0];
    Type dzx = d[2] - d[0];
    Type dzy = d[2] - d[1];

    if ((dyx < Type(0)) && (dyx > Type(-1e-6))) dyx = -1e-6;
    if ((dyx > Type(0)) && (dyx < Type(1e-6))) dyx = 1e-6;

    if ((dzx < Type(0)) && (dzx > Type(-1e-6))) dzx = -1e-6;
    if ((dzx > Type(0)) && (dzx < Type(1e-6))) dzx = 1e-6;

    if ((dzy < Type(0)) && (dzy > Type(-1e-6))) dzy = -1e-6;
    if ((dzy > Type(0)) && (dzy < Type(1e-6))) dzy = 1e-6;

    Type F01 = Type(1) / dyx;
    Type F02 = Type(1) / dzx;
    Type F12 = Type(1) / dzy;
    mat_t<3,3,Type> F = mat_t<3,3,Type>(0, F01, F02,
                                        -F01, 0, F12,
                                        -F02, -F12, 0);
    mat_t<3,3,Type> QT = transpose(Q);
    adj_A = adj_A + mul(Q, mul(D_bar + cw_mul(F, mul(QT, adj_Q)), QT));
}
}
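For orientation, `_svd` above follows the Jacobi-eigenanalysis-plus-QR scheme of the referenced svd3 kernel. A compact restatement, in standard SVD notation, of what the code computes (nothing beyond the steps already visible in the listing):

$$
\begin{aligned}
S &= A^{\top}A &&\text{(normal equations, \texttt{multAtB})}\\
S &\approx V\,\Lambda\,V^{\top} &&\text{(four fixed-order Jacobi sweeps accumulated in the quaternion \texttt{qV}, \texttt{jacobiEigenanlysis})}\\
B &= A\,V &&\text{(\texttt{multAB})}\\
B,\,V &\;\to\; \text{columns sorted together by decreasing } \lVert b_{\cdot j}\rVert^{2} &&\text{(\texttt{sortSingularValues}, with sign flips via \texttt{condNegSwap})}\\
B &= Q\,R,\qquad U \leftarrow Q,\ \ \Sigma \leftarrow R &&\text{(Givens-based \texttt{QRDecomposition}; } R \text{ is diagonal up to the accuracy of the eigen-step)}
\end{aligned}
$$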
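The reverse-mode adjoint in `adj_svd3` follows the standard SVD differential used in the PyTorch reference cited in the code. With bars denoting adjoints and $\circ$ the element-wise product (`cw_mul`), the update it accumulates is

$$
\bar{A} \mathrel{+}= U\,\bar{\Sigma}\,V^{\top}
\;+\; U\bigl[F \circ (U^{\top}\bar{U}-\bar{U}^{\top}U)\bigr]\Sigma\,V^{\top}
\;+\; U\,\Sigma\bigl[F \circ (V^{\top}\bar{V}-\bar{V}^{\top}V)\bigr]V^{\top},
\qquad
F_{ij}=\frac{1}{\sigma_j^{2}-\sigma_i^{2}}\ (i\neq j),\quad F_{ii}=0,
$$

where each denominator is clamped away from zero (the `min(..., -1e-6)` terms) so that nearly repeated singular values do not blow up the gradient. `adj_eig3` applies the analogous symmetric-eigendecomposition rule from the cited Giles note, with $F_{ij}=1/(d_j-d_i)$ and the same clamping, and `adj_qr3` implements Eq. 3 of the arXiv paper referenced in its comment.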