warp-lang 1.0.2-py3-none-manylinux2014_x86_64.whl → 1.2.0-py3-none-manylinux2014_x86_64.whl
This diff shows the content of publicly available package versions as released to their public registries, and is provided for informational purposes only.
Potentially problematic release: this version of warp-lang might be problematic.
- warp/__init__.py +108 -97
- warp/__init__.pyi +1 -1
- warp/bin/warp-clang.so +0 -0
- warp/bin/warp.so +0 -0
- warp/build.py +88 -113
- warp/build_dll.py +383 -375
- warp/builtins.py +3693 -3354
- warp/codegen.py +2925 -2792
- warp/config.py +40 -36
- warp/constants.py +49 -45
- warp/context.py +5409 -5102
- warp/dlpack.py +442 -442
- warp/examples/__init__.py +16 -16
- warp/examples/assets/bear.usd +0 -0
- warp/examples/assets/bunny.usd +0 -0
- warp/examples/assets/cartpole.urdf +110 -110
- warp/examples/assets/crazyflie.usd +0 -0
- warp/examples/assets/cube.usd +0 -0
- warp/examples/assets/nv_ant.xml +92 -92
- warp/examples/assets/nv_humanoid.xml +183 -183
- warp/examples/assets/quadruped.urdf +267 -267
- warp/examples/assets/rocks.nvdb +0 -0
- warp/examples/assets/rocks.usd +0 -0
- warp/examples/assets/sphere.usd +0 -0
- warp/examples/benchmarks/benchmark_api.py +381 -383
- warp/examples/benchmarks/benchmark_cloth.py +278 -277
- warp/examples/benchmarks/benchmark_cloth_cupy.py +88 -88
- warp/examples/benchmarks/benchmark_cloth_jax.py +97 -100
- warp/examples/benchmarks/benchmark_cloth_numba.py +146 -142
- warp/examples/benchmarks/benchmark_cloth_numpy.py +77 -77
- warp/examples/benchmarks/benchmark_cloth_pytorch.py +86 -86
- warp/examples/benchmarks/benchmark_cloth_taichi.py +112 -112
- warp/examples/benchmarks/benchmark_cloth_warp.py +145 -146
- warp/examples/benchmarks/benchmark_launches.py +293 -295
- warp/examples/browse.py +29 -29
- warp/examples/core/example_dem.py +232 -219
- warp/examples/core/example_fluid.py +291 -267
- warp/examples/core/example_graph_capture.py +142 -126
- warp/examples/core/example_marching_cubes.py +186 -174
- warp/examples/core/example_mesh.py +172 -155
- warp/examples/core/example_mesh_intersect.py +203 -193
- warp/examples/core/example_nvdb.py +174 -170
- warp/examples/core/example_raycast.py +103 -90
- warp/examples/core/example_raymarch.py +197 -178
- warp/examples/core/example_render_opengl.py +183 -141
- warp/examples/core/example_sph.py +403 -387
- warp/examples/core/example_torch.py +219 -181
- warp/examples/core/example_wave.py +261 -248
- warp/examples/fem/bsr_utils.py +378 -380
- warp/examples/fem/example_apic_fluid.py +432 -389
- warp/examples/fem/example_burgers.py +262 -0
- warp/examples/fem/example_convection_diffusion.py +180 -168
- warp/examples/fem/example_convection_diffusion_dg.py +217 -209
- warp/examples/fem/example_deformed_geometry.py +175 -159
- warp/examples/fem/example_diffusion.py +199 -173
- warp/examples/fem/example_diffusion_3d.py +178 -152
- warp/examples/fem/example_diffusion_mgpu.py +219 -214
- warp/examples/fem/example_mixed_elasticity.py +242 -222
- warp/examples/fem/example_navier_stokes.py +257 -243
- warp/examples/fem/example_stokes.py +218 -192
- warp/examples/fem/example_stokes_transfer.py +263 -249
- warp/examples/fem/mesh_utils.py +133 -109
- warp/examples/fem/plot_utils.py +292 -287
- warp/examples/optim/example_bounce.py +258 -246
- warp/examples/optim/example_cloth_throw.py +220 -209
- warp/examples/optim/example_diffray.py +564 -536
- warp/examples/optim/example_drone.py +862 -835
- warp/examples/optim/example_inverse_kinematics.py +174 -168
- warp/examples/optim/example_inverse_kinematics_torch.py +183 -169
- warp/examples/optim/example_spring_cage.py +237 -231
- warp/examples/optim/example_trajectory.py +221 -199
- warp/examples/optim/example_walker.py +304 -293
- warp/examples/sim/example_cartpole.py +137 -129
- warp/examples/sim/example_cloth.py +194 -186
- warp/examples/sim/example_granular.py +122 -111
- warp/examples/sim/example_granular_collision_sdf.py +195 -186
- warp/examples/sim/example_jacobian_ik.py +234 -214
- warp/examples/sim/example_particle_chain.py +116 -105
- warp/examples/sim/example_quadruped.py +191 -180
- warp/examples/sim/example_rigid_chain.py +195 -187
- warp/examples/sim/example_rigid_contact.py +187 -177
- warp/examples/sim/example_rigid_force.py +125 -125
- warp/examples/sim/example_rigid_gyroscopic.py +107 -95
- warp/examples/sim/example_rigid_soft_contact.py +132 -122
- warp/examples/sim/example_soft_body.py +188 -177
- warp/fabric.py +337 -335
- warp/fem/__init__.py +61 -27
- warp/fem/cache.py +403 -388
- warp/fem/dirichlet.py +178 -179
- warp/fem/domain.py +262 -263
- warp/fem/field/__init__.py +100 -101
- warp/fem/field/field.py +148 -149
- warp/fem/field/nodal_field.py +298 -299
- warp/fem/field/restriction.py +22 -21
- warp/fem/field/test.py +180 -181
- warp/fem/field/trial.py +183 -183
- warp/fem/geometry/__init__.py +16 -19
- warp/fem/geometry/closest_point.py +69 -70
- warp/fem/geometry/deformed_geometry.py +270 -271
- warp/fem/geometry/element.py +748 -744
- warp/fem/geometry/geometry.py +184 -186
- warp/fem/geometry/grid_2d.py +380 -373
- warp/fem/geometry/grid_3d.py +437 -435
- warp/fem/geometry/hexmesh.py +953 -953
- warp/fem/geometry/nanogrid.py +455 -0
- warp/fem/geometry/partition.py +374 -376
- warp/fem/geometry/quadmesh_2d.py +532 -532
- warp/fem/geometry/tetmesh.py +840 -840
- warp/fem/geometry/trimesh_2d.py +577 -577
- warp/fem/integrate.py +1684 -1615
- warp/fem/operator.py +190 -191
- warp/fem/polynomial.py +214 -213
- warp/fem/quadrature/__init__.py +2 -2
- warp/fem/quadrature/pic_quadrature.py +243 -245
- warp/fem/quadrature/quadrature.py +295 -294
- warp/fem/space/__init__.py +179 -292
- warp/fem/space/basis_space.py +522 -489
- warp/fem/space/collocated_function_space.py +100 -105
- warp/fem/space/dof_mapper.py +236 -236
- warp/fem/space/function_space.py +148 -145
- warp/fem/space/grid_2d_function_space.py +148 -267
- warp/fem/space/grid_3d_function_space.py +167 -306
- warp/fem/space/hexmesh_function_space.py +253 -352
- warp/fem/space/nanogrid_function_space.py +202 -0
- warp/fem/space/partition.py +350 -350
- warp/fem/space/quadmesh_2d_function_space.py +261 -369
- warp/fem/space/restriction.py +161 -160
- warp/fem/space/shape/__init__.py +90 -15
- warp/fem/space/shape/cube_shape_function.py +728 -738
- warp/fem/space/shape/shape_function.py +102 -103
- warp/fem/space/shape/square_shape_function.py +611 -611
- warp/fem/space/shape/tet_shape_function.py +565 -567
- warp/fem/space/shape/triangle_shape_function.py +429 -429
- warp/fem/space/tetmesh_function_space.py +224 -292
- warp/fem/space/topology.py +297 -295
- warp/fem/space/trimesh_2d_function_space.py +153 -221
- warp/fem/types.py +77 -77
- warp/fem/utils.py +495 -495
- warp/jax.py +166 -141
- warp/jax_experimental.py +341 -339
- warp/native/array.h +1081 -1025
- warp/native/builtin.h +1603 -1560
- warp/native/bvh.cpp +402 -398
- warp/native/bvh.cu +533 -525
- warp/native/bvh.h +430 -429
- warp/native/clang/clang.cpp +496 -464
- warp/native/crt.cpp +42 -32
- warp/native/crt.h +352 -335
- warp/native/cuda_crt.h +1049 -1049
- warp/native/cuda_util.cpp +549 -540
- warp/native/cuda_util.h +288 -203
- warp/native/cutlass_gemm.cpp +34 -34
- warp/native/cutlass_gemm.cu +372 -372
- warp/native/error.cpp +66 -66
- warp/native/error.h +27 -27
- warp/native/exports.h +187 -0
- warp/native/fabric.h +228 -228
- warp/native/hashgrid.cpp +301 -278
- warp/native/hashgrid.cu +78 -77
- warp/native/hashgrid.h +227 -227
- warp/native/initializer_array.h +32 -32
- warp/native/intersect.h +1204 -1204
- warp/native/intersect_adj.h +365 -365
- warp/native/intersect_tri.h +322 -322
- warp/native/marching.cpp +2 -2
- warp/native/marching.cu +497 -497
- warp/native/marching.h +2 -2
- warp/native/mat.h +1545 -1498
- warp/native/matnn.h +333 -333
- warp/native/mesh.cpp +203 -203
- warp/native/mesh.cu +292 -293
- warp/native/mesh.h +1887 -1887
- warp/native/nanovdb/GridHandle.h +366 -0
- warp/native/nanovdb/HostBuffer.h +590 -0
- warp/native/nanovdb/NanoVDB.h +6624 -4782
- warp/native/nanovdb/PNanoVDB.h +3390 -2553
- warp/native/noise.h +850 -850
- warp/native/quat.h +1112 -1085
- warp/native/rand.h +303 -299
- warp/native/range.h +108 -108
- warp/native/reduce.cpp +156 -156
- warp/native/reduce.cu +348 -348
- warp/native/runlength_encode.cpp +61 -61
- warp/native/runlength_encode.cu +46 -46
- warp/native/scan.cpp +30 -30
- warp/native/scan.cu +36 -36
- warp/native/scan.h +7 -7
- warp/native/solid_angle.h +442 -442
- warp/native/sort.cpp +94 -94
- warp/native/sort.cu +97 -97
- warp/native/sort.h +14 -14
- warp/native/sparse.cpp +337 -337
- warp/native/sparse.cu +544 -544
- warp/native/spatial.h +630 -630
- warp/native/svd.h +562 -562
- warp/native/temp_buffer.h +30 -30
- warp/native/vec.h +1177 -1133
- warp/native/volume.cpp +529 -297
- warp/native/volume.cu +58 -32
- warp/native/volume.h +960 -538
- warp/native/volume_builder.cu +446 -425
- warp/native/volume_builder.h +34 -19
- warp/native/volume_impl.h +61 -0
- warp/native/warp.cpp +1057 -1052
- warp/native/warp.cu +2949 -2828
- warp/native/warp.h +321 -305
- warp/optim/__init__.py +9 -9
- warp/optim/adam.py +120 -120
- warp/optim/linear.py +1104 -939
- warp/optim/sgd.py +104 -92
- warp/render/__init__.py +10 -10
- warp/render/render_opengl.py +3356 -3204
- warp/render/render_usd.py +768 -749
- warp/render/utils.py +152 -150
- warp/sim/__init__.py +52 -59
- warp/sim/articulation.py +685 -685
- warp/sim/collide.py +1594 -1590
- warp/sim/import_mjcf.py +489 -481
- warp/sim/import_snu.py +220 -221
- warp/sim/import_urdf.py +536 -516
- warp/sim/import_usd.py +887 -881
- warp/sim/inertia.py +316 -317
- warp/sim/integrator.py +234 -233
- warp/sim/integrator_euler.py +1956 -1956
- warp/sim/integrator_featherstone.py +1917 -1991
- warp/sim/integrator_xpbd.py +3288 -3312
- warp/sim/model.py +4473 -4314
- warp/sim/particles.py +113 -112
- warp/sim/render.py +417 -403
- warp/sim/utils.py +413 -410
- warp/sparse.py +1289 -1227
- warp/stubs.py +2192 -2469
- warp/tape.py +1162 -225
- warp/tests/__init__.py +1 -1
- warp/tests/__main__.py +4 -4
- warp/tests/assets/test_index_grid.nvdb +0 -0
- warp/tests/assets/torus.usda +105 -105
- warp/tests/aux_test_class_kernel.py +26 -26
- warp/tests/aux_test_compile_consts_dummy.py +10 -10
- warp/tests/aux_test_conditional_unequal_types_kernels.py +21 -21
- warp/tests/aux_test_dependent.py +20 -22
- warp/tests/aux_test_grad_customs.py +21 -23
- warp/tests/aux_test_reference.py +9 -11
- warp/tests/aux_test_reference_reference.py +8 -10
- warp/tests/aux_test_square.py +15 -17
- warp/tests/aux_test_unresolved_func.py +14 -14
- warp/tests/aux_test_unresolved_symbol.py +14 -14
- warp/tests/disabled_kinematics.py +237 -239
- warp/tests/run_coverage_serial.py +31 -31
- warp/tests/test_adam.py +155 -157
- warp/tests/test_arithmetic.py +1088 -1124
- warp/tests/test_array.py +2415 -2326
- warp/tests/test_array_reduce.py +148 -150
- warp/tests/test_async.py +666 -656
- warp/tests/test_atomic.py +139 -141
- warp/tests/test_bool.py +212 -149
- warp/tests/test_builtins_resolution.py +1290 -1292
- warp/tests/test_bvh.py +162 -171
- warp/tests/test_closest_point_edge_edge.py +227 -228
- warp/tests/test_codegen.py +562 -553
- warp/tests/test_compile_consts.py +217 -101
- warp/tests/test_conditional.py +244 -246
- warp/tests/test_copy.py +230 -215
- warp/tests/test_ctypes.py +630 -632
- warp/tests/test_dense.py +65 -67
- warp/tests/test_devices.py +89 -98
- warp/tests/test_dlpack.py +528 -529
- warp/tests/test_examples.py +403 -378
- warp/tests/test_fabricarray.py +952 -955
- warp/tests/test_fast_math.py +60 -54
- warp/tests/test_fem.py +1298 -1278
- warp/tests/test_fp16.py +128 -130
- warp/tests/test_func.py +336 -337
- warp/tests/test_generics.py +596 -571
- warp/tests/test_grad.py +885 -640
- warp/tests/test_grad_customs.py +331 -336
- warp/tests/test_hash_grid.py +208 -164
- warp/tests/test_import.py +37 -39
- warp/tests/test_indexedarray.py +1132 -1134
- warp/tests/test_intersect.py +65 -67
- warp/tests/test_jax.py +305 -307
- warp/tests/test_large.py +169 -164
- warp/tests/test_launch.py +352 -354
- warp/tests/test_lerp.py +217 -261
- warp/tests/test_linear_solvers.py +189 -171
- warp/tests/test_lvalue.py +419 -493
- warp/tests/test_marching_cubes.py +63 -65
- warp/tests/test_mat.py +1799 -1827
- warp/tests/test_mat_lite.py +113 -115
- warp/tests/test_mat_scalar_ops.py +2905 -2889
- warp/tests/test_math.py +124 -193
- warp/tests/test_matmul.py +498 -499
- warp/tests/test_matmul_lite.py +408 -410
- warp/tests/test_mempool.py +186 -190
- warp/tests/test_mesh.py +281 -324
- warp/tests/test_mesh_query_aabb.py +226 -241
- warp/tests/test_mesh_query_point.py +690 -702
- warp/tests/test_mesh_query_ray.py +290 -303
- warp/tests/test_mlp.py +274 -276
- warp/tests/test_model.py +108 -110
- warp/tests/test_module_hashing.py +111 -0
- warp/tests/test_modules_lite.py +36 -39
- warp/tests/test_multigpu.py +161 -163
- warp/tests/test_noise.py +244 -248
- warp/tests/test_operators.py +248 -250
- warp/tests/test_options.py +121 -125
- warp/tests/test_peer.py +131 -137
- warp/tests/test_pinned.py +76 -78
- warp/tests/test_print.py +52 -54
- warp/tests/test_quat.py +2084 -2086
- warp/tests/test_rand.py +324 -288
- warp/tests/test_reload.py +207 -217
- warp/tests/test_rounding.py +177 -179
- warp/tests/test_runlength_encode.py +188 -190
- warp/tests/test_sim_grad.py +241 -0
- warp/tests/test_sim_kinematics.py +89 -97
- warp/tests/test_smoothstep.py +166 -168
- warp/tests/test_snippet.py +303 -266
- warp/tests/test_sparse.py +466 -460
- warp/tests/test_spatial.py +2146 -2148
- warp/tests/test_special_values.py +362 -0
- warp/tests/test_streams.py +484 -473
- warp/tests/test_struct.py +708 -675
- warp/tests/test_tape.py +171 -148
- warp/tests/test_torch.py +741 -743
- warp/tests/test_transient_module.py +85 -87
- warp/tests/test_types.py +554 -659
- warp/tests/test_utils.py +488 -499
- warp/tests/test_vec.py +1262 -1268
- warp/tests/test_vec_lite.py +71 -73
- warp/tests/test_vec_scalar_ops.py +2097 -2099
- warp/tests/test_verify_fp.py +92 -94
- warp/tests/test_volume.py +961 -736
- warp/tests/test_volume_write.py +338 -265
- warp/tests/unittest_serial.py +38 -37
- warp/tests/unittest_suites.py +367 -359
- warp/tests/unittest_utils.py +434 -578
- warp/tests/unused_test_misc.py +69 -71
- warp/tests/walkthrough_debug.py +85 -85
- warp/thirdparty/appdirs.py +598 -598
- warp/thirdparty/dlpack.py +143 -143
- warp/thirdparty/unittest_parallel.py +563 -561
- warp/torch.py +321 -295
- warp/types.py +4941 -4450
- warp/utils.py +1008 -821
- {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/LICENSE.md +126 -126
- {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/METADATA +365 -400
- warp_lang-1.2.0.dist-info/RECORD +359 -0
- warp/examples/assets/cube.usda +0 -42
- warp/examples/assets/sphere.usda +0 -56
- warp/examples/assets/torus.usda +0 -105
- warp/examples/fem/example_convection_diffusion_dg0.py +0 -194
- warp/native/nanovdb/PNanoVDBWrite.h +0 -295
- warp_lang-1.0.2.dist-info/RECORD +0 -352
- {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/WHEEL +0 -0
- {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/top_level.txt +0 -0
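
Version metadata lives in warp/config.py, one of the files changed above, so which side of this diff is installed locally can be confirmed from Python. A minimal sketch, assuming a standard pip install of warp-lang:

import warp as wp

# warp.config carries the package version string; after upgrading
# from 1.0.2 this is expected to print "1.2.0".
print(wp.config.version)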
warp/native/mat.h
CHANGED
|
@@ -1,1498 +1,1545 @@
|
|
|
1
|
-
/** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
|
|
2
|
-
* NVIDIA CORPORATION and its licensors retain all intellectual property
|
|
3
|
-
* and proprietary rights in and to this software, related documentation
|
|
4
|
-
* and any modifications thereto. Any use, reproduction, disclosure or
|
|
5
|
-
* distribution of this software and related documentation without an express
|
|
6
|
-
* license agreement from NVIDIA CORPORATION is strictly prohibited.
|
|
7
|
-
*/
|
|
8
|
-
|
|
9
|
-
#pragma once
|
|
10
|
-
|
|
11
|
-
#include "initializer_array.h"
|
|
12
|
-
|
|
13
|
-
namespace wp
|
|
14
|
-
{
|
|
15
|
-
|
|
16
|
-
//----------------------------------------------------------
|
|
17
|
-
// mat
|
|
18
|
-
template<typename T>
|
|
19
|
-
struct quat_t;
|
|
20
|
-
|
|
21
|
-
template<unsigned Rows, unsigned Cols, typename Type>
|
|
22
|
-
struct mat_t
|
|
23
|
-
{
|
|
24
|
-
inline CUDA_CALLABLE mat_t()
|
|
25
|
-
: data()
|
|
26
|
-
{}
|
|
27
|
-
|
|
28
|
-
inline CUDA_CALLABLE mat_t(Type s)
|
|
29
|
-
{
|
|
30
|
-
for (unsigned i=0; i < Rows; ++i)
|
|
31
|
-
for (unsigned j=0; j < Cols; ++j)
|
|
32
|
-
data[i][j] = s;
|
|
33
|
-
}
|
|
34
|
-
|
|
35
|
-
template <typename OtherType>
|
|
36
|
-
inline explicit CUDA_CALLABLE mat_t(const mat_t<Rows, Cols, OtherType>& other)
|
|
37
|
-
{
|
|
38
|
-
for (unsigned i=0; i < Rows; ++i)
|
|
39
|
-
for (unsigned j=0; j < Cols; ++j)
|
|
40
|
-
data[i][j] = other.data[i][j];
|
|
41
|
-
}
|
|
42
|
-
|
|
43
|
-
inline CUDA_CALLABLE mat_t(vec_t<2,Type> c0, vec_t<2,Type> c1)
|
|
44
|
-
{
|
|
45
|
-
data[0][0] = c0[0];
|
|
46
|
-
data[1][0] = c0[1];
|
|
47
|
-
|
|
48
|
-
data[0][1] = c1[0];
|
|
49
|
-
data[1][1] = c1[1];
|
|
50
|
-
}
|
|
51
|
-
|
|
52
|
-
inline CUDA_CALLABLE mat_t(vec_t<3,Type> c0, vec_t<3,Type> c1, vec_t<3,Type> c2)
|
|
53
|
-
{
|
|
54
|
-
data[0][0] = c0[0];
|
|
55
|
-
data[1][0] = c0[1];
|
|
56
|
-
data[2][0] = c0[2];
|
|
57
|
-
|
|
58
|
-
data[0][1] = c1[0];
|
|
59
|
-
data[1][1] = c1[1];
|
|
60
|
-
data[2][1] = c1[2];
|
|
61
|
-
|
|
62
|
-
data[0][2] = c2[0];
|
|
63
|
-
data[1][2] = c2[1];
|
|
64
|
-
data[2][2] = c2[2];
|
|
65
|
-
}
|
|
66
|
-
|
|
67
|
-
inline CUDA_CALLABLE mat_t(vec_t<4,Type> c0, vec_t<4,Type> c1, vec_t<4,Type> c2, vec_t<4,Type> c3)
|
|
68
|
-
{
|
|
69
|
-
data[0][0] = c0[0];
|
|
70
|
-
data[1][0] = c0[1];
|
|
71
|
-
data[2][0] = c0[2];
|
|
72
|
-
data[3][0] = c0[3];
|
|
73
|
-
|
|
74
|
-
data[0][1] = c1[0];
|
|
75
|
-
data[1][1] = c1[1];
|
|
76
|
-
data[2][1] = c1[2];
|
|
77
|
-
data[3][1] = c1[3];
|
|
78
|
-
|
|
79
|
-
data[0][2] = c2[0];
|
|
80
|
-
data[1][2] = c2[1];
|
|
81
|
-
data[2][2] = c2[2];
|
|
82
|
-
data[3][2] = c2[3];
|
|
83
|
-
|
|
84
|
-
data[0][3] = c3[0];
|
|
85
|
-
data[1][3] = c3[1];
|
|
86
|
-
data[2][3] = c3[2];
|
|
87
|
-
data[3][3] = c3[3];
|
|
88
|
-
}
|
|
89
|
-
|
|
90
|
-
inline CUDA_CALLABLE mat_t(Type m00, Type m01, Type m10, Type m11)
|
|
91
|
-
{
|
|
92
|
-
data[0][0] = m00;
|
|
93
|
-
data[1][0] = m10;
|
|
94
|
-
data[0][1] = m01;
|
|
95
|
-
data[1][1] = m11;
|
|
96
|
-
}
|
|
97
|
-
|
|
98
|
-
inline CUDA_CALLABLE mat_t(
|
|
99
|
-
Type m00, Type m01, Type m02,
|
|
100
|
-
Type m10, Type m11, Type m12,
|
|
101
|
-
Type m20, Type m21, Type m22)
|
|
102
|
-
{
|
|
103
|
-
data[0][0] = m00;
|
|
104
|
-
data[1][0] = m10;
|
|
105
|
-
data[2][0] = m20;
|
|
106
|
-
|
|
107
|
-
data[0][1] = m01;
|
|
108
|
-
data[1][1] = m11;
|
|
109
|
-
data[2][1] = m21;
|
|
110
|
-
|
|
111
|
-
data[0][2] = m02;
|
|
112
|
-
data[1][2] = m12;
|
|
113
|
-
data[2][2] = m22;
|
|
114
|
-
}
|
|
115
|
-
|
|
116
|
-
inline CUDA_CALLABLE mat_t(
|
|
117
|
-
Type m00, Type m01, Type m02, Type m03,
|
|
118
|
-
Type m10, Type m11, Type m12, Type m13,
|
|
119
|
-
Type m20, Type m21, Type m22, Type m23,
|
|
120
|
-
Type m30, Type m31, Type m32, Type m33)
|
|
121
|
-
{
|
|
122
|
-
data[0][0] = m00;
|
|
123
|
-
data[1][0] = m10;
|
|
124
|
-
data[2][0] = m20;
|
|
125
|
-
data[3][0] = m30;
|
|
126
|
-
|
|
127
|
-
data[0][1] = m01;
|
|
128
|
-
data[1][1] = m11;
|
|
129
|
-
data[2][1] = m21;
|
|
130
|
-
data[3][1] = m31;
|
|
131
|
-
|
|
132
|
-
data[0][2] = m02;
|
|
133
|
-
data[1][2] = m12;
|
|
134
|
-
data[2][2] = m22;
|
|
135
|
-
data[3][2] = m32;
|
|
136
|
-
|
|
137
|
-
data[0][3] = m03;
|
|
138
|
-
data[1][3] = m13;
|
|
139
|
-
data[2][3] = m23;
|
|
140
|
-
data[3][3] = m33;
|
|
141
|
-
}
|
|
142
|
-
|
|
143
|
-
// implemented in quat.h
|
|
144
|
-
inline CUDA_CALLABLE mat_t(const vec_t<3,Type>& pos, const quat_t<Type>& rot, const vec_t<3,Type>& scale);
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
inline CUDA_CALLABLE mat_t(const initializer_array<Rows * Cols, Type> &l)
|
|
148
|
-
{
|
|
149
|
-
for (unsigned i=0; i < Rows; ++i)
|
|
150
|
-
{
|
|
151
|
-
for (unsigned j=0; j < Cols; ++j)
|
|
152
|
-
{
|
|
153
|
-
data[i][j] = l[i * Cols + j];
|
|
154
|
-
}
|
|
155
|
-
}
|
|
156
|
-
}
|
|
157
|
-
|
|
158
|
-
inline CUDA_CALLABLE mat_t(const initializer_array<Cols, vec_t<Rows,Type> > &l)
|
|
159
|
-
{
|
|
160
|
-
for (unsigned j=0; j < Cols; ++j)
|
|
161
|
-
{
|
|
162
|
-
for (unsigned i=0; i < Rows; ++i)
|
|
163
|
-
{
|
|
164
|
-
data[i][j] = l[j][i];
|
|
165
|
-
}
|
|
166
|
-
}
|
|
167
|
-
}
|
|
168
|
-
|
|
169
|
-
CUDA_CALLABLE vec_t<Cols,Type> get_row(int index) const
|
|
170
|
-
{
|
|
171
|
-
return (vec_t<Cols,Type>&)data[index];
|
|
172
|
-
}
|
|
173
|
-
|
|
174
|
-
CUDA_CALLABLE void set_row(int index, const vec_t<Cols,Type>& v)
|
|
175
|
-
{
|
|
176
|
-
(vec_t<Cols,Type>&)data[index] = v;
|
|
177
|
-
}
|
|
178
|
-
|
|
179
|
-
CUDA_CALLABLE vec_t<Rows,Type> get_col(int index) const
|
|
180
|
-
{
|
|
181
|
-
vec_t<Rows,Type> ret;
|
|
182
|
-
for( unsigned i=0;i < Rows; ++i )
|
|
183
|
-
{
|
|
184
|
-
ret[i] = data[i][index];
|
|
185
|
-
}
|
|
186
|
-
return ret;
|
|
187
|
-
}
|
|
188
|
-
|
|
189
|
-
CUDA_CALLABLE void set_col(int index, const vec_t<Rows,Type>& v)
|
|
190
|
-
{
|
|
191
|
-
for( unsigned i=0;i < Rows; ++i )
|
|
192
|
-
{
|
|
193
|
-
data[i][index] = v[i];
|
|
194
|
-
}
|
|
195
|
-
}
|
|
196
|
-
|
|
197
|
-
// row major storage assumed to be compatible with PyTorch
|
|
198
|
-
Type data[Rows][Cols];
|
|
199
|
-
};
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
template<unsigned Rows, typename Type>
|
|
203
|
-
inline CUDA_CALLABLE mat_t<Rows, Rows, Type> identity()
|
|
204
|
-
{
|
|
205
|
-
mat_t<Rows, Rows, Type> m;
|
|
206
|
-
for( unsigned i=0; i < Rows; ++i )
|
|
207
|
-
{
|
|
208
|
-
m.data[i][i] = Type(1);
|
|
209
|
-
}
|
|
210
|
-
return m;
|
|
211
|
-
}
|
|
212
|
-
|
|
213
|
-
template<unsigned Rows, unsigned Cols, typename Type>
|
|
214
|
-
inline CUDA_CALLABLE bool operator==(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b)
|
|
215
|
-
{
|
|
216
|
-
for (unsigned i=0; i < Rows; ++i)
|
|
217
|
-
for (unsigned j=0; j < Cols; ++j)
|
|
218
|
-
if (a.data[i][j] != b.data[i][j])
|
|
219
|
-
return false;
|
|
220
|
-
|
|
221
|
-
return true;
|
|
222
|
-
}
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
// negation:
|
|
226
|
-
template<unsigned Rows, unsigned Cols, typename Type>
|
|
227
|
-
inline CUDA_CALLABLE mat_t<Rows,Cols,Type> operator - (mat_t<Rows,Cols,Type> a)
|
|
228
|
-
{
|
|
229
|
-
// NB: this constructor will initialize all ret's components to 0, which is
|
|
230
|
-
// unnecessary...
|
|
231
|
-
mat_t<Rows,Cols,Type> ret;
|
|
232
|
-
for (unsigned i=0; i < Rows; ++i)
|
|
233
|
-
for (unsigned j=0; j < Cols; ++j)
|
|
234
|
-
ret.data[i][j] = -a.data[i][j];
|
|
235
|
-
|
|
236
|
-
// Wonder if this does a load of copying when it returns... hopefully not as it's inlined?
|
|
237
|
-
return ret;
|
|
238
|
-
}
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
template<unsigned Rows, unsigned Cols, typename Type>
|
|
242
|
-
CUDA_CALLABLE inline mat_t<Rows,Cols,Type> pos(const mat_t<Rows,Cols,Type>& x)
|
|
243
|
-
{
|
|
244
|
-
return x;
|
|
245
|
-
}
|
|
246
|
-
|
|
247
|
-
template<unsigned Rows, unsigned Cols, typename Type>
|
|
248
|
-
CUDA_CALLABLE inline void adj_pos(const mat_t<Rows,Cols,Type>& x, mat_t<Rows,Cols,Type>& adj_x, const mat_t<Rows,Cols,Type>& adj_ret)
|
|
249
|
-
{
|
|
250
|
-
adj_x += adj_ret;
|
|
251
|
-
}
|
|
252
|
-
|
|
253
|
-
template<unsigned Rows, unsigned Cols, typename Type>
|
|
254
|
-
CUDA_CALLABLE inline mat_t<Rows,Cols,Type> neg(const mat_t<Rows,Cols,Type>& x)
|
|
255
|
-
{
|
|
256
|
-
return -x;
|
|
257
|
-
}
|
|
258
|
-
|
|
259
|
-
template<unsigned Rows, unsigned Cols, typename Type>
|
|
260
|
-
CUDA_CALLABLE inline void adj_neg(const mat_t<Rows,Cols,Type>& x, mat_t<Rows,Cols,Type>& adj_x, const mat_t<Rows,Cols,Type>& adj_ret)
|
|
261
|
-
{
|
|
262
|
-
adj_x -= adj_ret;
|
|
263
|
-
}
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
template<unsigned Rows, unsigned Cols, typename Type>
|
|
267
|
-
inline CUDA_CALLABLE mat_t<Rows,Cols,Type> atomic_add(mat_t<Rows,Cols,Type> * addr, mat_t<Rows,Cols,Type> value)
|
|
268
|
-
{
|
|
269
|
-
mat_t<Rows,Cols,Type> m;
|
|
270
|
-
|
|
271
|
-
for (unsigned i=0; i < Rows; ++i)
|
|
272
|
-
for (unsigned j=0; j < Cols; ++j)
|
|
273
|
-
m.data[i][j] = atomic_add(&addr->data[i][j], value.data[i][j]);
|
|
274
|
-
|
|
275
|
-
return m;
|
|
276
|
-
}
|
|
277
|
-
|
|
278
|
-
template<unsigned Rows, unsigned Cols, typename Type>
|
|
279
|
-
inline CUDA_CALLABLE mat_t<Rows,Cols,Type> atomic_min(mat_t<Rows,Cols,Type> * addr, mat_t<Rows,Cols,Type> value)
|
|
280
|
-
{
|
|
281
|
-
mat_t<Rows,Cols,Type> m;
|
|
282
|
-
|
|
283
|
-
for (unsigned i=0; i < Rows; ++i)
|
|
284
|
-
for (unsigned j=0; j < Cols; ++j)
|
|
285
|
-
m.data[i][j] = atomic_min(&addr->data[i][j], value.data[i][j]);
|
|
286
|
-
|
|
287
|
-
return m;
|
|
288
|
-
}
|
|
289
|
-
|
|
290
|
-
template<unsigned Rows, unsigned Cols, typename Type>
|
|
291
|
-
inline CUDA_CALLABLE mat_t<Rows,Cols,Type> atomic_max(mat_t<Rows,Cols,Type> * addr, mat_t<Rows,Cols,Type> value)
|
|
292
|
-
{
|
|
293
|
-
mat_t<Rows,Cols,Type> m;
|
|
294
|
-
|
|
295
|
-
for (unsigned i=0; i < Rows; ++i)
|
|
296
|
-
for (unsigned j=0; j < Cols; ++j)
|
|
297
|
-
m.data[i][j] = atomic_max(&addr->data[i][j], value.data[i][j]);
|
|
298
|
-
|
|
299
|
-
return m;
|
|
300
|
-
}
|
|
301
|
-
|
|
302
|
-
template<unsigned Rows, unsigned Cols, typename Type>
|
|
303
|
-
inline CUDA_CALLABLE void adj_atomic_minmax(
|
|
304
|
-
mat_t<Rows,Cols,Type> *addr,
|
|
305
|
-
mat_t<Rows,Cols,Type> *adj_addr,
|
|
306
|
-
const mat_t<Rows,Cols,Type> &value,
|
|
307
|
-
mat_t<Rows,Cols,Type> &adj_value)
|
|
308
|
-
{
|
|
309
|
-
for (unsigned i=0; i < Rows; ++i)
|
|
310
|
-
for (unsigned j=0; j < Cols; ++j)
|
|
311
|
-
adj_atomic_minmax(&addr->data[i][j], &adj_addr->data[i][j], value.data[i][j], adj_value.data[i][j]);
|
|
312
|
-
}
|
|
313
|
-
|
|
314
|
-
template<unsigned Rows, unsigned Cols, typename Type>
|
|
315
|
-
inline CUDA_CALLABLE vec_t<Cols,Type> extract(const mat_t<Rows,Cols,Type>& m, int row)
|
|
316
|
-
{
|
|
317
|
-
vec_t<Cols,Type> ret;
|
|
318
|
-
for(unsigned i=0; i < Cols; ++i)
|
|
319
|
-
{
|
|
320
|
-
ret.c[i] = m.data[row][i];
|
|
321
|
-
}
|
|
322
|
-
return ret;
|
|
323
|
-
}
|
|
324
|
-
|
|
325
|
-
template<unsigned Rows, unsigned Cols, typename Type>
|
|
326
|
-
inline CUDA_CALLABLE Type extract(const mat_t<Rows,Cols,Type>& m, int row, int col)
|
|
327
|
-
{
|
|
328
|
-
#ifndef NDEBUG
|
|
329
|
-
if (row < 0 || row >= Rows)
|
|
330
|
-
{
|
|
331
|
-
printf("mat row index %d out of bounds at %s %d\n", row, __FILE__, __LINE__);
|
|
332
|
-
assert(0);
|
|
333
|
-
}
|
|
334
|
-
if (col < 0 || col >= Cols)
|
|
335
|
-
{
|
|
336
|
-
printf("mat col index %d out of bounds at %s %d\n", col, __FILE__, __LINE__);
|
|
337
|
-
assert(0);
|
|
338
|
-
}
|
|
339
|
-
#endif
|
|
340
|
-
return m.data[row][col];
|
|
341
|
-
}
|
|
342
|
-
|
|
343
|
-
template<unsigned Rows, unsigned Cols, typename Type>
|
|
344
|
-
inline CUDA_CALLABLE vec_t<Cols, Type>* index(mat_t<Rows,Cols,Type>& m, int row)
|
|
345
|
-
{
|
|
346
|
-
#ifndef NDEBUG
|
|
347
|
-
if (row < 0 || row >= Rows)
|
|
348
|
-
{
|
|
349
|
-
printf("mat row index %d out of bounds at %s %d\n", row, __FILE__, __LINE__);
|
|
350
|
-
assert(0);
|
|
351
|
-
}
|
|
352
|
-
#endif
|
|
353
|
-
|
|
354
|
-
return reinterpret_cast<vec_t<Cols, Type>*>(&m.data[row]);
|
|
355
|
-
}
|
|
356
|
-
|
|
357
|
-
template<unsigned Rows, unsigned Cols, typename Type>
|
|
358
|
-
inline CUDA_CALLABLE Type* index(mat_t<Rows,Cols,Type>& m, int row, int col)
|
|
359
|
-
{
|
|
360
|
-
#ifndef NDEBUG
|
|
361
|
-
if (row < 0 || row >= Rows)
|
|
362
|
-
{
|
|
363
|
-
printf("mat row index %d out of bounds at %s %d\n", row, __FILE__, __LINE__);
|
|
364
|
-
assert(0);
|
|
365
|
-
}
|
|
366
|
-
if (col < 0 || col >= Cols)
|
|
367
|
-
{
|
|
368
|
-
printf("mat col index %d out of bounds at %s %d\n", col, __FILE__, __LINE__);
|
|
369
|
-
assert(0);
|
|
370
|
-
}
|
|
371
|
-
#endif
|
|
372
|
-
|
|
373
|
-
return &m.data[row][col];
|
|
374
|
-
}
|
|
375
|
-
|
|
376
|
-
template<unsigned Rows, unsigned Cols, typename Type>
|
|
377
|
-
inline CUDA_CALLABLE void adj_index(const mat_t<Rows,Cols,Type>& m, int row,
|
|
378
|
-
const mat_t<Rows,Cols,Type>& adj_m, int adj_row, const vec_t<Cols, Type>& adj_value)
|
|
379
|
-
{
|
|
380
|
-
// nop
|
|
381
|
-
}
|
|
382
|
-
|
|
383
|
-
template<unsigned Rows, unsigned Cols, typename Type>
|
|
384
|
-
inline CUDA_CALLABLE void adj_index(const mat_t<Rows,Cols,Type>& m, int row, int col,
|
|
385
|
-
const mat_t<Rows,Cols,Type>& adj_m, int adj_row, int adj_col, Type adj_value)
|
|
386
|
-
{
|
|
387
|
-
// nop
|
|
388
|
-
}
|
|
389
|
-
|
|
390
|
-
template<unsigned Rows, unsigned Cols, typename Type>
|
|
391
|
-
inline bool CUDA_CALLABLE isfinite(const mat_t<Rows,Cols,Type>& m)
|
|
392
|
-
{
|
|
393
|
-
for (unsigned i=0; i < Rows; ++i)
|
|
394
|
-
for (unsigned j=0; j < Cols; ++j)
|
|
395
|
-
if (!isfinite(m.data[i][j]))
|
|
396
|
-
return false;
|
|
397
|
-
return true;
|
|
398
|
-
}
|
|
399
|
-
|
|
400
|
-
template<unsigned Rows, unsigned Cols, typename Type>
|
|
401
|
-
inline CUDA_CALLABLE
|
|
402
|
-
{
|
|
403
|
-
|
|
404
|
-
|
|
405
|
-
|
|
406
|
-
|
|
407
|
-
|
|
408
|
-
|
|
409
|
-
|
|
410
|
-
|
|
411
|
-
|
|
412
|
-
return
|
|
413
|
-
}
|
|
414
|
-
|
|
415
|
-
template<unsigned Rows, unsigned Cols, typename Type>
|
|
416
|
-
inline CUDA_CALLABLE
|
|
417
|
-
{
|
|
418
|
-
|
|
419
|
-
|
|
420
|
-
|
|
421
|
-
|
|
422
|
-
|
|
423
|
-
|
|
424
|
-
|
|
425
|
-
|
|
426
|
-
|
|
427
|
-
return
|
|
428
|
-
}
|
|
429
|
-
|
|
430
|
-
template<unsigned Rows, unsigned Cols, typename Type>
|
|
431
|
-
inline CUDA_CALLABLE mat_t<Rows,Cols,Type
|
|
432
|
-
{
|
|
433
|
-
|
|
434
|
-
|
|
435
|
-
|
|
436
|
-
|
|
437
|
-
|
|
438
|
-
|
|
439
|
-
|
|
440
|
-
|
|
441
|
-
|
|
442
|
-
|
|
443
|
-
|
|
444
|
-
|
|
445
|
-
|
|
446
|
-
|
|
447
|
-
|
|
448
|
-
|
|
449
|
-
|
|
450
|
-
|
|
451
|
-
|
|
452
|
-
|
|
453
|
-
|
|
454
|
-
|
|
455
|
-
|
|
456
|
-
|
|
457
|
-
|
|
458
|
-
|
|
459
|
-
|
|
460
|
-
|
|
461
|
-
|
|
462
|
-
|
|
463
|
-
|
|
464
|
-
|
|
465
|
-
|
|
466
|
-
|
|
467
|
-
|
|
468
|
-
|
|
469
|
-
|
|
470
|
-
|
|
471
|
-
|
|
472
|
-
|
|
473
|
-
|
|
474
|
-
|
|
475
|
-
|
|
476
|
-
|
|
477
|
-
|
|
478
|
-
|
|
479
|
-
|
|
480
|
-
|
|
481
|
-
|
|
482
|
-
|
|
483
|
-
|
|
484
|
-
|
|
485
|
-
|
|
486
|
-
|
|
487
|
-
|
|
488
|
-
|
|
489
|
-
|
|
490
|
-
|
|
491
|
-
|
|
492
|
-
|
|
493
|
-
|
|
494
|
-
|
|
495
|
-
|
|
496
|
-
|
|
497
|
-
|
|
498
|
-
|
|
499
|
-
|
|
500
|
-
|
|
501
|
-
|
|
502
|
-
|
|
503
|
-
|
|
504
|
-
|
|
505
|
-
|
|
506
|
-
|
|
507
|
-
|
|
508
|
-
|
|
509
|
-
|
|
510
|
-
|
|
511
|
-
|
|
512
|
-
|
|
513
|
-
return
|
|
514
|
-
}
|
|
515
|
-
|
|
516
|
-
|
|
517
|
-
|
|
518
|
-
|
|
519
|
-
|
|
520
|
-
|
|
521
|
-
|
|
522
|
-
|
|
523
|
-
|
|
524
|
-
|
|
525
|
-
|
|
526
|
-
|
|
527
|
-
|
|
528
|
-
|
|
529
|
-
|
|
530
|
-
|
|
531
|
-
|
|
532
|
-
|
|
533
|
-
|
|
534
|
-
|
|
535
|
-
|
|
536
|
-
|
|
537
|
-
|
|
538
|
-
|
|
539
|
-
|
|
540
|
-
|
|
541
|
-
|
|
542
|
-
|
|
543
|
-
|
|
544
|
-
|
|
545
|
-
|
|
546
|
-
|
|
547
|
-
}
|
|
548
|
-
|
|
549
|
-
|
|
550
|
-
|
|
551
|
-
|
|
552
|
-
|
|
553
|
-
|
|
554
|
-
|
|
555
|
-
|
|
556
|
-
|
|
557
|
-
|
|
558
|
-
{
|
|
559
|
-
|
|
560
|
-
|
|
561
|
-
|
|
562
|
-
|
|
563
|
-
|
|
564
|
-
|
|
565
|
-
|
|
566
|
-
|
|
567
|
-
|
|
568
|
-
|
|
569
|
-
|
|
570
|
-
|
|
571
|
-
|
|
572
|
-
|
|
573
|
-
|
|
574
|
-
|
|
575
|
-
|
|
576
|
-
|
|
577
|
-
|
|
578
|
-
|
|
579
|
-
|
|
580
|
-
|
|
581
|
-
return
|
|
582
|
-
|
|
583
|
-
|
|
584
|
-
|
|
585
|
-
|
|
586
|
-
|
|
587
|
-
)
|
|
588
|
-
|
|
589
|
-
|
|
590
|
-
|
|
591
|
-
|
|
592
|
-
|
|
593
|
-
|
|
594
|
-
|
|
595
|
-
|
|
596
|
-
|
|
597
|
-
|
|
598
|
-
|
|
599
|
-
|
|
600
|
-
|
|
601
|
-
|
|
602
|
-
|
|
603
|
-
|
|
604
|
-
|
|
605
|
-
|
|
606
|
-
|
|
607
|
-
|
|
608
|
-
|
|
609
|
-
|
|
610
|
-
|
|
611
|
-
|
|
612
|
-
|
|
613
|
-
|
|
614
|
-
|
|
615
|
-
|
|
616
|
-
|
|
617
|
-
|
|
618
|
-
|
|
619
|
-
|
|
620
|
-
|
|
621
|
-
|
|
622
|
-
|
|
623
|
-
|
|
624
|
-
|
|
625
|
-
|
|
626
|
-
|
|
627
|
-
|
|
628
|
-
|
|
629
|
-
|
|
630
|
-
|
|
631
|
-
|
|
632
|
-
|
|
633
|
-
|
|
634
|
-
|
|
635
|
-
|
|
636
|
-
|
|
637
|
-
|
|
638
|
-
|
|
639
|
-
|
|
640
|
-
|
|
641
|
-
|
|
642
|
-
|
|
643
|
-
|
|
644
|
-
|
|
645
|
-
|
|
646
|
-
|
|
647
|
-
|
|
648
|
-
|
|
649
|
-
|
|
650
|
-
|
|
651
|
-
|
|
652
|
-
|
|
653
|
-
|
|
654
|
-
|
|
655
|
-
|
|
656
|
-
|
|
657
|
-
|
|
658
|
-
|
|
659
|
-
|
|
660
|
-
|
|
661
|
-
|
|
662
|
-
|
|
663
|
-
|
|
664
|
-
|
|
665
|
-
|
|
666
|
-
|
|
667
|
-
|
|
668
|
-
|
|
669
|
-
|
|
670
|
-
|
|
671
|
-
|
|
672
|
-
|
|
673
|
-
|
|
674
|
-
|
|
675
|
-
|
|
676
|
-
|
|
677
|
-
|
|
678
|
-
|
|
679
|
-
|
|
680
|
-
|
|
681
|
-
|
|
682
|
-
|
|
683
|
-
|
|
684
|
-
|
|
685
|
-
|
|
686
|
-
|
|
687
|
-
|
|
688
|
-
{
|
|
689
|
-
|
|
690
|
-
|
|
691
|
-
|
|
692
|
-
|
|
693
|
-
|
|
694
|
-
|
|
695
|
-
|
|
696
|
-
|
|
697
|
-
|
|
698
|
-
|
|
699
|
-
|
|
700
|
-
|
|
701
|
-
|
|
702
|
-
|
|
703
|
-
|
|
704
|
-
|
|
705
|
-
|
|
706
|
-
|
|
707
|
-
|
|
708
|
-
|
|
709
|
-
|
|
710
|
-
|
|
711
|
-
|
|
712
|
-
|
|
713
|
-
|
|
714
|
-
|
|
715
|
-
|
|
716
|
-
|
|
717
|
-
|
|
718
|
-
|
|
719
|
-
|
|
720
|
-
|
|
721
|
-
|
|
722
|
-
|
|
723
|
-
|
|
724
|
-
|
|
725
|
-
|
|
726
|
-
|
|
727
|
-
|
|
728
|
-
|
|
729
|
-
|
|
730
|
-
|
|
731
|
-
|
|
732
|
-
|
|
733
|
-
|
|
734
|
-
|
|
735
|
-
|
|
736
|
-
|
|
737
|
-
|
|
738
|
-
|
|
739
|
-
|
|
740
|
-
|
|
741
|
-
|
|
742
|
-
|
|
743
|
-
|
|
744
|
-
|
|
745
|
-
|
|
746
|
-
|
|
747
|
-
|
|
748
|
-
|
|
749
|
-
|
|
750
|
-
|
|
751
|
-
|
|
752
|
-
|
|
753
|
-
|
|
754
|
-
|
|
755
|
-
|
|
756
|
-
|
|
757
|
-
|
|
758
|
-
|
|
759
|
-
|
|
760
|
-
|
|
761
|
-
|
|
762
|
-
|
|
763
|
-
|
|
764
|
-
|
|
765
|
-
|
|
766
|
-
|
|
767
|
-
|
|
768
|
-
|
|
769
|
-
|
|
770
|
-
|
|
771
|
-
|
|
772
|
-
|
|
773
|
-
|
|
774
|
-
|
|
775
|
-
|
|
776
|
-
|
|
777
|
-
|
|
778
|
-
|
|
779
|
-
|
|
780
|
-
|
|
781
|
-
|
|
782
|
-
|
|
783
|
-
|
|
784
|
-
|
|
785
|
-
|
|
786
|
-
|
|
787
|
-
|
|
788
|
-
|
|
789
|
-
|
|
790
|
-
|
|
791
|
-
|
|
792
|
-
|
|
793
|
-
|
|
794
|
-
|
|
795
|
-
|
|
796
|
-
|
|
797
|
-
|
|
798
|
-
|
|
799
|
-
|
|
800
|
-
|
|
801
|
-
|
|
802
|
-
|
|
803
|
-
|
|
804
|
-
|
|
805
|
-
|
|
806
|
-
|
|
807
|
-
|
|
808
|
-
|
|
809
|
-
|
|
810
|
-
|
|
811
|
-
|
|
812
|
-
|
|
813
|
-
|
|
814
|
-
|
|
815
|
-
|
|
816
|
-
|
|
817
|
-
|
|
818
|
-
|
|
819
|
-
|
|
820
|
-
|
|
821
|
-
|
|
822
|
-
|
|
823
|
-
|
|
824
|
-
|
|
825
|
-
|
|
826
|
-
|
|
827
|
-
|
|
828
|
-
|
|
829
|
-
|
|
830
|
-
|
|
831
|
-
|
|
832
|
-
|
|
833
|
-
|
|
834
|
-
|
|
835
|
-
|
|
836
|
-
|
|
837
|
-
|
|
838
|
-
|
|
839
|
-
|
|
840
|
-
|
|
841
|
-
|
|
842
|
-
|
|
843
|
-
|
|
844
|
-
|
|
845
|
-
|
|
846
|
-
|
|
847
|
-
|
|
848
|
-
|
|
849
|
-
|
|
850
|
-
|
|
851
|
-
|
|
852
|
-
|
|
853
|
-
|
|
854
|
-
|
|
855
|
-
|
|
856
|
-
|
|
857
|
-
|
|
858
|
-
|
|
859
|
-
|
|
860
|
-
|
|
861
|
-
|
|
862
|
-
|
|
863
|
-
|
|
864
|
-
|
|
865
|
-
|
|
866
|
-
|
|
867
|
-
|
|
868
|
-
|
|
869
|
-
|
|
870
|
-
|
|
871
|
-
|
|
872
|
-
|
|
873
|
-
|
|
874
|
-
|
|
875
|
-
|
|
876
|
-
|
|
877
|
-
|
|
878
|
-
|
|
879
|
-
|
|
880
|
-
|
|
881
|
-
|
|
882
|
-
|
|
883
|
-
|
|
884
|
-
|
|
885
|
-
|
|
886
|
-
|
|
887
|
-
|
|
888
|
-
|
|
889
|
-
|
|
890
|
-
|
|
891
|
-
|
|
892
|
-
|
|
893
|
-
|
|
894
|
-
|
|
895
|
-
|
|
896
|
-
|
|
897
|
-
|
|
898
|
-
|
|
899
|
-
|
|
900
|
-
|
|
901
|
-
|
|
902
|
-
|
|
903
|
-
|
|
904
|
-
|
|
905
|
-
|
|
906
|
-
|
|
907
|
-
|
|
908
|
-
|
|
909
|
-
|
|
910
|
-
|
|
911
|
-
|
|
912
|
-
{
|
|
913
|
-
|
|
914
|
-
|
|
915
|
-
|
|
916
|
-
|
|
917
|
-
|
|
918
|
-
|
|
919
|
-
|
|
920
|
-
|
|
921
|
-
|
|
922
|
-
|
|
923
|
-
|
|
924
|
-
|
|
925
|
-
|
|
926
|
-
|
|
927
|
-
|
|
928
|
-
|
|
929
|
-
|
|
930
|
-
|
|
931
|
-
|
|
932
|
-
|
|
933
|
-
|
|
934
|
-
|
|
935
|
-
|
|
936
|
-
|
|
937
|
-
|
|
938
|
-
|
|
939
|
-
|
|
940
|
-
|
|
941
|
-
|
|
942
|
-
|
|
943
|
-
|
|
944
|
-
|
|
945
|
-
|
|
946
|
-
|
|
947
|
-
|
|
948
|
-
|
|
949
|
-
|
|
950
|
-
|
|
951
|
-
|
|
952
|
-
|
|
953
|
-
|
|
954
|
-
|
|
955
|
-
|
|
956
|
-
|
|
957
|
-
|
|
958
|
-
|
|
959
|
-
|
|
960
|
-
|
|
961
|
-
|
|
962
|
-
|
|
963
|
-
|
|
964
|
-
|
|
965
|
-
|
|
966
|
-
|
|
967
|
-
{
|
|
968
|
-
|
|
969
|
-
|
|
970
|
-
|
|
971
|
-
|
|
972
|
-
|
|
973
|
-
}
|
|
974
|
-
|
|
975
|
-
template<unsigned Rows, unsigned Cols, typename Type>
|
|
976
|
-
inline CUDA_CALLABLE void
|
|
977
|
-
{
|
|
978
|
-
|
|
979
|
-
|
|
980
|
-
|
|
981
|
-
|
|
982
|
-
|
|
983
|
-
|
|
984
|
-
|
|
985
|
-
|
|
986
|
-
|
|
987
|
-
|
|
988
|
-
|
|
989
|
-
|
|
990
|
-
|
|
991
|
-
|
|
992
|
-
|
|
993
|
-
|
|
994
|
-
|
|
995
|
-
|
|
996
|
-
|
|
997
|
-
|
|
998
|
-
|
|
999
|
-
|
|
1000
|
-
|
|
1001
|
-
|
|
1002
|
-
|
|
1003
|
-
|
|
1004
|
-
|
|
1005
|
-
|
|
1006
|
-
}
|
|
1007
|
-
|
|
1008
|
-
template<unsigned Rows, unsigned Cols, typename Type>
|
|
1009
|
-
inline CUDA_CALLABLE void
|
|
1010
|
-
{
|
|
1011
|
-
|
|
1012
|
-
|
|
1013
|
-
|
|
1014
|
-
|
|
1015
|
-
|
|
1016
|
-
|
|
1017
|
-
|
|
1018
|
-
|
|
1019
|
-
|
|
1020
|
-
}
|
|
1021
|
-
|
|
1022
|
-
template<unsigned Rows, unsigned Cols, typename Type>
|
|
1023
|
-
inline CUDA_CALLABLE void
|
|
1024
|
-
{
|
|
1025
|
-
|
|
1026
|
-
|
|
1027
|
-
|
|
1028
|
-
|
|
1029
|
-
|
|
1030
|
-
|
|
1031
|
-
|
|
1032
|
-
|
|
1033
|
-
|
|
1034
|
-
}
|
|
1035
|
-
|
|
1036
|
-
template<unsigned Rows, unsigned Cols, typename Type>
|
|
1037
|
-
inline CUDA_CALLABLE void
|
|
1038
|
-
{
|
|
1039
|
-
|
|
1040
|
-
|
|
1041
|
-
|
|
1042
|
-
|
|
1043
|
-
|
|
1044
|
-
|
|
1045
|
-
|
|
1046
|
-
|
|
1047
|
-
}
|
|
1048
|
-
|
|
1049
|
-
template<unsigned Rows, typename Type>
|
|
1050
|
-
inline CUDA_CALLABLE void
|
|
1051
|
-
{
|
|
1052
|
-
|
|
1053
|
-
|
|
1054
|
-
|
|
1055
|
-
|
|
1056
|
-
|
|
1057
|
-
|
|
1058
|
-
|
|
1059
|
-
|
|
1060
|
-
|
|
1061
|
-
|
|
1062
|
-
|
|
1063
|
-
|
|
1064
|
-
|
|
1065
|
-
|
|
1066
|
-
|
|
1067
|
-
|
|
1068
|
-
|
|
1069
|
-
|
|
1070
|
-
|
|
1071
|
-
|
|
1072
|
-
|
|
1073
|
-
|
|
1074
|
-
|
|
1075
|
-
|
|
1076
|
-
|
|
1077
|
-
|
|
1078
|
-
|
|
1079
|
-
|
|
1080
|
-
|
|
1081
|
-
|
|
1082
|
-
|
|
1083
|
-
|
|
1084
|
-
|
|
1085
|
-
|
|
1086
|
-
|
|
1087
|
-
|
|
1088
|
-
|
|
1089
|
-
|
|
1090
|
-
|
|
1091
|
-
|
|
1092
|
-
|
|
1093
|
-
|
|
1094
|
-
|
|
1095
|
-
|
|
1096
|
-
|
|
1097
|
-
|
|
1098
|
-
|
|
1099
|
-
|
|
1100
|
-
|
|
1101
|
-
|
|
1102
|
-
|
|
1103
|
-
|
|
1104
|
-
|
|
1105
|
-
|
|
1106
|
-
|
|
1107
|
-
|
|
1108
|
-
|
|
1109
|
-
|
|
1110
|
-
|
|
1111
|
-
|
|
1112
|
-
|
|
1113
|
-
|
|
1114
|
-
|
|
1115
|
-
|
|
1116
|
-
|
|
1117
|
-
|
|
1118
|
-
|
|
1119
|
-
|
|
1120
|
-
|
|
1121
|
-
|
|
1122
|
-
|
|
1123
|
-
|
|
1124
|
-
|
|
1125
|
-
|
|
1126
|
-
|
|
1127
|
-
|
|
1128
|
-
|
|
1129
|
-
|
|
1130
|
-
|
|
1131
|
-
|
|
1132
|
-
|
|
1133
|
-
|
|
1134
|
-
|
|
1135
|
-
|
|
1136
|
-
|
|
1137
|
-
|
|
1138
|
-
|
|
1139
|
-
|
|
1140
|
-
|
|
1141
|
-
|
|
1142
|
-
|
|
1143
|
-
|
|
1144
|
-
|
|
1145
|
-
|
|
1146
|
-
|
|
1147
|
-
|
|
1148
|
-
|
|
1149
|
-
|
|
1150
|
-
|
|
1151
|
-
|
|
1152
|
-
|
|
1153
|
-
|
|
1154
|
-
|
|
1155
|
-
|
|
1156
|
-
|
|
1157
|
-
|
|
1158
|
-
|
|
1159
|
-
|
|
1160
|
-
|
|
1161
|
-
|
|
1162
|
-
|
|
1163
|
-
|
|
1164
|
-
|
|
1165
|
-
|
|
1166
|
-
|
|
1167
|
-
|
|
1168
|
-
|
|
1169
|
-
|
|
1170
|
-
|
|
1171
|
-
|
|
1172
|
-
|
|
1173
|
-
|
|
1174
|
-
|
|
1175
|
-
|
|
1176
|
-
|
|
1177
|
-
|
|
1178
|
-
|
|
1179
|
-
|
|
1180
|
-
|
|
1181
|
-
|
|
1182
|
-
|
|
1183
|
-
|
|
1184
|
-
|
|
1185
|
-
|
|
1186
|
-
|
|
1187
|
-
|
|
1188
|
-
|
|
1189
|
-
|
|
1190
|
-
|
|
1191
|
-
|
|
1192
|
-
|
|
1193
|
-
|
|
1194
|
-
|
|
1195
|
-
|
|
1196
|
-
|
|
1197
|
-
|
|
1198
|
-
|
|
1199
|
-
|
|
1200
|
-
|
|
1201
|
-
|
|
1202
|
-
|
|
1203
|
-
|
|
1204
|
-
|
|
1205
|
-
|
|
1206
|
-
|
|
1207
|
-
|
|
1208
|
-
|
|
1209
|
-
|
|
1210
|
-
|
|
1211
|
-
|
|
1212
|
-
|
|
1213
|
-
|
|
1214
|
-
|
|
1215
|
-
|
|
1216
|
-
|
|
1217
|
-
|
|
1218
|
-
|
|
1219
|
-
|
|
1220
|
-
|
|
1221
|
-
|
|
1222
|
-
|
|
1223
|
-
|
|
1224
|
-
|
|
1225
|
-
|
|
1226
|
-
|
|
1227
|
-
|
|
1228
|
-
|
|
1229
|
-
|
|
1230
|
-
|
|
1231
|
-
|
|
1232
|
-
|
|
1233
|
-
|
|
1234
|
-
|
|
1235
|
-
|
|
1236
|
-
|
|
1237
|
-
|
|
1238
|
-
|
|
1239
|
-
|
|
1240
|
-
|
|
1241
|
-
|
|
1242
|
-
|
|
1243
|
-
|
|
1244
|
-
|
|
1245
|
-
|
|
1246
|
-
|
|
1247
|
-
|
|
1248
|
-
|
|
1249
|
-
|
|
1250
|
-
|
|
1251
|
-
|
|
1252
|
-
|
|
1253
|
-
|
|
1254
|
-
|
|
1255
|
-
|
|
1256
|
-
|
|
1257
|
-
|
|
1258
|
-
|
|
1259
|
-
|
|
1260
|
-
|
|
1261
|
-
|
|
1262
|
-
|
|
1263
|
-
|
|
1264
|
-
|
|
1265
|
-
|
|
1266
|
-
|
|
1267
|
-
|
|
1268
|
-
template<typename Type>
|
|
1269
|
-
inline CUDA_CALLABLE void adj_mat_t(Type
|
|
1270
|
-
|
|
1271
|
-
|
|
1272
|
-
|
|
1273
|
-
|
|
1274
|
-
|
|
1275
|
-
|
|
1276
|
-
|
|
1277
|
-
|
|
1278
|
-
|
|
1279
|
-
|
|
1280
|
-
|
|
1281
|
-
|
|
1282
|
-
|
|
1283
|
-
|
|
1284
|
-
|
|
1285
|
-
|
|
1286
|
-
|
|
1287
|
-
|
|
1288
|
-
|
|
1289
|
-
|
|
1290
|
-
|
|
1291
|
-
|
|
1292
|
-
|
|
1293
|
-
|
|
1294
|
-
|
|
1295
|
-
|
|
1296
|
-
|
|
1297
|
-
|
|
1298
|
-
|
|
1299
|
-
|
|
1300
|
-
|
|
1301
|
-
|
|
1302
|
-
|
|
1303
|
-
|
|
1304
|
-
|
|
1305
|
-
|
|
1306
|
-
|
|
1307
|
-
|
|
1308
|
-
|
|
1309
|
-
|
|
1310
|
-
|
|
1311
|
-
|
|
1312
|
-
|
|
1313
|
-
|
|
1314
|
-
|
|
1315
|
-
|
|
1316
|
-
|
|
1317
|
-
|
|
1318
|
-
|
|
1319
|
-
|
|
1320
|
-
|
|
1321
|
-
|
|
1322
|
-
|
|
1323
|
-
|
|
1324
|
-
|
|
1325
|
-
|
|
1326
|
-
|
|
1327
|
-
|
|
1328
|
-
|
|
1329
|
-
|
|
1330
|
-
|
|
1331
|
-
|
|
1332
|
-
|
|
1333
|
-
|
|
1334
|
-
|
|
1335
|
-
|
|
1336
|
-
template<typename Type>
|
|
1337
|
-
inline CUDA_CALLABLE void adj_mat_t(
|
|
1338
|
-
|
|
1339
|
-
|
|
1340
|
-
|
|
1341
|
-
|
|
1342
|
-
|
|
1343
|
-
|
|
1344
|
-
|
|
1345
|
-
|
|
1346
|
-
|
|
1347
|
-
|
|
1348
|
-
|
|
1349
|
-
|
|
1350
|
-
|
|
1351
|
-
|
|
1352
|
-
|
|
1353
|
-
|
|
1354
|
-
|
|
1355
|
-
|
|
1356
|
-
|
|
1357
|
-
|
|
1358
|
-
|
|
1359
|
-
|
|
1360
|
-
|
|
1361
|
-
|
|
1362
|
-
|
|
1363
|
-
|
|
1364
|
-
|
|
1365
|
-
|
|
1366
|
-
|
|
1367
|
-
|
|
1368
|
-
|
|
1369
|
-
|
|
1370
|
-
|
|
1371
|
-
|
|
1372
|
-
|
|
1373
|
-
|
|
1374
|
-
|
|
1375
|
-
|
|
1376
|
-
|
|
1377
|
-
{
|
|
1378
|
-
|
|
1379
|
-
|
|
1380
|
-
|
|
1381
|
-
}
|
|
1382
|
-
|
|
1383
|
-
|
|
1384
|
-
|
|
1385
|
-
|
|
1386
|
-
|
|
1387
|
-
|
|
1388
|
-
|
|
1389
|
-
|
|
1390
|
-
|
|
1391
|
-
|
|
1392
|
-
|
|
1393
|
-
|
|
1394
|
-
|
|
1395
|
-
|
|
1396
|
-
|
|
1397
|
-
|
|
1398
|
-
|
|
1399
|
-
|
|
1400
|
-
|
|
1401
|
-
|
|
1402
|
-
|
|
1403
|
-
|
|
1404
|
-
|
|
1405
|
-
|
|
1406
|
-
|
|
1407
|
-
|
|
1408
|
-
|
|
1409
|
-
|
|
1410
|
-
|
|
1411
|
-
|
|
1412
|
-
|
|
1413
|
-
|
|
1414
|
-
|
|
1415
|
-
|
|
1416
|
-
|
|
1417
|
-
|
|
1418
|
-
{
|
|
1419
|
-
|
|
1420
|
-
|
|
1421
|
-
|
|
1422
|
-
|
|
1423
|
-
|
|
1424
|
-
|
|
1425
|
-
|
|
1426
|
-
|
|
1427
|
-
|
|
1428
|
-
|
|
1429
|
-
|
|
1430
|
-
|
|
1431
|
-
|
|
1432
|
-
|
|
1433
|
-
|
|
1434
|
-
}
|
|
1435
|
-
|
|
1436
|
-
|
|
1437
|
-
|
|
1438
|
-
|
|
1439
|
-
|
|
1440
|
-
|
|
1441
|
-
|
|
1442
|
-
|
|
1443
|
-
|
|
1444
|
-
|
|
1445
|
-
|
|
1446
|
-
|
|
1447
|
-
|
|
1448
|
-
|
|
1449
|
-
|
|
1450
|
-
|
|
1451
|
-
|
|
1452
|
-
|
|
1453
|
-
|
|
1454
|
-
|
|
1455
|
-
|
|
1456
|
-
|
|
1457
|
-
|
|
1458
|
-
|
|
1459
|
-
{
|
|
1460
|
-
|
|
1461
|
-
|
|
1462
|
-
|
|
1463
|
-
|
|
1464
|
-
|
|
1465
|
-
|
|
1466
|
-
|
|
1467
|
-
|
|
1468
|
-
|
|
1469
|
-
|
|
1470
|
-
|
|
1471
|
-
|
|
1472
|
-
|
|
1473
|
-
|
|
1474
|
-
|
|
1475
|
-
|
|
1476
|
-
|
|
1477
|
-
|
|
1478
|
-
|
|
1479
|
-
|
|
1480
|
-
|
|
1481
|
-
|
|
1482
|
-
|
|
1483
|
-
|
|
1484
|
-
|
|
1485
|
-
|
|
1486
|
-
|
|
1487
|
-
|
|
1488
|
-
|
|
1489
|
-
|
|
1490
|
-
|
|
1491
|
-
|
|
1492
|
-
|
|
1493
|
-
|
|
1494
|
-
|
|
1495
|
-
|
|
1496
|
-
|
|
1497
|
-
|
|
1498
|
-
|
|
1
|
+
/** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
|
|
2
|
+
* NVIDIA CORPORATION and its licensors retain all intellectual property
|
|
3
|
+
* and proprietary rights in and to this software, related documentation
|
|
4
|
+
* and any modifications thereto. Any use, reproduction, disclosure or
|
|
5
|
+
* distribution of this software and related documentation without an express
|
|
6
|
+
* license agreement from NVIDIA CORPORATION is strictly prohibited.
|
|
7
|
+
*/
|
|
8
|
+
|
|
9
|
+
#pragma once
|
|
10
|
+
|
|
11
|
+
#include "initializer_array.h"
|
|
12
|
+
|
|
13
|
+
namespace wp
|
|
14
|
+
{
|
|
15
|
+
|
|
16
|
+
//----------------------------------------------------------
|
|
17
|
+
// mat
|
|
18
|
+
template<typename T>
|
|
19
|
+
struct quat_t;
|
|
20
|
+
|
|
21
|
+
template<unsigned Rows, unsigned Cols, typename Type>
|
|
22
|
+
struct mat_t
|
|
23
|
+
{
|
|
24
|
+
inline CUDA_CALLABLE mat_t()
|
|
25
|
+
: data()
|
|
26
|
+
{}
|
|
27
|
+
|
|
28
|
+
inline CUDA_CALLABLE mat_t(Type s)
|
|
29
|
+
{
|
|
30
|
+
for (unsigned i=0; i < Rows; ++i)
|
|
31
|
+
for (unsigned j=0; j < Cols; ++j)
|
|
32
|
+
data[i][j] = s;
|
|
33
|
+
}
|
|
34
|
+
|
|
35
|
+
template <typename OtherType>
|
|
36
|
+
inline explicit CUDA_CALLABLE mat_t(const mat_t<Rows, Cols, OtherType>& other)
|
|
37
|
+
{
|
|
38
|
+
for (unsigned i=0; i < Rows; ++i)
|
|
39
|
+
for (unsigned j=0; j < Cols; ++j)
|
|
40
|
+
data[i][j] = other.data[i][j];
|
|
41
|
+
}
|
|
42
|
+
|
|
43
|
+
inline CUDA_CALLABLE mat_t(vec_t<2,Type> c0, vec_t<2,Type> c1)
|
|
44
|
+
{
|
|
45
|
+
data[0][0] = c0[0];
|
|
46
|
+
data[1][0] = c0[1];
|
|
47
|
+
|
|
48
|
+
data[0][1] = c1[0];
|
|
49
|
+
data[1][1] = c1[1];
|
|
50
|
+
}
|
|
51
|
+
|
|
52
|
+
inline CUDA_CALLABLE mat_t(vec_t<3,Type> c0, vec_t<3,Type> c1, vec_t<3,Type> c2)
|
|
53
|
+
{
|
|
54
|
+
data[0][0] = c0[0];
|
|
55
|
+
data[1][0] = c0[1];
|
|
56
|
+
data[2][0] = c0[2];
|
|
57
|
+
|
|
58
|
+
data[0][1] = c1[0];
|
|
59
|
+
data[1][1] = c1[1];
|
|
60
|
+
data[2][1] = c1[2];
|
|
61
|
+
|
|
62
|
+
data[0][2] = c2[0];
|
|
63
|
+
data[1][2] = c2[1];
|
|
64
|
+
data[2][2] = c2[2];
|
|
65
|
+
}
|
|
66
|
+
|
|
67
|
+
inline CUDA_CALLABLE mat_t(vec_t<4,Type> c0, vec_t<4,Type> c1, vec_t<4,Type> c2, vec_t<4,Type> c3)
|
|
68
|
+
{
|
|
69
|
+
data[0][0] = c0[0];
|
|
70
|
+
data[1][0] = c0[1];
|
|
71
|
+
data[2][0] = c0[2];
|
|
72
|
+
data[3][0] = c0[3];
|
|
73
|
+
|
|
74
|
+
data[0][1] = c1[0];
|
|
75
|
+
data[1][1] = c1[1];
|
|
76
|
+
data[2][1] = c1[2];
|
|
77
|
+
data[3][1] = c1[3];
|
|
78
|
+
|
|
79
|
+
data[0][2] = c2[0];
|
|
80
|
+
data[1][2] = c2[1];
|
|
81
|
+
data[2][2] = c2[2];
|
|
82
|
+
data[3][2] = c2[3];
|
|
83
|
+
|
|
84
|
+
data[0][3] = c3[0];
|
|
85
|
+
data[1][3] = c3[1];
|
|
86
|
+
data[2][3] = c3[2];
|
|
87
|
+
data[3][3] = c3[3];
|
|
88
|
+
}
|
|
89
|
+
|
|
90
|
+
inline CUDA_CALLABLE mat_t(Type m00, Type m01, Type m10, Type m11)
|
|
91
|
+
{
|
|
92
|
+
data[0][0] = m00;
|
|
93
|
+
data[1][0] = m10;
|
|
94
|
+
data[0][1] = m01;
|
|
95
|
+
data[1][1] = m11;
|
|
96
|
+
}
|
|
97
|
+
|
|
98
|
+
inline CUDA_CALLABLE mat_t(
|
|
99
|
+
Type m00, Type m01, Type m02,
|
|
100
|
+
Type m10, Type m11, Type m12,
|
|
101
|
+
Type m20, Type m21, Type m22)
|
|
102
|
+
{
|
|
103
|
+
data[0][0] = m00;
|
|
104
|
+
data[1][0] = m10;
|
|
105
|
+
data[2][0] = m20;
|
|
106
|
+
|
|
107
|
+
data[0][1] = m01;
|
|
108
|
+
data[1][1] = m11;
|
|
109
|
+
data[2][1] = m21;
|
|
110
|
+
|
|
111
|
+
data[0][2] = m02;
|
|
112
|
+
data[1][2] = m12;
|
|
113
|
+
data[2][2] = m22;
|
|
114
|
+
}
|
|
115
|
+
|
|
116
|
+
inline CUDA_CALLABLE mat_t(
|
|
117
|
+
Type m00, Type m01, Type m02, Type m03,
|
|
118
|
+
Type m10, Type m11, Type m12, Type m13,
|
|
119
|
+
Type m20, Type m21, Type m22, Type m23,
|
|
120
|
+
Type m30, Type m31, Type m32, Type m33)
|
|
121
|
+
{
|
|
122
|
+
data[0][0] = m00;
|
|
123
|
+
data[1][0] = m10;
|
|
124
|
+
data[2][0] = m20;
|
|
125
|
+
data[3][0] = m30;
|
|
126
|
+
|
|
127
|
+
data[0][1] = m01;
|
|
128
|
+
data[1][1] = m11;
|
|
129
|
+
data[2][1] = m21;
|
|
130
|
+
data[3][1] = m31;
|
|
131
|
+
|
|
132
|
+
data[0][2] = m02;
|
|
133
|
+
data[1][2] = m12;
|
|
134
|
+
data[2][2] = m22;
|
|
135
|
+
data[3][2] = m32;
|
|
136
|
+
|
|
137
|
+
data[0][3] = m03;
|
|
138
|
+
data[1][3] = m13;
|
|
139
|
+
data[2][3] = m23;
|
|
140
|
+
data[3][3] = m33;
|
|
141
|
+
}
|
|
142
|
+
|
|
143
|
+
// implemented in quat.h
|
|
144
|
+
inline CUDA_CALLABLE mat_t(const vec_t<3,Type>& pos, const quat_t<Type>& rot, const vec_t<3,Type>& scale);
|
|
145
|
+
|
|
146
|
+
|
|
147
|
+
inline CUDA_CALLABLE mat_t(const initializer_array<Rows * Cols, Type> &l)
|
|
148
|
+
{
|
|
149
|
+
for (unsigned i=0; i < Rows; ++i)
|
|
150
|
+
{
|
|
151
|
+
for (unsigned j=0; j < Cols; ++j)
|
|
152
|
+
{
|
|
153
|
+
data[i][j] = l[i * Cols + j];
|
|
154
|
+
}
|
|
155
|
+
}
|
|
156
|
+
}
|
|
157
|
+
|
|
158
|
+
inline CUDA_CALLABLE mat_t(const initializer_array<Cols, vec_t<Rows,Type> > &l)
|
|
159
|
+
{
|
|
160
|
+
for (unsigned j=0; j < Cols; ++j)
|
|
161
|
+
{
|
|
162
|
+
for (unsigned i=0; i < Rows; ++i)
|
|
163
|
+
{
|
|
164
|
+
data[i][j] = l[j][i];
|
|
165
|
+
}
|
|
166
|
+
}
|
|
167
|
+
}
|
|
168
|
+
|
|
169
|
+
CUDA_CALLABLE vec_t<Cols,Type> get_row(int index) const
|
|
170
|
+
{
|
|
171
|
+
return (vec_t<Cols,Type>&)data[index];
|
|
172
|
+
}
|
|
173
|
+
|
|
174
|
+
CUDA_CALLABLE void set_row(int index, const vec_t<Cols,Type>& v)
|
|
175
|
+
{
|
|
176
|
+
(vec_t<Cols,Type>&)data[index] = v;
|
|
177
|
+
}
|
|
178
|
+
|
|
179
|
+
CUDA_CALLABLE vec_t<Rows,Type> get_col(int index) const
|
|
180
|
+
{
|
|
181
|
+
vec_t<Rows,Type> ret;
|
|
182
|
+
for( unsigned i=0;i < Rows; ++i )
|
|
183
|
+
{
|
|
184
|
+
ret[i] = data[i][index];
|
|
185
|
+
}
|
|
186
|
+
return ret;
|
|
187
|
+
}
|
|
188
|
+
|
|
189
|
+
CUDA_CALLABLE void set_col(int index, const vec_t<Rows,Type>& v)
|
|
190
|
+
{
|
|
191
|
+
for( unsigned i=0;i < Rows; ++i )
|
|
192
|
+
{
|
|
193
|
+
data[i][index] = v[i];
|
|
194
|
+
}
|
|
195
|
+
}
|
|
196
|
+
|
|
197
|
+
// row major storage assumed to be compatible with PyTorch
|
|
198
|
+
Type data[Rows][Cols];
|
|
199
|
+
};
|
|
200
|
+
|
|
201
|
+
|
|
202
|
+
template<unsigned Rows, typename Type>
|
|
203
|
+
inline CUDA_CALLABLE mat_t<Rows, Rows, Type> identity()
|
|
204
|
+
{
|
|
205
|
+
mat_t<Rows, Rows, Type> m;
|
|
206
|
+
for( unsigned i=0; i < Rows; ++i )
|
|
207
|
+
{
|
|
208
|
+
m.data[i][i] = Type(1);
|
|
209
|
+
}
|
|
210
|
+
return m;
|
|
211
|
+
}
|
|
212
|
+
|
|
213
|
+
template<unsigned Rows, unsigned Cols, typename Type>
|
|
214
|
+
inline CUDA_CALLABLE bool operator==(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b)
|
|
215
|
+
{
|
|
216
|
+
for (unsigned i=0; i < Rows; ++i)
|
|
217
|
+
for (unsigned j=0; j < Cols; ++j)
|
|
218
|
+
if (a.data[i][j] != b.data[i][j])
|
|
219
|
+
return false;
|
|
220
|
+
|
|
221
|
+
return true;
|
|
222
|
+
}
|
|
223
|
+
|
|
224
|
+
|
|
225
|
+
// negation:
|
|
226
|
+
template<unsigned Rows, unsigned Cols, typename Type>
|
|
227
|
+
inline CUDA_CALLABLE mat_t<Rows,Cols,Type> operator - (mat_t<Rows,Cols,Type> a)
|
|
228
|
+
{
|
|
229
|
+
// NB: this constructor will initialize all ret's components to 0, which is
|
|
230
|
+
// unnecessary...
|
|
231
|
+
mat_t<Rows,Cols,Type> ret;
|
|
232
|
+
for (unsigned i=0; i < Rows; ++i)
|
|
233
|
+
for (unsigned j=0; j < Cols; ++j)
|
|
234
|
+
ret.data[i][j] = -a.data[i][j];
|
|
235
|
+
|
|
236
|
+
// Wonder if this does a load of copying when it returns... hopefully not as it's inlined?
|
|
237
|
+
return ret;
|
|
238
|
+
}
|
|
239
|
+
|
|
240
|
+
|
|
241
|
+
template<unsigned Rows, unsigned Cols, typename Type>
|
|
242
|
+
CUDA_CALLABLE inline mat_t<Rows,Cols,Type> pos(const mat_t<Rows,Cols,Type>& x)
|
|
243
|
+
{
|
|
244
|
+
return x;
|
|
245
|
+
}
|
|
246
|
+
|
|
247
|
+
template<unsigned Rows, unsigned Cols, typename Type>
|
|
248
|
+
CUDA_CALLABLE inline void adj_pos(const mat_t<Rows,Cols,Type>& x, mat_t<Rows,Cols,Type>& adj_x, const mat_t<Rows,Cols,Type>& adj_ret)
|
|
249
|
+
{
|
|
250
|
+
adj_x += adj_ret;
|
|
251
|
+
}
|
|
252
|
+
|
|
253
|
+
template<unsigned Rows, unsigned Cols, typename Type>
|
|
254
|
+
CUDA_CALLABLE inline mat_t<Rows,Cols,Type> neg(const mat_t<Rows,Cols,Type>& x)
|
|
255
|
+
{
|
|
256
|
+
return -x;
|
|
257
|
+
}
|
|
258
|
+
|
|
259
|
+
template<unsigned Rows, unsigned Cols, typename Type>
|
|
260
|
+
CUDA_CALLABLE inline void adj_neg(const mat_t<Rows,Cols,Type>& x, mat_t<Rows,Cols,Type>& adj_x, const mat_t<Rows,Cols,Type>& adj_ret)
|
|
261
|
+
{
|
|
262
|
+
adj_x -= adj_ret;
|
|
263
|
+
}
|
|
264
|
+
|
|
265
|
+
|
|
266
|
+
template<unsigned Rows, unsigned Cols, typename Type>
|
|
267
|
+
inline CUDA_CALLABLE mat_t<Rows,Cols,Type> atomic_add(mat_t<Rows,Cols,Type> * addr, mat_t<Rows,Cols,Type> value)
|
|
268
|
+
{
|
|
269
|
+
mat_t<Rows,Cols,Type> m;
|
|
270
|
+
|
|
271
|
+
for (unsigned i=0; i < Rows; ++i)
|
|
272
|
+
for (unsigned j=0; j < Cols; ++j)
|
|
273
|
+
m.data[i][j] = atomic_add(&addr->data[i][j], value.data[i][j]);
|
|
274
|
+
|
|
275
|
+
return m;
|
|
276
|
+
}
|
|
277
|
+
|
|
278
|
+
template<unsigned Rows, unsigned Cols, typename Type>
|
|
279
|
+
inline CUDA_CALLABLE mat_t<Rows,Cols,Type> atomic_min(mat_t<Rows,Cols,Type> * addr, mat_t<Rows,Cols,Type> value)
|
|
280
|
+
{
|
|
281
|
+
mat_t<Rows,Cols,Type> m;
|
|
282
|
+
|
|
283
|
+
for (unsigned i=0; i < Rows; ++i)
|
|
284
|
+
for (unsigned j=0; j < Cols; ++j)
|
|
285
|
+
m.data[i][j] = atomic_min(&addr->data[i][j], value.data[i][j]);
|
|
286
|
+
|
|
287
|
+
return m;
|
|
288
|
+
}
|
|
289
|
+
|
|
290
|
+
template<unsigned Rows, unsigned Cols, typename Type>
|
|
291
|
+
inline CUDA_CALLABLE mat_t<Rows,Cols,Type> atomic_max(mat_t<Rows,Cols,Type> * addr, mat_t<Rows,Cols,Type> value)
|
|
292
|
+
{
|
|
293
|
+
mat_t<Rows,Cols,Type> m;
|
|
294
|
+
|
|
295
|
+
for (unsigned i=0; i < Rows; ++i)
|
|
296
|
+
for (unsigned j=0; j < Cols; ++j)
|
|
297
|
+
m.data[i][j] = atomic_max(&addr->data[i][j], value.data[i][j]);
|
|
298
|
+
|
|
299
|
+
return m;
|
|
300
|
+
}
|
|
301
|
+
|
|
302
|
+
template<unsigned Rows, unsigned Cols, typename Type>
|
|
303
|
+
inline CUDA_CALLABLE void adj_atomic_minmax(
|
|
304
|
+
mat_t<Rows,Cols,Type> *addr,
|
|
305
|
+
mat_t<Rows,Cols,Type> *adj_addr,
|
|
306
|
+
const mat_t<Rows,Cols,Type> &value,
|
|
307
|
+
mat_t<Rows,Cols,Type> &adj_value)
|
|
308
|
+
{
|
|
309
|
+
for (unsigned i=0; i < Rows; ++i)
|
|
310
|
+
for (unsigned j=0; j < Cols; ++j)
|
|
311
|
+
adj_atomic_minmax(&addr->data[i][j], &adj_addr->data[i][j], value.data[i][j], adj_value.data[i][j]);
|
|
312
|
+
}
|
|
313
|
+
|
|
314
|
+
template<unsigned Rows, unsigned Cols, typename Type>
|
|
315
|
+
inline CUDA_CALLABLE vec_t<Cols,Type> extract(const mat_t<Rows,Cols,Type>& m, int row)
|
|
316
|
+
{
|
|
317
|
+
vec_t<Cols,Type> ret;
|
|
318
|
+
for(unsigned i=0; i < Cols; ++i)
|
|
319
|
+
{
|
|
320
|
+
ret.c[i] = m.data[row][i];
|
|
321
|
+
}
|
|
322
|
+
return ret;
|
|
323
|
+
}
|
|
324
|
+
|
|
325
|
+
template<unsigned Rows, unsigned Cols, typename Type>
|
|
326
|
+
inline CUDA_CALLABLE Type extract(const mat_t<Rows,Cols,Type>& m, int row, int col)
|
|
327
|
+
{
|
|
328
|
+
#ifndef NDEBUG
|
|
329
|
+
if (row < 0 || row >= Rows)
|
|
330
|
+
{
|
|
331
|
+
printf("mat row index %d out of bounds at %s %d\n", row, __FILE__, __LINE__);
|
|
332
|
+
assert(0);
|
|
333
|
+
}
|
|
334
|
+
if (col < 0 || col >= Cols)
|
|
335
|
+
{
|
|
336
|
+
printf("mat col index %d out of bounds at %s %d\n", col, __FILE__, __LINE__);
|
|
337
|
+
assert(0);
|
|
338
|
+
}
|
|
339
|
+
#endif
|
|
340
|
+
return m.data[row][col];
|
|
341
|
+
}
|
|
342
|
+
|
|
343
|
+
template<unsigned Rows, unsigned Cols, typename Type>
|
|
344
|
+
inline CUDA_CALLABLE vec_t<Cols, Type>* index(mat_t<Rows,Cols,Type>& m, int row)
|
|
345
|
+
{
|
|
346
|
+
#ifndef NDEBUG
|
|
347
|
+
if (row < 0 || row >= Rows)
|
|
348
|
+
{
|
|
349
|
+
printf("mat row index %d out of bounds at %s %d\n", row, __FILE__, __LINE__);
|
|
350
|
+
assert(0);
|
|
351
|
+
}
|
|
352
|
+
#endif
|
|
353
|
+
|
|
354
|
+
return reinterpret_cast<vec_t<Cols, Type>*>(&m.data[row]);
|
|
355
|
+
}
|
|
356
|
+
|
|
357
|
+
template<unsigned Rows, unsigned Cols, typename Type>
|
|
358
|
+
inline CUDA_CALLABLE Type* index(mat_t<Rows,Cols,Type>& m, int row, int col)
|
|
359
|
+
{
|
|
360
|
+
#ifndef NDEBUG
|
|
361
|
+
if (row < 0 || row >= Rows)
|
|
362
|
+
{
|
|
363
|
+
printf("mat row index %d out of bounds at %s %d\n", row, __FILE__, __LINE__);
|
|
364
|
+
assert(0);
|
|
365
|
+
}
|
|
366
|
+
if (col < 0 || col >= Cols)
|
|
367
|
+
{
|
|
368
|
+
printf("mat col index %d out of bounds at %s %d\n", col, __FILE__, __LINE__);
|
|
369
|
+
assert(0);
|
|
370
|
+
}
|
|
371
|
+
#endif
|
|
372
|
+
|
|
373
|
+
return &m.data[row][col];
|
|
374
|
+
}

template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE void adj_index(const mat_t<Rows,Cols,Type>& m, int row,
                                    const mat_t<Rows,Cols,Type>& adj_m, int adj_row, const vec_t<Cols, Type>& adj_value)
{
    // nop
}

template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE void adj_index(const mat_t<Rows,Cols,Type>& m, int row, int col,
                                    const mat_t<Rows,Cols,Type>& adj_m, int adj_row, int adj_col, Type adj_value)
{
    // nop
}

template<unsigned Rows, unsigned Cols, typename Type>
inline bool CUDA_CALLABLE isfinite(const mat_t<Rows,Cols,Type>& m)
{
    for (unsigned i=0; i < Rows; ++i)
        for (unsigned j=0; j < Cols; ++j)
            if (!isfinite(m.data[i][j]))
                return false;
    return true;
}

template<unsigned Rows, unsigned Cols, typename Type>
inline void CUDA_CALLABLE adj_isfinite(const mat_t<Rows,Cols,Type>& m, mat_t<Rows,Cols,Type>& adj_m, const bool &adj_ret)
{
}

template<unsigned Rows, unsigned Cols, typename Type>
inline bool CUDA_CALLABLE isnan(const mat_t<Rows,Cols,Type>& m)
{
    for (unsigned i=0; i < Rows; ++i)
        for (unsigned j=0; j < Cols; ++j)
            if (isnan(m.data[i][j]))
                return true;
    return false;
}

template<unsigned Rows, unsigned Cols, typename Type>
inline void CUDA_CALLABLE adj_isnan(const mat_t<Rows,Cols,Type>& m, mat_t<Rows,Cols,Type>& adj_m, const bool &adj_ret)
{
}

template<unsigned Rows, unsigned Cols, typename Type>
inline bool CUDA_CALLABLE isinf(const mat_t<Rows,Cols,Type>& m)
{
    for (unsigned i=0; i < Rows; ++i)
        for (unsigned j=0; j < Cols; ++j)
            if (isinf(m.data[i][j]))
                return true;
    return false;
}

template<unsigned Rows, unsigned Cols, typename Type>
inline void CUDA_CALLABLE adj_isinf(const mat_t<Rows,Cols,Type>& m, mat_t<Rows,Cols,Type>& adj_m, const bool &adj_ret)
{
}

template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE mat_t<Rows,Cols,Type> add(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b)
{
    mat_t<Rows,Cols,Type> t;
    for (unsigned i=0; i < Rows; ++i)
    {
        for (unsigned j=0; j < Cols; ++j)
        {
            t.data[i][j] = a.data[i][j] + b.data[i][j];
        }
    }

    return t;
}

template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE mat_t<Rows,Cols,Type> sub(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b)
{
    mat_t<Rows,Cols,Type> t;
    for (unsigned i=0; i < Rows; ++i)
    {
        for (unsigned j=0; j < Cols; ++j)
        {
            t.data[i][j] = a.data[i][j] - b.data[i][j];
        }
    }

    return t;
}

template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE mat_t<Rows,Cols,Type> div(const mat_t<Rows,Cols,Type>& a, Type b)
{
    mat_t<Rows,Cols,Type> t;
    for (unsigned i=0; i < Rows; ++i)
    {
        for (unsigned j=0; j < Cols; ++j)
        {
            t.data[i][j] = a.data[i][j]/b;
        }
    }

    return t;
}

template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE mat_t<Rows,Cols,Type> div(Type b, const mat_t<Rows,Cols,Type>& a)
{
    mat_t<Rows,Cols,Type> t;
    for (unsigned i=0; i < Rows; ++i)
    {
        for (unsigned j=0; j < Cols; ++j)
        {
            t.data[i][j] = b / a.data[i][j];
        }
    }

    return t;
}

template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE mat_t<Rows,Cols,Type> mul(const mat_t<Rows,Cols,Type>& a, Type b)
{
    mat_t<Rows,Cols,Type> t;
    for (unsigned i=0; i < Rows; ++i)
    {
        for (unsigned j=0; j < Cols; ++j)
        {
            t.data[i][j] = a.data[i][j]*b;
        }
    }

    return t;
}

template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE mat_t<Rows,Cols,Type> mul(Type b, const mat_t<Rows,Cols,Type>& a)
{
    return mul(a,b);
}


template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE mat_t<Rows,Cols,Type> operator*(Type b, const mat_t<Rows,Cols,Type>& a)
{
    return mul(a,b);
}

template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE mat_t<Rows,Cols,Type> operator*(const mat_t<Rows,Cols,Type>& a, Type b)
{
    return mul(a,b);
}

template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE vec_t<Rows,Type> mul(const mat_t<Rows,Cols,Type>& a, const vec_t<Cols,Type>& b)
{
    vec_t<Rows,Type> r = a.get_col(0)*b[0];
    for( unsigned i=1; i < Cols; ++i )
    {
        r += a.get_col(i)*b[i];
    }
    return r;
}

template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE vec_t<Cols,Type> mul(const vec_t<Rows,Type>& b, const mat_t<Rows,Cols,Type>& a)
{
    vec_t<Cols,Type> r = a.get_row(0)*b[0];
    for( unsigned i=1; i < Rows; ++i )
    {
        r += a.get_row(i)*b[i];
    }
    return r;
}

template<unsigned Rows, unsigned Cols, unsigned ColsOut, typename Type>
inline CUDA_CALLABLE mat_t<Rows,ColsOut,Type> mul(const mat_t<Rows,Cols,Type>& a, const mat_t<Cols,ColsOut,Type>& b)
{
    mat_t<Rows,ColsOut,Type> t(0);
    for (unsigned i=0; i < Rows; ++i)
    {
        for (unsigned j=0; j < ColsOut; ++j)
        {
            for (unsigned k=0; k < Cols; ++k)
            {
                t.data[i][j] += a.data[i][k]*b.data[k][j];
            }
        }
    }

    return t;
}
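
// Worked example (illustrative, not part of the upstream file; assumes the
// 4-scalar row-major mat_t constructor whose adjoint appears later in this
// header): mul() is the textbook row-by-column triple loop, so for 2x2 inputs
//   [1 2] [5 6]   [1*5+2*7  1*6+2*8]   [19 22]
//   [3 4] [7 8] = [3*5+4*7  3*6+4*8] = [43 50]
inline CUDA_CALLABLE mat_t<2,2,float> mul_demo()
{
    mat_t<2,2,float> a(1.f, 2.f, 3.f, 4.f);
    mat_t<2,2,float> b(5.f, 6.f, 7.f, 8.f);
    return mul(a, b); // rows: (19, 22) and (43, 50)
}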

template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE Type ddot(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b)
{
    // double dot product between a and b:
    Type r(0);
    for (unsigned i=0; i < Rows; ++i)
    {
        for (unsigned j=0; j < Cols; ++j)
        {
            r += a.data[i][j] * b.data[i][j];
        }
    }
    return r;
}

template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE Type tensordot(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b)
{
    // corresponds to `np.tensordot()` with all axes being contracted
    return ddot(a, b);
}

template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE mat_t<Cols,Rows,Type> transpose(const mat_t<Rows,Cols,Type>& a)
{
    mat_t<Cols,Rows,Type> t;
    for (unsigned i=0; i < Cols; ++i)
    {
        for (unsigned j=0; j < Rows; ++j)
        {
            t.data[i][j] = a.data[j][i];
        }
    }

    return t;
}
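
// Sketch (illustrative, not part of the upstream file): ddot() is the
// Frobenius inner product sum_ij a_ij*b_ij (mathematically trace(a^T b)), and
// tensordot() is an alias for it, so ddot(a, a) is the squared Frobenius norm.
inline CUDA_CALLABLE bool ddot_demo()
{
    mat_t<2,2,float> a(1.f, 2.f, 3.f, 4.f);
    mat_t<2,2,float> b(5.f, 6.f, 7.f, 8.f);
    return ddot(a, b) == 70.f; // 1*5 + 2*6 + 3*7 + 4*8
}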

// Only implementing determinants for 2x2, 3x3 and 4x4 matrices for now...
template<typename Type>
inline CUDA_CALLABLE Type determinant(const mat_t<2,2,Type>& m)
{
    return m.data[0][0]*m.data[1][1] - m.data[1][0]*m.data[0][1];
}

template<typename Type>
inline CUDA_CALLABLE Type determinant(const mat_t<3,3,Type>& m)
{
    return dot(
        vec_t<3,Type>(m.data[0][0],m.data[0][1],m.data[0][2]),
        cross(
            vec_t<3,Type>(m.data[1][0],m.data[1][1],m.data[1][2]),
            vec_t<3,Type>(m.data[2][0],m.data[2][1],m.data[2][2])
        )
    );
}

template<typename Type>
inline CUDA_CALLABLE Type determinant(const mat_t<4,4,Type>& m)
{
    // adapted from USD GfMatrix4f::Inverse()
    Type x00, x01, x02, x03;
    Type x10, x11, x12, x13;
    Type x20, x21, x22, x23;
    Type x30, x31, x32, x33;
    double y01, y02, y03, y12, y13, y23;
    Type z00, z10, z20, z30;

    // Pickle 1st two columns of matrix into registers
    x00 = m.data[0][0];
    x01 = m.data[0][1];
    x10 = m.data[1][0];
    x11 = m.data[1][1];
    x20 = m.data[2][0];
    x21 = m.data[2][1];
    x30 = m.data[3][0];
    x31 = m.data[3][1];

    // Compute all six 2x2 determinants of 1st two columns
    y01 = x00*x11 - x10*x01;
    y02 = x00*x21 - x20*x01;
    y03 = x00*x31 - x30*x01;
    y12 = x10*x21 - x20*x11;
    y13 = x10*x31 - x30*x11;
    y23 = x20*x31 - x30*x21;

    // Pickle 2nd two columns of matrix into registers
    x02 = m.data[0][2];
    x03 = m.data[0][3];
    x12 = m.data[1][2];
    x13 = m.data[1][3];
    x22 = m.data[2][2];
    x23 = m.data[2][3];
    x32 = m.data[3][2];
    x33 = m.data[3][3];

    // Compute all six 2x2 determinants of 2nd two columns
    y01 = x02*x13 - x12*x03;
    y02 = x02*x23 - x22*x03;
    y03 = x02*x33 - x32*x03;
    y12 = x12*x23 - x22*x13;
    y13 = x12*x33 - x32*x13;
    y23 = x22*x33 - x32*x23;

    // Compute all 3x3 cofactors for 1st two columns
    z30 = x11*y02 - x21*y01 - x01*y12;
    z20 = x01*y13 - x11*y03 + x31*y01;
    z10 = x21*y03 - x31*y02 - x01*y23;
    z00 = x11*y23 - x21*y13 + x31*y12;

    // compute 4x4 determinant (cofactor expansion along the first column)
    double det = x30*z30 + x20*z20 + x10*z10 + x00*z00;
    return det;
}
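
// Sanity-check sketch (illustrative, not part of the upstream file; assumes
// the 9-scalar row-major constructor used by skew() below): for a diagonal
// matrix the determinant is just the product of the diagonal entries.
inline CUDA_CALLABLE bool determinant_demo()
{
    mat_t<3,3,float> m(2.f, 0.f, 0.f,
                       0.f, 3.f, 0.f,
                       0.f, 0.f, 4.f);
    return determinant(m) == 24.f; // 2 * 3 * 4, exact in float
}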

template<unsigned Rows, typename Type>
inline CUDA_CALLABLE Type trace(const mat_t<Rows,Rows,Type>& m)
{
    Type ret = m.data[0][0];
    for( unsigned i=1; i < Rows; ++i )
    {
        ret += m.data[i][i];
    }
    return ret;
}

template<unsigned Rows, typename Type>
inline CUDA_CALLABLE vec_t<Rows, Type> get_diag(const mat_t<Rows,Rows,Type>& m)
{
    vec_t<Rows, Type> ret;
    for( unsigned i=0; i < Rows; ++i )
    {
        ret[i] = m.data[i][i];
    }
    return ret;
}

// Only implementing inverses for 2x2, 3x3 and 4x4 matrices for now...
template<typename Type>
inline CUDA_CALLABLE mat_t<2,2,Type> inverse(const mat_t<2,2,Type>& m)
{
    Type det = determinant(m);
    if (det > Type(kEps) || det < -Type(kEps))
    {
        return mat_t<2,2,Type>( m.data[1][1], -m.data[0][1],
                               -m.data[1][0],  m.data[0][0])*(Type(1.0f)/det);
    }
    else
    {
        return mat_t<2,2,Type>();
    }
}
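
// Worked example (illustrative, not part of the upstream file): for
// [[4, 7], [2, 6]] the determinant is 4*6 - 2*7 = 10, so the inverse is
// [[0.6, -0.7], [-0.2, 0.4]]. Note the guard above: when |det| <= kEps the
// function returns a default-constructed matrix instead of asserting, so
// callers should treat that result as "singular input".
inline CUDA_CALLABLE mat_t<2,2,float> inverse2_demo()
{
    mat_t<2,2,float> m(4.f, 7.f, 2.f, 6.f);
    return inverse(m); // (0.6, -0.7, -0.2, 0.4)
}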

template<typename Type>
inline CUDA_CALLABLE mat_t<3,3,Type> inverse(const mat_t<3,3,Type>& m)
{
    Type det = determinant(m);

    if (det != Type(0.0f))
    {
        mat_t<3,3,Type> b;

        b.data[0][0] = m.data[1][1]*m.data[2][2] - m.data[1][2]*m.data[2][1];
        b.data[1][0] = m.data[1][2]*m.data[2][0] - m.data[1][0]*m.data[2][2];
        b.data[2][0] = m.data[1][0]*m.data[2][1] - m.data[1][1]*m.data[2][0];

        b.data[0][1] = m.data[0][2]*m.data[2][1] - m.data[0][1]*m.data[2][2];
        b.data[1][1] = m.data[0][0]*m.data[2][2] - m.data[0][2]*m.data[2][0];
        b.data[2][1] = m.data[0][1]*m.data[2][0] - m.data[0][0]*m.data[2][1];

        b.data[0][2] = m.data[0][1]*m.data[1][2] - m.data[0][2]*m.data[1][1];
        b.data[1][2] = m.data[0][2]*m.data[1][0] - m.data[0][0]*m.data[1][2];
        b.data[2][2] = m.data[0][0]*m.data[1][1] - m.data[0][1]*m.data[1][0];

        return b*(Type(1.0f)/det);
    }
    else
    {
        return mat_t<3,3,Type>();
    }
}

template<typename Type>
inline CUDA_CALLABLE mat_t<4,4,Type> inverse(const mat_t<4,4,Type>& m)
{
    // adapted from USD GfMatrix4f::Inverse()
    Type x00, x01, x02, x03;
    Type x10, x11, x12, x13;
    Type x20, x21, x22, x23;
    Type x30, x31, x32, x33;
    double y01, y02, y03, y12, y13, y23;
    Type z00, z10, z20, z30;
    Type z01, z11, z21, z31;
    double z02, z03, z12, z13, z22, z23, z32, z33;

    // Pickle 1st two columns of matrix into registers
    x00 = m.data[0][0];
    x01 = m.data[0][1];
    x10 = m.data[1][0];
    x11 = m.data[1][1];
    x20 = m.data[2][0];
    x21 = m.data[2][1];
    x30 = m.data[3][0];
    x31 = m.data[3][1];

    // Compute all six 2x2 determinants of 1st two columns
    y01 = x00*x11 - x10*x01;
    y02 = x00*x21 - x20*x01;
    y03 = x00*x31 - x30*x01;
    y12 = x10*x21 - x20*x11;
    y13 = x10*x31 - x30*x11;
    y23 = x20*x31 - x30*x21;

    // Pickle 2nd two columns of matrix into registers
    x02 = m.data[0][2];
    x03 = m.data[0][3];
    x12 = m.data[1][2];
    x13 = m.data[1][3];
    x22 = m.data[2][2];
    x23 = m.data[2][3];
    x32 = m.data[3][2];
    x33 = m.data[3][3];

    // Compute all 3x3 cofactors for 2nd two columns
    z33 = x02*y12 - x12*y02 + x22*y01;
    z23 = x12*y03 - x32*y01 - x02*y13;
    z13 = x02*y23 - x22*y03 + x32*y02;
    z03 = x22*y13 - x32*y12 - x12*y23;
    z32 = x13*y02 - x23*y01 - x03*y12;
    z22 = x03*y13 - x13*y03 + x33*y01;
    z12 = x23*y03 - x33*y02 - x03*y23;
    z02 = x13*y23 - x23*y13 + x33*y12;

    // Compute all six 2x2 determinants of 2nd two columns
    y01 = x02*x13 - x12*x03;
    y02 = x02*x23 - x22*x03;
    y03 = x02*x33 - x32*x03;
    y12 = x12*x23 - x22*x13;
    y13 = x12*x33 - x32*x13;
    y23 = x22*x33 - x32*x23;

    // Compute all 3x3 cofactors for 1st two columns
    z30 = x11*y02 - x21*y01 - x01*y12;
    z20 = x01*y13 - x11*y03 + x31*y01;
    z10 = x21*y03 - x31*y02 - x01*y23;
    z00 = x11*y23 - x21*y13 + x31*y12;
    z31 = x00*y12 - x10*y02 + x20*y01;
    z21 = x10*y03 - x30*y01 - x00*y13;
    z11 = x00*y23 - x20*y03 + x30*y02;
    z01 = x20*y13 - x30*y12 - x10*y23;

    // compute 4x4 determinant & its reciprocal
    double det = x30*z30 + x20*z20 + x10*z10 + x00*z00;

    if(fabs(det) > kEps)
    {
        mat_t<4,4,Type> invm;

        double rcp = 1.0 / det;

        // Multiply all 3x3 cofactors by reciprocal & transpose
        invm.data[0][0] = Type(z00*rcp);
        invm.data[0][1] = Type(z10*rcp);
        invm.data[1][0] = Type(z01*rcp);
        invm.data[0][2] = Type(z20*rcp);
        invm.data[2][0] = Type(z02*rcp);
        invm.data[0][3] = Type(z30*rcp);
        invm.data[3][0] = Type(z03*rcp);
        invm.data[1][1] = Type(z11*rcp);
        invm.data[1][2] = Type(z21*rcp);
        invm.data[2][1] = Type(z12*rcp);
        invm.data[1][3] = Type(z31*rcp);
        invm.data[3][1] = Type(z13*rcp);
        invm.data[2][2] = Type(z22*rcp);
        invm.data[2][3] = Type(z32*rcp);
        invm.data[3][2] = Type(z23*rcp);
        invm.data[3][3] = Type(z33*rcp);

        return invm;
    }
    else
    {
        return mat_t<4,4,Type>();
    }
}
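
// Round-trip sketch (illustrative, not part of the upstream file): for a
// well-conditioned matrix, mul(m, inverse(m)) reproduces the identity up to
// float rounding; a cheap self-check for the cofactor expansion above.
inline CUDA_CALLABLE mat_t<4,4,float> inverse4_roundtrip_demo()
{
    mat_t<4,4,float> m(2.f, 0.f, 0.f, 1.f,
                       0.f, 3.f, 0.f, 2.f,
                       0.f, 0.f, 4.f, 3.f,
                       0.f, 0.f, 0.f, 1.f);
    return mul(m, inverse(m)); // approximately the 4x4 identity
}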

template<unsigned Rows,typename Type>
inline CUDA_CALLABLE mat_t<Rows,Rows,Type> diag(const vec_t<Rows,Type>& d)
{
    mat_t<Rows,Rows,Type> ret(Type(0));
    for (unsigned i=0; i < Rows; ++i)
    {
        ret.data[i][i] = d[i];
    }
    return ret;
}

template<unsigned Rows,unsigned Cols,typename Type>
inline CUDA_CALLABLE mat_t<Rows,Cols,Type> outer(const vec_t<Rows,Type>& a, const vec_t<Cols,Type>& b)
{
    // col 0 = a * b[0] etc...
    mat_t<Rows,Cols,Type> ret;
    for (unsigned row=0; row < Rows; ++row)
    {
        for (unsigned col=0; col < Cols; ++col) // columns
        {
            ret.data[row][col] = a[row] * b[col];
        }
    }
    return ret;
}

template<unsigned Cols,typename Type>
inline CUDA_CALLABLE vec_t<Cols,Type> outer(Type a, const vec_t<Cols,Type>& b)
{
    return mul(a, b);
}

template<unsigned Rows,typename Type>
inline CUDA_CALLABLE vec_t<Rows,Type> outer(const vec_t<Rows,Type>& a, Type b)
{
    return mul(a, b);
}

template<typename Type>
inline CUDA_CALLABLE mat_t<3,3,Type> skew(const vec_t<3,Type>& a)
{
    mat_t<3,3,Type> out(
        Type(0), -a[2],   a[1],
        a[2],    Type(0), -a[0],
        -a[1],   a[0],    Type(0)
    );

    return out;
}
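
// Identity sketch (illustrative, not part of the upstream file): skew(a) is
// the matrix form of the cross product, i.e. mul(skew(a), b) == cross(a, b).
inline CUDA_CALLABLE vec_t<3,float> skew_demo()
{
    vec_t<3,float> a(1.f, 2.f, 3.f);
    vec_t<3,float> b(4.f, 5.f, 6.f);
    return mul(skew(a), b); // equals cross(a, b) = (-3, 6, -3)
}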

template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE mat_t<Rows,Cols,Type> cw_mul(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b)
{
    mat_t<Rows,Cols,Type> t;
    for (unsigned i=0; i < Rows; ++i)
    {
        for (unsigned j=0; j < Cols; ++j)
        {
            t.data[i][j] = a.data[i][j] * b.data[i][j];
        }
    }

    return t;
}

template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE mat_t<Rows,Cols,Type> cw_div(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b)
{
    mat_t<Rows,Cols,Type> t;
    for (unsigned i=0; i < Rows; ++i)
    {
        for (unsigned j=0; j < Cols; ++j)
        {
            t.data[i][j] = a.data[i][j] / b.data[i][j];
        }
    }

    return t;
}

template<typename Type>
inline CUDA_CALLABLE vec_t<3,Type> transform_point(const mat_t<4,4,Type>& m, const vec_t<3,Type>& v)
{
    vec_t<4,Type> out = mul(m, vec_t<4,Type>(v[0], v[1], v[2], Type(1)));
    return vec_t<3,Type>(out[0], out[1], out[2]);
}

template<typename Type>
inline CUDA_CALLABLE vec_t<3,Type> transform_vector(const mat_t<4,4,Type>& m, const vec_t<3,Type>& v)
{
    vec_t<4,Type> out = mul(m, vec_t<4,Type>(v[0], v[1], v[2], 0.f));
    return vec_t<3,Type>(out[0], out[1], out[2]);
}
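
// Sketch (illustrative, not part of the upstream file): transform_point()
// appends w = 1 so the translation column of m applies, while
// transform_vector() appends w = 0 so only the upper-left 3x3 part applies.
inline CUDA_CALLABLE vec_t<3,float> transform_demo()
{
    // a pure translation by (1, 2, 3), stored row-major
    mat_t<4,4,float> m(1.f, 0.f, 0.f, 1.f,
                       0.f, 1.f, 0.f, 2.f,
                       0.f, 0.f, 1.f, 3.f,
                       0.f, 0.f, 0.f, 1.f);
    vec_t<3,float> v(1.f, 1.f, 1.f);
    vec_t<3,float> d = transform_vector(m, v); // (1, 1, 1): translation ignored
    (void)d;
    return transform_point(m, v);              // (2, 3, 4): translation applied
}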

template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE void adj_extract(const mat_t<Rows,Cols,Type>& m, int row, mat_t<Rows,Cols,Type>& adj_m, int& adj_row, const vec_t<Cols,Type>& adj_ret)
{
    for( unsigned col=0; col < Cols; ++col )
        adj_m.data[row][col] += adj_ret[col];
}

template<unsigned Rows, unsigned Cols, typename Type>
inline void CUDA_CALLABLE adj_extract(const mat_t<Rows,Cols,Type>& m, int row, int col, mat_t<Rows,Cols,Type>& adj_m, int& adj_row, int& adj_col, Type adj_ret)
{
#ifndef NDEBUG
    // bounds checks match the forward extract(): row/col equal to the
    // dimension are also out of range
    if (row < 0 || row >= Rows)
    {
        printf("mat row index %d out of bounds at %s %d\n", row, __FILE__, __LINE__);
        assert(0);
    }
    if (col < 0 || col >= Cols)
    {
        printf("mat col index %d out of bounds at %s %d\n", col, __FILE__, __LINE__);
        assert(0);
    }
#endif
    adj_m.data[row][col] += adj_ret;
}

template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE void adj_outer(const vec_t<Rows,Type>& a, const vec_t<Cols,Type>& b, vec_t<Rows,Type>& adj_a, vec_t<Cols,Type>& adj_b, const mat_t<Rows,Cols,Type>& adj_ret)
{
    adj_a += mul(adj_ret, b);
    adj_b += mul(transpose(adj_ret), a);
}

template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE void adj_add(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b, mat_t<Rows,Cols,Type>& adj_a, mat_t<Rows,Cols,Type>& adj_b, const mat_t<Rows,Cols,Type>& adj_ret)
{
    for (unsigned i=0; i < Rows; ++i)
    {
        for (unsigned j=0; j < Cols; ++j)
        {
            adj_a.data[i][j] += adj_ret.data[i][j];
            adj_b.data[i][j] += adj_ret.data[i][j];
        }
    }
}

template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE void adj_sub(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b, mat_t<Rows,Cols,Type>& adj_a, mat_t<Rows,Cols,Type>& adj_b, const mat_t<Rows,Cols,Type>& adj_ret)
{
    for (unsigned i=0; i < Rows; ++i)
    {
        for (unsigned j=0; j < Cols; ++j)
        {
            adj_a.data[i][j] += adj_ret.data[i][j];
            adj_b.data[i][j] -= adj_ret.data[i][j];
        }
    }
}

template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE void adj_div(const mat_t<Rows,Cols,Type>& a, Type s, mat_t<Rows,Cols,Type>& adj_a, Type& adj_s, const mat_t<Rows,Cols,Type>& adj_ret)
{
    adj_s -= tensordot(a , adj_ret)/ (s * s); // - a / s^2

    for (unsigned i=0; i < Rows; ++i)
    {
        for (unsigned j=0; j < Cols; ++j)
        {
            adj_a.data[i][j] += adj_ret.data[i][j] / s;
        }
    }
}

template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE void adj_div(Type s, const mat_t<Rows,Cols,Type>& a, Type& adj_s, mat_t<Rows,Cols,Type>& adj_a, const mat_t<Rows,Cols,Type>& adj_ret)
{
    // ret = s / a (component-wise), so d(ret)/ds = 1/a and d(ret)/da = -s/a^2
    for (unsigned i=0; i < Rows; ++i)
    {
        for (unsigned j=0; j < Cols; ++j)
        {
            adj_s += adj_ret.data[i][j] / a.data[i][j];
            adj_a.data[i][j] -= adj_ret.data[i][j] * s / (a.data[i][j] * a.data[i][j]);
        }
    }
}

template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE void adj_mul(const mat_t<Rows,Cols,Type>& a, Type b, mat_t<Rows,Cols,Type>& adj_a, Type& adj_b, const mat_t<Rows,Cols,Type>& adj_ret)
{
    for (unsigned i=0; i < Rows; ++i)
    {
        for (unsigned j=0; j < Cols; ++j)
        {
            adj_a.data[i][j] += b*adj_ret.data[i][j];
            adj_b += a.data[i][j]*adj_ret.data[i][j];
        }
    }
}

template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE void adj_mul(Type b, const mat_t<Rows,Cols,Type>& a, Type& adj_b, mat_t<Rows,Cols,Type>& adj_a, const mat_t<Rows,Cols,Type>& adj_ret)
{
    adj_mul(a, b, adj_a, adj_b, adj_ret);
}

template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE void adj_ddot(mat_t<Rows,Cols,Type> a, mat_t<Rows,Cols,Type> b, mat_t<Rows,Cols,Type>& adj_a, mat_t<Rows,Cols,Type>& adj_b, const Type adj_ret)
{
    adj_a += b*adj_ret;
    adj_b += a*adj_ret;
}

template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE void adj_mul(const mat_t<Rows,Cols,Type>& a, const vec_t<Cols,Type>& b, mat_t<Rows,Cols,Type>& adj_a, vec_t<Cols,Type>& adj_b, const vec_t<Rows,Type>& adj_ret)
{
    adj_a += outer(adj_ret, b);
    adj_b += mul(transpose(a), adj_ret);
}

template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE void adj_mul(const vec_t<Rows,Type>& b, const mat_t<Rows,Cols,Type>& a, vec_t<Rows,Type>& adj_b, mat_t<Rows,Cols,Type>& adj_a, const vec_t<Cols,Type>& adj_ret)
{
    adj_a += outer(b, adj_ret);
    adj_b += mul(adj_ret, transpose(a));
}

template<unsigned Rows, unsigned Cols, unsigned ColsOut, typename Type>
inline CUDA_CALLABLE void adj_mul(const mat_t<Rows,Cols,Type>& a, const mat_t<Cols,ColsOut,Type>& b, mat_t<Rows,Cols,Type>& adj_a, mat_t<Cols,ColsOut,Type>& adj_b, const mat_t<Rows,ColsOut,Type>& adj_ret)
{
    adj_a += mul(adj_ret, transpose(b));
    adj_b += mul(transpose(a), adj_ret);
}
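
// Shape sketch (illustrative, not part of the upstream file; assumes the
// constant-fill scalar constructor whose adjoint appears later in this
// header): for C = A*B the rules above read adj_A += adj_C*B^T and
// adj_B += A^T*adj_C, which doubles as a dimension check, since
// (Rows x ColsOut)*(ColsOut x Cols) and (Cols x Rows)*(Rows x ColsOut)
// are the only products that type-check.
inline CUDA_CALLABLE void adj_mul_shape_demo()
{
    mat_t<2,3,float> a(0.f);
    mat_t<3,4,float> b(0.f);
    mat_t<2,3,float> adj_a(0.f);
    mat_t<3,4,float> adj_b(0.f);
    mat_t<2,4,float> adj_ret(1.f);
    adj_mul(a, b, adj_a, adj_b, adj_ret); // accumulates into adj_a and adj_b
}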

template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE void adj_transpose(const mat_t<Rows,Cols,Type>& a, mat_t<Rows,Cols,Type>& adj_a, const mat_t<Cols,Rows,Type>& adj_ret)
{
    adj_a += transpose(adj_ret);
}

template<unsigned Rows, typename Type>
inline CUDA_CALLABLE void adj_trace(const mat_t<Rows,Rows,Type>& m, mat_t<Rows,Rows,Type>& adj_m, Type adj_ret)
{
    for (unsigned i=0; i < Rows; ++i)
        adj_m.data[i][i] += adj_ret;
}

template<unsigned Rows, typename Type>
inline CUDA_CALLABLE void adj_diag(const vec_t<Rows,Type>& d, vec_t<Rows,Type>& adj_d, const mat_t<Rows,Rows,Type>& adj_ret)
{
    for (unsigned i=0; i < Rows; ++i)
        adj_d[i] += adj_ret.data[i][i];
}

template<unsigned Rows, typename Type>
inline CUDA_CALLABLE void adj_get_diag(const mat_t<Rows,Rows,Type>& m, mat_t<Rows,Rows,Type>& adj_m, const vec_t<Rows,Type>& adj_ret)
{
    for (unsigned i=0; i < Rows; ++i)
        adj_m.data[i][i] += adj_ret[i];
}

template<typename Type>
inline CUDA_CALLABLE void adj_determinant(const mat_t<2,2,Type>& m, mat_t<2,2,Type>& adj_m, Type adj_ret)
{
    adj_m.data[0][0] += m.data[1][1]*adj_ret;
    adj_m.data[1][1] += m.data[0][0]*adj_ret;
    adj_m.data[0][1] -= m.data[1][0]*adj_ret;
    adj_m.data[1][0] -= m.data[0][1]*adj_ret;
}

template<typename Type>
inline CUDA_CALLABLE void adj_determinant(const mat_t<3,3,Type>& m, mat_t<3,3,Type>& adj_m, Type adj_ret)
{
    (vec_t<3,Type>&)adj_m.data[0] += cross(m.get_row(1), m.get_row(2))*adj_ret;
    (vec_t<3,Type>&)adj_m.data[1] += cross(m.get_row(2), m.get_row(0))*adj_ret;
    (vec_t<3,Type>&)adj_m.data[2] += cross(m.get_row(0), m.get_row(1))*adj_ret;
}

template<typename Type>
inline CUDA_CALLABLE void adj_determinant(const mat_t<4,4,Type>& m, mat_t<4,4,Type>& adj_m, Type adj_ret)
{
    // adapted from USD GfMatrix4f::Inverse()
    Type x00, x01, x02, x03;
    Type x10, x11, x12, x13;
    Type x20, x21, x22, x23;
    Type x30, x31, x32, x33;
    double y01, y02, y03, y12, y13, y23;
    Type z00, z10, z20, z30;
    Type z01, z11, z21, z31;
    double z02, z03, z12, z13, z22, z23, z32, z33;

    // Pickle 1st two columns of matrix into registers
    x00 = m.data[0][0];
    x01 = m.data[0][1];
    x10 = m.data[1][0];
    x11 = m.data[1][1];
    x20 = m.data[2][0];
    x21 = m.data[2][1];
    x30 = m.data[3][0];
    x31 = m.data[3][1];

    // Compute all six 2x2 determinants of 1st two columns
    y01 = x00*x11 - x10*x01;
    y02 = x00*x21 - x20*x01;
    y03 = x00*x31 - x30*x01;
    y12 = x10*x21 - x20*x11;
    y13 = x10*x31 - x30*x11;
    y23 = x20*x31 - x30*x21;

    // Pickle 2nd two columns of matrix into registers
    x02 = m.data[0][2];
    x03 = m.data[0][3];
    x12 = m.data[1][2];
    x13 = m.data[1][3];
    x22 = m.data[2][2];
    x23 = m.data[2][3];
    x32 = m.data[3][2];
    x33 = m.data[3][3];

    // Compute all 3x3 cofactors for 2nd two columns
    z33 = x02*y12 - x12*y02 + x22*y01;
    z23 = x12*y03 - x32*y01 - x02*y13;
    z13 = x02*y23 - x22*y03 + x32*y02;
    z03 = x22*y13 - x32*y12 - x12*y23;
    z32 = x13*y02 - x23*y01 - x03*y12;
    z22 = x03*y13 - x13*y03 + x33*y01;
    z12 = x23*y03 - x33*y02 - x03*y23;
    z02 = x13*y23 - x23*y13 + x33*y12;

    // Compute all six 2x2 determinants of 2nd two columns
    y01 = x02*x13 - x12*x03;
    y02 = x02*x23 - x22*x03;
    y03 = x02*x33 - x32*x03;
    y12 = x12*x23 - x22*x13;
    y13 = x12*x33 - x32*x13;
    y23 = x22*x33 - x32*x23;

    // Compute all 3x3 cofactors for 1st two columns
    z30 = x11*y02 - x21*y01 - x01*y12;
    z20 = x01*y13 - x11*y03 + x31*y01;
    z10 = x21*y03 - x31*y02 - x01*y23;
    z00 = x11*y23 - x21*y13 + x31*y12;
    z31 = x00*y12 - x10*y02 + x20*y01;
    z21 = x10*y03 - x30*y01 - x00*y13;
    z11 = x00*y23 - x20*y03 + x30*y02;
    z01 = x20*y13 - x30*y12 - x10*y23;

    // Multiply all 3x3 cofactors by adjoint & transpose
    adj_m.data[0][0] += Type(z00*adj_ret);
    adj_m.data[1][0] += Type(z10*adj_ret);
    adj_m.data[0][1] += Type(z01*adj_ret);
    adj_m.data[2][0] += Type(z20*adj_ret);
    adj_m.data[0][2] += Type(z02*adj_ret);
    adj_m.data[3][0] += Type(z30*adj_ret);
    adj_m.data[0][3] += Type(z03*adj_ret);
    adj_m.data[1][1] += Type(z11*adj_ret);
    adj_m.data[2][1] += Type(z21*adj_ret);
    adj_m.data[1][2] += Type(z12*adj_ret);
    adj_m.data[3][1] += Type(z31*adj_ret);
    adj_m.data[1][3] += Type(z13*adj_ret);
    adj_m.data[2][2] += Type(z22*adj_ret);
    adj_m.data[3][2] += Type(z32*adj_ret);
    adj_m.data[2][3] += Type(z23*adj_ret);
    adj_m.data[3][3] += Type(z33*adj_ret);
}

template<unsigned Rows, typename Type>
inline CUDA_CALLABLE void adj_inverse(const mat_t<Rows,Rows,Type>& m, mat_t<Rows,Rows,Type>& ret, mat_t<Rows,Rows,Type>& adj_m, const mat_t<Rows,Rows,Type>& adj_ret)
{
    // todo: how to cache this from the forward pass?
    mat_t<Rows,Rows,Type> invt = transpose(ret);

    // see https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf 2.2.3
    adj_m -= mul(mul(invt, adj_ret), invt);
}
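
// Numeric sketch (illustrative, not part of the upstream file): the update
// above implements d(A^-1) = -A^-1 dA A^-1, i.e. adj_A -= A^-T adj_Y A^-T with
// Y = inverse(A). For A = diag(2, 4), Y = diag(0.5, 0.25), and seeding adj_Y
// with all ones gives adj_A = [[-0.25, -0.125], [-0.125, -0.0625]].
inline CUDA_CALLABLE mat_t<2,2,float> adj_inverse_demo()
{
    mat_t<2,2,float> m(2.f, 0.f, 0.f, 4.f);
    mat_t<2,2,float> y = inverse(m);
    mat_t<2,2,float> adj_m(0.f);
    mat_t<2,2,float> adj_y(1.f); // seed: all-ones upstream gradient
    adj_inverse(m, y, adj_m, adj_y);
    return adj_m;
}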

template<typename Type>
inline CUDA_CALLABLE void adj_transform_point(const mat_t<4,4,Type>& m, const vec_t<3,Type>& v, mat_t<4,4,Type>& adj_m, vec_t<3,Type>& adj_v, const vec_t<3,Type>& adj_ret)
{
    vec_t<4,Type> out = vec_t<4,Type>(v[0], v[1], v[2], 1.f);
    adj_m = add(adj_m, transpose(mat_t<4,4,Type>(adj_ret[0] * out, adj_ret[1] * out, adj_ret[2] * out, vec_t<4,Type>())));
    adj_v[0] += dot(vec_t<3,Type>(m.data[0][0], m.data[1][0], m.data[2][0]), adj_ret);
    adj_v[1] += dot(vec_t<3,Type>(m.data[0][1], m.data[1][1], m.data[2][1]), adj_ret);
    adj_v[2] += dot(vec_t<3,Type>(m.data[0][2], m.data[1][2], m.data[2][2]), adj_ret);
}

template<typename Type>
inline CUDA_CALLABLE void adj_transform_vector(const mat_t<4,4,Type>& m, const vec_t<3,Type>& v, mat_t<4,4,Type>& adj_m, vec_t<3,Type>& adj_v, const vec_t<3,Type>& adj_ret)
{
    vec_t<4,Type> out = vec_t<4,Type>(v[0], v[1], v[2], 0.f);
    adj_m = add(adj_m, transpose(mat_t<4,4,Type>(adj_ret[0] * out, adj_ret[1] * out, adj_ret[2] * out, vec_t<4,Type>())));
    adj_v[0] += dot(vec_t<3,Type>(m.data[0][0], m.data[1][0], m.data[2][0]), adj_ret);
    adj_v[1] += dot(vec_t<3,Type>(m.data[0][1], m.data[1][1], m.data[2][1]), adj_ret);
    adj_v[2] += dot(vec_t<3,Type>(m.data[0][2], m.data[1][2], m.data[2][2]), adj_ret);
}

template<typename Type>
inline CUDA_CALLABLE void adj_skew(const vec_t<3,Type>& a, vec_t<3,Type>& adj_a, const mat_t<3,3,Type>& adj_ret)
{
    adj_a[0] += adj_ret.data[2][1] - adj_ret.data[1][2];
    adj_a[1] += adj_ret.data[0][2] - adj_ret.data[2][0];
    adj_a[2] += adj_ret.data[1][0] - adj_ret.data[0][1];
}

template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE void adj_cw_mul(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b, mat_t<Rows,Cols,Type>& adj_a, mat_t<Rows,Cols,Type>& adj_b, const mat_t<Rows,Cols,Type>& adj_ret)
{
    adj_a += cw_mul(b, adj_ret);
    adj_b += cw_mul(a, adj_ret);
}

template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE void adj_cw_div(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b, mat_t<Rows,Cols,Type>& ret, mat_t<Rows,Cols,Type>& adj_a, mat_t<Rows,Cols,Type>& adj_b, const mat_t<Rows,Cols,Type>& adj_ret)
{
    adj_a += cw_div(adj_ret, b);
    adj_b -= cw_mul(adj_ret, cw_div(ret, b));
}

// adjoint for the constant constructor:
template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE void adj_mat_t(Type s, Type& adj_s, const mat_t<Rows, Cols, Type>& adj_ret)
{
    for (unsigned i=0; i < Rows; ++i)
    {
        for (unsigned j=0; j < Cols; ++j)
        {
            adj_s += adj_ret.data[i][j];
        }
    }
}

// adjoint for the casting constructor:
template<unsigned Rows, unsigned Cols, typename Type, typename OtherType>
inline CUDA_CALLABLE void adj_mat_t(const mat_t<Rows, Cols, OtherType>& other, mat_t<Rows, Cols, OtherType>& adj_other, const mat_t<Rows, Cols, Type>& adj_ret)
{
    for (unsigned i=0; i < Rows; ++i)
    {
        for (unsigned j=0; j < Cols; ++j)
        {
            adj_other.data[i][j] += adj_ret.data[i][j];
        }
    }
}

// adjoint for the initializer_array scalar constructor:
template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE void adj_mat_t(const initializer_array<Rows * Cols, Type> &cmps, const initializer_array<Rows * Cols, Type*> &adj_cmps, const mat_t<Rows, Cols, Type>& adj_ret)
{
    for (unsigned i=0; i < Rows; ++i)
    {
        for (unsigned j=0; j < Cols; ++j)
        {
            *adj_cmps[i * Cols + j] += adj_ret.data[i][j];
        }
    }
}

template<typename Type>
inline CUDA_CALLABLE void adj_mat_t(Type m00, Type m01, Type m10, Type m11, Type& adj_m00, Type& adj_m01, Type& adj_m10, Type& adj_m11, const mat_t<2, 2, Type>& adj_ret)
{
    adj_m00 += adj_ret.data[0][0];
    adj_m01 += adj_ret.data[0][1];
    adj_m10 += adj_ret.data[1][0];
    adj_m11 += adj_ret.data[1][1];
}

template<typename Type>
inline CUDA_CALLABLE void adj_mat_t(Type m00, Type m01, Type m02,
                                    Type m10, Type m11, Type m12,
                                    Type m20, Type m21, Type m22,
                                    Type& a00, Type& a01, Type& a02,
                                    Type& a10, Type& a11, Type& a12,
                                    Type& a20, Type& a21, Type& a22,
                                    const mat_t<3, 3, Type>& adj_ret)
{
    a00 += adj_ret.data[0][0];
    a01 += adj_ret.data[0][1];
    a02 += adj_ret.data[0][2];
    a10 += adj_ret.data[1][0];
    a11 += adj_ret.data[1][1];
    a12 += adj_ret.data[1][2];
    a20 += adj_ret.data[2][0];
    a21 += adj_ret.data[2][1];
    a22 += adj_ret.data[2][2];
}


template<typename Type>
inline CUDA_CALLABLE void adj_mat_t(Type m00, Type m01, Type m02, Type m03,
                                    Type m10, Type m11, Type m12, Type m13,
                                    Type m20, Type m21, Type m22, Type m23,
                                    Type m30, Type m31, Type m32, Type m33,
                                    Type& a00, Type& a01, Type& a02, Type& a03,
                                    Type& a10, Type& a11, Type& a12, Type& a13,
                                    Type& a20, Type& a21, Type& a22, Type& a23,
                                    Type& a30, Type& a31, Type& a32, Type& a33,
                                    const mat_t<4, 4, Type>& adj_ret)
{
    a00 += adj_ret.data[0][0];
    a01 += adj_ret.data[0][1];
    a02 += adj_ret.data[0][2];
    a03 += adj_ret.data[0][3];

    a10 += adj_ret.data[1][0];
    a11 += adj_ret.data[1][1];
    a12 += adj_ret.data[1][2];
    a13 += adj_ret.data[1][3];

    a20 += adj_ret.data[2][0];
    a21 += adj_ret.data[2][1];
    a22 += adj_ret.data[2][2];
    a23 += adj_ret.data[2][3];

    a30 += adj_ret.data[3][0];
    a31 += adj_ret.data[3][1];
    a32 += adj_ret.data[3][2];
    a33 += adj_ret.data[3][3];
}


// adjoint for the initializer_array vector constructor:
template<unsigned Rows, unsigned Cols, typename Type>
inline CUDA_CALLABLE void adj_mat_t(const initializer_array<Cols, vec_t<Rows,Type> > &cmps, const initializer_array<Cols, vec_t<Rows,Type>* > &adj_cmps, const mat_t<Rows, Cols, Type>& adj_ret)
{
    for (unsigned j=0; j < Cols; ++j)
    {
        for (unsigned i=0; i < Rows; ++i)
        {
            (*adj_cmps[j])[i] += adj_ret.data[i][j];
        }
    }
}

template<typename Type>
inline CUDA_CALLABLE void adj_mat_t(const vec_t<2,Type> &cmps0, const vec_t<2,Type> &cmps1, vec_t<2,Type> &adj_cmps0, vec_t<2,Type> &adj_cmps1, const mat_t<2, 2, Type>& adj_ret)
{
    for (unsigned i=0; i < 2; ++i)
    {
        adj_cmps0[i] += adj_ret.data[i][0];
        adj_cmps1[i] += adj_ret.data[i][1];
    }
}

template<typename Type>
inline CUDA_CALLABLE void adj_mat_t(const vec_t<3,Type> &cmps0, const vec_t<3,Type> &cmps1, const vec_t<3,Type> &cmps2, vec_t<3,Type> &adj_cmps0, vec_t<3,Type> &adj_cmps1, vec_t<3,Type> &adj_cmps2, const mat_t<3, 3, Type>& adj_ret)
{
    for (unsigned i=0; i < 3; ++i)
    {
        adj_cmps0[i] += adj_ret.data[i][0];
        adj_cmps1[i] += adj_ret.data[i][1];
        adj_cmps2[i] += adj_ret.data[i][2];
    }
}

template<typename Type>
inline CUDA_CALLABLE void adj_mat_t(const vec_t<4,Type> &cmps0, const vec_t<4,Type> &cmps1, const vec_t<4,Type> &cmps2, const vec_t<4,Type> &cmps3, vec_t<4,Type> &adj_cmps0, vec_t<4,Type> &adj_cmps1, vec_t<4,Type> &adj_cmps2, vec_t<4,Type> &adj_cmps3, const mat_t<4, 4, Type>& adj_ret)
{
    for (unsigned i=0; i < 4; ++i)
    {
        adj_cmps0[i] += adj_ret.data[i][0];
        adj_cmps1[i] += adj_ret.data[i][1];
        adj_cmps2[i] += adj_ret.data[i][2];
        adj_cmps3[i] += adj_ret.data[i][3];
    }
}

template<unsigned Rows, unsigned Cols, typename Type>
CUDA_CALLABLE inline mat_t<Rows, Cols, Type> lerp(const mat_t<Rows, Cols, Type>& a, const mat_t<Rows, Cols, Type>& b, Type t)
{
    return a*(Type(1)-t) + b*t;
}

template<unsigned Rows, unsigned Cols, typename Type>
CUDA_CALLABLE inline void adj_lerp(const mat_t<Rows, Cols, Type>& a, const mat_t<Rows, Cols, Type>& b, Type t, mat_t<Rows, Cols, Type>& adj_a, mat_t<Rows, Cols, Type>& adj_b, Type& adj_t, const mat_t<Rows, Cols, Type>& adj_ret)
{
    adj_a += adj_ret*(Type(1)-t);
    adj_b += adj_ret*t;
    adj_t += tensordot(b, adj_ret) - tensordot(a, adj_ret);
}
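
// Sketch (illustrative, not part of the upstream file): lerp blends the two
// matrices component-wise, and adj_lerp's update for t is the directional
// derivative tensordot(b - a, adj_ret), written above as the difference of
// two tensordot() calls.
inline CUDA_CALLABLE mat_t<2,2,float> lerp_demo()
{
    mat_t<2,2,float> a(0.f, 0.f, 0.f, 0.f);
    mat_t<2,2,float> b(2.f, 4.f, 6.f, 8.f);
    return lerp(a, b, 0.5f); // halfway blend: (1, 2, 3, 4)
}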

// for integral types we do not accumulate gradients
template<unsigned Rows, unsigned Cols> CUDA_CALLABLE inline void adj_atomic_add(mat_t<Rows, Cols, int8>* buf, const mat_t<Rows, Cols, int8> &value) { }
template<unsigned Rows, unsigned Cols> CUDA_CALLABLE inline void adj_atomic_add(mat_t<Rows, Cols, uint8>* buf, const mat_t<Rows, Cols, uint8> &value) { }
template<unsigned Rows, unsigned Cols> CUDA_CALLABLE inline void adj_atomic_add(mat_t<Rows, Cols, int16>* buf, const mat_t<Rows, Cols, int16> &value) { }
template<unsigned Rows, unsigned Cols> CUDA_CALLABLE inline void adj_atomic_add(mat_t<Rows, Cols, uint16>* buf, const mat_t<Rows, Cols, uint16> &value) { }
template<unsigned Rows, unsigned Cols> CUDA_CALLABLE inline void adj_atomic_add(mat_t<Rows, Cols, int32>* buf, const mat_t<Rows, Cols, int32> &value) { }
template<unsigned Rows, unsigned Cols> CUDA_CALLABLE inline void adj_atomic_add(mat_t<Rows, Cols, uint32>* buf, const mat_t<Rows, Cols, uint32> &value) { }
template<unsigned Rows, unsigned Cols> CUDA_CALLABLE inline void adj_atomic_add(mat_t<Rows, Cols, int64>* buf, const mat_t<Rows, Cols, int64> &value) { }
template<unsigned Rows, unsigned Cols> CUDA_CALLABLE inline void adj_atomic_add(mat_t<Rows, Cols, uint64>* buf, const mat_t<Rows, Cols, uint64> &value) { }

using mat22h = mat_t<2,2,half>;
using mat33h = mat_t<3,3,half>;
using mat44h = mat_t<4,4,half>;

using mat22 = mat_t<2,2,float>;
using mat33 = mat_t<3,3,float>;
using mat44 = mat_t<4,4,float>;

using mat22f = mat_t<2,2,float>;
using mat33f = mat_t<3,3,float>;
using mat44f = mat_t<4,4,float>;

using mat22d = mat_t<2,2,double>;
using mat33d = mat_t<3,3,double>;
using mat44d = mat_t<4,4,double>;

inline CUDA_CALLABLE void adj_mat22(vec2 c0, vec2 c1,
                                    vec2& a0, vec2& a1,
                                    const mat22& adj_ret)
{
    a0 += adj_ret.get_col(0);
    a1 += adj_ret.get_col(1);
}

inline CUDA_CALLABLE void adj_mat22(float m00, float m01, float m10, float m11, float& adj_m00, float& adj_m01, float& adj_m10, float& adj_m11, const mat22& adj_ret)
{
    adj_m00 += adj_ret.data[0][0];
    adj_m01 += adj_ret.data[0][1];
    adj_m10 += adj_ret.data[1][0];
    adj_m11 += adj_ret.data[1][1];
}

inline CUDA_CALLABLE void adj_mat33(vec3 c0, vec3 c1, vec3 c2,
                                    vec3& a0, vec3& a1, vec3& a2,
                                    const mat33& adj_ret)
{
    // column constructor
    a0 += adj_ret.get_col(0);
    a1 += adj_ret.get_col(1);
    a2 += adj_ret.get_col(2);
}

inline CUDA_CALLABLE void adj_mat33(float m00, float m01, float m02,
                                    float m10, float m11, float m12,
                                    float m20, float m21, float m22,
                                    float& a00, float& a01, float& a02,
                                    float& a10, float& a11, float& a12,
                                    float& a20, float& a21, float& a22,
                                    const mat33& adj_ret)
{
    a00 += adj_ret.data[0][0];
    a01 += adj_ret.data[0][1];
    a02 += adj_ret.data[0][2];
    a10 += adj_ret.data[1][0];
    a11 += adj_ret.data[1][1];
    a12 += adj_ret.data[1][2];
    a20 += adj_ret.data[2][0];
    a21 += adj_ret.data[2][1];
    a22 += adj_ret.data[2][2];
}

inline CUDA_CALLABLE void adj_mat44(
    vec4 c0, vec4 c1, vec4 c2, vec4 c3,
    vec4& a0, vec4& a1, vec4& a2, vec4& a3,
    const mat44& adj_ret)
{
    // column constructor
    a0 += adj_ret.get_col(0);
    a1 += adj_ret.get_col(1);
    a2 += adj_ret.get_col(2);
    a3 += adj_ret.get_col(3);
}

inline CUDA_CALLABLE void adj_mat44(float m00, float m01, float m02, float m03,
                                    float m10, float m11, float m12, float m13,
                                    float m20, float m21, float m22, float m23,
                                    float m30, float m31, float m32, float m33,
                                    float& a00, float& a01, float& a02, float& a03,
                                    float& a10, float& a11, float& a12, float& a13,
                                    float& a20, float& a21, float& a22, float& a23,
                                    float& a30, float& a31, float& a32, float& a33,
                                    const mat44& adj_ret)
{
    a00 += adj_ret.data[0][0];
    a01 += adj_ret.data[0][1];
    a02 += adj_ret.data[0][2];
    a03 += adj_ret.data[0][3];

    a10 += adj_ret.data[1][0];
    a11 += adj_ret.data[1][1];
    a12 += adj_ret.data[1][2];
    a13 += adj_ret.data[1][3];

    a20 += adj_ret.data[2][0];
    a21 += adj_ret.data[2][1];
    a22 += adj_ret.data[2][2];
    a23 += adj_ret.data[2][3];

    a30 += adj_ret.data[3][0];
    a31 += adj_ret.data[3][1];
    a32 += adj_ret.data[3][2];
    a33 += adj_ret.data[3][3];
}

} // namespace wp