warp-lang 1.0.1-py3-none-manylinux2014_aarch64.whl → 1.1.0-py3-none-manylinux2014_aarch64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of warp-lang might be problematic.

Files changed (346)
  1. warp/__init__.py +108 -97
  2. warp/__init__.pyi +1 -1
  3. warp/bin/warp-clang.so +0 -0
  4. warp/bin/warp.so +0 -0
  5. warp/build.py +115 -113
  6. warp/build_dll.py +383 -375
  7. warp/builtins.py +3425 -3354
  8. warp/codegen.py +2878 -2792
  9. warp/config.py +40 -36
  10. warp/constants.py +45 -45
  11. warp/context.py +5194 -5102
  12. warp/dlpack.py +442 -442
  13. warp/examples/__init__.py +16 -16
  14. warp/examples/assets/bear.usd +0 -0
  15. warp/examples/assets/bunny.usd +0 -0
  16. warp/examples/assets/cartpole.urdf +110 -110
  17. warp/examples/assets/crazyflie.usd +0 -0
  18. warp/examples/assets/cube.usd +0 -0
  19. warp/examples/assets/nv_ant.xml +92 -92
  20. warp/examples/assets/nv_humanoid.xml +183 -183
  21. warp/examples/assets/quadruped.urdf +267 -267
  22. warp/examples/assets/rocks.nvdb +0 -0
  23. warp/examples/assets/rocks.usd +0 -0
  24. warp/examples/assets/sphere.usd +0 -0
  25. warp/examples/benchmarks/benchmark_api.py +383 -383
  26. warp/examples/benchmarks/benchmark_cloth.py +278 -279
  27. warp/examples/benchmarks/benchmark_cloth_cupy.py +88 -88
  28. warp/examples/benchmarks/benchmark_cloth_jax.py +97 -100
  29. warp/examples/benchmarks/benchmark_cloth_numba.py +146 -142
  30. warp/examples/benchmarks/benchmark_cloth_numpy.py +77 -77
  31. warp/examples/benchmarks/benchmark_cloth_pytorch.py +86 -86
  32. warp/examples/benchmarks/benchmark_cloth_taichi.py +112 -112
  33. warp/examples/benchmarks/benchmark_cloth_warp.py +146 -146
  34. warp/examples/benchmarks/benchmark_launches.py +295 -295
  35. warp/examples/browse.py +29 -28
  36. warp/examples/core/example_dem.py +234 -221
  37. warp/examples/core/example_fluid.py +293 -267
  38. warp/examples/core/example_graph_capture.py +144 -129
  39. warp/examples/core/example_marching_cubes.py +188 -176
  40. warp/examples/core/example_mesh.py +174 -154
  41. warp/examples/core/example_mesh_intersect.py +205 -193
  42. warp/examples/core/example_nvdb.py +176 -169
  43. warp/examples/core/example_raycast.py +105 -89
  44. warp/examples/core/example_raymarch.py +199 -178
  45. warp/examples/core/example_render_opengl.py +185 -141
  46. warp/examples/core/example_sph.py +405 -389
  47. warp/examples/core/example_torch.py +222 -181
  48. warp/examples/core/example_wave.py +263 -249
  49. warp/examples/fem/bsr_utils.py +378 -380
  50. warp/examples/fem/example_apic_fluid.py +407 -391
  51. warp/examples/fem/example_convection_diffusion.py +182 -168
  52. warp/examples/fem/example_convection_diffusion_dg.py +219 -209
  53. warp/examples/fem/example_convection_diffusion_dg0.py +204 -194
  54. warp/examples/fem/example_deformed_geometry.py +177 -159
  55. warp/examples/fem/example_diffusion.py +201 -173
  56. warp/examples/fem/example_diffusion_3d.py +177 -152
  57. warp/examples/fem/example_diffusion_mgpu.py +221 -214
  58. warp/examples/fem/example_mixed_elasticity.py +244 -222
  59. warp/examples/fem/example_navier_stokes.py +259 -243
  60. warp/examples/fem/example_stokes.py +220 -192
  61. warp/examples/fem/example_stokes_transfer.py +265 -249
  62. warp/examples/fem/mesh_utils.py +133 -109
  63. warp/examples/fem/plot_utils.py +292 -287
  64. warp/examples/optim/example_bounce.py +260 -248
  65. warp/examples/optim/example_cloth_throw.py +222 -210
  66. warp/examples/optim/example_diffray.py +566 -535
  67. warp/examples/optim/example_drone.py +864 -835
  68. warp/examples/optim/example_inverse_kinematics.py +176 -169
  69. warp/examples/optim/example_inverse_kinematics_torch.py +185 -170
  70. warp/examples/optim/example_spring_cage.py +239 -234
  71. warp/examples/optim/example_trajectory.py +223 -201
  72. warp/examples/optim/example_walker.py +306 -292
  73. warp/examples/sim/example_cartpole.py +139 -128
  74. warp/examples/sim/example_cloth.py +196 -184
  75. warp/examples/sim/example_granular.py +124 -113
  76. warp/examples/sim/example_granular_collision_sdf.py +197 -185
  77. warp/examples/sim/example_jacobian_ik.py +236 -213
  78. warp/examples/sim/example_particle_chain.py +118 -106
  79. warp/examples/sim/example_quadruped.py +193 -179
  80. warp/examples/sim/example_rigid_chain.py +197 -189
  81. warp/examples/sim/example_rigid_contact.py +189 -176
  82. warp/examples/sim/example_rigid_force.py +127 -126
  83. warp/examples/sim/example_rigid_gyroscopic.py +109 -97
  84. warp/examples/sim/example_rigid_soft_contact.py +134 -124
  85. warp/examples/sim/example_soft_body.py +190 -178
  86. warp/fabric.py +337 -335
  87. warp/fem/__init__.py +60 -27
  88. warp/fem/cache.py +401 -388
  89. warp/fem/dirichlet.py +178 -179
  90. warp/fem/domain.py +262 -263
  91. warp/fem/field/__init__.py +100 -101
  92. warp/fem/field/field.py +148 -149
  93. warp/fem/field/nodal_field.py +298 -299
  94. warp/fem/field/restriction.py +22 -21
  95. warp/fem/field/test.py +180 -181
  96. warp/fem/field/trial.py +183 -183
  97. warp/fem/geometry/__init__.py +15 -19
  98. warp/fem/geometry/closest_point.py +69 -70
  99. warp/fem/geometry/deformed_geometry.py +270 -271
  100. warp/fem/geometry/element.py +744 -744
  101. warp/fem/geometry/geometry.py +184 -186
  102. warp/fem/geometry/grid_2d.py +380 -373
  103. warp/fem/geometry/grid_3d.py +441 -435
  104. warp/fem/geometry/hexmesh.py +953 -953
  105. warp/fem/geometry/partition.py +374 -376
  106. warp/fem/geometry/quadmesh_2d.py +532 -532
  107. warp/fem/geometry/tetmesh.py +840 -840
  108. warp/fem/geometry/trimesh_2d.py +577 -577
  109. warp/fem/integrate.py +1630 -1615
  110. warp/fem/operator.py +190 -191
  111. warp/fem/polynomial.py +214 -213
  112. warp/fem/quadrature/__init__.py +2 -2
  113. warp/fem/quadrature/pic_quadrature.py +243 -245
  114. warp/fem/quadrature/quadrature.py +295 -294
  115. warp/fem/space/__init__.py +294 -292
  116. warp/fem/space/basis_space.py +488 -489
  117. warp/fem/space/collocated_function_space.py +100 -105
  118. warp/fem/space/dof_mapper.py +236 -236
  119. warp/fem/space/function_space.py +148 -145
  120. warp/fem/space/grid_2d_function_space.py +267 -267
  121. warp/fem/space/grid_3d_function_space.py +305 -306
  122. warp/fem/space/hexmesh_function_space.py +350 -352
  123. warp/fem/space/partition.py +350 -350
  124. warp/fem/space/quadmesh_2d_function_space.py +368 -369
  125. warp/fem/space/restriction.py +158 -160
  126. warp/fem/space/shape/__init__.py +13 -15
  127. warp/fem/space/shape/cube_shape_function.py +738 -738
  128. warp/fem/space/shape/shape_function.py +102 -103
  129. warp/fem/space/shape/square_shape_function.py +611 -611
  130. warp/fem/space/shape/tet_shape_function.py +565 -567
  131. warp/fem/space/shape/triangle_shape_function.py +429 -429
  132. warp/fem/space/tetmesh_function_space.py +294 -292
  133. warp/fem/space/topology.py +297 -295
  134. warp/fem/space/trimesh_2d_function_space.py +223 -221
  135. warp/fem/types.py +77 -77
  136. warp/fem/utils.py +495 -495
  137. warp/jax.py +166 -141
  138. warp/jax_experimental.py +341 -339
  139. warp/native/array.h +1072 -1025
  140. warp/native/builtin.h +1560 -1560
  141. warp/native/bvh.cpp +398 -398
  142. warp/native/bvh.cu +525 -525
  143. warp/native/bvh.h +429 -429
  144. warp/native/clang/clang.cpp +495 -464
  145. warp/native/crt.cpp +31 -31
  146. warp/native/crt.h +334 -334
  147. warp/native/cuda_crt.h +1049 -1049
  148. warp/native/cuda_util.cpp +549 -540
  149. warp/native/cuda_util.h +288 -203
  150. warp/native/cutlass_gemm.cpp +34 -34
  151. warp/native/cutlass_gemm.cu +372 -372
  152. warp/native/error.cpp +66 -66
  153. warp/native/error.h +27 -27
  154. warp/native/fabric.h +228 -228
  155. warp/native/hashgrid.cpp +301 -278
  156. warp/native/hashgrid.cu +78 -77
  157. warp/native/hashgrid.h +227 -227
  158. warp/native/initializer_array.h +32 -32
  159. warp/native/intersect.h +1204 -1204
  160. warp/native/intersect_adj.h +365 -365
  161. warp/native/intersect_tri.h +322 -322
  162. warp/native/marching.cpp +2 -2
  163. warp/native/marching.cu +497 -497
  164. warp/native/marching.h +2 -2
  165. warp/native/mat.h +1498 -1498
  166. warp/native/matnn.h +333 -333
  167. warp/native/mesh.cpp +203 -203
  168. warp/native/mesh.cu +293 -293
  169. warp/native/mesh.h +1887 -1887
  170. warp/native/nanovdb/NanoVDB.h +4782 -4782
  171. warp/native/nanovdb/PNanoVDB.h +2553 -2553
  172. warp/native/nanovdb/PNanoVDBWrite.h +294 -294
  173. warp/native/noise.h +850 -850
  174. warp/native/quat.h +1084 -1084
  175. warp/native/rand.h +299 -299
  176. warp/native/range.h +108 -108
  177. warp/native/reduce.cpp +156 -156
  178. warp/native/reduce.cu +348 -348
  179. warp/native/runlength_encode.cpp +61 -61
  180. warp/native/runlength_encode.cu +46 -46
  181. warp/native/scan.cpp +30 -30
  182. warp/native/scan.cu +36 -36
  183. warp/native/scan.h +7 -7
  184. warp/native/solid_angle.h +442 -442
  185. warp/native/sort.cpp +94 -94
  186. warp/native/sort.cu +97 -97
  187. warp/native/sort.h +14 -14
  188. warp/native/sparse.cpp +337 -337
  189. warp/native/sparse.cu +544 -544
  190. warp/native/spatial.h +630 -630
  191. warp/native/svd.h +562 -562
  192. warp/native/temp_buffer.h +30 -30
  193. warp/native/vec.h +1132 -1132
  194. warp/native/volume.cpp +297 -297
  195. warp/native/volume.cu +32 -32
  196. warp/native/volume.h +538 -538
  197. warp/native/volume_builder.cu +425 -425
  198. warp/native/volume_builder.h +19 -19
  199. warp/native/warp.cpp +1057 -1052
  200. warp/native/warp.cu +2943 -2828
  201. warp/native/warp.h +313 -305
  202. warp/optim/__init__.py +9 -9
  203. warp/optim/adam.py +120 -120
  204. warp/optim/linear.py +1104 -939
  205. warp/optim/sgd.py +104 -92
  206. warp/render/__init__.py +10 -10
  207. warp/render/render_opengl.py +3217 -3204
  208. warp/render/render_usd.py +768 -749
  209. warp/render/utils.py +152 -150
  210. warp/sim/__init__.py +52 -59
  211. warp/sim/articulation.py +685 -685
  212. warp/sim/collide.py +1594 -1590
  213. warp/sim/import_mjcf.py +489 -481
  214. warp/sim/import_snu.py +220 -221
  215. warp/sim/import_urdf.py +536 -516
  216. warp/sim/import_usd.py +887 -881
  217. warp/sim/inertia.py +316 -317
  218. warp/sim/integrator.py +234 -233
  219. warp/sim/integrator_euler.py +1956 -1956
  220. warp/sim/integrator_featherstone.py +1910 -1991
  221. warp/sim/integrator_xpbd.py +3294 -3312
  222. warp/sim/model.py +4473 -4314
  223. warp/sim/particles.py +113 -112
  224. warp/sim/render.py +417 -403
  225. warp/sim/utils.py +413 -410
  226. warp/sparse.py +1227 -1227
  227. warp/stubs.py +2109 -2469
  228. warp/tape.py +1162 -225
  229. warp/tests/__init__.py +1 -1
  230. warp/tests/__main__.py +4 -4
  231. warp/tests/assets/torus.usda +105 -105
  232. warp/tests/aux_test_class_kernel.py +26 -26
  233. warp/tests/aux_test_compile_consts_dummy.py +10 -10
  234. warp/tests/aux_test_conditional_unequal_types_kernels.py +21 -21
  235. warp/tests/aux_test_dependent.py +22 -22
  236. warp/tests/aux_test_grad_customs.py +23 -23
  237. warp/tests/aux_test_reference.py +11 -11
  238. warp/tests/aux_test_reference_reference.py +10 -10
  239. warp/tests/aux_test_square.py +17 -17
  240. warp/tests/aux_test_unresolved_func.py +14 -14
  241. warp/tests/aux_test_unresolved_symbol.py +14 -14
  242. warp/tests/disabled_kinematics.py +239 -239
  243. warp/tests/run_coverage_serial.py +31 -31
  244. warp/tests/test_adam.py +157 -157
  245. warp/tests/test_arithmetic.py +1124 -1124
  246. warp/tests/test_array.py +2417 -2326
  247. warp/tests/test_array_reduce.py +150 -150
  248. warp/tests/test_async.py +668 -656
  249. warp/tests/test_atomic.py +141 -141
  250. warp/tests/test_bool.py +204 -149
  251. warp/tests/test_builtins_resolution.py +1292 -1292
  252. warp/tests/test_bvh.py +164 -171
  253. warp/tests/test_closest_point_edge_edge.py +228 -228
  254. warp/tests/test_codegen.py +566 -553
  255. warp/tests/test_compile_consts.py +97 -101
  256. warp/tests/test_conditional.py +246 -246
  257. warp/tests/test_copy.py +232 -215
  258. warp/tests/test_ctypes.py +632 -632
  259. warp/tests/test_dense.py +67 -67
  260. warp/tests/test_devices.py +91 -98
  261. warp/tests/test_dlpack.py +530 -529
  262. warp/tests/test_examples.py +400 -378
  263. warp/tests/test_fabricarray.py +955 -955
  264. warp/tests/test_fast_math.py +62 -54
  265. warp/tests/test_fem.py +1277 -1278
  266. warp/tests/test_fp16.py +130 -130
  267. warp/tests/test_func.py +338 -337
  268. warp/tests/test_generics.py +571 -571
  269. warp/tests/test_grad.py +746 -640
  270. warp/tests/test_grad_customs.py +333 -336
  271. warp/tests/test_hash_grid.py +210 -164
  272. warp/tests/test_import.py +39 -39
  273. warp/tests/test_indexedarray.py +1134 -1134
  274. warp/tests/test_intersect.py +67 -67
  275. warp/tests/test_jax.py +307 -307
  276. warp/tests/test_large.py +167 -164
  277. warp/tests/test_launch.py +354 -354
  278. warp/tests/test_lerp.py +261 -261
  279. warp/tests/test_linear_solvers.py +191 -171
  280. warp/tests/test_lvalue.py +421 -493
  281. warp/tests/test_marching_cubes.py +65 -65
  282. warp/tests/test_mat.py +1801 -1827
  283. warp/tests/test_mat_lite.py +115 -115
  284. warp/tests/test_mat_scalar_ops.py +2907 -2889
  285. warp/tests/test_math.py +126 -193
  286. warp/tests/test_matmul.py +500 -499
  287. warp/tests/test_matmul_lite.py +410 -410
  288. warp/tests/test_mempool.py +188 -190
  289. warp/tests/test_mesh.py +284 -324
  290. warp/tests/test_mesh_query_aabb.py +228 -241
  291. warp/tests/test_mesh_query_point.py +692 -702
  292. warp/tests/test_mesh_query_ray.py +292 -303
  293. warp/tests/test_mlp.py +276 -276
  294. warp/tests/test_model.py +110 -110
  295. warp/tests/test_modules_lite.py +39 -39
  296. warp/tests/test_multigpu.py +163 -163
  297. warp/tests/test_noise.py +248 -248
  298. warp/tests/test_operators.py +250 -250
  299. warp/tests/test_options.py +123 -125
  300. warp/tests/test_peer.py +133 -137
  301. warp/tests/test_pinned.py +78 -78
  302. warp/tests/test_print.py +54 -54
  303. warp/tests/test_quat.py +2086 -2086
  304. warp/tests/test_rand.py +288 -288
  305. warp/tests/test_reload.py +217 -217
  306. warp/tests/test_rounding.py +179 -179
  307. warp/tests/test_runlength_encode.py +190 -190
  308. warp/tests/test_sim_grad.py +243 -0
  309. warp/tests/test_sim_kinematics.py +91 -97
  310. warp/tests/test_smoothstep.py +168 -168
  311. warp/tests/test_snippet.py +305 -266
  312. warp/tests/test_sparse.py +468 -460
  313. warp/tests/test_spatial.py +2148 -2148
  314. warp/tests/test_streams.py +486 -473
  315. warp/tests/test_struct.py +710 -675
  316. warp/tests/test_tape.py +173 -148
  317. warp/tests/test_torch.py +743 -743
  318. warp/tests/test_transient_module.py +87 -87
  319. warp/tests/test_types.py +556 -659
  320. warp/tests/test_utils.py +490 -499
  321. warp/tests/test_vec.py +1264 -1268
  322. warp/tests/test_vec_lite.py +73 -73
  323. warp/tests/test_vec_scalar_ops.py +2099 -2099
  324. warp/tests/test_verify_fp.py +94 -94
  325. warp/tests/test_volume.py +737 -736
  326. warp/tests/test_volume_write.py +255 -265
  327. warp/tests/unittest_serial.py +37 -37
  328. warp/tests/unittest_suites.py +363 -359
  329. warp/tests/unittest_utils.py +603 -578
  330. warp/tests/unused_test_misc.py +71 -71
  331. warp/tests/walkthrough_debug.py +85 -85
  332. warp/thirdparty/appdirs.py +598 -598
  333. warp/thirdparty/dlpack.py +143 -143
  334. warp/thirdparty/unittest_parallel.py +566 -561
  335. warp/torch.py +321 -295
  336. warp/types.py +4504 -4450
  337. warp/utils.py +1008 -821
  338. {warp_lang-1.0.1.dist-info → warp_lang-1.1.0.dist-info}/LICENSE.md +126 -126
  339. {warp_lang-1.0.1.dist-info → warp_lang-1.1.0.dist-info}/METADATA +338 -400
  340. warp_lang-1.1.0.dist-info/RECORD +352 -0
  341. warp/examples/assets/cube.usda +0 -42
  342. warp/examples/assets/sphere.usda +0 -56
  343. warp/examples/assets/torus.usda +0 -105
  344. warp_lang-1.0.1.dist-info/RECORD +0 -352
  345. {warp_lang-1.0.1.dist-info → warp_lang-1.1.0.dist-info}/WHEEL +0 -0
  346. {warp_lang-1.0.1.dist-info → warp_lang-1.1.0.dist-info}/top_level.txt +0 -0
warp/native/mat.h CHANGED
@@ -1,1498 +1,1498 @@
1
- /** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
2
- * NVIDIA CORPORATION and its licensors retain all intellectual property
3
- * and proprietary rights in and to this software, related documentation
4
- * and any modifications thereto. Any use, reproduction, disclosure or
5
- * distribution of this software and related documentation without an express
6
- * license agreement from NVIDIA CORPORATION is strictly prohibited.
7
- */
8
-
9
- #pragma once
10
-
11
- #include "initializer_array.h"
12
-
13
- namespace wp
14
- {
15
-
16
- //----------------------------------------------------------
17
- // mat
18
- template<typename T>
19
- struct quat_t;
20
-
21
- template<unsigned Rows, unsigned Cols, typename Type>
22
- struct mat_t
23
- {
24
- inline CUDA_CALLABLE mat_t()
25
- : data()
26
- {}
27
-
28
- inline CUDA_CALLABLE mat_t(Type s)
29
- {
30
- for (unsigned i=0; i < Rows; ++i)
31
- for (unsigned j=0; j < Cols; ++j)
32
- data[i][j] = s;
33
- }
34
-
35
- template <typename OtherType>
36
- inline explicit CUDA_CALLABLE mat_t(const mat_t<Rows, Cols, OtherType>& other)
37
- {
38
- for (unsigned i=0; i < Rows; ++i)
39
- for (unsigned j=0; j < Cols; ++j)
40
- data[i][j] = other.data[i][j];
41
- }
42
-
43
- inline CUDA_CALLABLE mat_t(vec_t<2,Type> c0, vec_t<2,Type> c1)
44
- {
45
- data[0][0] = c0[0];
46
- data[1][0] = c0[1];
47
-
48
- data[0][1] = c1[0];
49
- data[1][1] = c1[1];
50
- }
51
-
52
- inline CUDA_CALLABLE mat_t(vec_t<3,Type> c0, vec_t<3,Type> c1, vec_t<3,Type> c2)
53
- {
54
- data[0][0] = c0[0];
55
- data[1][0] = c0[1];
56
- data[2][0] = c0[2];
57
-
58
- data[0][1] = c1[0];
59
- data[1][1] = c1[1];
60
- data[2][1] = c1[2];
61
-
62
- data[0][2] = c2[0];
63
- data[1][2] = c2[1];
64
- data[2][2] = c2[2];
65
- }
66
-
67
- inline CUDA_CALLABLE mat_t(vec_t<4,Type> c0, vec_t<4,Type> c1, vec_t<4,Type> c2, vec_t<4,Type> c3)
68
- {
69
- data[0][0] = c0[0];
70
- data[1][0] = c0[1];
71
- data[2][0] = c0[2];
72
- data[3][0] = c0[3];
73
-
74
- data[0][1] = c1[0];
75
- data[1][1] = c1[1];
76
- data[2][1] = c1[2];
77
- data[3][1] = c1[3];
78
-
79
- data[0][2] = c2[0];
80
- data[1][2] = c2[1];
81
- data[2][2] = c2[2];
82
- data[3][2] = c2[3];
83
-
84
- data[0][3] = c3[0];
85
- data[1][3] = c3[1];
86
- data[2][3] = c3[2];
87
- data[3][3] = c3[3];
88
- }
89
-
90
- inline CUDA_CALLABLE mat_t(Type m00, Type m01, Type m10, Type m11)
91
- {
92
- data[0][0] = m00;
93
- data[1][0] = m10;
94
- data[0][1] = m01;
95
- data[1][1] = m11;
96
- }
97
-
98
- inline CUDA_CALLABLE mat_t(
99
- Type m00, Type m01, Type m02,
100
- Type m10, Type m11, Type m12,
101
- Type m20, Type m21, Type m22)
102
- {
103
- data[0][0] = m00;
104
- data[1][0] = m10;
105
- data[2][0] = m20;
106
-
107
- data[0][1] = m01;
108
- data[1][1] = m11;
109
- data[2][1] = m21;
110
-
111
- data[0][2] = m02;
112
- data[1][2] = m12;
113
- data[2][2] = m22;
114
- }
115
-
116
- inline CUDA_CALLABLE mat_t(
117
- Type m00, Type m01, Type m02, Type m03,
118
- Type m10, Type m11, Type m12, Type m13,
119
- Type m20, Type m21, Type m22, Type m23,
120
- Type m30, Type m31, Type m32, Type m33)
121
- {
122
- data[0][0] = m00;
123
- data[1][0] = m10;
124
- data[2][0] = m20;
125
- data[3][0] = m30;
126
-
127
- data[0][1] = m01;
128
- data[1][1] = m11;
129
- data[2][1] = m21;
130
- data[3][1] = m31;
131
-
132
- data[0][2] = m02;
133
- data[1][2] = m12;
134
- data[2][2] = m22;
135
- data[3][2] = m32;
136
-
137
- data[0][3] = m03;
138
- data[1][3] = m13;
139
- data[2][3] = m23;
140
- data[3][3] = m33;
141
- }
142
-
143
- // implemented in quat.h
144
- inline CUDA_CALLABLE mat_t(const vec_t<3,Type>& pos, const quat_t<Type>& rot, const vec_t<3,Type>& scale);
145
-
146
-
147
- inline CUDA_CALLABLE mat_t(const initializer_array<Rows * Cols, Type> &l)
148
- {
149
- for (unsigned i=0; i < Rows; ++i)
150
- {
151
- for (unsigned j=0; j < Cols; ++j)
152
- {
153
- data[i][j] = l[i * Cols + j];
154
- }
155
- }
156
- }
157
-
158
- inline CUDA_CALLABLE mat_t(const initializer_array<Cols, vec_t<Rows,Type> > &l)
159
- {
160
- for (unsigned j=0; j < Cols; ++j)
161
- {
162
- for (unsigned i=0; i < Rows; ++i)
163
- {
164
- data[i][j] = l[j][i];
165
- }
166
- }
167
- }
168
-
169
- CUDA_CALLABLE vec_t<Cols,Type> get_row(int index) const
170
- {
171
- return (vec_t<Cols,Type>&)data[index];
172
- }
173
-
174
- CUDA_CALLABLE void set_row(int index, const vec_t<Cols,Type>& v)
175
- {
176
- (vec_t<Cols,Type>&)data[index] = v;
177
- }
178
-
179
- CUDA_CALLABLE vec_t<Rows,Type> get_col(int index) const
180
- {
181
- vec_t<Rows,Type> ret;
182
- for( unsigned i=0;i < Rows; ++i )
183
- {
184
- ret[i] = data[i][index];
185
- }
186
- return ret;
187
- }
188
-
189
- CUDA_CALLABLE void set_col(int index, const vec_t<Rows,Type>& v)
190
- {
191
- for( unsigned i=0;i < Rows; ++i )
192
- {
193
- data[i][index] = v[i];
194
- }
195
- }
196
-
197
- // row major storage assumed to be compatible with PyTorch
198
- Type data[Rows][Cols];
199
- };
200
-
201
-
202
- template<unsigned Rows, typename Type>
203
- inline CUDA_CALLABLE mat_t<Rows, Rows, Type> identity()
204
- {
205
- mat_t<Rows, Rows, Type> m;
206
- for( unsigned i=0; i < Rows; ++i )
207
- {
208
- m.data[i][i] = Type(1);
209
- }
210
- return m;
211
- }
212
-
213
- template<unsigned Rows, unsigned Cols, typename Type>
214
- inline CUDA_CALLABLE bool operator==(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b)
215
- {
216
- for (unsigned i=0; i < Rows; ++i)
217
- for (unsigned j=0; j < Cols; ++j)
218
- if (a.data[i][j] != b.data[i][j])
219
- return false;
220
-
221
- return true;
222
- }
223
-
224
-
225
- // negation:
226
- template<unsigned Rows, unsigned Cols, typename Type>
227
- inline CUDA_CALLABLE mat_t<Rows,Cols,Type> operator - (mat_t<Rows,Cols,Type> a)
228
- {
229
- // NB: this constructor will initialize all ret's components to 0, which is
230
- // unnecessary...
231
- mat_t<Rows,Cols,Type> ret;
232
- for (unsigned i=0; i < Rows; ++i)
233
- for (unsigned j=0; j < Cols; ++j)
234
- ret.data[i][j] = -a.data[i][j];
235
-
236
- // Wonder if this does a load of copying when it returns... hopefully not as it's inlined?
237
- return ret;
238
- }
239
-
240
-
241
- template<unsigned Rows, unsigned Cols, typename Type>
242
- CUDA_CALLABLE inline mat_t<Rows,Cols,Type> pos(const mat_t<Rows,Cols,Type>& x)
243
- {
244
- return x;
245
- }
246
-
247
- template<unsigned Rows, unsigned Cols, typename Type>
248
- CUDA_CALLABLE inline void adj_pos(const mat_t<Rows,Cols,Type>& x, mat_t<Rows,Cols,Type>& adj_x, const mat_t<Rows,Cols,Type>& adj_ret)
249
- {
250
- adj_x += adj_ret;
251
- }
252
-
253
- template<unsigned Rows, unsigned Cols, typename Type>
254
- CUDA_CALLABLE inline mat_t<Rows,Cols,Type> neg(const mat_t<Rows,Cols,Type>& x)
255
- {
256
- return -x;
257
- }
258
-
259
- template<unsigned Rows, unsigned Cols, typename Type>
260
- CUDA_CALLABLE inline void adj_neg(const mat_t<Rows,Cols,Type>& x, mat_t<Rows,Cols,Type>& adj_x, const mat_t<Rows,Cols,Type>& adj_ret)
261
- {
262
- adj_x -= adj_ret;
263
- }
264
-
265
-
266
- template<unsigned Rows, unsigned Cols, typename Type>
267
- inline CUDA_CALLABLE mat_t<Rows,Cols,Type> atomic_add(mat_t<Rows,Cols,Type> * addr, mat_t<Rows,Cols,Type> value)
268
- {
269
- mat_t<Rows,Cols,Type> m;
270
-
271
- for (unsigned i=0; i < Rows; ++i)
272
- for (unsigned j=0; j < Cols; ++j)
273
- m.data[i][j] = atomic_add(&addr->data[i][j], value.data[i][j]);
274
-
275
- return m;
276
- }
277
-
278
- template<unsigned Rows, unsigned Cols, typename Type>
279
- inline CUDA_CALLABLE mat_t<Rows,Cols,Type> atomic_min(mat_t<Rows,Cols,Type> * addr, mat_t<Rows,Cols,Type> value)
280
- {
281
- mat_t<Rows,Cols,Type> m;
282
-
283
- for (unsigned i=0; i < Rows; ++i)
284
- for (unsigned j=0; j < Cols; ++j)
285
- m.data[i][j] = atomic_min(&addr->data[i][j], value.data[i][j]);
286
-
287
- return m;
288
- }
289
-
290
- template<unsigned Rows, unsigned Cols, typename Type>
291
- inline CUDA_CALLABLE mat_t<Rows,Cols,Type> atomic_max(mat_t<Rows,Cols,Type> * addr, mat_t<Rows,Cols,Type> value)
292
- {
293
- mat_t<Rows,Cols,Type> m;
294
-
295
- for (unsigned i=0; i < Rows; ++i)
296
- for (unsigned j=0; j < Cols; ++j)
297
- m.data[i][j] = atomic_max(&addr->data[i][j], value.data[i][j]);
298
-
299
- return m;
300
- }
301
-
302
- template<unsigned Rows, unsigned Cols, typename Type>
303
- inline CUDA_CALLABLE void adj_atomic_minmax(
304
- mat_t<Rows,Cols,Type> *addr,
305
- mat_t<Rows,Cols,Type> *adj_addr,
306
- const mat_t<Rows,Cols,Type> &value,
307
- mat_t<Rows,Cols,Type> &adj_value)
308
- {
309
- for (unsigned i=0; i < Rows; ++i)
310
- for (unsigned j=0; j < Cols; ++j)
311
- adj_atomic_minmax(&addr->data[i][j], &adj_addr->data[i][j], value.data[i][j], adj_value.data[i][j]);
312
- }
313
-
314
- template<unsigned Rows, unsigned Cols, typename Type>
315
- inline CUDA_CALLABLE vec_t<Cols,Type> extract(const mat_t<Rows,Cols,Type>& m, int row)
316
- {
317
- vec_t<Cols,Type> ret;
318
- for(unsigned i=0; i < Cols; ++i)
319
- {
320
- ret.c[i] = m.data[row][i];
321
- }
322
- return ret;
323
- }
324
-
325
- template<unsigned Rows, unsigned Cols, typename Type>
326
- inline CUDA_CALLABLE Type extract(const mat_t<Rows,Cols,Type>& m, int row, int col)
327
- {
328
- #ifndef NDEBUG
329
- if (row < 0 || row >= Rows)
330
- {
331
- printf("mat row index %d out of bounds at %s %d\n", row, __FILE__, __LINE__);
332
- assert(0);
333
- }
334
- if (col < 0 || col >= Cols)
335
- {
336
- printf("mat col index %d out of bounds at %s %d\n", col, __FILE__, __LINE__);
337
- assert(0);
338
- }
339
- #endif
340
- return m.data[row][col];
341
- }
342
-
343
- template<unsigned Rows, unsigned Cols, typename Type>
344
- inline CUDA_CALLABLE vec_t<Cols, Type>* index(mat_t<Rows,Cols,Type>& m, int row)
345
- {
346
- #ifndef NDEBUG
347
- if (row < 0 || row >= Rows)
348
- {
349
- printf("mat row index %d out of bounds at %s %d\n", row, __FILE__, __LINE__);
350
- assert(0);
351
- }
352
- #endif
353
-
354
- return reinterpret_cast<vec_t<Cols, Type>*>(&m.data[row]);
355
- }
356
-
357
- template<unsigned Rows, unsigned Cols, typename Type>
358
- inline CUDA_CALLABLE Type* index(mat_t<Rows,Cols,Type>& m, int row, int col)
359
- {
360
- #ifndef NDEBUG
361
- if (row < 0 || row >= Rows)
362
- {
363
- printf("mat row index %d out of bounds at %s %d\n", row, __FILE__, __LINE__);
364
- assert(0);
365
- }
366
- if (col < 0 || col >= Cols)
367
- {
368
- printf("mat col index %d out of bounds at %s %d\n", col, __FILE__, __LINE__);
369
- assert(0);
370
- }
371
- #endif
372
-
373
- return &m.data[row][col];
374
- }
375
-
376
- template<unsigned Rows, unsigned Cols, typename Type>
377
- inline CUDA_CALLABLE void adj_index(const mat_t<Rows,Cols,Type>& m, int row,
378
- const mat_t<Rows,Cols,Type>& adj_m, int adj_row, const vec_t<Cols, Type>& adj_value)
379
- {
380
- // nop
381
- }
382
-
383
- template<unsigned Rows, unsigned Cols, typename Type>
384
- inline CUDA_CALLABLE void adj_index(const mat_t<Rows,Cols,Type>& m, int row, int col,
385
- const mat_t<Rows,Cols,Type>& adj_m, int adj_row, int adj_col, Type adj_value)
386
- {
387
- // nop
388
- }
389
-
390
- template<unsigned Rows, unsigned Cols, typename Type>
391
- inline bool CUDA_CALLABLE isfinite(const mat_t<Rows,Cols,Type>& m)
392
- {
393
- for (unsigned i=0; i < Rows; ++i)
394
- for (unsigned j=0; j < Cols; ++j)
395
- if (!isfinite(m.data[i][j]))
396
- return false;
397
- return true;
398
- }
399
-
400
- template<unsigned Rows, unsigned Cols, typename Type>
401
- inline CUDA_CALLABLE mat_t<Rows,Cols,Type> add(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b)
402
- {
403
- mat_t<Rows,Cols,Type> t;
404
- for (unsigned i=0; i < Rows; ++i)
405
- {
406
- for (unsigned j=0; j < Cols; ++j)
407
- {
408
- t.data[i][j] = a.data[i][j] + b.data[i][j];
409
- }
410
- }
411
-
412
- return t;
413
- }
414
-
415
- template<unsigned Rows, unsigned Cols, typename Type>
416
- inline CUDA_CALLABLE mat_t<Rows,Cols,Type> sub(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b)
417
- {
418
- mat_t<Rows,Cols,Type> t;
419
- for (unsigned i=0; i < Rows; ++i)
420
- {
421
- for (unsigned j=0; j < Cols; ++j)
422
- {
423
- t.data[i][j] = a.data[i][j] - b.data[i][j];
424
- }
425
- }
426
-
427
- return t;
428
- }
429
-
430
- template<unsigned Rows, unsigned Cols, typename Type>
431
- inline CUDA_CALLABLE mat_t<Rows,Cols,Type> div(const mat_t<Rows,Cols,Type>& a, Type b)
432
- {
433
- mat_t<Rows,Cols,Type> t;
434
- for (unsigned i=0; i < Rows; ++i)
435
- {
436
- for (unsigned j=0; j < Cols; ++j)
437
- {
438
- t.data[i][j] = a.data[i][j]/b;
439
- }
440
- }
441
-
442
- return t;
443
- }
444
-
445
- template<unsigned Rows, unsigned Cols, typename Type>
446
- inline CUDA_CALLABLE mat_t<Rows,Cols,Type> div(Type b, const mat_t<Rows,Cols,Type>& a)
447
- {
448
- mat_t<Rows,Cols,Type> t;
449
- for (unsigned i=0; i < Rows; ++i)
450
- {
451
- for (unsigned j=0; j < Cols; ++j)
452
- {
453
- t.data[i][j] = b / a.data[i][j];
454
- }
455
- }
456
-
457
- return t;
458
- }
459
-
460
- template<unsigned Rows, unsigned Cols, typename Type>
461
- inline CUDA_CALLABLE mat_t<Rows,Cols,Type> mul(const mat_t<Rows,Cols,Type>& a, Type b)
462
- {
463
- mat_t<Rows,Cols,Type> t;
464
- for (unsigned i=0; i < Rows; ++i)
465
- {
466
- for (unsigned j=0; j < Cols; ++j)
467
- {
468
- t.data[i][j] = a.data[i][j]*b;
469
- }
470
- }
471
-
472
- return t;
473
- }
474
-
475
- template<unsigned Rows, unsigned Cols, typename Type>
476
- inline CUDA_CALLABLE mat_t<Rows,Cols,Type> mul(Type b, const mat_t<Rows,Cols,Type>& a)
477
- {
478
- return mul(a,b);
479
- }
480
-
481
-
482
- template<unsigned Rows, unsigned Cols, typename Type>
483
- inline CUDA_CALLABLE mat_t<Rows,Cols,Type> operator*(Type b, const mat_t<Rows,Cols,Type>& a)
484
- {
485
- return mul(a,b);
486
- }
487
-
488
- template<unsigned Rows, unsigned Cols, typename Type>
489
- inline CUDA_CALLABLE mat_t<Rows,Cols,Type> operator*( const mat_t<Rows,Cols,Type>& a, Type b)
490
- {
491
- return mul(a,b);
492
- }
493
-
494
- template<unsigned Rows, unsigned Cols, typename Type>
495
- inline CUDA_CALLABLE vec_t<Rows,Type> mul(const mat_t<Rows,Cols,Type>& a, const vec_t<Cols,Type>& b)
496
- {
497
- vec_t<Rows,Type> r = a.get_col(0)*b[0];
498
- for( unsigned i=1; i < Cols; ++i )
499
- {
500
- r += a.get_col(i)*b[i];
501
- }
502
- return r;
503
- }
504
-
505
- template<unsigned Rows, unsigned Cols, typename Type>
506
- inline CUDA_CALLABLE vec_t<Cols,Type> mul(const vec_t<Rows,Type>& b, const mat_t<Rows,Cols,Type>& a)
507
- {
508
- vec_t<Cols,Type> r = a.get_row(0)*b[0];
509
- for( unsigned i=1; i < Rows; ++i )
510
- {
511
- r += a.get_row(i)*b[i];
512
- }
513
- return r;
514
- }
515
-
516
- template<unsigned Rows, unsigned Cols, unsigned ColsOut, typename Type>
517
- inline CUDA_CALLABLE mat_t<Rows,ColsOut,Type> mul(const mat_t<Rows,Cols,Type>& a, const mat_t<Cols,ColsOut,Type>& b)
518
- {
519
- mat_t<Rows,ColsOut,Type> t(0);
520
- for (unsigned i=0; i < Rows; ++i)
521
- {
522
- for (unsigned j=0; j < ColsOut; ++j)
523
- {
524
- for (unsigned k=0; k < Cols; ++k)
525
- {
526
- t.data[i][j] += a.data[i][k]*b.data[k][j];
527
- }
528
- }
529
- }
530
-
531
- return t;
532
- }
533
-
534
- template<unsigned Rows, unsigned Cols, typename Type>
535
- inline CUDA_CALLABLE Type ddot(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b)
536
- {
537
- // double dot product between a and b:
538
- Type r(0);
539
- for (unsigned i=0; i < Rows; ++i)
540
- {
541
- for (unsigned j=0; j < Cols; ++j)
542
- {
543
- r += a.data[i][j] * b.data[i][j];
544
- }
545
- }
546
- return r;
547
- }
548
-
549
- template<unsigned Rows, unsigned Cols, typename Type>
550
- inline CUDA_CALLABLE Type tensordot(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b)
551
- {
552
- // corresponds to `np.tensordot()` with all axes being contracted
553
- return ddot(a, b);
554
- }
555
-
556
- template<unsigned Rows, unsigned Cols, typename Type>
557
- inline CUDA_CALLABLE mat_t<Cols,Rows,Type> transpose(const mat_t<Rows,Cols,Type>& a)
558
- {
559
- mat_t<Cols,Rows,Type> t;
560
- for (unsigned i=0; i < Cols; ++i)
561
- {
562
- for (unsigned j=0; j < Rows; ++j)
563
- {
564
- t.data[i][j] = a.data[j][i];
565
- }
566
- }
567
-
568
- return t;
569
- }
570
-
571
- // Only implementing determinants for 2x2, 3x3 and 4x4 matrices for now...
572
- template<typename Type>
573
- inline CUDA_CALLABLE Type determinant(const mat_t<2,2,Type>& m)
574
- {
575
- return m.data[0][0]*m.data[1][1] - m.data[1][0]*m.data[0][1];
576
- }
577
-
578
- template<typename Type>
579
- inline CUDA_CALLABLE Type determinant(const mat_t<3,3,Type>& m)
580
- {
581
- return dot(
582
- vec_t<3,Type>(m.data[0][0],m.data[0][1],m.data[0][2]),
583
- cross(
584
- vec_t<3,Type>(m.data[1][0],m.data[1][1],m.data[1][2]),
585
- vec_t<3,Type>(m.data[2][0],m.data[2][1],m.data[2][2])
586
- )
587
- );
588
- }
589
-
590
- template<typename Type>
591
- inline CUDA_CALLABLE Type determinant(const mat_t<4,4,Type>& m)
592
- {
593
- // adapted from USD GfMatrix4f::Inverse()
594
- Type x00, x01, x02, x03;
595
- Type x10, x11, x12, x13;
596
- Type x20, x21, x22, x23;
597
- Type x30, x31, x32, x33;
598
- double y01, y02, y03, y12, y13, y23;
599
- Type z00, z10, z20, z30;
600
-
601
- // Pickle 1st two columns of matrix into registers
602
- x00 = m.data[0][0];
603
- x01 = m.data[0][1];
604
- x10 = m.data[1][0];
605
- x11 = m.data[1][1];
606
- x20 = m.data[2][0];
607
- x21 = m.data[2][1];
608
- x30 = m.data[3][0];
609
- x31 = m.data[3][1];
610
-
611
- // Compute all six 2x2 determinants of 1st two columns
612
- y01 = x00*x11 - x10*x01;
613
- y02 = x00*x21 - x20*x01;
614
- y03 = x00*x31 - x30*x01;
615
- y12 = x10*x21 - x20*x11;
616
- y13 = x10*x31 - x30*x11;
617
- y23 = x20*x31 - x30*x21;
618
-
619
- // Pickle 2nd two columns of matrix into registers
620
- x02 = m.data[0][2];
621
- x03 = m.data[0][3];
622
- x12 = m.data[1][2];
623
- x13 = m.data[1][3];
624
- x22 = m.data[2][2];
625
- x23 = m.data[2][3];
626
- x32 = m.data[3][2];
627
- x33 = m.data[3][3];
628
-
629
- // Compute all six 2x2 determinants of 2nd two columns
630
- y01 = x02*x13 - x12*x03;
631
- y02 = x02*x23 - x22*x03;
632
- y03 = x02*x33 - x32*x03;
633
- y12 = x12*x23 - x22*x13;
634
- y13 = x12*x33 - x32*x13;
635
- y23 = x22*x33 - x32*x23;
636
-
637
- // Compute all 3x3 cofactors for 1st two columns
638
- z30 = x11*y02 - x21*y01 - x01*y12;
639
- z20 = x01*y13 - x11*y03 + x31*y01;
640
- z10 = x21*y03 - x31*y02 - x01*y23;
641
- z00 = x11*y23 - x21*y13 + x31*y12;
642
-
643
- // compute 4x4 determinant & its reciprocal
644
- double det = x30*z30 + x20*z20 + x10*z10 + x00*z00;
645
- return det;
646
- }
647
-
648
- template<unsigned Rows, typename Type>
649
- inline CUDA_CALLABLE Type trace(const mat_t<Rows,Rows,Type>& m)
650
- {
651
- Type ret = m.data[0][0];
652
- for( unsigned i=1; i < Rows; ++i )
653
- {
654
- ret += m.data[i][i];
655
- }
656
- return ret;
657
- }
658
-
659
- template<unsigned Rows, typename Type>
660
- inline CUDA_CALLABLE vec_t<Rows, Type> get_diag(const mat_t<Rows,Rows,Type>& m)
661
- {
662
- vec_t<Rows, Type> ret;
663
- for( unsigned i=0; i < Rows; ++i )
664
- {
665
- ret[i] = m.data[i][i];
666
- }
667
- return ret;
668
- }
669
-
670
- // Only implementing inverses for 2x2, 3x3 and 4x4 matrices for now...
671
- template<typename Type>
672
- inline CUDA_CALLABLE mat_t<2,2,Type> inverse(const mat_t<2,2,Type>& m)
673
- {
674
- Type det = determinant(m);
675
- if (det > Type(kEps) || det < -Type(kEps))
676
- {
677
- return mat_t<2,2,Type>( m.data[1][1], -m.data[0][1],
678
- -m.data[1][0], m.data[0][0])*(Type(1.0f)/det);
679
- }
680
- else
681
- {
682
- return mat_t<2,2,Type>();
683
- }
684
- }
685
-
686
- template<typename Type>
687
- inline CUDA_CALLABLE mat_t<3,3,Type> inverse(const mat_t<3,3,Type>& m)
688
- {
689
- Type det = determinant(m);
690
-
691
- if (det != Type(0.0f))
692
- {
693
- mat_t<3,3,Type> b;
694
-
695
- b.data[0][0] = m.data[1][1]*m.data[2][2] - m.data[1][2]*m.data[2][1];
696
- b.data[1][0] = m.data[1][2]*m.data[2][0] - m.data[1][0]*m.data[2][2];
697
- b.data[2][0] = m.data[1][0]*m.data[2][1] - m.data[1][1]*m.data[2][0];
698
-
699
- b.data[0][1] = m.data[0][2]*m.data[2][1] - m.data[0][1]*m.data[2][2];
700
- b.data[1][1] = m.data[0][0]*m.data[2][2] - m.data[0][2]*m.data[2][0];
701
- b.data[2][1] = m.data[0][1]*m.data[2][0] - m.data[0][0]*m.data[2][1];
702
-
703
- b.data[0][2] = m.data[0][1]*m.data[1][2] - m.data[0][2]*m.data[1][1];
704
- b.data[1][2] = m.data[0][2]*m.data[1][0] - m.data[0][0]*m.data[1][2];
705
- b.data[2][2] = m.data[0][0]*m.data[1][1] - m.data[0][1]*m.data[1][0];
706
-
707
- return b*(Type(1.0f)/det);
708
- }
709
- else
710
- {
711
- return mat_t<3,3,Type>();
712
- }
713
- }
714
-
715
- template<typename Type>
716
- inline CUDA_CALLABLE mat_t<4,4,Type> inverse(const mat_t<4,4,Type>& m)
717
- {
718
- // adapted from USD GfMatrix4f::Inverse()
719
- Type x00, x01, x02, x03;
720
- Type x10, x11, x12, x13;
721
- Type x20, x21, x22, x23;
722
- Type x30, x31, x32, x33;
723
- double y01, y02, y03, y12, y13, y23;
724
- Type z00, z10, z20, z30;
725
- Type z01, z11, z21, z31;
726
- double z02, z03, z12, z13, z22, z23, z32, z33;
727
-
728
- // Pickle 1st two columns of matrix into registers
729
- x00 = m.data[0][0];
730
- x01 = m.data[0][1];
731
- x10 = m.data[1][0];
732
- x11 = m.data[1][1];
733
- x20 = m.data[2][0];
734
- x21 = m.data[2][1];
735
- x30 = m.data[3][0];
736
- x31 = m.data[3][1];
737
-
738
- // Compute all six 2x2 determinants of 1st two columns
739
- y01 = x00*x11 - x10*x01;
740
- y02 = x00*x21 - x20*x01;
741
- y03 = x00*x31 - x30*x01;
742
- y12 = x10*x21 - x20*x11;
743
- y13 = x10*x31 - x30*x11;
744
- y23 = x20*x31 - x30*x21;
745
-
746
- // Pickle 2nd two columns of matrix into registers
747
- x02 = m.data[0][2];
748
- x03 = m.data[0][3];
749
- x12 = m.data[1][2];
750
- x13 = m.data[1][3];
751
- x22 = m.data[2][2];
752
- x23 = m.data[2][3];
753
- x32 = m.data[3][2];
754
- x33 = m.data[3][3];
755
-
756
- // Compute all 3x3 cofactors for 2nd two columns */
757
- z33 = x02*y12 - x12*y02 + x22*y01;
758
- z23 = x12*y03 - x32*y01 - x02*y13;
759
- z13 = x02*y23 - x22*y03 + x32*y02;
760
- z03 = x22*y13 - x32*y12 - x12*y23;
761
- z32 = x13*y02 - x23*y01 - x03*y12;
762
- z22 = x03*y13 - x13*y03 + x33*y01;
763
- z12 = x23*y03 - x33*y02 - x03*y23;
764
- z02 = x13*y23 - x23*y13 + x33*y12;
765
-
766
- // Compute all six 2x2 determinants of 2nd two columns
767
- y01 = x02*x13 - x12*x03;
768
- y02 = x02*x23 - x22*x03;
769
- y03 = x02*x33 - x32*x03;
770
- y12 = x12*x23 - x22*x13;
771
- y13 = x12*x33 - x32*x13;
772
- y23 = x22*x33 - x32*x23;
773
-
774
- // Compute all 3x3 cofactors for 1st two columns
775
- z30 = x11*y02 - x21*y01 - x01*y12;
776
- z20 = x01*y13 - x11*y03 + x31*y01;
777
- z10 = x21*y03 - x31*y02 - x01*y23;
778
- z00 = x11*y23 - x21*y13 + x31*y12;
779
- z31 = x00*y12 - x10*y02 + x20*y01;
780
- z21 = x10*y03 - x30*y01 - x00*y13;
781
- z11 = x00*y23 - x20*y03 + x30*y02;
782
- z01 = x20*y13 - x30*y12 - x10*y23;
783
-
784
- // compute 4x4 determinant & its reciprocal
785
- double det = x30*z30 + x20*z20 + x10*z10 + x00*z00;
786
-
787
- if(fabs(det) > kEps)
788
- {
789
- mat_t<4,4,Type> invm;
790
-
791
- double rcp = 1.0 / det;
792
-
793
- // Multiply all 3x3 cofactors by reciprocal & transpose
794
- invm.data[0][0] = Type(z00*rcp);
795
- invm.data[0][1] = Type(z10*rcp);
796
- invm.data[1][0] = Type(z01*rcp);
797
- invm.data[0][2] = Type(z20*rcp);
798
- invm.data[2][0] = Type(z02*rcp);
799
- invm.data[0][3] = Type(z30*rcp);
800
- invm.data[3][0] = Type(z03*rcp);
801
- invm.data[1][1] = Type(z11*rcp);
802
- invm.data[1][2] = Type(z21*rcp);
803
- invm.data[2][1] = Type(z12*rcp);
804
- invm.data[1][3] = Type(z31*rcp);
805
- invm.data[3][1] = Type(z13*rcp);
806
- invm.data[2][2] = Type(z22*rcp);
807
- invm.data[2][3] = Type(z32*rcp);
808
- invm.data[3][2] = Type(z23*rcp);
809
- invm.data[3][3] = Type(z33*rcp);
810
-
811
- return invm;
812
- }
813
- else
814
- {
815
- return mat_t<4,4,Type>();
816
- }
817
- }
818
-
819
- template<unsigned Rows,typename Type>
820
- inline CUDA_CALLABLE mat_t<Rows,Rows,Type> diag(const vec_t<Rows,Type>& d)
821
- {
822
- mat_t<Rows,Rows,Type> ret(Type(0));
823
- for (unsigned i=0; i < Rows; ++i)
824
- {
825
- ret.data[i][i] = d[i];
826
- }
827
- return ret;
828
- }
829
-
830
- template<unsigned Rows,unsigned Cols,typename Type>
831
- inline CUDA_CALLABLE mat_t<Rows,Cols,Type> outer(const vec_t<Rows,Type>& a, const vec_t<Cols,Type>& b)
832
- {
833
- // col 0 = a * b[0] etc...
834
- mat_t<Rows,Cols,Type> ret;
835
- for (unsigned row=0; row < Rows; ++row)
836
- {
837
- for (unsigned col=0; col < Cols; ++col) // columns
838
- {
839
- ret.data[row][col] = a[row] * b[col];
840
- }
841
- }
842
- return ret;
843
- }
844
-
845
- template<typename Type>
846
- inline CUDA_CALLABLE mat_t<3,3,Type> skew(const vec_t<3,Type>& a)
847
- {
848
- mat_t<3,3,Type> out(
849
- Type(0), -a[2], a[1],
850
- a[2], Type(0), -a[0],
851
- -a[1], a[0], Type(0)
852
- );
853
-
854
- return out;
855
- }
856
-
857
-
858
- template<unsigned Rows, unsigned Cols, typename Type>
859
- inline CUDA_CALLABLE mat_t<Rows,Cols,Type> cw_mul(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b)
860
- {
861
- mat_t<Rows,Cols,Type> t;
862
- for (unsigned i=0; i < Rows; ++i)
863
- {
864
- for (unsigned j=0; j < Cols; ++j)
865
- {
866
- t.data[i][j] = a.data[i][j] * b.data[i][j];
867
- }
868
- }
869
-
870
- return t;
871
- }
872
-
873
-
874
- template<unsigned Rows, unsigned Cols, typename Type>
875
- inline CUDA_CALLABLE mat_t<Rows,Cols,Type> cw_div(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b)
876
- {
877
- mat_t<Rows,Cols,Type> t;
878
- for (unsigned i=0; i < Rows; ++i)
879
- {
880
- for (unsigned j=0; j < Cols; ++j)
881
- {
882
- t.data[i][j] = a.data[i][j] / b.data[i][j];
883
- }
884
- }
885
-
886
- return t;
887
- }
888
-
889
- template<typename Type>
890
- inline CUDA_CALLABLE vec_t<3,Type> transform_point(const mat_t<4,4,Type>& m, const vec_t<3,Type>& v)
891
- {
892
- vec_t<4,Type> out = mul(m, vec_t<4,Type>(v[0], v[1], v[2], Type(1)));
893
- return vec_t<3,Type>(out[0], out[1], out[2]);
894
- }
895
-
896
- template<typename Type>
897
- inline CUDA_CALLABLE vec_t<3,Type> transform_vector(const mat_t<4,4,Type>& m, const vec_t<3,Type>& v)
898
- {
899
- vec_t<4,Type> out = mul(m, vec_t<4,Type>(v[0], v[1], v[2], 0.f));
900
- return vec_t<3,Type>(out[0], out[1], out[2]);
901
- }
902
-
903
- template<unsigned Rows, unsigned Cols, typename Type>
904
- inline CUDA_CALLABLE void adj_extract(const mat_t<Rows,Cols,Type>& m, int row, mat_t<Rows,Cols,Type>& adj_m, int& adj_row, const vec_t<Cols,Type>& adj_ret)
905
- {
906
- for( unsigned col=0; col < Cols; ++col )
907
- adj_m.data[row][col] += adj_ret[col];
908
- }
909
-
910
- template<unsigned Rows, unsigned Cols, typename Type>
911
- inline void CUDA_CALLABLE adj_extract(const mat_t<Rows,Cols,Type>& m, int row, int col, mat_t<Rows,Cols,Type>& adj_m, int& adj_row, int& adj_col, Type adj_ret)
912
- {
913
- #ifndef NDEBUG
914
- if (row < 0 || row > Rows)
915
- {
916
- printf("mat row index %d out of bounds at %s %d\n", row, __FILE__, __LINE__);
917
- assert(0);
918
- }
919
- if (col < 0 || col > Cols)
920
- {
921
- printf("mat col index %d out of bounds at %s %d\n", col, __FILE__, __LINE__);
922
- assert(0);
923
- }
924
- #endif
925
- adj_m.data[row][col] += adj_ret;
926
- }
927
-
928
- template<unsigned Rows, unsigned Cols, typename Type>
929
- inline CUDA_CALLABLE void adj_outer(const vec_t<Rows,Type>& a, const vec_t<Cols,Type>& b, vec_t<Rows,Type>& adj_a, vec_t<Cols,Type>& adj_b, const mat_t<Rows,Cols,Type>& adj_ret)
930
- {
931
- adj_a += mul(adj_ret, b);
932
- adj_b += mul(transpose(adj_ret), a);
933
- }
934
-
935
- template<unsigned Rows, unsigned Cols, typename Type>
936
- inline CUDA_CALLABLE void adj_add(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b, mat_t<Rows,Cols,Type>& adj_a, mat_t<Rows,Cols,Type>& adj_b, const mat_t<Rows,Cols,Type>& adj_ret)
937
- {
938
- for (unsigned i=0; i < Rows; ++i)
939
- {
940
- for (unsigned j=0; j < Cols; ++j)
941
- {
942
- adj_a.data[i][j] += adj_ret.data[i][j];
943
- adj_b.data[i][j] += adj_ret.data[i][j];
944
- }
945
- }
946
- }
947
-
948
- template<unsigned Rows, unsigned Cols, typename Type>
949
- inline CUDA_CALLABLE void adj_sub(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b, mat_t<Rows,Cols,Type>& adj_a, mat_t<Rows,Cols,Type>& adj_b, const mat_t<Rows,Cols,Type>& adj_ret)
950
- {
951
- for (unsigned i=0; i < Rows; ++i)
952
- {
953
- for (unsigned j=0; j < Cols; ++j)
954
- {
955
- adj_a.data[i][j] += adj_ret.data[i][j];
956
- adj_b.data[i][j] -= adj_ret.data[i][j];
957
- }
958
- }
959
- }
960
-
961
- template<unsigned Rows, unsigned Cols, typename Type>
962
- inline CUDA_CALLABLE void adj_div(const mat_t<Rows,Cols,Type>& a, Type s, mat_t<Rows,Cols,Type>& adj_a, Type& adj_s, const mat_t<Rows,Cols,Type>& adj_ret)
963
- {
964
- adj_s -= tensordot(a , adj_ret)/ (s * s); // - a / s^2
965
-
966
- for (unsigned i=0; i < Rows; ++i)
967
- {
968
- for (unsigned j=0; j < Cols; ++j)
969
- {
970
- adj_a.data[i][j] += adj_ret.data[i][j] / s;
971
- }
972
- }
973
- }
974
-
975
- template<unsigned Rows, unsigned Cols, typename Type>
976
- inline CUDA_CALLABLE void adj_div(Type s, const mat_t<Rows,Cols,Type>& a, Type& adj_s, mat_t<Rows,Cols,Type>& adj_a, const mat_t<Rows,Cols,Type>& adj_ret)
977
- {
978
- adj_s -= tensordot(a , adj_ret)/ (s * s); // - a / s^2
979
-
980
- for (unsigned i=0; i < Rows; ++i)
981
- {
982
- for (unsigned j=0; j < Cols; ++j)
983
- {
984
- adj_a.data[i][j] += s / adj_ret.data[i][j];
985
- }
986
- }
987
- }
988
-
989
- template<unsigned Rows, unsigned Cols, typename Type>
990
- inline CUDA_CALLABLE void adj_mul(const mat_t<Rows,Cols,Type>& a, Type b, mat_t<Rows,Cols,Type>& adj_a, Type& adj_b, const mat_t<Rows,Cols,Type>& adj_ret)
991
- {
992
- for (unsigned i=0; i < Rows; ++i)
993
- {
994
- for (unsigned j=0; j < Cols; ++j)
995
- {
996
- adj_a.data[i][j] += b*adj_ret.data[i][j];
997
- adj_b += a.data[i][j]*adj_ret.data[i][j];
998
- }
999
- }
1000
- }
1001
-
1002
- template<unsigned Rows, unsigned Cols, typename Type>
1003
- inline CUDA_CALLABLE void adj_mul(Type b, const mat_t<Rows,Cols,Type>& a, Type& adj_b, mat_t<Rows,Cols,Type>& adj_a, const mat_t<Rows,Cols,Type>& adj_ret)
1004
- {
1005
- adj_mul(a, b, adj_a, adj_b, adj_ret);
1006
- }
1007
-
1008
- template<unsigned Rows, unsigned Cols, typename Type>
1009
- inline CUDA_CALLABLE void adj_ddot(mat_t<Rows,Cols,Type> a, mat_t<Rows,Cols,Type> b, mat_t<Rows,Cols,Type>& adj_a, mat_t<Rows,Cols,Type>& adj_b, const Type adj_ret)
1010
- {
1011
- adj_a += b*adj_ret;
1012
- adj_b += a*adj_ret;
1013
- }
1014
-
1015
- template<unsigned Rows, unsigned Cols, typename Type>
1016
- inline CUDA_CALLABLE void adj_mul(const mat_t<Rows,Cols,Type>& a, const vec_t<Cols,Type>& b, mat_t<Rows,Cols,Type>& adj_a, vec_t<Cols,Type>& adj_b, const vec_t<Rows,Type>& adj_ret)
1017
- {
1018
- adj_a += outer(adj_ret, b);
1019
- adj_b += mul(transpose(a), adj_ret);
1020
- }
1021
-
1022
- template<unsigned Rows, unsigned Cols, typename Type>
1023
- inline CUDA_CALLABLE void adj_mul(const vec_t<Rows,Type>& b, const mat_t<Rows,Cols,Type>& a, vec_t<Rows,Type>& adj_b, mat_t<Rows,Cols,Type>& adj_a, const vec_t<Cols,Type>& adj_ret)
1024
- {
1025
- adj_a += outer(b, adj_ret);
1026
- adj_b += mul(adj_ret, transpose(a));
1027
- }
1028
-
1029
- template<unsigned Rows, unsigned Cols, unsigned ColsOut, typename Type>
1030
- inline CUDA_CALLABLE void adj_mul(const mat_t<Rows,Cols,Type>& a, const mat_t<Cols,ColsOut,Type>& b, mat_t<Rows,Cols,Type>& adj_a, mat_t<Cols,ColsOut,Type>& adj_b, const mat_t<Rows,ColsOut,Type>& adj_ret)
1031
- {
1032
- adj_a += mul(adj_ret, transpose(b));
1033
- adj_b += mul(transpose(a), adj_ret);
1034
- }
1035
-
1036
- template<unsigned Rows, unsigned Cols, typename Type>
1037
- inline CUDA_CALLABLE void adj_transpose(const mat_t<Rows,Cols,Type>& a, mat_t<Rows,Cols,Type>& adj_a, const mat_t<Cols,Rows,Type>& adj_ret)
1038
- {
1039
- adj_a += transpose(adj_ret);
1040
- }
1041
-
1042
- template<unsigned Rows, typename Type>
1043
- inline CUDA_CALLABLE void adj_trace(const mat_t<Rows,Rows,Type>& m, mat_t<Rows,Rows,Type>& adj_m, Type adj_ret)
1044
- {
1045
- for (unsigned i=0; i < Rows; ++i)
1046
- adj_m.data[i][i] += adj_ret;
1047
- }
1048
-
1049
- template<unsigned Rows, typename Type>
1050
- inline CUDA_CALLABLE void adj_diag(const vec_t<Rows,Type>& d, vec_t<Rows,Type>& adj_d, const mat_t<Rows,Rows,Type>& adj_ret)
1051
- {
1052
- for (unsigned i=0; i < Rows; ++i)
1053
- adj_d[i] += adj_ret.data[i][i];
1054
- }
1055
-
1056
- template<unsigned Rows, typename Type>
1057
- inline CUDA_CALLABLE void adj_get_diag(const mat_t<Rows,Rows,Type>& m, mat_t<Rows,Rows,Type>& adj_m, const vec_t<Rows,Type>& adj_ret)
1058
- {
1059
- for (unsigned i=0; i < Rows; ++i)
1060
- adj_m.data[i][i] += adj_ret[i];
1061
- }
1062
-
1063
- template<typename Type>
1064
- inline CUDA_CALLABLE void adj_determinant(const mat_t<2,2,Type>& m, mat_t<2,2,Type>& adj_m, Type adj_ret)
1065
- {
1066
- adj_m.data[0][0] += m.data[1][1]*adj_ret;
1067
- adj_m.data[1][1] += m.data[0][0]*adj_ret;
1068
- adj_m.data[0][1] -= m.data[1][0]*adj_ret;
1069
- adj_m.data[1][0] -= m.data[0][1]*adj_ret;
1070
- }
1071
-
1072
- template<typename Type>
1073
- inline CUDA_CALLABLE void adj_determinant(const mat_t<3,3,Type>& m, mat_t<3,3,Type>& adj_m, Type adj_ret)
1074
- {
1075
- (vec_t<3,Type>&)adj_m.data[0] += cross(m.get_row(1), m.get_row(2))*adj_ret;
1076
- (vec_t<3,Type>&)adj_m.data[1] += cross(m.get_row(2), m.get_row(0))*adj_ret;
1077
- (vec_t<3,Type>&)adj_m.data[2] += cross(m.get_row(0), m.get_row(1))*adj_ret;
1078
- }
1079
-
1080
- template<typename Type>
1081
- inline CUDA_CALLABLE void adj_determinant(const mat_t<4,4,Type>& m, mat_t<4,4,Type>& adj_m, Type adj_ret)
1082
- {
1083
- // adapted from USD GfMatrix4f::Inverse()
1084
- Type x00, x01, x02, x03;
1085
- Type x10, x11, x12, x13;
1086
- Type x20, x21, x22, x23;
1087
- Type x30, x31, x32, x33;
1088
- double y01, y02, y03, y12, y13, y23;
1089
- Type z00, z10, z20, z30;
1090
- Type z01, z11, z21, z31;
1091
- double z02, z03, z12, z13, z22, z23, z32, z33;
1092
-
1093
- // Pickle 1st two columns of matrix into registers
1094
- x00 = m.data[0][0];
1095
- x01 = m.data[0][1];
1096
- x10 = m.data[1][0];
1097
- x11 = m.data[1][1];
1098
- x20 = m.data[2][0];
1099
- x21 = m.data[2][1];
1100
- x30 = m.data[3][0];
1101
- x31 = m.data[3][1];
1102
-
1103
- // Compute all six 2x2 determinants of 1st two columns
1104
- y01 = x00*x11 - x10*x01;
1105
- y02 = x00*x21 - x20*x01;
1106
- y03 = x00*x31 - x30*x01;
1107
- y12 = x10*x21 - x20*x11;
1108
- y13 = x10*x31 - x30*x11;
1109
- y23 = x20*x31 - x30*x21;
1110
-
1111
- // Pickle 2nd two columns of matrix into registers
1112
- x02 = m.data[0][2];
1113
- x03 = m.data[0][3];
1114
- x12 = m.data[1][2];
1115
- x13 = m.data[1][3];
1116
- x22 = m.data[2][2];
1117
- x23 = m.data[2][3];
1118
- x32 = m.data[3][2];
1119
- x33 = m.data[3][3];
1120
-
1121
- // Compute all 3x3 cofactors for 2nd two columns */
1122
- z33 = x02*y12 - x12*y02 + x22*y01;
1123
- z23 = x12*y03 - x32*y01 - x02*y13;
1124
- z13 = x02*y23 - x22*y03 + x32*y02;
1125
- z03 = x22*y13 - x32*y12 - x12*y23;
1126
- z32 = x13*y02 - x23*y01 - x03*y12;
1127
- z22 = x03*y13 - x13*y03 + x33*y01;
1128
- z12 = x23*y03 - x33*y02 - x03*y23;
1129
- z02 = x13*y23 - x23*y13 + x33*y12;
1130
-
1131
- // Compute all six 2x2 determinants of 2nd two columns
1132
- y01 = x02*x13 - x12*x03;
1133
- y02 = x02*x23 - x22*x03;
1134
- y03 = x02*x33 - x32*x03;
1135
- y12 = x12*x23 - x22*x13;
1136
- y13 = x12*x33 - x32*x13;
1137
- y23 = x22*x33 - x32*x23;
1138
-
1139
- // Compute all 3x3 cofactors for 1st two columns
1140
- z30 = x11*y02 - x21*y01 - x01*y12;
1141
- z20 = x01*y13 - x11*y03 + x31*y01;
1142
- z10 = x21*y03 - x31*y02 - x01*y23;
1143
- z00 = x11*y23 - x21*y13 + x31*y12;
1144
- z31 = x00*y12 - x10*y02 + x20*y01;
1145
- z21 = x10*y03 - x30*y01 - x00*y13;
1146
- z11 = x00*y23 - x20*y03 + x30*y02;
1147
- z01 = x20*y13 - x30*y12 - x10*y23;
1148
-
1149
- // Multiply all 3x3 cofactors by adjoint & transpose
1150
- adj_m.data[0][0] += Type(z00*adj_ret);
1151
- adj_m.data[1][0] += Type(z10*adj_ret);
1152
- adj_m.data[0][1] += Type(z01*adj_ret);
1153
- adj_m.data[2][0] += Type(z20*adj_ret);
1154
- adj_m.data[0][2] += Type(z02*adj_ret);
1155
- adj_m.data[3][0] += Type(z30*adj_ret);
1156
- adj_m.data[0][3] += Type(z03*adj_ret);
1157
- adj_m.data[1][1] += Type(z11*adj_ret);
1158
- adj_m.data[2][1] += Type(z21*adj_ret);
1159
- adj_m.data[1][2] += Type(z12*adj_ret);
1160
- adj_m.data[3][1] += Type(z31*adj_ret);
1161
- adj_m.data[1][3] += Type(z13*adj_ret);
1162
- adj_m.data[2][2] += Type(z22*adj_ret);
1163
- adj_m.data[3][2] += Type(z32*adj_ret);
1164
- adj_m.data[2][3] += Type(z23*adj_ret);
1165
- adj_m.data[3][3] += Type(z33*adj_ret);
1166
- }
1167
-
1168
- template<unsigned Rows, typename Type>
1169
- inline CUDA_CALLABLE void adj_inverse(const mat_t<Rows,Rows,Type>& m, mat_t<Rows,Rows,Type>& ret, mat_t<Rows,Rows,Type>& adj_m, const mat_t<Rows,Rows,Type>& adj_ret)
1170
- {
1171
- // todo: how to cache this from the forward pass?
1172
- mat_t<Rows,Rows,Type> invt = transpose(ret);
1173
-
1174
- // see https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf 2.2.3
1175
- adj_m -= mul(mul(invt, adj_ret), invt);
1176
- }
1177
-
1178
- template<typename Type>
1179
- inline CUDA_CALLABLE void adj_transform_point(const mat_t<4,4,Type>& m, const vec_t<3,Type>& v, mat_t<4,4,Type>& adj_m, vec_t<3,Type>& adj_v, const vec_t<3,Type>& adj_ret)
1180
- {
1181
- vec_t<4,Type> out = vec_t<4,Type>(v[0], v[1], v[2], 1.f);
1182
- adj_m = add(adj_m, transpose(mat_t<4,4,Type>(adj_ret[0] * out, adj_ret[1] * out, adj_ret[2] * out, vec_t<4,Type>())));
1183
- adj_v[0] += dot(vec_t<3,Type>(m.data[0][0], m.data[1][0], m.data[2][0]), adj_ret);
1184
- adj_v[1] += dot(vec_t<3,Type>(m.data[0][1], m.data[1][1], m.data[2][1]), adj_ret);
1185
- adj_v[2] += dot(vec_t<3,Type>(m.data[0][2], m.data[1][2], m.data[2][2]), adj_ret);
1186
- }
1187
-
1188
- template<typename Type>
1189
- inline CUDA_CALLABLE void adj_transform_vector(const mat_t<4,4,Type>& m, const vec_t<3,Type>& v, mat_t<4,4,Type>& adj_m, vec_t<3,Type>& adj_v, const vec_t<3,Type>& adj_ret)
1190
- {
1191
- vec_t<4,Type> out = vec_t<4,Type>(v[0], v[1], v[2], 0.f);
1192
- adj_m = add(adj_m, transpose(mat_t<4,4,Type>(adj_ret[0] * out, adj_ret[1] * out, adj_ret[2] * out, vec_t<4,Type>())));
1193
- adj_v[0] += dot(vec_t<3,Type>(m.data[0][0], m.data[1][0], m.data[2][0]), adj_ret);
1194
- adj_v[1] += dot(vec_t<3,Type>(m.data[0][1], m.data[1][1], m.data[2][1]), adj_ret);
1195
- adj_v[2] += dot(vec_t<3,Type>(m.data[0][2], m.data[1][2], m.data[2][2]), adj_ret);
1196
- }
1197
-
1198
- template<typename Type>
1199
- inline CUDA_CALLABLE void adj_skew(const vec_t<3,Type>& a, vec_t<3,Type>& adj_a, const mat_t<3,3,Type>& adj_ret)
1200
- {
1201
- adj_a[0] += adj_ret.data[2][1] - adj_ret.data[1][2];
1202
- adj_a[1] += adj_ret.data[0][2] - adj_ret.data[2][0];
1203
- adj_a[2] += adj_ret.data[1][0] - adj_ret.data[0][1];
1204
- }
1205
-
1206
- template<unsigned Rows, unsigned Cols, typename Type>
1207
- inline CUDA_CALLABLE void adj_cw_mul(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b, mat_t<Rows,Cols,Type>& adj_a, mat_t<Rows,Cols,Type>& adj_b, const mat_t<Rows,Cols,Type>& adj_ret)
1208
- {
1209
- adj_a += cw_mul(b, adj_ret);
1210
- adj_b += cw_mul(a, adj_ret);
1211
- }
1212
-
1213
- template<unsigned Rows, unsigned Cols, typename Type>
1214
- inline CUDA_CALLABLE void adj_cw_div(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b, mat_t<Rows,Cols,Type>& ret, mat_t<Rows,Cols,Type>& adj_a, mat_t<Rows,Cols,Type>& adj_b, const mat_t<Rows,Cols,Type>& adj_ret)
1215
- {
1216
- adj_a += cw_div(adj_ret, b);
1217
- adj_b -= cw_mul(adj_ret, cw_div(ret, b));
1218
- }
1219
-
1220
- // adjoint for the constant constructor:
1221
- template<unsigned Rows, unsigned Cols, typename Type>
1222
- inline CUDA_CALLABLE void adj_mat_t(Type s, Type& adj_s, const mat_t<Rows, Cols, Type>& adj_ret)
1223
- {
1224
- for (unsigned i=0; i < Rows; ++i)
1225
- {
1226
- for (unsigned j=0; j < Cols; ++j)
1227
- {
1228
- adj_s += adj_ret.data[i][j];
1229
- }
1230
- }
1231
- }
1232
-
1233
- // adjoint for the casting constructor:
1234
- template<unsigned Rows, unsigned Cols, typename Type, typename OtherType>
1235
- inline CUDA_CALLABLE void adj_mat_t(const mat_t<Rows, Cols, OtherType>& other, mat_t<Rows, Cols, OtherType>& adj_other, const mat_t<Rows, Cols, Type>& adj_ret)
1236
- {
1237
- for (unsigned i=0; i < Rows; ++i)
1238
- {
1239
- for (unsigned j=0; j < Cols; ++j)
1240
- {
1241
- adj_other.data[i][j] += adj_ret.data[i][j];
1242
- }
1243
- }
1244
- }
1245
-
1246
- // adjoint for the initializer_array scalar constructor:
1247
- template<unsigned Rows, unsigned Cols, typename Type>
1248
- inline CUDA_CALLABLE void adj_mat_t(const initializer_array<Rows * Cols, Type> &cmps, const initializer_array<Rows * Cols, Type*> &adj_cmps, const mat_t<Rows, Cols, Type>& adj_ret)
1249
- {
1250
- for (unsigned i=0; i < Rows; ++i)
1251
- {
1252
- for (unsigned j=0; j < Cols; ++j)
1253
- {
1254
- *adj_cmps[i * Cols + j] += adj_ret.data[i][j];
1255
- }
1256
- }
1257
- }
1258
-
1259
- template<typename Type>
1260
- inline CUDA_CALLABLE void adj_mat_t(Type m00, Type m01, Type m10, Type m11, Type& adj_m00, Type& adj_m01, Type& adj_m10, Type& adj_m11, const mat_t<2, 2, Type>& adj_ret)
1261
- {
1262
- adj_m00 += adj_ret.data[0][0];
1263
- adj_m01 += adj_ret.data[0][1];
1264
- adj_m10 += adj_ret.data[1][0];
1265
- adj_m11 += adj_ret.data[1][1];
1266
- }
1267
-
1268
- template<typename Type>
1269
- inline CUDA_CALLABLE void adj_mat_t(Type m00, Type m01, Type m02,
1270
- Type m10, Type m11, Type m12,
1271
- Type m20, Type m21, Type m22,
1272
- Type& a00, Type& a01, Type& a02,
1273
- Type& a10, Type& a11, Type& a12,
1274
- Type& a20, Type& a21, Type& a22,
1275
- const mat_t<3, 3, Type>& adj_ret)
1276
- {
1277
- a00 += adj_ret.data[0][0];
1278
- a01 += adj_ret.data[0][1];
1279
- a02 += adj_ret.data[0][2];
1280
- a10 += adj_ret.data[1][0];
1281
- a11 += adj_ret.data[1][1];
1282
- a12 += adj_ret.data[1][2];
1283
- a20 += adj_ret.data[2][0];
1284
- a21 += adj_ret.data[2][1];
1285
- a22 += adj_ret.data[2][2];
1286
- }
1287
-
1288
-
1289
- template<typename Type>
1290
- inline CUDA_CALLABLE void adj_mat_t(Type m00, Type m01, Type m02, Type m03,
1291
- Type m10, Type m11, Type m12, Type m13,
1292
- Type m20, Type m21, Type m22, Type m23,
1293
- Type m30, Type m31, Type m32, Type m33,
1294
- Type& a00, Type& a01, Type& a02, Type& a03,
1295
- Type& a10, Type& a11, Type& a12, Type& a13,
1296
- Type& a20, Type& a21, Type& a22, Type& a23,
1297
- Type& a30, Type& a31, Type& a32, Type& a33,
1298
- const mat_t<4, 4, Type>& adj_ret)
1299
- {
1300
- a00 += adj_ret.data[0][0];
1301
- a01 += adj_ret.data[0][1];
1302
- a02 += adj_ret.data[0][2];
1303
- a03 += adj_ret.data[0][3];
1304
-
1305
- a10 += adj_ret.data[1][0];
1306
- a11 += adj_ret.data[1][1];
1307
- a12 += adj_ret.data[1][2];
1308
- a13 += adj_ret.data[1][3];
1309
-
1310
- a20 += adj_ret.data[2][0];
1311
- a21 += adj_ret.data[2][1];
1312
- a22 += adj_ret.data[2][2];
1313
- a23 += adj_ret.data[2][3];
1314
-
1315
- a30 += adj_ret.data[3][0];
1316
- a31 += adj_ret.data[3][1];
1317
- a32 += adj_ret.data[3][2];
1318
- a33 += adj_ret.data[3][3];
1319
- }
1320
-
1321
-
1322
-
1323
- // adjoint for the initializer_array vector constructor:
1324
- template<unsigned Rows, unsigned Cols, typename Type>
1325
- inline CUDA_CALLABLE void adj_mat_t(const initializer_array<Cols, vec_t<Rows,Type> > &cmps, const initializer_array<Cols, vec_t<Rows,Type>* > &adj_cmps, const mat_t<Rows, Cols, Type>& adj_ret)
1326
- {
1327
- for (unsigned j=0; j < Cols; ++j)
1328
- {
1329
- for (unsigned i=0; i < Rows; ++i)
1330
- {
1331
- (*adj_cmps[j])[i] += adj_ret.data[i][j];
1332
- }
1333
- }
1334
- }
1335
-
1336
- template<typename Type>
1337
- inline CUDA_CALLABLE void adj_mat_t(const vec_t<2,Type> &cmps0, const vec_t<2,Type> &cmps1, vec_t<2,Type> &adj_cmps0, vec_t<2,Type> &adj_cmps1, const mat_t<2, 2, Type>& adj_ret)
1338
- {
1339
- for (unsigned i=0; i < 2; ++i)
1340
- {
1341
- adj_cmps0[i] += adj_ret.data[i][0];
1342
- adj_cmps1[i] += adj_ret.data[i][1];
1343
- }
1344
- }
1345
-
1346
- template<typename Type>
1347
- inline CUDA_CALLABLE void adj_mat_t(const vec_t<3,Type> &cmps0, const vec_t<3,Type> &cmps1, const vec_t<3,Type> &cmps2, vec_t<3,Type> &adj_cmps0, vec_t<3,Type> &adj_cmps1, vec_t<3,Type> &adj_cmps2, const mat_t<3, 3, Type>& adj_ret)
1348
- {
1349
- for (unsigned i=0; i < 3; ++i)
1350
- {
1351
- adj_cmps0[i] += adj_ret.data[i][0];
1352
- adj_cmps1[i] += adj_ret.data[i][1];
1353
- adj_cmps2[i] += adj_ret.data[i][2];
1354
- }
1355
- }
1356
-
1357
- template<typename Type>
1358
- inline CUDA_CALLABLE void adj_mat_t(const vec_t<4,Type> &cmps0, const vec_t<4,Type> &cmps1, const vec_t<4,Type> &cmps2, const vec_t<4,Type> &cmps3, vec_t<4,Type> &adj_cmps0, vec_t<4,Type> &adj_cmps1, vec_t<4,Type> &adj_cmps2, vec_t<4,Type> &adj_cmps3, const mat_t<4, 4, Type>& adj_ret)
1359
- {
1360
- for (unsigned i=0; i < 4; ++i)
1361
- {
1362
- adj_cmps0[i] += adj_ret.data[i][0];
1363
- adj_cmps1[i] += adj_ret.data[i][1];
1364
- adj_cmps2[i] += adj_ret.data[i][2];
1365
- adj_cmps3[i] += adj_ret.data[i][3];
1366
- }
1367
- }
1368
-
1369
- template<unsigned Rows, unsigned Cols, typename Type>
1370
- CUDA_CALLABLE inline mat_t<Rows, Cols, Type> lerp(const mat_t<Rows, Cols, Type>& a, const mat_t<Rows, Cols, Type>& b, Type t)
1371
- {
1372
- return a*(Type(1)-t) + b*t;
1373
- }
1374
-
1375
- template<unsigned Rows, unsigned Cols, typename Type>
1376
- CUDA_CALLABLE inline void adj_lerp(const mat_t<Rows, Cols, Type>& a, const mat_t<Rows, Cols, Type>& b, Type t, mat_t<Rows, Cols, Type>& adj_a, mat_t<Rows, Cols, Type>& adj_b, Type& adj_t, const mat_t<Rows, Cols, Type>& adj_ret)
1377
- {
1378
- adj_a += adj_ret*(Type(1)-t);
1379
- adj_b += adj_ret*t;
1380
- adj_t += tensordot(b, adj_ret) - tensordot(a, adj_ret);
1381
- }
1382
-
1383
- // for integral types we do not accumulate gradients
1384
- template<unsigned Rows, unsigned Cols> CUDA_CALLABLE inline void adj_atomic_add(mat_t<Rows, Cols, int8>* buf, const mat_t<Rows, Cols, int8> &value) { }
1385
- template<unsigned Rows, unsigned Cols> CUDA_CALLABLE inline void adj_atomic_add(mat_t<Rows, Cols, uint8>* buf, const mat_t<Rows, Cols, uint8> &value) { }
1386
- template<unsigned Rows, unsigned Cols> CUDA_CALLABLE inline void adj_atomic_add(mat_t<Rows, Cols, int16>* buf, const mat_t<Rows, Cols, int16> &value) { }
1387
- template<unsigned Rows, unsigned Cols> CUDA_CALLABLE inline void adj_atomic_add(mat_t<Rows, Cols, uint16>* buf, const mat_t<Rows, Cols, uint16> &value) { }
1388
- template<unsigned Rows, unsigned Cols> CUDA_CALLABLE inline void adj_atomic_add(mat_t<Rows, Cols, int32>* buf, const mat_t<Rows, Cols, int32> &value) { }
1389
- template<unsigned Rows, unsigned Cols> CUDA_CALLABLE inline void adj_atomic_add(mat_t<Rows, Cols, uint32>* buf, const mat_t<Rows, Cols, uint32> &value) { }
1390
- template<unsigned Rows, unsigned Cols> CUDA_CALLABLE inline void adj_atomic_add(mat_t<Rows, Cols, int64>* buf, const mat_t<Rows, Cols, int64> &value) { }
1391
- template<unsigned Rows, unsigned Cols> CUDA_CALLABLE inline void adj_atomic_add(mat_t<Rows, Cols, uint64>* buf, const mat_t<Rows, Cols, uint64> &value) { }
1392
-
1393
- using mat22h = mat_t<2,2,half>;
1394
- using mat33h = mat_t<3,3,half>;
1395
- using mat44h = mat_t<4,4,half>;
1396
-
1397
- using mat22 = mat_t<2,2,float>;
1398
- using mat33 = mat_t<3,3,float>;
1399
- using mat44 = mat_t<4,4,float>;
1400
-
1401
- using mat22f = mat_t<2,2,float>;
1402
- using mat33f = mat_t<3,3,float>;
1403
- using mat44f = mat_t<4,4,float>;
1404
-
1405
- using mat22d = mat_t<2,2,double>;
1406
- using mat33d = mat_t<3,3,double>;
1407
- using mat44d = mat_t<4,4,double>;
1408
-
1409
- inline CUDA_CALLABLE void adj_mat22(vec2 c0, vec2 c1,
1410
- vec2& a0, vec2& a1,
1411
- const mat22& adj_ret)
1412
- {
1413
- a0 += adj_ret.get_col(0);
1414
- a1 += adj_ret.get_col(1);
1415
- }
1416
-
1417
- inline CUDA_CALLABLE void adj_mat22(float m00, float m01, float m10, float m11, float& adj_m00, float& adj_m01, float& adj_m10, float& adj_m11, const mat22& adj_ret)
1418
- {
1419
- adj_m00 += adj_ret.data[0][0];
1420
- adj_m01 += adj_ret.data[0][1];
1421
- adj_m10 += adj_ret.data[1][0];
1422
- adj_m11 += adj_ret.data[1][1];
1423
- }
1424
-
1425
- inline CUDA_CALLABLE void adj_mat33(vec3 c0, vec3 c1, vec3 c2,
1426
- vec3& a0, vec3& a1, vec3& a2,
1427
- const mat33& adj_ret)
1428
- {
1429
- // column constructor
1430
- a0 += adj_ret.get_col(0);
1431
- a1 += adj_ret.get_col(1);
1432
- a2 += adj_ret.get_col(2);
1433
-
1434
- }
1435
-
1436
- inline CUDA_CALLABLE void adj_mat33(float m00, float m01, float m02,
1437
- float m10, float m11, float m12,
1438
- float m20, float m21, float m22,
1439
- float& a00, float& a01, float& a02,
1440
- float& a10, float& a11, float& a12,
1441
- float& a20, float& a21, float& a22,
1442
- const mat33& adj_ret)
1443
- {
1444
- a00 += adj_ret.data[0][0];
1445
- a01 += adj_ret.data[0][1];
1446
- a02 += adj_ret.data[0][2];
1447
- a10 += adj_ret.data[1][0];
1448
- a11 += adj_ret.data[1][1];
1449
- a12 += adj_ret.data[1][2];
1450
- a20 += adj_ret.data[2][0];
1451
- a21 += adj_ret.data[2][1];
1452
- a22 += adj_ret.data[2][2];
1453
- }
1454
-
1455
- inline CUDA_CALLABLE void adj_mat44(
1456
- vec4 c0, vec4 c1, vec4 c2, vec4 c3,
1457
- vec4& a0, vec4& a1, vec4& a2, vec4& a3,
1458
- const mat44& adj_ret)
1459
- {
1460
- // column constructor
1461
- a0 += adj_ret.get_col(0);
1462
- a1 += adj_ret.get_col(1);
1463
- a2 += adj_ret.get_col(2);
1464
- a3 += adj_ret.get_col(3);
1465
- }
1466
-
1467
- inline CUDA_CALLABLE void adj_mat44(float m00, float m01, float m02, float m03,
1468
- float m10, float m11, float m12, float m13,
1469
- float m20, float m21, float m22, float m23,
1470
- float m30, float m31, float m32, float m33,
1471
- float& a00, float& a01, float& a02, float& a03,
1472
- float& a10, float& a11, float& a12, float& a13,
1473
- float& a20, float& a21, float& a22, float& a23,
1474
- float& a30, float& a31, float& a32, float& a33,
1475
- const mat44& adj_ret)
1476
- {
1477
- a00 += adj_ret.data[0][0];
1478
- a01 += adj_ret.data[0][1];
1479
- a02 += adj_ret.data[0][2];
1480
- a03 += adj_ret.data[0][3];
1481
-
1482
- a10 += adj_ret.data[1][0];
1483
- a11 += adj_ret.data[1][1];
1484
- a12 += adj_ret.data[1][2];
1485
- a13 += adj_ret.data[1][3];
1486
-
1487
- a20 += adj_ret.data[2][0];
1488
- a21 += adj_ret.data[2][1];
1489
- a22 += adj_ret.data[2][2];
1490
- a23 += adj_ret.data[2][3];
1491
-
1492
- a30 += adj_ret.data[3][0];
1493
- a31 += adj_ret.data[3][1];
1494
- a32 += adj_ret.data[3][2];
1495
- a33 += adj_ret.data[3][3];
1496
- }
1497
-
1498
- } // namespace wp
1
+ /** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
2
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
3
+ * and proprietary rights in and to this software, related documentation
4
+ * and any modifications thereto. Any use, reproduction, disclosure or
5
+ * distribution of this software and related documentation without an express
6
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
7
+ */
8
+
9
+ #pragma once
10
+
11
+ #include "initializer_array.h"
12
+
13
+ namespace wp
14
+ {
15
+
16
+ //----------------------------------------------------------
17
+ // mat
18
+ template<typename T>
19
+ struct quat_t;
20
+
21
+ template<unsigned Rows, unsigned Cols, typename Type>
22
+ struct mat_t
23
+ {
24
+ inline CUDA_CALLABLE mat_t()
25
+ : data()
26
+ {}
27
+
28
+ inline CUDA_CALLABLE mat_t(Type s)
29
+ {
30
+ for (unsigned i=0; i < Rows; ++i)
31
+ for (unsigned j=0; j < Cols; ++j)
32
+ data[i][j] = s;
33
+ }
34
+
35
+ template <typename OtherType>
36
+ inline explicit CUDA_CALLABLE mat_t(const mat_t<Rows, Cols, OtherType>& other)
37
+ {
38
+ for (unsigned i=0; i < Rows; ++i)
39
+ for (unsigned j=0; j < Cols; ++j)
40
+ data[i][j] = other.data[i][j];
41
+ }
42
+
43
+ inline CUDA_CALLABLE mat_t(vec_t<2,Type> c0, vec_t<2,Type> c1)
44
+ {
45
+ data[0][0] = c0[0];
46
+ data[1][0] = c0[1];
47
+
48
+ data[0][1] = c1[0];
49
+ data[1][1] = c1[1];
50
+ }
51
+
52
+ inline CUDA_CALLABLE mat_t(vec_t<3,Type> c0, vec_t<3,Type> c1, vec_t<3,Type> c2)
53
+ {
54
+ data[0][0] = c0[0];
55
+ data[1][0] = c0[1];
56
+ data[2][0] = c0[2];
57
+
58
+ data[0][1] = c1[0];
59
+ data[1][1] = c1[1];
60
+ data[2][1] = c1[2];
61
+
62
+ data[0][2] = c2[0];
63
+ data[1][2] = c2[1];
64
+ data[2][2] = c2[2];
65
+ }
66
+
67
+ inline CUDA_CALLABLE mat_t(vec_t<4,Type> c0, vec_t<4,Type> c1, vec_t<4,Type> c2, vec_t<4,Type> c3)
68
+ {
69
+ data[0][0] = c0[0];
70
+ data[1][0] = c0[1];
71
+ data[2][0] = c0[2];
72
+ data[3][0] = c0[3];
73
+
74
+ data[0][1] = c1[0];
75
+ data[1][1] = c1[1];
76
+ data[2][1] = c1[2];
77
+ data[3][1] = c1[3];
78
+
79
+ data[0][2] = c2[0];
80
+ data[1][2] = c2[1];
81
+ data[2][2] = c2[2];
82
+ data[3][2] = c2[3];
83
+
84
+ data[0][3] = c3[0];
85
+ data[1][3] = c3[1];
86
+ data[2][3] = c3[2];
87
+ data[3][3] = c3[3];
88
+ }
89
+
90
+ inline CUDA_CALLABLE mat_t(Type m00, Type m01, Type m10, Type m11)
91
+ {
92
+ data[0][0] = m00;
93
+ data[1][0] = m10;
94
+ data[0][1] = m01;
95
+ data[1][1] = m11;
96
+ }
97
+
98
+ inline CUDA_CALLABLE mat_t(
99
+ Type m00, Type m01, Type m02,
100
+ Type m10, Type m11, Type m12,
101
+ Type m20, Type m21, Type m22)
102
+ {
103
+ data[0][0] = m00;
104
+ data[1][0] = m10;
105
+ data[2][0] = m20;
106
+
107
+ data[0][1] = m01;
108
+ data[1][1] = m11;
109
+ data[2][1] = m21;
110
+
111
+ data[0][2] = m02;
112
+ data[1][2] = m12;
113
+ data[2][2] = m22;
114
+ }
115
+
116
+ inline CUDA_CALLABLE mat_t(
117
+ Type m00, Type m01, Type m02, Type m03,
118
+ Type m10, Type m11, Type m12, Type m13,
119
+ Type m20, Type m21, Type m22, Type m23,
120
+ Type m30, Type m31, Type m32, Type m33)
121
+ {
122
+ data[0][0] = m00;
123
+ data[1][0] = m10;
124
+ data[2][0] = m20;
125
+ data[3][0] = m30;
126
+
127
+ data[0][1] = m01;
128
+ data[1][1] = m11;
129
+ data[2][1] = m21;
130
+ data[3][1] = m31;
131
+
132
+ data[0][2] = m02;
133
+ data[1][2] = m12;
134
+ data[2][2] = m22;
135
+ data[3][2] = m32;
136
+
137
+ data[0][3] = m03;
138
+ data[1][3] = m13;
139
+ data[2][3] = m23;
140
+ data[3][3] = m33;
141
+ }
142
+
143
+ // implemented in quat.h
144
+ inline CUDA_CALLABLE mat_t(const vec_t<3,Type>& pos, const quat_t<Type>& rot, const vec_t<3,Type>& scale);
145
+
146
+
147
+ inline CUDA_CALLABLE mat_t(const initializer_array<Rows * Cols, Type> &l)
148
+ {
149
+ for (unsigned i=0; i < Rows; ++i)
150
+ {
151
+ for (unsigned j=0; j < Cols; ++j)
152
+ {
153
+ data[i][j] = l[i * Cols + j];
154
+ }
155
+ }
156
+ }
157
+
158
+ inline CUDA_CALLABLE mat_t(const initializer_array<Cols, vec_t<Rows,Type> > &l)
159
+ {
160
+ for (unsigned j=0; j < Cols; ++j)
161
+ {
162
+ for (unsigned i=0; i < Rows; ++i)
163
+ {
164
+ data[i][j] = l[j][i];
165
+ }
166
+ }
167
+ }
168
+
169
+ CUDA_CALLABLE vec_t<Cols,Type> get_row(int index) const
170
+ {
171
+ return (vec_t<Cols,Type>&)data[index];
172
+ }
173
+
174
+ CUDA_CALLABLE void set_row(int index, const vec_t<Cols,Type>& v)
175
+ {
176
+ (vec_t<Cols,Type>&)data[index] = v;
177
+ }
178
+
179
+ CUDA_CALLABLE vec_t<Rows,Type> get_col(int index) const
180
+ {
181
+ vec_t<Rows,Type> ret;
182
+ for( unsigned i=0;i < Rows; ++i )
183
+ {
184
+ ret[i] = data[i][index];
185
+ }
186
+ return ret;
187
+ }
188
+
189
+ CUDA_CALLABLE void set_col(int index, const vec_t<Rows,Type>& v)
190
+ {
191
+ for( unsigned i=0;i < Rows; ++i )
192
+ {
193
+ data[i][index] = v[i];
194
+ }
195
+ }
196
+
197
+ // row major storage assumed to be compatible with PyTorch
198
+ Type data[Rows][Cols];
199
+ };
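The scalar constructors above take components in row-major reading order, while the vec_t constructors take whole columns; both fill the same row-major `data` array. A minimal host-side sketch of the two paths (assuming CUDA_CALLABLE expands to nothing on the CPU and that the two-component vec_t constructor from vec.h is available; `from_rows`/`from_cols` are illustrative names, not part of the header):

    // Same 2x2 matrix built two ways: element (row-major) order vs. column order.
    wp::mat_t<2, 2, float> from_rows(1.0f, 2.0f,    // row 0
                                     3.0f, 4.0f);   // row 1
    wp::mat_t<2, 2, float> from_cols(wp::vec_t<2, float>(1.0f, 3.0f),    // column 0
                                     wp::vec_t<2, float>(2.0f, 4.0f));   // column 1
    // Both give data[0][1] == 2.0f and data[1][0] == 3.0f, i.e. the same matrix.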
200
+
201
+
202
+ template<unsigned Rows, typename Type>
203
+ inline CUDA_CALLABLE mat_t<Rows, Rows, Type> identity()
204
+ {
205
+ mat_t<Rows, Rows, Type> m;
206
+ for( unsigned i=0; i < Rows; ++i )
207
+ {
208
+ m.data[i][i] = Type(1);
209
+ }
210
+ return m;
211
+ }
212
+
213
+ template<unsigned Rows, unsigned Cols, typename Type>
214
+ inline CUDA_CALLABLE bool operator==(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b)
215
+ {
216
+ for (unsigned i=0; i < Rows; ++i)
217
+ for (unsigned j=0; j < Cols; ++j)
218
+ if (a.data[i][j] != b.data[i][j])
219
+ return false;
220
+
221
+ return true;
222
+ }
223
+
224
+
225
+ // negation:
226
+ template<unsigned Rows, unsigned Cols, typename Type>
227
+ inline CUDA_CALLABLE mat_t<Rows,Cols,Type> operator - (mat_t<Rows,Cols,Type> a)
228
+ {
229
+ // NB: this constructor will initialize all ret's components to 0, which is
230
+ // unnecessary...
231
+ mat_t<Rows,Cols,Type> ret;
232
+ for (unsigned i=0; i < Rows; ++i)
233
+ for (unsigned j=0; j < Cols; ++j)
234
+ ret.data[i][j] = -a.data[i][j];
235
+
236
+ // Wonder if this does a load of copying when it returns... hopefully not as it's inlined?
237
+ return ret;
238
+ }
239
+
240
+
241
+ template<unsigned Rows, unsigned Cols, typename Type>
242
+ CUDA_CALLABLE inline mat_t<Rows,Cols,Type> pos(const mat_t<Rows,Cols,Type>& x)
243
+ {
244
+ return x;
245
+ }
246
+
247
+ template<unsigned Rows, unsigned Cols, typename Type>
248
+ CUDA_CALLABLE inline void adj_pos(const mat_t<Rows,Cols,Type>& x, mat_t<Rows,Cols,Type>& adj_x, const mat_t<Rows,Cols,Type>& adj_ret)
249
+ {
250
+ adj_x += adj_ret;
251
+ }
252
+
253
+ template<unsigned Rows, unsigned Cols, typename Type>
254
+ CUDA_CALLABLE inline mat_t<Rows,Cols,Type> neg(const mat_t<Rows,Cols,Type>& x)
255
+ {
256
+ return -x;
257
+ }
258
+
259
+ template<unsigned Rows, unsigned Cols, typename Type>
260
+ CUDA_CALLABLE inline void adj_neg(const mat_t<Rows,Cols,Type>& x, mat_t<Rows,Cols,Type>& adj_x, const mat_t<Rows,Cols,Type>& adj_ret)
261
+ {
262
+ adj_x -= adj_ret;
263
+ }
264
+
265
+
266
+ template<unsigned Rows, unsigned Cols, typename Type>
267
+ inline CUDA_CALLABLE mat_t<Rows,Cols,Type> atomic_add(mat_t<Rows,Cols,Type> * addr, mat_t<Rows,Cols,Type> value)
268
+ {
269
+ mat_t<Rows,Cols,Type> m;
270
+
271
+ for (unsigned i=0; i < Rows; ++i)
272
+ for (unsigned j=0; j < Cols; ++j)
273
+ m.data[i][j] = atomic_add(&addr->data[i][j], value.data[i][j]);
274
+
275
+ return m;
276
+ }
277
+
278
+ template<unsigned Rows, unsigned Cols, typename Type>
279
+ inline CUDA_CALLABLE mat_t<Rows,Cols,Type> atomic_min(mat_t<Rows,Cols,Type> * addr, mat_t<Rows,Cols,Type> value)
280
+ {
281
+ mat_t<Rows,Cols,Type> m;
282
+
283
+ for (unsigned i=0; i < Rows; ++i)
284
+ for (unsigned j=0; j < Cols; ++j)
285
+ m.data[i][j] = atomic_min(&addr->data[i][j], value.data[i][j]);
286
+
287
+ return m;
288
+ }
289
+
290
+ template<unsigned Rows, unsigned Cols, typename Type>
291
+ inline CUDA_CALLABLE mat_t<Rows,Cols,Type> atomic_max(mat_t<Rows,Cols,Type> * addr, mat_t<Rows,Cols,Type> value)
292
+ {
293
+ mat_t<Rows,Cols,Type> m;
294
+
295
+ for (unsigned i=0; i < Rows; ++i)
296
+ for (unsigned j=0; j < Cols; ++j)
297
+ m.data[i][j] = atomic_max(&addr->data[i][j], value.data[i][j]);
298
+
299
+ return m;
300
+ }
301
+
302
+ template<unsigned Rows, unsigned Cols, typename Type>
303
+ inline CUDA_CALLABLE void adj_atomic_minmax(
304
+ mat_t<Rows,Cols,Type> *addr,
305
+ mat_t<Rows,Cols,Type> *adj_addr,
306
+ const mat_t<Rows,Cols,Type> &value,
307
+ mat_t<Rows,Cols,Type> &adj_value)
308
+ {
309
+ for (unsigned i=0; i < Rows; ++i)
310
+ for (unsigned j=0; j < Cols; ++j)
311
+ adj_atomic_minmax(&addr->data[i][j], &adj_addr->data[i][j], value.data[i][j], adj_value.data[i][j]);
312
+ }
313
+
314
+ template<unsigned Rows, unsigned Cols, typename Type>
315
+ inline CUDA_CALLABLE vec_t<Cols,Type> extract(const mat_t<Rows,Cols,Type>& m, int row)
316
+ {
317
+ vec_t<Cols,Type> ret;
318
+ for(unsigned i=0; i < Cols; ++i)
319
+ {
320
+ ret.c[i] = m.data[row][i];
321
+ }
322
+ return ret;
323
+ }
324
+
325
+ template<unsigned Rows, unsigned Cols, typename Type>
326
+ inline CUDA_CALLABLE Type extract(const mat_t<Rows,Cols,Type>& m, int row, int col)
327
+ {
328
+ #ifndef NDEBUG
329
+ if (row < 0 || row >= Rows)
330
+ {
331
+ printf("mat row index %d out of bounds at %s %d\n", row, __FILE__, __LINE__);
332
+ assert(0);
333
+ }
334
+ if (col < 0 || col >= Cols)
335
+ {
336
+ printf("mat col index %d out of bounds at %s %d\n", col, __FILE__, __LINE__);
337
+ assert(0);
338
+ }
339
+ #endif
340
+ return m.data[row][col];
341
+ }
342
+
343
+ template<unsigned Rows, unsigned Cols, typename Type>
344
+ inline CUDA_CALLABLE vec_t<Cols, Type>* index(mat_t<Rows,Cols,Type>& m, int row)
345
+ {
346
+ #ifndef NDEBUG
347
+ if (row < 0 || row >= Rows)
348
+ {
349
+ printf("mat row index %d out of bounds at %s %d\n", row, __FILE__, __LINE__);
350
+ assert(0);
351
+ }
352
+ #endif
353
+
354
+ return reinterpret_cast<vec_t<Cols, Type>*>(&m.data[row]);
355
+ }
356
+
357
+ template<unsigned Rows, unsigned Cols, typename Type>
358
+ inline CUDA_CALLABLE Type* index(mat_t<Rows,Cols,Type>& m, int row, int col)
359
+ {
360
+ #ifndef NDEBUG
361
+ if (row < 0 || row >= Rows)
362
+ {
363
+ printf("mat row index %d out of bounds at %s %d\n", row, __FILE__, __LINE__);
364
+ assert(0);
365
+ }
366
+ if (col < 0 || col >= Cols)
367
+ {
368
+ printf("mat col index %d out of bounds at %s %d\n", col, __FILE__, __LINE__);
369
+ assert(0);
370
+ }
371
+ #endif
372
+
373
+ return &m.data[row][col];
374
+ }
375
+
376
+ template<unsigned Rows, unsigned Cols, typename Type>
377
+ inline CUDA_CALLABLE void adj_index(const mat_t<Rows,Cols,Type>& m, int row,
378
+ const mat_t<Rows,Cols,Type>& adj_m, int adj_row, const vec_t<Cols, Type>& adj_value)
379
+ {
380
+ // nop
381
+ }
382
+
383
+ template<unsigned Rows, unsigned Cols, typename Type>
384
+ inline CUDA_CALLABLE void adj_index(const mat_t<Rows,Cols,Type>& m, int row, int col,
385
+ const mat_t<Rows,Cols,Type>& adj_m, int adj_row, int adj_col, Type adj_value)
386
+ {
387
+ // nop
388
+ }
389
+
390
+ template<unsigned Rows, unsigned Cols, typename Type>
391
+ inline bool CUDA_CALLABLE isfinite(const mat_t<Rows,Cols,Type>& m)
392
+ {
393
+ for (unsigned i=0; i < Rows; ++i)
394
+ for (unsigned j=0; j < Cols; ++j)
395
+ if (!isfinite(m.data[i][j]))
396
+ return false;
397
+ return true;
398
+ }
399
+
400
+ template<unsigned Rows, unsigned Cols, typename Type>
401
+ inline CUDA_CALLABLE mat_t<Rows,Cols,Type> add(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b)
402
+ {
403
+ mat_t<Rows,Cols,Type> t;
404
+ for (unsigned i=0; i < Rows; ++i)
405
+ {
406
+ for (unsigned j=0; j < Cols; ++j)
407
+ {
408
+ t.data[i][j] = a.data[i][j] + b.data[i][j];
409
+ }
410
+ }
411
+
412
+ return t;
413
+ }
414
+
415
+ template<unsigned Rows, unsigned Cols, typename Type>
416
+ inline CUDA_CALLABLE mat_t<Rows,Cols,Type> sub(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b)
417
+ {
418
+ mat_t<Rows,Cols,Type> t;
419
+ for (unsigned i=0; i < Rows; ++i)
420
+ {
421
+ for (unsigned j=0; j < Cols; ++j)
422
+ {
423
+ t.data[i][j] = a.data[i][j] - b.data[i][j];
424
+ }
425
+ }
426
+
427
+ return t;
428
+ }
429
+
430
+ template<unsigned Rows, unsigned Cols, typename Type>
431
+ inline CUDA_CALLABLE mat_t<Rows,Cols,Type> div(const mat_t<Rows,Cols,Type>& a, Type b)
432
+ {
433
+ mat_t<Rows,Cols,Type> t;
434
+ for (unsigned i=0; i < Rows; ++i)
435
+ {
436
+ for (unsigned j=0; j < Cols; ++j)
437
+ {
438
+ t.data[i][j] = a.data[i][j]/b;
439
+ }
440
+ }
441
+
442
+ return t;
443
+ }
444
+
445
+ template<unsigned Rows, unsigned Cols, typename Type>
446
+ inline CUDA_CALLABLE mat_t<Rows,Cols,Type> div(Type b, const mat_t<Rows,Cols,Type>& a)
447
+ {
448
+ mat_t<Rows,Cols,Type> t;
449
+ for (unsigned i=0; i < Rows; ++i)
450
+ {
451
+ for (unsigned j=0; j < Cols; ++j)
452
+ {
453
+ t.data[i][j] = b / a.data[i][j];
454
+ }
455
+ }
456
+
457
+ return t;
458
+ }
459
+
460
+ template<unsigned Rows, unsigned Cols, typename Type>
461
+ inline CUDA_CALLABLE mat_t<Rows,Cols,Type> mul(const mat_t<Rows,Cols,Type>& a, Type b)
462
+ {
463
+ mat_t<Rows,Cols,Type> t;
464
+ for (unsigned i=0; i < Rows; ++i)
465
+ {
466
+ for (unsigned j=0; j < Cols; ++j)
467
+ {
468
+ t.data[i][j] = a.data[i][j]*b;
469
+ }
470
+ }
471
+
472
+ return t;
473
+ }
474
+
475
+ template<unsigned Rows, unsigned Cols, typename Type>
476
+ inline CUDA_CALLABLE mat_t<Rows,Cols,Type> mul(Type b, const mat_t<Rows,Cols,Type>& a)
477
+ {
478
+ return mul(a,b);
479
+ }
480
+
481
+
482
+ template<unsigned Rows, unsigned Cols, typename Type>
483
+ inline CUDA_CALLABLE mat_t<Rows,Cols,Type> operator*(Type b, const mat_t<Rows,Cols,Type>& a)
484
+ {
485
+ return mul(a,b);
486
+ }
487
+
488
+ template<unsigned Rows, unsigned Cols, typename Type>
489
+ inline CUDA_CALLABLE mat_t<Rows,Cols,Type> operator*( const mat_t<Rows,Cols,Type>& a, Type b)
490
+ {
491
+ return mul(a,b);
492
+ }
493
+
494
+ template<unsigned Rows, unsigned Cols, typename Type>
495
+ inline CUDA_CALLABLE vec_t<Rows,Type> mul(const mat_t<Rows,Cols,Type>& a, const vec_t<Cols,Type>& b)
496
+ {
497
+ vec_t<Rows,Type> r = a.get_col(0)*b[0];
498
+ for( unsigned i=1; i < Cols; ++i )
499
+ {
500
+ r += a.get_col(i)*b[i];
501
+ }
502
+ return r;
503
+ }
504
+
505
+ template<unsigned Rows, unsigned Cols, typename Type>
506
+ inline CUDA_CALLABLE vec_t<Cols,Type> mul(const vec_t<Rows,Type>& b, const mat_t<Rows,Cols,Type>& a)
507
+ {
508
+ vec_t<Cols,Type> r = a.get_row(0)*b[0];
509
+ for( unsigned i=1; i < Rows; ++i )
510
+ {
511
+ r += a.get_row(i)*b[i];
512
+ }
513
+ return r;
514
+ }
515
+
516
+ template<unsigned Rows, unsigned Cols, unsigned ColsOut, typename Type>
517
+ inline CUDA_CALLABLE mat_t<Rows,ColsOut,Type> mul(const mat_t<Rows,Cols,Type>& a, const mat_t<Cols,ColsOut,Type>& b)
518
+ {
519
+ mat_t<Rows,ColsOut,Type> t(0);
520
+ for (unsigned i=0; i < Rows; ++i)
521
+ {
522
+ for (unsigned j=0; j < ColsOut; ++j)
523
+ {
524
+ for (unsigned k=0; k < Cols; ++k)
525
+ {
526
+ t.data[i][j] += a.data[i][k]*b.data[k][j];
527
+ }
528
+ }
529
+ }
530
+
531
+ return t;
532
+ }
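The triple loop is the standard row-by-column product C = A*B accumulated into a zero-initialized result. A 2x2 worked instance:

    \[
    \begin{pmatrix}1 & 2\\ 3 & 4\end{pmatrix}
    \begin{pmatrix}5 & 6\\ 7 & 8\end{pmatrix}
    =
    \begin{pmatrix}1\cdot 5 + 2\cdot 7 & 1\cdot 6 + 2\cdot 8\\ 3\cdot 5 + 4\cdot 7 & 3\cdot 6 + 4\cdot 8\end{pmatrix}
    =
    \begin{pmatrix}19 & 22\\ 43 & 50\end{pmatrix}
    \]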
533
+
534
+ template<unsigned Rows, unsigned Cols, typename Type>
535
+ inline CUDA_CALLABLE Type ddot(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b)
536
+ {
537
+ // double dot product between a and b:
538
+ Type r(0);
539
+ for (unsigned i=0; i < Rows; ++i)
540
+ {
541
+ for (unsigned j=0; j < Cols; ++j)
542
+ {
543
+ r += a.data[i][j] * b.data[i][j];
544
+ }
545
+ }
546
+ return r;
547
+ }
548
+
549
+ template<unsigned Rows, unsigned Cols, typename Type>
550
+ inline CUDA_CALLABLE Type tensordot(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b)
551
+ {
552
+ // corresponds to `np.tensordot()` with all axes being contracted
553
+ return ddot(a, b);
554
+ }
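ddot/tensordot is the Frobenius inner product of two equally sized matrices:

    \[
    \operatorname{ddot}(A, B) = \sum_{i,j} A_{ij} B_{ij} = \operatorname{tr}\!\left(A^{\mathsf T} B\right)
    \]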
555
+
556
+ template<unsigned Rows, unsigned Cols, typename Type>
557
+ inline CUDA_CALLABLE mat_t<Cols,Rows,Type> transpose(const mat_t<Rows,Cols,Type>& a)
558
+ {
559
+ mat_t<Cols,Rows,Type> t;
560
+ for (unsigned i=0; i < Cols; ++i)
561
+ {
562
+ for (unsigned j=0; j < Rows; ++j)
563
+ {
564
+ t.data[i][j] = a.data[j][i];
565
+ }
566
+ }
567
+
568
+ return t;
569
+ }
570
+
571
+ // Only implementing determinants for 2x2, 3x3 and 4x4 matrices for now...
572
+ template<typename Type>
573
+ inline CUDA_CALLABLE Type determinant(const mat_t<2,2,Type>& m)
574
+ {
575
+ return m.data[0][0]*m.data[1][1] - m.data[1][0]*m.data[0][1];
576
+ }
577
+
578
+ template<typename Type>
579
+ inline CUDA_CALLABLE Type determinant(const mat_t<3,3,Type>& m)
580
+ {
581
+ return dot(
582
+ vec_t<3,Type>(m.data[0][0],m.data[0][1],m.data[0][2]),
583
+ cross(
584
+ vec_t<3,Type>(m.data[1][0],m.data[1][1],m.data[1][2]),
585
+ vec_t<3,Type>(m.data[2][0],m.data[2][1],m.data[2][2])
586
+ )
587
+ );
588
+ }
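The 3x3 determinant above is evaluated as the scalar triple product of the rows r_0, r_1, r_2:

    \[
    \det M = r_0 \cdot (r_1 \times r_2)
    \]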
589
+
590
+ template<typename Type>
591
+ inline CUDA_CALLABLE Type determinant(const mat_t<4,4,Type>& m)
592
+ {
593
+ // adapted from USD GfMatrix4f::Inverse()
594
+ Type x00, x01, x02, x03;
595
+ Type x10, x11, x12, x13;
596
+ Type x20, x21, x22, x23;
597
+ Type x30, x31, x32, x33;
598
+ double y01, y02, y03, y12, y13, y23;
599
+ Type z00, z10, z20, z30;
600
+
601
+ // Pickle 1st two columns of matrix into registers
602
+ x00 = m.data[0][0];
603
+ x01 = m.data[0][1];
604
+ x10 = m.data[1][0];
605
+ x11 = m.data[1][1];
606
+ x20 = m.data[2][0];
607
+ x21 = m.data[2][1];
608
+ x30 = m.data[3][0];
609
+ x31 = m.data[3][1];
610
+
611
+ // Compute all six 2x2 determinants of 1st two columns
612
+ y01 = x00*x11 - x10*x01;
613
+ y02 = x00*x21 - x20*x01;
614
+ y03 = x00*x31 - x30*x01;
615
+ y12 = x10*x21 - x20*x11;
616
+ y13 = x10*x31 - x30*x11;
617
+ y23 = x20*x31 - x30*x21;
618
+
619
+ // Pickle 2nd two columns of matrix into registers
620
+ x02 = m.data[0][2];
621
+ x03 = m.data[0][3];
622
+ x12 = m.data[1][2];
623
+ x13 = m.data[1][3];
624
+ x22 = m.data[2][2];
625
+ x23 = m.data[2][3];
626
+ x32 = m.data[3][2];
627
+ x33 = m.data[3][3];
628
+
629
+ // Compute all six 2x2 determinants of 2nd two columns
630
+ y01 = x02*x13 - x12*x03;
631
+ y02 = x02*x23 - x22*x03;
632
+ y03 = x02*x33 - x32*x03;
633
+ y12 = x12*x23 - x22*x13;
634
+ y13 = x12*x33 - x32*x13;
635
+ y23 = x22*x33 - x32*x23;
636
+
637
+ // Compute all 3x3 cofactors for 1st two columns
638
+ z30 = x11*y02 - x21*y01 - x01*y12;
639
+ z20 = x01*y13 - x11*y03 + x31*y01;
640
+ z10 = x21*y03 - x31*y02 - x01*y23;
641
+ z00 = x11*y23 - x21*y13 + x31*y12;
642
+
643
+ 	// compute the 4x4 determinant via cofactor expansion down the first column
644
+ double det = x30*z30 + x20*z20 + x10*z10 + x00*z00;
645
+ return det;
646
+ }
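In conventional notation (rather than the register names used in the code), the 4x4 routine is a Laplace cofactor expansion down the first column, with z00..z30 playing the role of the signed cofactors of that column:

    \[
    \det M = \sum_{i=0}^{3} M_{i0}\, C_{i0}, \qquad C_{i0} = (-1)^{i} \det M^{(i,0)}
    \]

where M^{(i,0)} is M with row i and column 0 removed.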
647
+
648
+ template<unsigned Rows, typename Type>
649
+ inline CUDA_CALLABLE Type trace(const mat_t<Rows,Rows,Type>& m)
650
+ {
651
+ Type ret = m.data[0][0];
652
+ for( unsigned i=1; i < Rows; ++i )
653
+ {
654
+ ret += m.data[i][i];
655
+ }
656
+ return ret;
657
+ }
658
+
659
+ template<unsigned Rows, typename Type>
660
+ inline CUDA_CALLABLE vec_t<Rows, Type> get_diag(const mat_t<Rows,Rows,Type>& m)
661
+ {
662
+ vec_t<Rows, Type> ret;
663
+ for( unsigned i=0; i < Rows; ++i )
664
+ {
665
+ ret[i] = m.data[i][i];
666
+ }
667
+ return ret;
668
+ }
669
+
670
+ // Only implementing inverses for 2x2, 3x3 and 4x4 matrices for now...
671
+ template<typename Type>
672
+ inline CUDA_CALLABLE mat_t<2,2,Type> inverse(const mat_t<2,2,Type>& m)
673
+ {
674
+ Type det = determinant(m);
675
+ if (det > Type(kEps) || det < -Type(kEps))
676
+ {
677
+ return mat_t<2,2,Type>( m.data[1][1], -m.data[0][1],
678
+ -m.data[1][0], m.data[0][0])*(Type(1.0f)/det);
679
+ }
680
+ else
681
+ {
682
+ return mat_t<2,2,Type>();
683
+ }
684
+ }
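The 2x2 branch is the usual closed form, guarded by kEps and falling back to the zero matrix when the determinant is (near) zero:

    \[
    \begin{pmatrix}a & b\\ c & d\end{pmatrix}^{-1}
    = \frac{1}{ad - bc}\begin{pmatrix}d & -b\\ -c & a\end{pmatrix}, \qquad ad - bc \neq 0
    \]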
685
+
686
+ template<typename Type>
687
+ inline CUDA_CALLABLE mat_t<3,3,Type> inverse(const mat_t<3,3,Type>& m)
688
+ {
689
+ Type det = determinant(m);
690
+
691
+ if (det != Type(0.0f))
692
+ {
693
+ mat_t<3,3,Type> b;
694
+
695
+ b.data[0][0] = m.data[1][1]*m.data[2][2] - m.data[1][2]*m.data[2][1];
696
+ b.data[1][0] = m.data[1][2]*m.data[2][0] - m.data[1][0]*m.data[2][2];
697
+ b.data[2][0] = m.data[1][0]*m.data[2][1] - m.data[1][1]*m.data[2][0];
698
+
699
+ b.data[0][1] = m.data[0][2]*m.data[2][1] - m.data[0][1]*m.data[2][2];
700
+ b.data[1][1] = m.data[0][0]*m.data[2][2] - m.data[0][2]*m.data[2][0];
701
+ b.data[2][1] = m.data[0][1]*m.data[2][0] - m.data[0][0]*m.data[2][1];
702
+
703
+ b.data[0][2] = m.data[0][1]*m.data[1][2] - m.data[0][2]*m.data[1][1];
704
+ b.data[1][2] = m.data[0][2]*m.data[1][0] - m.data[0][0]*m.data[1][2];
705
+ b.data[2][2] = m.data[0][0]*m.data[1][1] - m.data[0][1]*m.data[1][0];
706
+
707
+ return b*(Type(1.0f)/det);
708
+ }
709
+ else
710
+ {
711
+ return mat_t<3,3,Type>();
712
+ }
713
+ }
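The matrix b assembled above is the adjugate (transposed cofactor matrix), so the 3x3 branch computes:

    \[
    M^{-1} = \frac{1}{\det M}\,\operatorname{adj}(M), \qquad \operatorname{adj}(M)_{ij} = C_{ji}
    \]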
714
+
715
+ template<typename Type>
716
+ inline CUDA_CALLABLE mat_t<4,4,Type> inverse(const mat_t<4,4,Type>& m)
717
+ {
718
+ // adapted from USD GfMatrix4f::Inverse()
719
+ Type x00, x01, x02, x03;
720
+ Type x10, x11, x12, x13;
721
+ Type x20, x21, x22, x23;
722
+ Type x30, x31, x32, x33;
723
+ double y01, y02, y03, y12, y13, y23;
724
+ Type z00, z10, z20, z30;
725
+ Type z01, z11, z21, z31;
726
+ double z02, z03, z12, z13, z22, z23, z32, z33;
727
+
728
+ // Pickle 1st two columns of matrix into registers
729
+ x00 = m.data[0][0];
730
+ x01 = m.data[0][1];
731
+ x10 = m.data[1][0];
732
+ x11 = m.data[1][1];
733
+ x20 = m.data[2][0];
734
+ x21 = m.data[2][1];
735
+ x30 = m.data[3][0];
736
+ x31 = m.data[3][1];
737
+
738
+ // Compute all six 2x2 determinants of 1st two columns
739
+ y01 = x00*x11 - x10*x01;
740
+ y02 = x00*x21 - x20*x01;
741
+ y03 = x00*x31 - x30*x01;
742
+ y12 = x10*x21 - x20*x11;
743
+ y13 = x10*x31 - x30*x11;
744
+ y23 = x20*x31 - x30*x21;
745
+
746
+ // Pickle 2nd two columns of matrix into registers
747
+ x02 = m.data[0][2];
748
+ x03 = m.data[0][3];
749
+ x12 = m.data[1][2];
750
+ x13 = m.data[1][3];
751
+ x22 = m.data[2][2];
752
+ x23 = m.data[2][3];
753
+ x32 = m.data[3][2];
754
+ x33 = m.data[3][3];
755
+
756
+ 	// Compute all 3x3 cofactors for 2nd two columns
757
+ z33 = x02*y12 - x12*y02 + x22*y01;
758
+ z23 = x12*y03 - x32*y01 - x02*y13;
759
+ z13 = x02*y23 - x22*y03 + x32*y02;
760
+ z03 = x22*y13 - x32*y12 - x12*y23;
761
+ z32 = x13*y02 - x23*y01 - x03*y12;
762
+ z22 = x03*y13 - x13*y03 + x33*y01;
763
+ z12 = x23*y03 - x33*y02 - x03*y23;
764
+ z02 = x13*y23 - x23*y13 + x33*y12;
765
+
766
+ // Compute all six 2x2 determinants of 2nd two columns
767
+ y01 = x02*x13 - x12*x03;
768
+ y02 = x02*x23 - x22*x03;
769
+ y03 = x02*x33 - x32*x03;
770
+ y12 = x12*x23 - x22*x13;
771
+ y13 = x12*x33 - x32*x13;
772
+ y23 = x22*x33 - x32*x23;
773
+
774
+ // Compute all 3x3 cofactors for 1st two columns
775
+ z30 = x11*y02 - x21*y01 - x01*y12;
776
+ z20 = x01*y13 - x11*y03 + x31*y01;
777
+ z10 = x21*y03 - x31*y02 - x01*y23;
778
+ z00 = x11*y23 - x21*y13 + x31*y12;
779
+ z31 = x00*y12 - x10*y02 + x20*y01;
780
+ z21 = x10*y03 - x30*y01 - x00*y13;
781
+ z11 = x00*y23 - x20*y03 + x30*y02;
782
+ z01 = x20*y13 - x30*y12 - x10*y23;
783
+
784
+ // compute 4x4 determinant & its reciprocal
785
+ double det = x30*z30 + x20*z20 + x10*z10 + x00*z00;
786
+
787
+ if(fabs(det) > kEps)
788
+ {
789
+ mat_t<4,4,Type> invm;
790
+
791
+ double rcp = 1.0 / det;
792
+
793
+ // Multiply all 3x3 cofactors by reciprocal & transpose
794
+ invm.data[0][0] = Type(z00*rcp);
795
+ invm.data[0][1] = Type(z10*rcp);
796
+ invm.data[1][0] = Type(z01*rcp);
797
+ invm.data[0][2] = Type(z20*rcp);
798
+ invm.data[2][0] = Type(z02*rcp);
799
+ invm.data[0][3] = Type(z30*rcp);
800
+ invm.data[3][0] = Type(z03*rcp);
801
+ invm.data[1][1] = Type(z11*rcp);
802
+ invm.data[1][2] = Type(z21*rcp);
803
+ invm.data[2][1] = Type(z12*rcp);
804
+ invm.data[1][3] = Type(z31*rcp);
805
+ invm.data[3][1] = Type(z13*rcp);
806
+ invm.data[2][2] = Type(z22*rcp);
807
+ invm.data[2][3] = Type(z32*rcp);
808
+ invm.data[3][2] = Type(z23*rcp);
809
+ invm.data[3][3] = Type(z33*rcp);
810
+
811
+ return invm;
812
+ }
813
+ else
814
+ {
815
+ return mat_t<4,4,Type>();
816
+ }
817
+ }
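Note that a singular input falls back to the zero matrix rather than signalling an error, so callers that need to detect singularity should test determinant() themselves. A host-side sketch (assuming the header compiles on the CPU with CUDA_CALLABLE defined away; variable names are illustrative):

    // Two identical rows make the determinant exactly zero, so inverse()
    // takes the fallback branch and returns the zero matrix.
    wp::mat44 singular(1.f, 2.f, 3.f, 4.f,
                       1.f, 2.f, 3.f, 4.f,   // duplicate of row 0
                       0.f, 0.f, 1.f, 0.f,
                       0.f, 0.f, 0.f, 1.f);
    wp::mat44 inv = wp::inverse(singular);   // every component is 0.0f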
818
+
819
+ template<unsigned Rows,typename Type>
820
+ inline CUDA_CALLABLE mat_t<Rows,Rows,Type> diag(const vec_t<Rows,Type>& d)
821
+ {
822
+ mat_t<Rows,Rows,Type> ret(Type(0));
823
+ for (unsigned i=0; i < Rows; ++i)
824
+ {
825
+ ret.data[i][i] = d[i];
826
+ }
827
+ return ret;
828
+ }
829
+
830
+ template<unsigned Rows,unsigned Cols,typename Type>
831
+ inline CUDA_CALLABLE mat_t<Rows,Cols,Type> outer(const vec_t<Rows,Type>& a, const vec_t<Cols,Type>& b)
832
+ {
833
+ // col 0 = a * b[0] etc...
834
+ mat_t<Rows,Cols,Type> ret;
835
+ for (unsigned row=0; row < Rows; ++row)
836
+ {
837
+ for (unsigned col=0; col < Cols; ++col) // columns
838
+ {
839
+ ret.data[row][col] = a[row] * b[col];
840
+ }
841
+ }
842
+ return ret;
843
+ }
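outer() forms the rank-one product whose (i, j) entry is a_i * b_j, e.g. for a = (1, 2) and b = (3, 4, 5):

    \[
    a\,b^{\mathsf T} = \begin{pmatrix}1\\ 2\end{pmatrix}\begin{pmatrix}3 & 4 & 5\end{pmatrix}
    = \begin{pmatrix}3 & 4 & 5\\ 6 & 8 & 10\end{pmatrix}
    \]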
844
+
845
+ template<typename Type>
846
+ inline CUDA_CALLABLE mat_t<3,3,Type> skew(const vec_t<3,Type>& a)
847
+ {
848
+ mat_t<3,3,Type> out(
849
+ Type(0), -a[2], a[1],
850
+ a[2], Type(0), -a[0],
851
+ -a[1], a[0], Type(0)
852
+ );
853
+
854
+ return out;
855
+ }
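skew(a) is the cross-product matrix, i.e. multiplying it by any vector reproduces the cross product:

    \[
    \operatorname{skew}(a)\,v =
    \begin{pmatrix}0 & -a_z & a_y\\ a_z & 0 & -a_x\\ -a_y & a_x & 0\end{pmatrix}
    \begin{pmatrix}v_x\\ v_y\\ v_z\end{pmatrix} = a \times v
    \]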
856
+
857
+
858
+ template<unsigned Rows, unsigned Cols, typename Type>
859
+ inline CUDA_CALLABLE mat_t<Rows,Cols,Type> cw_mul(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b)
860
+ {
861
+ mat_t<Rows,Cols,Type> t;
862
+ for (unsigned i=0; i < Rows; ++i)
863
+ {
864
+ for (unsigned j=0; j < Cols; ++j)
865
+ {
866
+ t.data[i][j] = a.data[i][j] * b.data[i][j];
867
+ }
868
+ }
869
+
870
+ return t;
871
+ }
872
+
873
+
874
+ template<unsigned Rows, unsigned Cols, typename Type>
875
+ inline CUDA_CALLABLE mat_t<Rows,Cols,Type> cw_div(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b)
876
+ {
877
+ mat_t<Rows,Cols,Type> t;
878
+ for (unsigned i=0; i < Rows; ++i)
879
+ {
880
+ for (unsigned j=0; j < Cols; ++j)
881
+ {
882
+ t.data[i][j] = a.data[i][j] / b.data[i][j];
883
+ }
884
+ }
885
+
886
+ return t;
887
+ }
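cw_mul and cw_div are the element-wise (Hadamard) product and quotient; cw_div performs no zero check on the entries of b:

    \[
    (A \circ B)_{ij} = A_{ij}\, B_{ij}, \qquad (A \oslash B)_{ij} = A_{ij} / B_{ij}
    \]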
888
+
889
+ template<typename Type>
890
+ inline CUDA_CALLABLE vec_t<3,Type> transform_point(const mat_t<4,4,Type>& m, const vec_t<3,Type>& v)
891
+ {
892
+ vec_t<4,Type> out = mul(m, vec_t<4,Type>(v[0], v[1], v[2], Type(1)));
893
+ return vec_t<3,Type>(out[0], out[1], out[2]);
894
+ }
895
+
896
+ template<typename Type>
897
+ inline CUDA_CALLABLE vec_t<3,Type> transform_vector(const mat_t<4,4,Type>& m, const vec_t<3,Type>& v)
898
+ {
899
+ vec_t<4,Type> out = mul(m, vec_t<4,Type>(v[0], v[1], v[2], 0.f));
900
+ return vec_t<3,Type>(out[0], out[1], out[2]);
901
+ }
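transform_point and transform_vector differ only in the homogeneous coordinate (1 versus 0): writing the 4x4 matrix with rotation/scale block R and translation column t, points pick up the translation while vectors do not:

    \[
    \begin{pmatrix}R & t\\ 0 & 1\end{pmatrix}\begin{pmatrix}v\\ 1\end{pmatrix} = \begin{pmatrix}Rv + t\\ 1\end{pmatrix},
    \qquad
    \begin{pmatrix}R & t\\ 0 & 1\end{pmatrix}\begin{pmatrix}v\\ 0\end{pmatrix} = \begin{pmatrix}Rv\\ 0\end{pmatrix}
    \]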
902
+
903
+ template<unsigned Rows, unsigned Cols, typename Type>
904
+ inline CUDA_CALLABLE void adj_extract(const mat_t<Rows,Cols,Type>& m, int row, mat_t<Rows,Cols,Type>& adj_m, int& adj_row, const vec_t<Cols,Type>& adj_ret)
905
+ {
906
+ for( unsigned col=0; col < Cols; ++col )
907
+ adj_m.data[row][col] += adj_ret[col];
908
+ }
909
+
910
+ template<unsigned Rows, unsigned Cols, typename Type>
911
+ inline void CUDA_CALLABLE adj_extract(const mat_t<Rows,Cols,Type>& m, int row, int col, mat_t<Rows,Cols,Type>& adj_m, int& adj_row, int& adj_col, Type adj_ret)
912
+ {
913
+ #ifndef NDEBUG
914
+ 	if (row < 0 || row >= Rows)
915
+ {
916
+ printf("mat row index %d out of bounds at %s %d\n", row, __FILE__, __LINE__);
917
+ assert(0);
918
+ }
919
+ 	if (col < 0 || col >= Cols)
920
+ {
921
+ printf("mat col index %d out of bounds at %s %d\n", col, __FILE__, __LINE__);
922
+ assert(0);
923
+ }
924
+ #endif
925
+ adj_m.data[row][col] += adj_ret;
926
+ }
927
+
928
+ template<unsigned Rows, unsigned Cols, typename Type>
929
+ inline CUDA_CALLABLE void adj_outer(const vec_t<Rows,Type>& a, const vec_t<Cols,Type>& b, vec_t<Rows,Type>& adj_a, vec_t<Cols,Type>& adj_b, const mat_t<Rows,Cols,Type>& adj_ret)
930
+ {
931
+ adj_a += mul(adj_ret, b);
932
+ adj_b += mul(transpose(adj_ret), a);
933
+ }
934
+
935
+ template<unsigned Rows, unsigned Cols, typename Type>
936
+ inline CUDA_CALLABLE void adj_add(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b, mat_t<Rows,Cols,Type>& adj_a, mat_t<Rows,Cols,Type>& adj_b, const mat_t<Rows,Cols,Type>& adj_ret)
937
+ {
938
+ for (unsigned i=0; i < Rows; ++i)
939
+ {
940
+ for (unsigned j=0; j < Cols; ++j)
941
+ {
942
+ adj_a.data[i][j] += adj_ret.data[i][j];
943
+ adj_b.data[i][j] += adj_ret.data[i][j];
944
+ }
945
+ }
946
+ }
947
+
948
+ template<unsigned Rows, unsigned Cols, typename Type>
949
+ inline CUDA_CALLABLE void adj_sub(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b, mat_t<Rows,Cols,Type>& adj_a, mat_t<Rows,Cols,Type>& adj_b, const mat_t<Rows,Cols,Type>& adj_ret)
950
+ {
951
+ for (unsigned i=0; i < Rows; ++i)
952
+ {
953
+ for (unsigned j=0; j < Cols; ++j)
954
+ {
955
+ adj_a.data[i][j] += adj_ret.data[i][j];
956
+ adj_b.data[i][j] -= adj_ret.data[i][j];
957
+ }
958
+ }
959
+ }
960
+
961
+ template<unsigned Rows, unsigned Cols, typename Type>
962
+ inline CUDA_CALLABLE void adj_div(const mat_t<Rows,Cols,Type>& a, Type s, mat_t<Rows,Cols,Type>& adj_a, Type& adj_s, const mat_t<Rows,Cols,Type>& adj_ret)
963
+ {
964
+ adj_s -= tensordot(a , adj_ret)/ (s * s); // - a / s^2
965
+
966
+ for (unsigned i=0; i < Rows; ++i)
967
+ {
968
+ for (unsigned j=0; j < Cols; ++j)
969
+ {
970
+ adj_a.data[i][j] += adj_ret.data[i][j] / s;
971
+ }
972
+ }
973
+ }
974
+
975
+ template<unsigned Rows, unsigned Cols, typename Type>
976
+ inline CUDA_CALLABLE void adj_div(Type s, const mat_t<Rows,Cols,Type>& a, Type& adj_s, mat_t<Rows,Cols,Type>& adj_a, const mat_t<Rows,Cols,Type>& adj_ret)
977
+ {
978
+ adj_s -= tensordot(a , adj_ret)/ (s * s); // - a / s^2
979
+
980
+ for (unsigned i=0; i < Rows; ++i)
981
+ {
982
+ for (unsigned j=0; j < Cols; ++j)
983
+ {
984
+ adj_a.data[i][j] += s / adj_ret.data[i][j];
985
+ }
986
+ }
987
+ }
988
+
989
+ template<unsigned Rows, unsigned Cols, typename Type>
990
+ inline CUDA_CALLABLE void adj_mul(const mat_t<Rows,Cols,Type>& a, Type b, mat_t<Rows,Cols,Type>& adj_a, Type& adj_b, const mat_t<Rows,Cols,Type>& adj_ret)
991
+ {
992
+ for (unsigned i=0; i < Rows; ++i)
993
+ {
994
+ for (unsigned j=0; j < Cols; ++j)
995
+ {
996
+ adj_a.data[i][j] += b*adj_ret.data[i][j];
997
+ adj_b += a.data[i][j]*adj_ret.data[i][j];
998
+ }
999
+ }
1000
+ }
1001
+
1002
+ template<unsigned Rows, unsigned Cols, typename Type>
1003
+ inline CUDA_CALLABLE void adj_mul(Type b, const mat_t<Rows,Cols,Type>& a, Type& adj_b, mat_t<Rows,Cols,Type>& adj_a, const mat_t<Rows,Cols,Type>& adj_ret)
1004
+ {
1005
+ adj_mul(a, b, adj_a, adj_b, adj_ret);
1006
+ }
1007
+
1008
+ template<unsigned Rows, unsigned Cols, typename Type>
1009
+ inline CUDA_CALLABLE void adj_ddot(mat_t<Rows,Cols,Type> a, mat_t<Rows,Cols,Type> b, mat_t<Rows,Cols,Type>& adj_a, mat_t<Rows,Cols,Type>& adj_b, const Type adj_ret)
1010
+ {
1011
+ adj_a += b*adj_ret;
1012
+ adj_b += a*adj_ret;
1013
+ }
1014
+
1015
+ template<unsigned Rows, unsigned Cols, typename Type>
1016
+ inline CUDA_CALLABLE void adj_mul(const mat_t<Rows,Cols,Type>& a, const vec_t<Cols,Type>& b, mat_t<Rows,Cols,Type>& adj_a, vec_t<Cols,Type>& adj_b, const vec_t<Rows,Type>& adj_ret)
1017
+ {
1018
+ adj_a += outer(adj_ret, b);
1019
+ adj_b += mul(transpose(a), adj_ret);
1020
+ }
1021
+
1022
+ template<unsigned Rows, unsigned Cols, typename Type>
1023
+ inline CUDA_CALLABLE void adj_mul(const vec_t<Rows,Type>& b, const mat_t<Rows,Cols,Type>& a, vec_t<Rows,Type>& adj_b, mat_t<Rows,Cols,Type>& adj_a, const vec_t<Cols,Type>& adj_ret)
1024
+ {
1025
+ adj_a += outer(b, adj_ret);
1026
+ adj_b += mul(adj_ret, transpose(a));
1027
+ }
1028
+
1029
+ template<unsigned Rows, unsigned Cols, unsigned ColsOut, typename Type>
1030
+ inline CUDA_CALLABLE void adj_mul(const mat_t<Rows,Cols,Type>& a, const mat_t<Cols,ColsOut,Type>& b, mat_t<Rows,Cols,Type>& adj_a, mat_t<Cols,ColsOut,Type>& adj_b, const mat_t<Rows,ColsOut,Type>& adj_ret)
1031
+ {
1032
+ adj_a += mul(adj_ret, transpose(b));
1033
+ adj_b += mul(transpose(a), adj_ret);
1034
+ }
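The adj_mul overloads above are the usual reverse-mode rules for linear maps; writing an overbar for an accumulated adjoint:

    \[
    y = A x \;\Rightarrow\; \bar A \mathrel{+}= \bar y\, x^{\mathsf T}, \quad \bar x \mathrel{+}= A^{\mathsf T} \bar y;
    \qquad
    C = A B \;\Rightarrow\; \bar A \mathrel{+}= \bar C\, B^{\mathsf T}, \quad \bar B \mathrel{+}= A^{\mathsf T} \bar C
    \]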
1035
+
1036
+ template<unsigned Rows, unsigned Cols, typename Type>
1037
+ inline CUDA_CALLABLE void adj_transpose(const mat_t<Rows,Cols,Type>& a, mat_t<Rows,Cols,Type>& adj_a, const mat_t<Cols,Rows,Type>& adj_ret)
1038
+ {
1039
+ adj_a += transpose(adj_ret);
1040
+ }
1041
+
1042
+ template<unsigned Rows, typename Type>
1043
+ inline CUDA_CALLABLE void adj_trace(const mat_t<Rows,Rows,Type>& m, mat_t<Rows,Rows,Type>& adj_m, Type adj_ret)
1044
+ {
1045
+ for (unsigned i=0; i < Rows; ++i)
1046
+ adj_m.data[i][i] += adj_ret;
1047
+ }
1048
+
1049
+ template<unsigned Rows, typename Type>
1050
+ inline CUDA_CALLABLE void adj_diag(const vec_t<Rows,Type>& d, vec_t<Rows,Type>& adj_d, const mat_t<Rows,Rows,Type>& adj_ret)
1051
+ {
1052
+ for (unsigned i=0; i < Rows; ++i)
1053
+ adj_d[i] += adj_ret.data[i][i];
1054
+ }
1055
+
1056
+ template<unsigned Rows, typename Type>
1057
+ inline CUDA_CALLABLE void adj_get_diag(const mat_t<Rows,Rows,Type>& m, mat_t<Rows,Rows,Type>& adj_m, const vec_t<Rows,Type>& adj_ret)
1058
+ {
1059
+ for (unsigned i=0; i < Rows; ++i)
1060
+ adj_m.data[i][i] += adj_ret[i];
1061
+ }
1062
+
1063
+ template<typename Type>
1064
+ inline CUDA_CALLABLE void adj_determinant(const mat_t<2,2,Type>& m, mat_t<2,2,Type>& adj_m, Type adj_ret)
1065
+ {
1066
+ adj_m.data[0][0] += m.data[1][1]*adj_ret;
1067
+ adj_m.data[1][1] += m.data[0][0]*adj_ret;
1068
+ adj_m.data[0][1] -= m.data[1][0]*adj_ret;
1069
+ adj_m.data[1][0] -= m.data[0][1]*adj_ret;
1070
+ }
1071
+
1072
+ template<typename Type>
1073
+ inline CUDA_CALLABLE void adj_determinant(const mat_t<3,3,Type>& m, mat_t<3,3,Type>& adj_m, Type adj_ret)
1074
+ {
1075
+ (vec_t<3,Type>&)adj_m.data[0] += cross(m.get_row(1), m.get_row(2))*adj_ret;
1076
+ (vec_t<3,Type>&)adj_m.data[1] += cross(m.get_row(2), m.get_row(0))*adj_ret;
1077
+ (vec_t<3,Type>&)adj_m.data[2] += cross(m.get_row(0), m.get_row(1))*adj_ret;
1078
+ }
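The three cross products accumulate the cofactor matrix, which is the gradient of the determinant:

    \[
    \frac{\partial \det M}{\partial M} = \operatorname{cof}(M) = \det(M)\, M^{-\mathsf T}
    \]

with row i of cof(M) given by the cross product of the other two rows taken in cyclic order, matching the det M = r_0 . (r_1 x r_2) form used in the forward pass.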
1079
+
1080
+ template<typename Type>
1081
+ inline CUDA_CALLABLE void adj_determinant(const mat_t<4,4,Type>& m, mat_t<4,4,Type>& adj_m, Type adj_ret)
1082
+ {
1083
+ // adapted from USD GfMatrix4f::Inverse()
1084
+ Type x00, x01, x02, x03;
1085
+ Type x10, x11, x12, x13;
1086
+ Type x20, x21, x22, x23;
1087
+ Type x30, x31, x32, x33;
1088
+ double y01, y02, y03, y12, y13, y23;
1089
+ Type z00, z10, z20, z30;
1090
+ Type z01, z11, z21, z31;
1091
+ double z02, z03, z12, z13, z22, z23, z32, z33;
1092
+
1093
+ // Pickle 1st two columns of matrix into registers
1094
+ x00 = m.data[0][0];
1095
+ x01 = m.data[0][1];
1096
+ x10 = m.data[1][0];
1097
+ x11 = m.data[1][1];
1098
+ x20 = m.data[2][0];
1099
+ x21 = m.data[2][1];
1100
+ x30 = m.data[3][0];
1101
+ x31 = m.data[3][1];
1102
+
1103
+ // Compute all six 2x2 determinants of 1st two columns
1104
+ y01 = x00*x11 - x10*x01;
1105
+ y02 = x00*x21 - x20*x01;
1106
+ y03 = x00*x31 - x30*x01;
1107
+ y12 = x10*x21 - x20*x11;
1108
+ y13 = x10*x31 - x30*x11;
1109
+ y23 = x20*x31 - x30*x21;
1110
+
1111
+ // Pickle 2nd two columns of matrix into registers
1112
+ x02 = m.data[0][2];
1113
+ x03 = m.data[0][3];
1114
+ x12 = m.data[1][2];
1115
+ x13 = m.data[1][3];
1116
+ x22 = m.data[2][2];
1117
+ x23 = m.data[2][3];
1118
+ x32 = m.data[3][2];
1119
+ x33 = m.data[3][3];
1120
+
1121
+ 	// Compute all 3x3 cofactors for 2nd two columns
1122
+ z33 = x02*y12 - x12*y02 + x22*y01;
1123
+ z23 = x12*y03 - x32*y01 - x02*y13;
1124
+ z13 = x02*y23 - x22*y03 + x32*y02;
1125
+ z03 = x22*y13 - x32*y12 - x12*y23;
1126
+ z32 = x13*y02 - x23*y01 - x03*y12;
1127
+ z22 = x03*y13 - x13*y03 + x33*y01;
1128
+ z12 = x23*y03 - x33*y02 - x03*y23;
1129
+ z02 = x13*y23 - x23*y13 + x33*y12;
1130
+
1131
+ // Compute all six 2x2 determinants of 2nd two columns
1132
+ y01 = x02*x13 - x12*x03;
1133
+ y02 = x02*x23 - x22*x03;
1134
+ y03 = x02*x33 - x32*x03;
1135
+ y12 = x12*x23 - x22*x13;
1136
+ y13 = x12*x33 - x32*x13;
1137
+ y23 = x22*x33 - x32*x23;
1138
+
1139
+ // Compute all 3x3 cofactors for 1st two columns
1140
+ z30 = x11*y02 - x21*y01 - x01*y12;
1141
+ z20 = x01*y13 - x11*y03 + x31*y01;
1142
+ z10 = x21*y03 - x31*y02 - x01*y23;
1143
+ z00 = x11*y23 - x21*y13 + x31*y12;
1144
+ z31 = x00*y12 - x10*y02 + x20*y01;
1145
+ z21 = x10*y03 - x30*y01 - x00*y13;
1146
+ z11 = x00*y23 - x20*y03 + x30*y02;
1147
+ z01 = x20*y13 - x30*y12 - x10*y23;
1148
+
1149
+ // Multiply all 3x3 cofactors by adjoint & transpose
1150
+ adj_m.data[0][0] += Type(z00*adj_ret);
1151
+ adj_m.data[1][0] += Type(z10*adj_ret);
1152
+ adj_m.data[0][1] += Type(z01*adj_ret);
1153
+ adj_m.data[2][0] += Type(z20*adj_ret);
1154
+ adj_m.data[0][2] += Type(z02*adj_ret);
1155
+ adj_m.data[3][0] += Type(z30*adj_ret);
1156
+ adj_m.data[0][3] += Type(z03*adj_ret);
1157
+ adj_m.data[1][1] += Type(z11*adj_ret);
1158
+ adj_m.data[2][1] += Type(z21*adj_ret);
1159
+ adj_m.data[1][2] += Type(z12*adj_ret);
1160
+ adj_m.data[3][1] += Type(z31*adj_ret);
1161
+ adj_m.data[1][3] += Type(z13*adj_ret);
1162
+ adj_m.data[2][2] += Type(z22*adj_ret);
1163
+ adj_m.data[3][2] += Type(z32*adj_ret);
1164
+ adj_m.data[2][3] += Type(z23*adj_ret);
1165
+ adj_m.data[3][3] += Type(z33*adj_ret);
1166
+ }
1167
+
1168
+ template<unsigned Rows, typename Type>
1169
+ inline CUDA_CALLABLE void adj_inverse(const mat_t<Rows,Rows,Type>& m, mat_t<Rows,Rows,Type>& ret, mat_t<Rows,Rows,Type>& adj_m, const mat_t<Rows,Rows,Type>& adj_ret)
1170
+ {
1171
+ // todo: how to cache this from the forward pass?
1172
+ mat_t<Rows,Rows,Type> invt = transpose(ret);
1173
+
1174
+ // see https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf 2.2.3
1175
+ adj_m -= mul(mul(invt, adj_ret), invt);
1176
+ }
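This is the matrix-inverse rule from the referenced Giles note: with Y = X^{-1}, the reverse-mode update is

    \[
    \bar X \mathrel{-}= Y^{\mathsf T}\, \bar Y\, Y^{\mathsf T}
    \]

which is exactly mul(mul(invt, adj_ret), invt) with invt = transpose(ret).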
1177
+
1178
+ template<typename Type>
1179
+ inline CUDA_CALLABLE void adj_transform_point(const mat_t<4,4,Type>& m, const vec_t<3,Type>& v, mat_t<4,4,Type>& adj_m, vec_t<3,Type>& adj_v, const vec_t<3,Type>& adj_ret)
1180
+ {
1181
+ vec_t<4,Type> out = vec_t<4,Type>(v[0], v[1], v[2], 1.f);
1182
+ adj_m = add(adj_m, transpose(mat_t<4,4,Type>(adj_ret[0] * out, adj_ret[1] * out, adj_ret[2] * out, vec_t<4,Type>())));
1183
+ adj_v[0] += dot(vec_t<3,Type>(m.data[0][0], m.data[1][0], m.data[2][0]), adj_ret);
1184
+ adj_v[1] += dot(vec_t<3,Type>(m.data[0][1], m.data[1][1], m.data[2][1]), adj_ret);
1185
+ adj_v[2] += dot(vec_t<3,Type>(m.data[0][2], m.data[1][2], m.data[2][2]), adj_ret);
1186
+ }
1187
+
1188
+ template<typename Type>
1189
+ inline CUDA_CALLABLE void adj_transform_vector(const mat_t<4,4,Type>& m, const vec_t<3,Type>& v, mat_t<4,4,Type>& adj_m, vec_t<3,Type>& adj_v, const vec_t<3,Type>& adj_ret)
1190
+ {
1191
+ vec_t<4,Type> out = vec_t<4,Type>(v[0], v[1], v[2], 0.f);
1192
+ adj_m = add(adj_m, transpose(mat_t<4,4,Type>(adj_ret[0] * out, adj_ret[1] * out, adj_ret[2] * out, vec_t<4,Type>())));
1193
+ adj_v[0] += dot(vec_t<3,Type>(m.data[0][0], m.data[1][0], m.data[2][0]), adj_ret);
1194
+ adj_v[1] += dot(vec_t<3,Type>(m.data[0][1], m.data[1][1], m.data[2][1]), adj_ret);
1195
+ adj_v[2] += dot(vec_t<3,Type>(m.data[0][2], m.data[1][2], m.data[2][2]), adj_ret);
1196
+ }
1197
+
1198
+ template<typename Type>
1199
+ inline CUDA_CALLABLE void adj_skew(const vec_t<3,Type>& a, vec_t<3,Type>& adj_a, const mat_t<3,3,Type>& adj_ret)
1200
+ {
1201
+ adj_a[0] += adj_ret.data[2][1] - adj_ret.data[1][2];
1202
+ adj_a[1] += adj_ret.data[0][2] - adj_ret.data[2][0];
1203
+ adj_a[2] += adj_ret.data[1][0] - adj_ret.data[0][1];
1204
+ }
1205
+
1206
+ template<unsigned Rows, unsigned Cols, typename Type>
1207
+ inline CUDA_CALLABLE void adj_cw_mul(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b, mat_t<Rows,Cols,Type>& adj_a, mat_t<Rows,Cols,Type>& adj_b, const mat_t<Rows,Cols,Type>& adj_ret)
1208
+ {
1209
+ adj_a += cw_mul(b, adj_ret);
1210
+ adj_b += cw_mul(a, adj_ret);
1211
+ }
1212
+
1213
+ template<unsigned Rows, unsigned Cols, typename Type>
1214
+ inline CUDA_CALLABLE void adj_cw_div(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b, mat_t<Rows,Cols,Type>& ret, mat_t<Rows,Cols,Type>& adj_a, mat_t<Rows,Cols,Type>& adj_b, const mat_t<Rows,Cols,Type>& adj_ret)
1215
+ {
1216
+ adj_a += cw_div(adj_ret, b);
1217
+ adj_b -= cw_mul(adj_ret, cw_div(ret, b));
1218
+ }
1219
+
1220
+ // adjoint for the constant constructor:
1221
+ template<unsigned Rows, unsigned Cols, typename Type>
1222
+ inline CUDA_CALLABLE void adj_mat_t(Type s, Type& adj_s, const mat_t<Rows, Cols, Type>& adj_ret)
1223
+ {
1224
+ for (unsigned i=0; i < Rows; ++i)
1225
+ {
1226
+ for (unsigned j=0; j < Cols; ++j)
1227
+ {
1228
+ adj_s += adj_ret.data[i][j];
1229
+ }
1230
+ }
1231
+ }
1232
+
1233
+ // adjoint for the casting constructor:
1234
+ template<unsigned Rows, unsigned Cols, typename Type, typename OtherType>
1235
+ inline CUDA_CALLABLE void adj_mat_t(const mat_t<Rows, Cols, OtherType>& other, mat_t<Rows, Cols, OtherType>& adj_other, const mat_t<Rows, Cols, Type>& adj_ret)
1236
+ {
1237
+ for (unsigned i=0; i < Rows; ++i)
1238
+ {
1239
+ for (unsigned j=0; j < Cols; ++j)
1240
+ {
1241
+ adj_other.data[i][j] += adj_ret.data[i][j];
1242
+ }
1243
+ }
1244
+ }
1245
+
1246
+ // adjoint for the initializer_array scalar constructor:
1247
+ template<unsigned Rows, unsigned Cols, typename Type>
1248
+ inline CUDA_CALLABLE void adj_mat_t(const initializer_array<Rows * Cols, Type> &cmps, const initializer_array<Rows * Cols, Type*> &adj_cmps, const mat_t<Rows, Cols, Type>& adj_ret)
1249
+ {
1250
+ for (unsigned i=0; i < Rows; ++i)
1251
+ {
1252
+ for (unsigned j=0; j < Cols; ++j)
1253
+ {
1254
+ *adj_cmps[i * Cols + j] += adj_ret.data[i][j];
1255
+ }
1256
+ }
1257
+ }
1258
+
1259
+ template<typename Type>
1260
+ inline CUDA_CALLABLE void adj_mat_t(Type m00, Type m01, Type m10, Type m11, Type& adj_m00, Type& adj_m01, Type& adj_m10, Type& adj_m11, const mat_t<2, 2, Type>& adj_ret)
1261
+ {
1262
+ adj_m00 += adj_ret.data[0][0];
1263
+ adj_m01 += adj_ret.data[0][1];
1264
+ adj_m10 += adj_ret.data[1][0];
1265
+ adj_m11 += adj_ret.data[1][1];
1266
+ }
1267
+
1268
+ template<typename Type>
1269
+ inline CUDA_CALLABLE void adj_mat_t(Type m00, Type m01, Type m02,
1270
+ Type m10, Type m11, Type m12,
1271
+ Type m20, Type m21, Type m22,
1272
+ Type& a00, Type& a01, Type& a02,
1273
+ Type& a10, Type& a11, Type& a12,
1274
+ Type& a20, Type& a21, Type& a22,
1275
+ const mat_t<3, 3, Type>& adj_ret)
1276
+ {
1277
+ a00 += adj_ret.data[0][0];
1278
+ a01 += adj_ret.data[0][1];
1279
+ a02 += adj_ret.data[0][2];
1280
+ a10 += adj_ret.data[1][0];
1281
+ a11 += adj_ret.data[1][1];
1282
+ a12 += adj_ret.data[1][2];
1283
+ a20 += adj_ret.data[2][0];
1284
+ a21 += adj_ret.data[2][1];
1285
+ a22 += adj_ret.data[2][2];
1286
+ }
1287
+
1288
+
1289
+ template<typename Type>
1290
+ inline CUDA_CALLABLE void adj_mat_t(Type m00, Type m01, Type m02, Type m03,
1291
+ Type m10, Type m11, Type m12, Type m13,
1292
+ Type m20, Type m21, Type m22, Type m23,
1293
+ Type m30, Type m31, Type m32, Type m33,
1294
+ Type& a00, Type& a01, Type& a02, Type& a03,
1295
+ Type& a10, Type& a11, Type& a12, Type& a13,
1296
+ Type& a20, Type& a21, Type& a22, Type& a23,
1297
+ Type& a30, Type& a31, Type& a32, Type& a33,
1298
+ const mat_t<4, 4, Type>& adj_ret)
1299
+ {
1300
+ a00 += adj_ret.data[0][0];
1301
+ a01 += adj_ret.data[0][1];
1302
+ a02 += adj_ret.data[0][2];
1303
+ a03 += adj_ret.data[0][3];
1304
+
1305
+ a10 += adj_ret.data[1][0];
1306
+ a11 += adj_ret.data[1][1];
1307
+ a12 += adj_ret.data[1][2];
1308
+ a13 += adj_ret.data[1][3];
1309
+
1310
+ a20 += adj_ret.data[2][0];
1311
+ a21 += adj_ret.data[2][1];
1312
+ a22 += adj_ret.data[2][2];
1313
+ a23 += adj_ret.data[2][3];
1314
+
1315
+ a30 += adj_ret.data[3][0];
1316
+ a31 += adj_ret.data[3][1];
1317
+ a32 += adj_ret.data[3][2];
1318
+ a33 += adj_ret.data[3][3];
1319
+ }
1320
+
1321
+
1322
+
1323
+ // adjoint for the initializer_array vector constructor:
1324
+ template<unsigned Rows, unsigned Cols, typename Type>
1325
+ inline CUDA_CALLABLE void adj_mat_t(const initializer_array<Cols, vec_t<Rows,Type> > &cmps, const initializer_array<Cols, vec_t<Rows,Type>* > &adj_cmps, const mat_t<Rows, Cols, Type>& adj_ret)
1326
+ {
1327
+ for (unsigned j=0; j < Cols; ++j)
1328
+ {
1329
+ for (unsigned i=0; i < Rows; ++i)
1330
+ {
1331
+ (*adj_cmps[j])[i] += adj_ret.data[i][j];
1332
+ }
1333
+ }
1334
+ }
1335
+
1336
+ template<typename Type>
1337
+ inline CUDA_CALLABLE void adj_mat_t(const vec_t<2,Type> &cmps0, const vec_t<2,Type> &cmps1, vec_t<2,Type> &adj_cmps0, vec_t<2,Type> &adj_cmps1, const mat_t<2, 2, Type>& adj_ret)
1338
+ {
1339
+ for (unsigned i=0; i < 2; ++i)
1340
+ {
1341
+ adj_cmps0[i] += adj_ret.data[i][0];
1342
+ adj_cmps1[i] += adj_ret.data[i][1];
1343
+ }
1344
+ }
1345
+
1346
+ template<typename Type>
1347
+ inline CUDA_CALLABLE void adj_mat_t(const vec_t<3,Type> &cmps0, const vec_t<3,Type> &cmps1, const vec_t<3,Type> &cmps2, vec_t<3,Type> &adj_cmps0, vec_t<3,Type> &adj_cmps1, vec_t<3,Type> &adj_cmps2, const mat_t<3, 3, Type>& adj_ret)
1348
+ {
1349
+ for (unsigned i=0; i < 3; ++i)
1350
+ {
1351
+ adj_cmps0[i] += adj_ret.data[i][0];
1352
+ adj_cmps1[i] += adj_ret.data[i][1];
1353
+ adj_cmps2[i] += adj_ret.data[i][2];
1354
+ }
1355
+ }
1356
+
1357
+ template<typename Type>
1358
+ inline CUDA_CALLABLE void adj_mat_t(const vec_t<4,Type> &cmps0, const vec_t<4,Type> &cmps1, const vec_t<4,Type> &cmps2, const vec_t<4,Type> &cmps3, vec_t<4,Type> &adj_cmps0, vec_t<4,Type> &adj_cmps1, vec_t<4,Type> &adj_cmps2, vec_t<4,Type> &adj_cmps3, const mat_t<4, 4, Type>& adj_ret)
1359
+ {
1360
+ for (unsigned i=0; i < 4; ++i)
1361
+ {
1362
+ adj_cmps0[i] += adj_ret.data[i][0];
1363
+ adj_cmps1[i] += adj_ret.data[i][1];
1364
+ adj_cmps2[i] += adj_ret.data[i][2];
1365
+ adj_cmps3[i] += adj_ret.data[i][3];
1366
+ }
1367
+ }
1368
+
1369
+ template<unsigned Rows, unsigned Cols, typename Type>
1370
+ CUDA_CALLABLE inline mat_t<Rows, Cols, Type> lerp(const mat_t<Rows, Cols, Type>& a, const mat_t<Rows, Cols, Type>& b, Type t)
1371
+ {
1372
+ return a*(Type(1)-t) + b*t;
1373
+ }
1374
+
1375
+ template<unsigned Rows, unsigned Cols, typename Type>
1376
+ CUDA_CALLABLE inline void adj_lerp(const mat_t<Rows, Cols, Type>& a, const mat_t<Rows, Cols, Type>& b, Type t, mat_t<Rows, Cols, Type>& adj_a, mat_t<Rows, Cols, Type>& adj_b, Type& adj_t, const mat_t<Rows, Cols, Type>& adj_ret)
1377
+ {
1378
+ adj_a += adj_ret*(Type(1)-t);
1379
+ adj_b += adj_ret*t;
1380
+ adj_t += tensordot(b, adj_ret) - tensordot(a, adj_ret);
1381
+ }
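lerp and its adjoint follow directly from the affine form a(1 - t) + b t:

    \[
    \bar a \mathrel{+}= (1 - t)\,\bar r, \qquad
    \bar b \mathrel{+}= t\,\bar r, \qquad
    \bar t \mathrel{+}= \langle b - a,\ \bar r\rangle_F = \operatorname{tensordot}(b, \bar r) - \operatorname{tensordot}(a, \bar r)
    \]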
1382
+
1383
+ // for integral types we do not accumulate gradients
1384
+ template<unsigned Rows, unsigned Cols> CUDA_CALLABLE inline void adj_atomic_add(mat_t<Rows, Cols, int8>* buf, const mat_t<Rows, Cols, int8> &value) { }
1385
+ template<unsigned Rows, unsigned Cols> CUDA_CALLABLE inline void adj_atomic_add(mat_t<Rows, Cols, uint8>* buf, const mat_t<Rows, Cols, uint8> &value) { }
1386
+ template<unsigned Rows, unsigned Cols> CUDA_CALLABLE inline void adj_atomic_add(mat_t<Rows, Cols, int16>* buf, const mat_t<Rows, Cols, int16> &value) { }
1387
+ template<unsigned Rows, unsigned Cols> CUDA_CALLABLE inline void adj_atomic_add(mat_t<Rows, Cols, uint16>* buf, const mat_t<Rows, Cols, uint16> &value) { }
1388
+ template<unsigned Rows, unsigned Cols> CUDA_CALLABLE inline void adj_atomic_add(mat_t<Rows, Cols, int32>* buf, const mat_t<Rows, Cols, int32> &value) { }
1389
+ template<unsigned Rows, unsigned Cols> CUDA_CALLABLE inline void adj_atomic_add(mat_t<Rows, Cols, uint32>* buf, const mat_t<Rows, Cols, uint32> &value) { }
1390
+ template<unsigned Rows, unsigned Cols> CUDA_CALLABLE inline void adj_atomic_add(mat_t<Rows, Cols, int64>* buf, const mat_t<Rows, Cols, int64> &value) { }
1391
+ template<unsigned Rows, unsigned Cols> CUDA_CALLABLE inline void adj_atomic_add(mat_t<Rows, Cols, uint64>* buf, const mat_t<Rows, Cols, uint64> &value) { }
1392
+
1393
+ using mat22h = mat_t<2,2,half>;
1394
+ using mat33h = mat_t<3,3,half>;
1395
+ using mat44h = mat_t<4,4,half>;
1396
+
1397
+ using mat22 = mat_t<2,2,float>;
1398
+ using mat33 = mat_t<3,3,float>;
1399
+ using mat44 = mat_t<4,4,float>;
1400
+
1401
+ using mat22f = mat_t<2,2,float>;
1402
+ using mat33f = mat_t<3,3,float>;
1403
+ using mat44f = mat_t<4,4,float>;
1404
+
1405
+ using mat22d = mat_t<2,2,double>;
1406
+ using mat33d = mat_t<3,3,double>;
1407
+ using mat44d = mat_t<4,4,double>;
1408
+
1409
+ inline CUDA_CALLABLE void adj_mat22(vec2 c0, vec2 c1,
1410
+ vec2& a0, vec2& a1,
1411
+ const mat22& adj_ret)
1412
+ {
1413
+ a0 += adj_ret.get_col(0);
1414
+ a1 += adj_ret.get_col(1);
1415
+ }
1416
+
1417
+ inline CUDA_CALLABLE void adj_mat22(float m00, float m01, float m10, float m11, float& adj_m00, float& adj_m01, float& adj_m10, float& adj_m11, const mat22& adj_ret)
1418
+ {
1419
+ adj_m00 += adj_ret.data[0][0];
1420
+ adj_m01 += adj_ret.data[0][1];
1421
+ adj_m10 += adj_ret.data[1][0];
1422
+ adj_m11 += adj_ret.data[1][1];
1423
+ }
1424
+
1425
+ inline CUDA_CALLABLE void adj_mat33(vec3 c0, vec3 c1, vec3 c2,
1426
+ vec3& a0, vec3& a1, vec3& a2,
1427
+ const mat33& adj_ret)
1428
+ {
1429
+ // column constructor
1430
+ a0 += adj_ret.get_col(0);
1431
+ a1 += adj_ret.get_col(1);
1432
+ a2 += adj_ret.get_col(2);
1433
+
1434
+ }
1435
+
1436
+ inline CUDA_CALLABLE void adj_mat33(float m00, float m01, float m02,
1437
+ float m10, float m11, float m12,
1438
+ float m20, float m21, float m22,
1439
+ float& a00, float& a01, float& a02,
1440
+ float& a10, float& a11, float& a12,
1441
+ float& a20, float& a21, float& a22,
1442
+ const mat33& adj_ret)
1443
+ {
1444
+ a00 += adj_ret.data[0][0];
1445
+ a01 += adj_ret.data[0][1];
1446
+ a02 += adj_ret.data[0][2];
1447
+ a10 += adj_ret.data[1][0];
1448
+ a11 += adj_ret.data[1][1];
1449
+ a12 += adj_ret.data[1][2];
1450
+ a20 += adj_ret.data[2][0];
1451
+ a21 += adj_ret.data[2][1];
1452
+ a22 += adj_ret.data[2][2];
1453
+ }
1454
+
1455
+ inline CUDA_CALLABLE void adj_mat44(
1456
+ vec4 c0, vec4 c1, vec4 c2, vec4 c3,
1457
+ vec4& a0, vec4& a1, vec4& a2, vec4& a3,
1458
+ const mat44& adj_ret)
1459
+ {
1460
+ // column constructor
1461
+ a0 += adj_ret.get_col(0);
1462
+ a1 += adj_ret.get_col(1);
1463
+ a2 += adj_ret.get_col(2);
1464
+ a3 += adj_ret.get_col(3);
1465
+ }
1466
+
1467
+ inline CUDA_CALLABLE void adj_mat44(float m00, float m01, float m02, float m03,
1468
+ float m10, float m11, float m12, float m13,
1469
+ float m20, float m21, float m22, float m23,
1470
+ float m30, float m31, float m32, float m33,
1471
+ float& a00, float& a01, float& a02, float& a03,
1472
+ float& a10, float& a11, float& a12, float& a13,
1473
+ float& a20, float& a21, float& a22, float& a23,
1474
+ float& a30, float& a31, float& a32, float& a33,
1475
+ const mat44& adj_ret)
1476
+ {
1477
+ a00 += adj_ret.data[0][0];
1478
+ a01 += adj_ret.data[0][1];
1479
+ a02 += adj_ret.data[0][2];
1480
+ a03 += adj_ret.data[0][3];
1481
+
1482
+ a10 += adj_ret.data[1][0];
1483
+ a11 += adj_ret.data[1][1];
1484
+ a12 += adj_ret.data[1][2];
1485
+ a13 += adj_ret.data[1][3];
1486
+
1487
+ a20 += adj_ret.data[2][0];
1488
+ a21 += adj_ret.data[2][1];
1489
+ a22 += adj_ret.data[2][2];
1490
+ a23 += adj_ret.data[2][3];
1491
+
1492
+ a30 += adj_ret.data[3][0];
1493
+ a31 += adj_ret.data[3][1];
1494
+ a32 += adj_ret.data[3][2];
1495
+ a33 += adj_ret.data[3][3];
1496
+ }
1497
+
1498
+ } // namespace wp
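
Note on the lerp/adj_lerp pair added in this hunk: adj_lerp is the reverse-mode derivative of lerp(a, b, t) = a*(1 - t) + b*t. As a brief sketch (editorial, not part of the package source; it assumes, as elsewhere in this header, that tensordot(A, B) denotes the elementwise Frobenius inner product of two matrices), writing \bar{r} for the incoming adjoint adj_ret, the chain rule gives

\bar{a} \mathrel{+}= (1 - t)\,\bar{r}, \qquad
\bar{b} \mathrel{+}= t\,\bar{r}, \qquad
\bar{t} \mathrel{+}= \langle b - a,\ \bar{r} \rangle
  = \operatorname{tensordot}(b, \bar{r}) - \operatorname{tensordot}(a, \bar{r}).

Because t is a scalar, its adjoint contracts the full matrix derivative \partial r / \partial t = b - a against \bar{r}, which is exactly the tensordot(b, adj_ret) - tensordot(a, adj_ret) accumulation in adj_lerp above.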