warp-lang 1.0.2-py3-none-manylinux2014_x86_64.whl → 1.2.0-py3-none-manylinux2014_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of warp-lang might be problematic.
Files changed (356)
  1. warp/__init__.py +108 -97
  2. warp/__init__.pyi +1 -1
  3. warp/bin/warp-clang.so +0 -0
  4. warp/bin/warp.so +0 -0
  5. warp/build.py +88 -113
  6. warp/build_dll.py +383 -375
  7. warp/builtins.py +3693 -3354
  8. warp/codegen.py +2925 -2792
  9. warp/config.py +40 -36
  10. warp/constants.py +49 -45
  11. warp/context.py +5409 -5102
  12. warp/dlpack.py +442 -442
  13. warp/examples/__init__.py +16 -16
  14. warp/examples/assets/bear.usd +0 -0
  15. warp/examples/assets/bunny.usd +0 -0
  16. warp/examples/assets/cartpole.urdf +110 -110
  17. warp/examples/assets/crazyflie.usd +0 -0
  18. warp/examples/assets/cube.usd +0 -0
  19. warp/examples/assets/nv_ant.xml +92 -92
  20. warp/examples/assets/nv_humanoid.xml +183 -183
  21. warp/examples/assets/quadruped.urdf +267 -267
  22. warp/examples/assets/rocks.nvdb +0 -0
  23. warp/examples/assets/rocks.usd +0 -0
  24. warp/examples/assets/sphere.usd +0 -0
  25. warp/examples/benchmarks/benchmark_api.py +381 -383
  26. warp/examples/benchmarks/benchmark_cloth.py +278 -277
  27. warp/examples/benchmarks/benchmark_cloth_cupy.py +88 -88
  28. warp/examples/benchmarks/benchmark_cloth_jax.py +97 -100
  29. warp/examples/benchmarks/benchmark_cloth_numba.py +146 -142
  30. warp/examples/benchmarks/benchmark_cloth_numpy.py +77 -77
  31. warp/examples/benchmarks/benchmark_cloth_pytorch.py +86 -86
  32. warp/examples/benchmarks/benchmark_cloth_taichi.py +112 -112
  33. warp/examples/benchmarks/benchmark_cloth_warp.py +145 -146
  34. warp/examples/benchmarks/benchmark_launches.py +293 -295
  35. warp/examples/browse.py +29 -29
  36. warp/examples/core/example_dem.py +232 -219
  37. warp/examples/core/example_fluid.py +291 -267
  38. warp/examples/core/example_graph_capture.py +142 -126
  39. warp/examples/core/example_marching_cubes.py +186 -174
  40. warp/examples/core/example_mesh.py +172 -155
  41. warp/examples/core/example_mesh_intersect.py +203 -193
  42. warp/examples/core/example_nvdb.py +174 -170
  43. warp/examples/core/example_raycast.py +103 -90
  44. warp/examples/core/example_raymarch.py +197 -178
  45. warp/examples/core/example_render_opengl.py +183 -141
  46. warp/examples/core/example_sph.py +403 -387
  47. warp/examples/core/example_torch.py +219 -181
  48. warp/examples/core/example_wave.py +261 -248
  49. warp/examples/fem/bsr_utils.py +378 -380
  50. warp/examples/fem/example_apic_fluid.py +432 -389
  51. warp/examples/fem/example_burgers.py +262 -0
  52. warp/examples/fem/example_convection_diffusion.py +180 -168
  53. warp/examples/fem/example_convection_diffusion_dg.py +217 -209
  54. warp/examples/fem/example_deformed_geometry.py +175 -159
  55. warp/examples/fem/example_diffusion.py +199 -173
  56. warp/examples/fem/example_diffusion_3d.py +178 -152
  57. warp/examples/fem/example_diffusion_mgpu.py +219 -214
  58. warp/examples/fem/example_mixed_elasticity.py +242 -222
  59. warp/examples/fem/example_navier_stokes.py +257 -243
  60. warp/examples/fem/example_stokes.py +218 -192
  61. warp/examples/fem/example_stokes_transfer.py +263 -249
  62. warp/examples/fem/mesh_utils.py +133 -109
  63. warp/examples/fem/plot_utils.py +292 -287
  64. warp/examples/optim/example_bounce.py +258 -246
  65. warp/examples/optim/example_cloth_throw.py +220 -209
  66. warp/examples/optim/example_diffray.py +564 -536
  67. warp/examples/optim/example_drone.py +862 -835
  68. warp/examples/optim/example_inverse_kinematics.py +174 -168
  69. warp/examples/optim/example_inverse_kinematics_torch.py +183 -169
  70. warp/examples/optim/example_spring_cage.py +237 -231
  71. warp/examples/optim/example_trajectory.py +221 -199
  72. warp/examples/optim/example_walker.py +304 -293
  73. warp/examples/sim/example_cartpole.py +137 -129
  74. warp/examples/sim/example_cloth.py +194 -186
  75. warp/examples/sim/example_granular.py +122 -111
  76. warp/examples/sim/example_granular_collision_sdf.py +195 -186
  77. warp/examples/sim/example_jacobian_ik.py +234 -214
  78. warp/examples/sim/example_particle_chain.py +116 -105
  79. warp/examples/sim/example_quadruped.py +191 -180
  80. warp/examples/sim/example_rigid_chain.py +195 -187
  81. warp/examples/sim/example_rigid_contact.py +187 -177
  82. warp/examples/sim/example_rigid_force.py +125 -125
  83. warp/examples/sim/example_rigid_gyroscopic.py +107 -95
  84. warp/examples/sim/example_rigid_soft_contact.py +132 -122
  85. warp/examples/sim/example_soft_body.py +188 -177
  86. warp/fabric.py +337 -335
  87. warp/fem/__init__.py +61 -27
  88. warp/fem/cache.py +403 -388
  89. warp/fem/dirichlet.py +178 -179
  90. warp/fem/domain.py +262 -263
  91. warp/fem/field/__init__.py +100 -101
  92. warp/fem/field/field.py +148 -149
  93. warp/fem/field/nodal_field.py +298 -299
  94. warp/fem/field/restriction.py +22 -21
  95. warp/fem/field/test.py +180 -181
  96. warp/fem/field/trial.py +183 -183
  97. warp/fem/geometry/__init__.py +16 -19
  98. warp/fem/geometry/closest_point.py +69 -70
  99. warp/fem/geometry/deformed_geometry.py +270 -271
  100. warp/fem/geometry/element.py +748 -744
  101. warp/fem/geometry/geometry.py +184 -186
  102. warp/fem/geometry/grid_2d.py +380 -373
  103. warp/fem/geometry/grid_3d.py +437 -435
  104. warp/fem/geometry/hexmesh.py +953 -953
  105. warp/fem/geometry/nanogrid.py +455 -0
  106. warp/fem/geometry/partition.py +374 -376
  107. warp/fem/geometry/quadmesh_2d.py +532 -532
  108. warp/fem/geometry/tetmesh.py +840 -840
  109. warp/fem/geometry/trimesh_2d.py +577 -577
  110. warp/fem/integrate.py +1684 -1615
  111. warp/fem/operator.py +190 -191
  112. warp/fem/polynomial.py +214 -213
  113. warp/fem/quadrature/__init__.py +2 -2
  114. warp/fem/quadrature/pic_quadrature.py +243 -245
  115. warp/fem/quadrature/quadrature.py +295 -294
  116. warp/fem/space/__init__.py +179 -292
  117. warp/fem/space/basis_space.py +522 -489
  118. warp/fem/space/collocated_function_space.py +100 -105
  119. warp/fem/space/dof_mapper.py +236 -236
  120. warp/fem/space/function_space.py +148 -145
  121. warp/fem/space/grid_2d_function_space.py +148 -267
  122. warp/fem/space/grid_3d_function_space.py +167 -306
  123. warp/fem/space/hexmesh_function_space.py +253 -352
  124. warp/fem/space/nanogrid_function_space.py +202 -0
  125. warp/fem/space/partition.py +350 -350
  126. warp/fem/space/quadmesh_2d_function_space.py +261 -369
  127. warp/fem/space/restriction.py +161 -160
  128. warp/fem/space/shape/__init__.py +90 -15
  129. warp/fem/space/shape/cube_shape_function.py +728 -738
  130. warp/fem/space/shape/shape_function.py +102 -103
  131. warp/fem/space/shape/square_shape_function.py +611 -611
  132. warp/fem/space/shape/tet_shape_function.py +565 -567
  133. warp/fem/space/shape/triangle_shape_function.py +429 -429
  134. warp/fem/space/tetmesh_function_space.py +224 -292
  135. warp/fem/space/topology.py +297 -295
  136. warp/fem/space/trimesh_2d_function_space.py +153 -221
  137. warp/fem/types.py +77 -77
  138. warp/fem/utils.py +495 -495
  139. warp/jax.py +166 -141
  140. warp/jax_experimental.py +341 -339
  141. warp/native/array.h +1081 -1025
  142. warp/native/builtin.h +1603 -1560
  143. warp/native/bvh.cpp +402 -398
  144. warp/native/bvh.cu +533 -525
  145. warp/native/bvh.h +430 -429
  146. warp/native/clang/clang.cpp +496 -464
  147. warp/native/crt.cpp +42 -32
  148. warp/native/crt.h +352 -335
  149. warp/native/cuda_crt.h +1049 -1049
  150. warp/native/cuda_util.cpp +549 -540
  151. warp/native/cuda_util.h +288 -203
  152. warp/native/cutlass_gemm.cpp +34 -34
  153. warp/native/cutlass_gemm.cu +372 -372
  154. warp/native/error.cpp +66 -66
  155. warp/native/error.h +27 -27
  156. warp/native/exports.h +187 -0
  157. warp/native/fabric.h +228 -228
  158. warp/native/hashgrid.cpp +301 -278
  159. warp/native/hashgrid.cu +78 -77
  160. warp/native/hashgrid.h +227 -227
  161. warp/native/initializer_array.h +32 -32
  162. warp/native/intersect.h +1204 -1204
  163. warp/native/intersect_adj.h +365 -365
  164. warp/native/intersect_tri.h +322 -322
  165. warp/native/marching.cpp +2 -2
  166. warp/native/marching.cu +497 -497
  167. warp/native/marching.h +2 -2
  168. warp/native/mat.h +1545 -1498
  169. warp/native/matnn.h +333 -333
  170. warp/native/mesh.cpp +203 -203
  171. warp/native/mesh.cu +292 -293
  172. warp/native/mesh.h +1887 -1887
  173. warp/native/nanovdb/GridHandle.h +366 -0
  174. warp/native/nanovdb/HostBuffer.h +590 -0
  175. warp/native/nanovdb/NanoVDB.h +6624 -4782
  176. warp/native/nanovdb/PNanoVDB.h +3390 -2553
  177. warp/native/noise.h +850 -850
  178. warp/native/quat.h +1112 -1085
  179. warp/native/rand.h +303 -299
  180. warp/native/range.h +108 -108
  181. warp/native/reduce.cpp +156 -156
  182. warp/native/reduce.cu +348 -348
  183. warp/native/runlength_encode.cpp +61 -61
  184. warp/native/runlength_encode.cu +46 -46
  185. warp/native/scan.cpp +30 -30
  186. warp/native/scan.cu +36 -36
  187. warp/native/scan.h +7 -7
  188. warp/native/solid_angle.h +442 -442
  189. warp/native/sort.cpp +94 -94
  190. warp/native/sort.cu +97 -97
  191. warp/native/sort.h +14 -14
  192. warp/native/sparse.cpp +337 -337
  193. warp/native/sparse.cu +544 -544
  194. warp/native/spatial.h +630 -630
  195. warp/native/svd.h +562 -562
  196. warp/native/temp_buffer.h +30 -30
  197. warp/native/vec.h +1177 -1133
  198. warp/native/volume.cpp +529 -297
  199. warp/native/volume.cu +58 -32
  200. warp/native/volume.h +960 -538
  201. warp/native/volume_builder.cu +446 -425
  202. warp/native/volume_builder.h +34 -19
  203. warp/native/volume_impl.h +61 -0
  204. warp/native/warp.cpp +1057 -1052
  205. warp/native/warp.cu +2949 -2828
  206. warp/native/warp.h +321 -305
  207. warp/optim/__init__.py +9 -9
  208. warp/optim/adam.py +120 -120
  209. warp/optim/linear.py +1104 -939
  210. warp/optim/sgd.py +104 -92
  211. warp/render/__init__.py +10 -10
  212. warp/render/render_opengl.py +3356 -3204
  213. warp/render/render_usd.py +768 -749
  214. warp/render/utils.py +152 -150
  215. warp/sim/__init__.py +52 -59
  216. warp/sim/articulation.py +685 -685
  217. warp/sim/collide.py +1594 -1590
  218. warp/sim/import_mjcf.py +489 -481
  219. warp/sim/import_snu.py +220 -221
  220. warp/sim/import_urdf.py +536 -516
  221. warp/sim/import_usd.py +887 -881
  222. warp/sim/inertia.py +316 -317
  223. warp/sim/integrator.py +234 -233
  224. warp/sim/integrator_euler.py +1956 -1956
  225. warp/sim/integrator_featherstone.py +1917 -1991
  226. warp/sim/integrator_xpbd.py +3288 -3312
  227. warp/sim/model.py +4473 -4314
  228. warp/sim/particles.py +113 -112
  229. warp/sim/render.py +417 -403
  230. warp/sim/utils.py +413 -410
  231. warp/sparse.py +1289 -1227
  232. warp/stubs.py +2192 -2469
  233. warp/tape.py +1162 -225
  234. warp/tests/__init__.py +1 -1
  235. warp/tests/__main__.py +4 -4
  236. warp/tests/assets/test_index_grid.nvdb +0 -0
  237. warp/tests/assets/torus.usda +105 -105
  238. warp/tests/aux_test_class_kernel.py +26 -26
  239. warp/tests/aux_test_compile_consts_dummy.py +10 -10
  240. warp/tests/aux_test_conditional_unequal_types_kernels.py +21 -21
  241. warp/tests/aux_test_dependent.py +20 -22
  242. warp/tests/aux_test_grad_customs.py +21 -23
  243. warp/tests/aux_test_reference.py +9 -11
  244. warp/tests/aux_test_reference_reference.py +8 -10
  245. warp/tests/aux_test_square.py +15 -17
  246. warp/tests/aux_test_unresolved_func.py +14 -14
  247. warp/tests/aux_test_unresolved_symbol.py +14 -14
  248. warp/tests/disabled_kinematics.py +237 -239
  249. warp/tests/run_coverage_serial.py +31 -31
  250. warp/tests/test_adam.py +155 -157
  251. warp/tests/test_arithmetic.py +1088 -1124
  252. warp/tests/test_array.py +2415 -2326
  253. warp/tests/test_array_reduce.py +148 -150
  254. warp/tests/test_async.py +666 -656
  255. warp/tests/test_atomic.py +139 -141
  256. warp/tests/test_bool.py +212 -149
  257. warp/tests/test_builtins_resolution.py +1290 -1292
  258. warp/tests/test_bvh.py +162 -171
  259. warp/tests/test_closest_point_edge_edge.py +227 -228
  260. warp/tests/test_codegen.py +562 -553
  261. warp/tests/test_compile_consts.py +217 -101
  262. warp/tests/test_conditional.py +244 -246
  263. warp/tests/test_copy.py +230 -215
  264. warp/tests/test_ctypes.py +630 -632
  265. warp/tests/test_dense.py +65 -67
  266. warp/tests/test_devices.py +89 -98
  267. warp/tests/test_dlpack.py +528 -529
  268. warp/tests/test_examples.py +403 -378
  269. warp/tests/test_fabricarray.py +952 -955
  270. warp/tests/test_fast_math.py +60 -54
  271. warp/tests/test_fem.py +1298 -1278
  272. warp/tests/test_fp16.py +128 -130
  273. warp/tests/test_func.py +336 -337
  274. warp/tests/test_generics.py +596 -571
  275. warp/tests/test_grad.py +885 -640
  276. warp/tests/test_grad_customs.py +331 -336
  277. warp/tests/test_hash_grid.py +208 -164
  278. warp/tests/test_import.py +37 -39
  279. warp/tests/test_indexedarray.py +1132 -1134
  280. warp/tests/test_intersect.py +65 -67
  281. warp/tests/test_jax.py +305 -307
  282. warp/tests/test_large.py +169 -164
  283. warp/tests/test_launch.py +352 -354
  284. warp/tests/test_lerp.py +217 -261
  285. warp/tests/test_linear_solvers.py +189 -171
  286. warp/tests/test_lvalue.py +419 -493
  287. warp/tests/test_marching_cubes.py +63 -65
  288. warp/tests/test_mat.py +1799 -1827
  289. warp/tests/test_mat_lite.py +113 -115
  290. warp/tests/test_mat_scalar_ops.py +2905 -2889
  291. warp/tests/test_math.py +124 -193
  292. warp/tests/test_matmul.py +498 -499
  293. warp/tests/test_matmul_lite.py +408 -410
  294. warp/tests/test_mempool.py +186 -190
  295. warp/tests/test_mesh.py +281 -324
  296. warp/tests/test_mesh_query_aabb.py +226 -241
  297. warp/tests/test_mesh_query_point.py +690 -702
  298. warp/tests/test_mesh_query_ray.py +290 -303
  299. warp/tests/test_mlp.py +274 -276
  300. warp/tests/test_model.py +108 -110
  301. warp/tests/test_module_hashing.py +111 -0
  302. warp/tests/test_modules_lite.py +36 -39
  303. warp/tests/test_multigpu.py +161 -163
  304. warp/tests/test_noise.py +244 -248
  305. warp/tests/test_operators.py +248 -250
  306. warp/tests/test_options.py +121 -125
  307. warp/tests/test_peer.py +131 -137
  308. warp/tests/test_pinned.py +76 -78
  309. warp/tests/test_print.py +52 -54
  310. warp/tests/test_quat.py +2084 -2086
  311. warp/tests/test_rand.py +324 -288
  312. warp/tests/test_reload.py +207 -217
  313. warp/tests/test_rounding.py +177 -179
  314. warp/tests/test_runlength_encode.py +188 -190
  315. warp/tests/test_sim_grad.py +241 -0
  316. warp/tests/test_sim_kinematics.py +89 -97
  317. warp/tests/test_smoothstep.py +166 -168
  318. warp/tests/test_snippet.py +303 -266
  319. warp/tests/test_sparse.py +466 -460
  320. warp/tests/test_spatial.py +2146 -2148
  321. warp/tests/test_special_values.py +362 -0
  322. warp/tests/test_streams.py +484 -473
  323. warp/tests/test_struct.py +708 -675
  324. warp/tests/test_tape.py +171 -148
  325. warp/tests/test_torch.py +741 -743
  326. warp/tests/test_transient_module.py +85 -87
  327. warp/tests/test_types.py +554 -659
  328. warp/tests/test_utils.py +488 -499
  329. warp/tests/test_vec.py +1262 -1268
  330. warp/tests/test_vec_lite.py +71 -73
  331. warp/tests/test_vec_scalar_ops.py +2097 -2099
  332. warp/tests/test_verify_fp.py +92 -94
  333. warp/tests/test_volume.py +961 -736
  334. warp/tests/test_volume_write.py +338 -265
  335. warp/tests/unittest_serial.py +38 -37
  336. warp/tests/unittest_suites.py +367 -359
  337. warp/tests/unittest_utils.py +434 -578
  338. warp/tests/unused_test_misc.py +69 -71
  339. warp/tests/walkthrough_debug.py +85 -85
  340. warp/thirdparty/appdirs.py +598 -598
  341. warp/thirdparty/dlpack.py +143 -143
  342. warp/thirdparty/unittest_parallel.py +563 -561
  343. warp/torch.py +321 -295
  344. warp/types.py +4941 -4450
  345. warp/utils.py +1008 -821
  346. {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/LICENSE.md +126 -126
  347. {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/METADATA +365 -400
  348. warp_lang-1.2.0.dist-info/RECORD +359 -0
  349. warp/examples/assets/cube.usda +0 -42
  350. warp/examples/assets/sphere.usda +0 -56
  351. warp/examples/assets/torus.usda +0 -105
  352. warp/examples/fem/example_convection_diffusion_dg0.py +0 -194
  353. warp/native/nanovdb/PNanoVDBWrite.h +0 -295
  354. warp_lang-1.0.2.dist-info/RECORD +0 -352
  355. {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/WHEEL +0 -0
  356. {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/top_level.txt +0 -0
warp/native/vec.h CHANGED
@@ -1,1133 +1,1177 @@
1
- /** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
2
- * NVIDIA CORPORATION and its licensors retain all intellectual property
3
- * and proprietary rights in and to this software, related documentation
4
- * and any modifications thereto. Any use, reproduction, disclosure or
5
- * distribution of this software and related documentation without an express
6
- * license agreement from NVIDIA CORPORATION is strictly prohibited.
7
- */
8
-
9
- #pragma once
10
-
11
- #include "initializer_array.h"
12
-
13
- namespace wp
14
- {
15
-
16
- template<unsigned Length, typename Type>
17
- struct vec_t
18
- {
19
- Type c[Length];
20
-
21
- inline CUDA_CALLABLE vec_t()
22
- : c()
23
- {}
24
-
25
- inline CUDA_CALLABLE vec_t(Type s)
26
- {
27
- for( unsigned i=0; i < Length; ++i )
28
- {
29
- c[i] = s;
30
- }
31
- }
32
-
33
- template <typename OtherType>
34
- inline explicit CUDA_CALLABLE vec_t(const vec_t<Length, OtherType>& other)
35
- {
36
- for( unsigned i=0; i < Length; ++i )
37
- {
38
- c[i] = static_cast<Type>(other[i]);
39
- }
40
- }
41
-
42
- inline CUDA_CALLABLE vec_t(Type x, Type y)
43
- {
44
- assert(Length == 2);
45
- c[0]=x;
46
- c[1]=y;
47
- }
48
-
49
- inline CUDA_CALLABLE vec_t(Type x, Type y, Type z)
50
- {
51
- assert(Length == 3);
52
- c[0]=x;
53
- c[1]=y;
54
- c[2]=z;
55
- }
56
-
57
-
58
- inline CUDA_CALLABLE vec_t(Type x, Type y, Type z, Type w)
59
- {
60
- assert(Length == 4);
61
- c[0]=x;
62
- c[1]=y;
63
- c[2]=z;
64
- c[3]=w;
65
- }
66
-
67
- inline CUDA_CALLABLE vec_t(const initializer_array<Length, Type> &l)
68
- {
69
- for( unsigned i=0; i < Length; ++i )
70
- {
71
- c[i] = l[i];
72
- }
73
- }
74
-
75
- // special screw vector constructor for spatial_vectors:
76
- inline CUDA_CALLABLE vec_t(vec_t<3,Type> w, vec_t<3,Type> v)
77
- {
78
- c[0] = w[0];
79
- c[1] = w[1];
80
- c[2] = w[2];
81
- c[3] = v[0];
82
- c[4] = v[1];
83
- c[5] = v[2];
84
- }
85
-
86
- inline CUDA_CALLABLE Type operator[](int index) const
87
- {
88
- assert(index < Length);
89
- return c[index];
90
- }
91
-
92
- inline CUDA_CALLABLE Type& operator[](int index)
93
- {
94
- assert(index < Length);
95
- return c[index];
96
- }
97
- };
98
-
99
- using vec2b = vec_t<2,int8>;
100
- using vec3b = vec_t<3,int8>;
101
- using vec4b = vec_t<4,int8>;
102
- using vec2ub = vec_t<2,uint8>;
103
- using vec3ub = vec_t<3,uint8>;
104
- using vec4ub = vec_t<4,uint8>;
105
-
106
- using vec2s = vec_t<2,int16>;
107
- using vec3s = vec_t<3,int16>;
108
- using vec4s = vec_t<4,int16>;
109
- using vec2us = vec_t<2,uint16>;
110
- using vec3us = vec_t<3,uint16>;
111
- using vec4us = vec_t<4,uint16>;
112
-
113
- using vec2i = vec_t<2,int32>;
114
- using vec3i = vec_t<3,int32>;
115
- using vec4i = vec_t<4,int32>;
116
- using vec2ui = vec_t<2,uint32>;
117
- using vec3ui = vec_t<3,uint32>;
118
- using vec4ui = vec_t<4,uint32>;
119
-
120
- using vec2l = vec_t<2,int64>;
121
- using vec3l = vec_t<3,int64>;
122
- using vec4l = vec_t<4,int64>;
123
- using vec2ul = vec_t<2,uint64>;
124
- using vec3ul = vec_t<3,uint64>;
125
- using vec4ul = vec_t<4,uint64>;
126
-
127
- using vec2h = vec_t<2,half>;
128
- using vec3h = vec_t<3,half>;
129
- using vec4h = vec_t<4,half>;
130
-
131
- using vec2 = vec_t<2,float>;
132
- using vec3 = vec_t<3,float>;
133
- using vec4 = vec_t<4,float>;
134
-
135
- using vec2f = vec_t<2,float>;
136
- using vec3f = vec_t<3,float>;
137
- using vec4f = vec_t<4,float>;
138
-
139
- using vec2d = vec_t<2,double>;
140
- using vec3d = vec_t<3,double>;
141
- using vec4d = vec_t<4,double>;
142
-
143
- //--------------
144
- // vec<Length, Type> methods
145
-
146
- // Should these accept const references as arguments? It's all
147
- // inlined so maybe it doesn't matter? Even if it does, it
148
- // probably depends on the Length of the vector...
149
-
150
- // negation:
151
- template<unsigned Length, typename Type>
152
- inline CUDA_CALLABLE vec_t<Length, Type> operator - (vec_t<Length, Type> a)
153
- {
154
- // NB: this constructor will initialize all ret's components to 0, which is
155
- // unnecessary...
156
- vec_t<Length, Type> ret;
157
- for( unsigned i=0; i < Length; ++i )
158
- {
159
- ret[i] = -a[i];
160
- }
161
-
162
- // Wonder if this does a load of copying when it returns... hopefully not as it's inlined?
163
- return ret;
164
- }
165
-
166
- template<unsigned Length, typename Type>
167
- CUDA_CALLABLE inline vec_t<Length, Type> pos(const vec_t<Length, Type>& x)
168
- {
169
- return x;
170
- }
171
-
172
- template<unsigned Length, typename Type>
173
- CUDA_CALLABLE inline vec_t<Length, Type> neg(const vec_t<Length, Type>& x)
174
- {
175
- return -x;
176
- }
177
-
178
- template<typename Type>
179
- CUDA_CALLABLE inline vec_t<3, Type> neg(const vec_t<3, Type>& x)
180
- {
181
- return vec_t<3, Type>(-x.c[0], -x.c[1], -x.c[2]);
182
- }
183
-
184
- template<typename Type>
185
- CUDA_CALLABLE inline vec_t<2, Type> neg(const vec_t<2, Type>& x)
186
- {
187
- return vec_t<2, Type>(-x.c[0], -x.c[1]);
188
- }
189
-
190
- template<unsigned Length, typename Type>
191
- CUDA_CALLABLE inline void adj_neg(const vec_t<Length, Type>& x, vec_t<Length, Type>& adj_x, const vec_t<Length, Type>& adj_ret)
192
- {
193
- adj_x -= adj_ret;
194
- }
195
-
196
- // equality:
197
- template<unsigned Length, typename Type>
198
- inline CUDA_CALLABLE bool operator ==(const vec_t<Length, Type>& a, const vec_t<Length, Type>& b)
199
- {
200
- for( unsigned i=0; i < Length; ++i )
201
- {
202
- if(a[i] != b[i])
203
- {
204
- return false;
205
- }
206
- }
207
- return true;
208
- }
209
-
210
- // scalar multiplication:
211
- template<unsigned Length, typename Type>
212
- inline CUDA_CALLABLE vec_t<Length, Type> mul(vec_t<Length, Type> a, Type s)
213
- {
214
- vec_t<Length, Type> ret;
215
- for( unsigned i=0; i < Length; ++i )
216
- {
217
- ret[i] = a[i] * s;
218
- }
219
- return ret;
220
- }
221
-
222
- template<typename Type>
223
- inline CUDA_CALLABLE vec_t<3, Type> mul(vec_t<3, Type> a, Type s)
224
- {
225
- return vec_t<3, Type>(a.c[0]*s,a.c[1]*s,a.c[2]*s);
226
- }
227
-
228
- template<typename Type>
229
- inline CUDA_CALLABLE vec_t<2, Type> mul(vec_t<2, Type> a, Type s)
230
- {
231
- return vec_t<2, Type>(a.c[0]*s,a.c[1]*s);
232
- }
233
-
234
- template<unsigned Length, typename Type>
235
- inline CUDA_CALLABLE vec_t<Length, Type> mul(Type s, vec_t<Length, Type> a)
236
- {
237
- return mul(a, s);
238
- }
239
-
240
- template<unsigned Length, typename Type>
241
- inline CUDA_CALLABLE vec_t<Length, Type> operator*(Type s, vec_t<Length, Type> a)
242
- {
243
- return mul(a, s);
244
- }
245
-
246
- template<unsigned Length, typename Type>
247
- inline CUDA_CALLABLE vec_t<Length, Type> operator*(vec_t<Length, Type> a, Type s)
248
- {
249
- return mul(a, s);
250
- }
251
-
252
-
253
- // component wise multiplication:
254
- template<unsigned Length, typename Type>
255
- inline CUDA_CALLABLE vec_t<Length, Type> cw_mul(vec_t<Length, Type> a, vec_t<Length, Type> b)
256
- {
257
- vec_t<Length, Type> ret;
258
- for( unsigned i=0; i < Length; ++i )
259
- {
260
- ret[i] = a[i] * b[i];
261
- }
262
- return ret;
263
- }
264
-
265
- // division
266
- template<unsigned Length, typename Type>
267
- inline CUDA_CALLABLE vec_t<Length, Type> div(vec_t<Length, Type> a, Type s)
268
- {
269
- vec_t<Length, Type> ret;
270
- for( unsigned i=0; i < Length; ++i )
271
- {
272
- ret[i] = a[i] / s;
273
- }
274
- return ret;
275
- }
276
-
277
- template<typename Type>
278
- inline CUDA_CALLABLE vec_t<3, Type> div(vec_t<3, Type> a, Type s)
279
- {
280
- return vec_t<3, Type>(a.c[0]/s,a.c[1]/s,a.c[2]/s);
281
- }
282
-
283
- template<typename Type>
284
- inline CUDA_CALLABLE vec_t<2, Type> div(vec_t<2, Type> a, Type s)
285
- {
286
- return vec_t<2, Type>(a.c[0]/s,a.c[1]/s);
287
- }
288
-
289
- template<unsigned Length, typename Type>
290
- inline CUDA_CALLABLE vec_t<Length, Type> div(Type s, vec_t<Length, Type> a)
291
- {
292
- vec_t<Length, Type> ret;
293
- for (unsigned i=0; i < Length; ++i)
294
- {
295
- ret[i] = s / a[i];
296
- }
297
- return ret;
298
- }
299
-
300
- template<typename Type>
301
- inline CUDA_CALLABLE vec_t<3, Type> div(Type s, vec_t<3, Type> a)
302
- {
303
- return vec_t<3, Type>(s/a.c[0],s/a.c[1],s/a.c[2]);
304
- }
305
-
306
- template<typename Type>
307
- inline CUDA_CALLABLE vec_t<2, Type> div(Type s, vec_t<2, Type> a)
308
- {
309
- return vec_t<2, Type>(s/a.c[0],s/a.c[1]);
310
- }
311
-
312
- template<unsigned Length, typename Type>
313
- inline CUDA_CALLABLE vec_t<Length, Type> operator / (vec_t<Length, Type> a, Type s)
314
- {
315
- return div(a,s);
316
- }
317
-
318
- template<unsigned Length, typename Type>
319
- inline CUDA_CALLABLE vec_t<Length, Type> operator / (Type s, vec_t<Length, Type> a)
320
- {
321
- return div(s, a);
322
- }
323
-
324
- // component wise division
325
- template<unsigned Length, typename Type>
326
- inline CUDA_CALLABLE vec_t<Length, Type> cw_div(vec_t<Length, Type> a, vec_t<Length, Type> b)
327
- {
328
- vec_t<Length, Type> ret;
329
- for( unsigned i=0; i < Length; ++i )
330
- {
331
- ret[i] = a[i] / b[i];
332
- }
333
- return ret;
334
- }
335
-
336
- // addition
337
- template<unsigned Length, typename Type>
338
- inline CUDA_CALLABLE vec_t<Length, Type> add(vec_t<Length, Type> a, vec_t<Length, Type> b)
339
- {
340
- vec_t<Length, Type> ret;
341
- for( unsigned i=0; i < Length; ++i )
342
- {
343
- ret[i] = a[i] + b[i];
344
- }
345
- return ret;
346
- }
347
-
348
- template<typename Type>
349
- inline CUDA_CALLABLE vec_t<2, Type> add(vec_t<2, Type> a, vec_t<2, Type> b)
350
- {
351
- return vec_t<2, Type>( a.c[0] + b.c[0], a.c[1] + b.c[1]);
352
- }
353
-
354
- template<typename Type>
355
- inline CUDA_CALLABLE vec_t<3, Type> add(vec_t<3, Type> a, vec_t<3, Type> b)
356
- {
357
- return vec_t<3, Type>( a.c[0] + b.c[0], a.c[1] + b.c[1], a.c[2] + b.c[2]);
358
- }
359
-
360
- // subtraction
361
- template<unsigned Length, typename Type>
362
- inline CUDA_CALLABLE vec_t<Length, Type> sub(vec_t<Length, Type> a, vec_t<Length, Type> b)
363
- {
364
- vec_t<Length, Type> ret;
365
- for( unsigned i=0; i < Length; ++i )
366
- {
367
- ret[i] = Type(a[i] - b[i]);
368
- }
369
- return ret;
370
- }
371
-
372
- template<typename Type>
373
- inline CUDA_CALLABLE vec_t<2, Type> sub(vec_t<2, Type> a, vec_t<2, Type> b)
374
- {
375
- return vec_t<2, Type>( a.c[0] - b.c[0], a.c[1] - b.c[1]);
376
- }
377
-
378
- template<typename Type>
379
- inline CUDA_CALLABLE vec_t<3, Type> sub(vec_t<3, Type> a, vec_t<3, Type> b)
380
- {
381
- return vec_t<3, Type>( a.c[0] - b.c[0], a.c[1] - b.c[1], a.c[2] - b.c[2]);
382
- }
383
-
384
- // dot product:
385
- template<unsigned Length, typename Type>
386
- inline CUDA_CALLABLE Type dot(vec_t<Length, Type> a, vec_t<Length, Type> b)
387
- {
388
- Type ret(0);
389
- for( unsigned i=0; i < Length; ++i )
390
- {
391
- ret += a[i] * b[i];
392
- }
393
- return ret;
394
- }
395
-
396
- template<typename Type>
397
- inline CUDA_CALLABLE Type dot(vec_t<2, Type> a, vec_t<2, Type> b)
398
- {
399
- return a.c[0] * b.c[0] + a.c[1] * b.c[1];
400
- }
401
-
402
- template<typename Type>
403
- inline CUDA_CALLABLE Type dot(vec_t<3, Type> a, vec_t<3, Type> b)
404
- {
405
- return a.c[0] * b.c[0] + a.c[1] * b.c[1] + a.c[2] * b.c[2];
406
- }
407
-
408
- template<unsigned Length, typename Type>
409
- inline CUDA_CALLABLE Type tensordot(vec_t<Length, Type> a, vec_t<Length, Type> b)
410
- {
411
- // corresponds to `np.tensordot()` with all axes being contracted
412
- return dot(a, b);
413
- }
414
-
415
-
416
- template<unsigned Length, typename Type>
417
- inline CUDA_CALLABLE Type extract(const vec_t<Length, Type> & a, int idx)
418
- {
419
- #ifndef NDEBUG
420
- if (idx < 0 || idx >= Length)
421
- {
422
- printf("vec index %d out of bounds at %s %d\n", idx, __FILE__, __LINE__);
423
- assert(0);
424
- }
425
- #endif
426
-
427
- return a[idx];
428
- }
429
-
430
- template<unsigned Length, typename Type>
431
- inline CUDA_CALLABLE Type* index(vec_t<Length, Type>& v, int idx)
432
- {
433
- #ifndef NDEBUG
434
- if (idx < 0 || idx >= Length)
435
- {
436
- printf("vec index %d out of bounds at %s %d\n", idx, __FILE__, __LINE__);
437
- assert(0);
438
- }
439
- #endif
440
-
441
- return &v[idx];
442
- }
443
-
444
- template<unsigned Length, typename Type>
445
- inline CUDA_CALLABLE Type* indexref(vec_t<Length, Type>* v, int idx)
446
- {
447
- #ifndef NDEBUG
448
- if (idx < 0 || idx >= Length)
449
- {
450
- printf("vec store %d out of bounds at %s %d\n", idx, __FILE__, __LINE__);
451
- assert(0);
452
- }
453
- #endif
454
-
455
- return &((*v)[idx]);
456
- }
457
-
458
- template<unsigned Length, typename Type>
459
- inline CUDA_CALLABLE void adj_index(vec_t<Length, Type>& v, int idx,
460
- vec_t<Length, Type>& adj_v, int adj_idx, const Type& adj_value)
461
- {
462
- // nop
463
- }
464
-
465
-
466
- template<unsigned Length, typename Type>
467
- inline CUDA_CALLABLE void adj_indexref(vec_t<Length, Type>* v, int idx,
468
- vec_t<Length, Type>& adj_v, int adj_idx, const Type& adj_value)
469
- {
470
- // nop
471
- }
472
-
473
-
474
- template<unsigned Length, typename Type>
475
- inline CUDA_CALLABLE Type length(vec_t<Length, Type> a)
476
- {
477
- return sqrt(dot(a, a));
478
- }
479
-
480
- template<unsigned Length, typename Type>
481
- inline CUDA_CALLABLE Type length_sq(vec_t<Length, Type> a)
482
- {
483
- return dot(a, a);
484
- }
485
-
486
-
487
- template<typename Type>
488
- inline CUDA_CALLABLE Type length(vec_t<2, Type> a)
489
- {
490
- return sqrt(a.c[0] * a.c[0] + a.c[1] * a.c[1]);
491
- }
492
-
493
- template<typename Type>
494
- inline CUDA_CALLABLE Type length(vec_t<3, Type> a)
495
- {
496
- return sqrt(a.c[0] * a.c[0] + a.c[1] * a.c[1] + a.c[2] * a.c[2]);
497
- }
498
-
499
- template<unsigned Length, typename Type>
500
- inline CUDA_CALLABLE vec_t<Length, Type> normalize(vec_t<Length, Type> a)
501
- {
502
- Type l = length(a);
503
- if (l > Type(kEps))
504
- return div(a,l);
505
- else
506
- return vec_t<Length, Type>();
507
- }
508
-
509
- template<typename Type>
510
- inline CUDA_CALLABLE vec_t<2, Type> normalize(vec_t<2, Type> a)
511
- {
512
- Type l = sqrt(a.c[0] * a.c[0] + a.c[1] * a.c[1]);
513
- if (l > Type(kEps))
514
- return vec_t<2, Type>(a.c[0]/l,a.c[1]/l);
515
- else
516
- return vec_t<2, Type>();
517
- }
518
-
519
- template<typename Type>
520
- inline CUDA_CALLABLE vec_t<3, Type> normalize(vec_t<3, Type> a)
521
- {
522
- Type l = sqrt(a.c[0] * a.c[0] + a.c[1] * a.c[1] + a.c[2] * a.c[2]);
523
- if (l > Type(kEps))
524
- return vec_t<3, Type>(a.c[0]/l,a.c[1]/l,a.c[2]/l);
525
- else
526
- return vec_t<3, Type>();
527
- }
528
-
529
-
530
- template<typename Type>
531
- inline CUDA_CALLABLE vec_t<3,Type> cross(vec_t<3,Type> a, vec_t<3,Type> b)
532
- {
533
- return {
534
- Type(a[1]*b[2] - a[2]*b[1]),
535
- Type(a[2]*b[0] - a[0]*b[2]),
536
- Type(a[0]*b[1] - a[1]*b[0])
537
- };
538
- }
539
-
540
-
541
- template<unsigned Length, typename Type>
542
- inline bool CUDA_CALLABLE isfinite(vec_t<Length, Type> x)
543
- {
544
- for( unsigned i=0; i < Length; ++i )
545
- {
546
- if(!isfinite(x[i]))
547
- {
548
- return false;
549
- }
550
- }
551
- return true;
552
- }
553
-
554
- // These two functions seem to compile very slowly
555
- template<unsigned Length, typename Type>
556
- inline CUDA_CALLABLE vec_t<Length,Type> min(vec_t<Length,Type> a, vec_t<Length,Type> b)
557
- {
558
- vec_t<Length,Type> ret;
559
- for( unsigned i=0; i < Length; ++i )
560
- {
561
- ret[i] = a[i] < b[i] ? a[i] : b[i];
562
- }
563
- return ret;
564
- }
565
-
566
- template<unsigned Length, typename Type>
567
- inline CUDA_CALLABLE vec_t<Length,Type> max(vec_t<Length,Type> a, vec_t<Length,Type> b)
568
- {
569
- vec_t<Length,Type> ret;
570
- for( unsigned i=0; i < Length; ++i )
571
- {
572
- ret[i] = a[i] > b[i] ? a[i] : b[i];
573
- }
574
- return ret;
575
- }
576
-
577
- template<unsigned Length, typename Type>
578
- inline CUDA_CALLABLE Type min(vec_t<Length,Type> v)
579
- {
580
- Type ret = v[0];
581
- for( unsigned i=1; i < Length; ++i )
582
- {
583
- if (v[i] < ret)
584
- ret = v[i];
585
- }
586
- return ret;
587
- }
588
-
589
- template<unsigned Length, typename Type>
590
- inline CUDA_CALLABLE Type max(vec_t<Length,Type> v)
591
- {
592
- Type ret = v[0];
593
- for( unsigned i=1; i < Length; ++i )
594
- {
595
- if (v[i] > ret)
596
- ret = v[i];
597
- }
598
- return ret;
599
- }
600
-
601
- template<unsigned Length, typename Type>
602
- inline CUDA_CALLABLE unsigned argmin(vec_t<Length,Type> v)
603
- {
604
- unsigned ret = 0;
605
- for( unsigned i=1; i < Length; ++i )
606
- {
607
- if (v[i] < v[ret])
608
- ret = i;
609
- }
610
- return ret;
611
- }
612
-
613
- template<unsigned Length, typename Type>
614
- inline CUDA_CALLABLE unsigned argmax(vec_t<Length,Type> v)
615
- {
616
- unsigned ret = 0;
617
- for( unsigned i=1; i < Length; ++i )
618
- {
619
- if (v[i] > v[ret])
620
- ret = i;
621
- }
622
- return ret;
623
- }
624
-
625
- template<unsigned Length, typename Type>
626
- inline CUDA_CALLABLE void expect_near(const vec_t<Length, Type>& actual, const vec_t<Length, Type>& expected, const Type& tolerance)
627
- {
628
- const Type diff(0);
629
- for(size_t i=0; i<Length; ++i)
630
- {
631
- diff = max(diff,abs(actual[i] - expected[i]));
632
- }
633
- if (diff > tolerance)
634
- {
635
- printf("Error, expect_near() failed with tolerance "); print(tolerance);
636
- printf("\t Expected: "); print(expected);
637
- printf("\t Actual: "); print(actual);
638
- }
639
- }
640
-
641
- template<unsigned Length, typename Type>
642
- inline CUDA_CALLABLE void adj_expect_near(const vec_t<Length, Type>& actual, const vec_t<Length, Type>& expected, Type tolerance, vec_t<Length, Type>& adj_actual, vec_t<Length, Type>& adj_expected, Type adj_tolerance)
643
- {
644
- // nop
645
- }
646
-
647
- // adjoint for the initializer_array constructor:
648
- template<unsigned Length, typename Type>
649
- inline CUDA_CALLABLE void adj_vec_t(const initializer_array<Length, Type> &cmps, const initializer_array<Length, Type*> &adj_cmps, const vec_t<Length, Type>& adj_ret)
650
- {
651
- for(unsigned i=0; i < Length; ++i)
652
- {
653
- *(adj_cmps[i]) += adj_ret[i];
654
- }
655
- }
656
-
657
-
658
- // adjoint for the component constructors:
659
- template<typename Type>
660
- inline CUDA_CALLABLE void adj_vec_t(Type cmpx, Type cmpy, Type &adj_cmpx, Type &adj_cmpy, const vec_t<2, Type>& adj_ret)
661
- {
662
- adj_cmpx += adj_ret.c[0];
663
- adj_cmpy += adj_ret.c[1];
664
- }
665
-
666
- template<typename Type>
667
- inline CUDA_CALLABLE void adj_vec_t(Type cmpx, Type cmpy, Type cmpz, Type &adj_cmpx, Type &adj_cmpy, Type &adj_cmpz, const vec_t<3, Type>& adj_ret)
668
- {
669
- adj_cmpx += adj_ret.c[0];
670
- adj_cmpy += adj_ret.c[1];
671
- adj_cmpz += adj_ret.c[2];
672
- }
673
-
674
- template<typename Type>
675
- inline CUDA_CALLABLE void adj_vec_t(Type cmpx, Type cmpy, Type cmpz, Type cmpw, Type &adj_cmpx, Type &adj_cmpy, Type &adj_cmpz, Type &adj_cmpw, const vec_t<4, Type>& adj_ret)
676
- {
677
- adj_cmpx += adj_ret.c[0];
678
- adj_cmpy += adj_ret.c[1];
679
- adj_cmpz += adj_ret.c[2];
680
- adj_cmpw += adj_ret.c[3];
681
- }
682
-
683
- // adjoint for the constant constructor:
684
- template<unsigned Length, typename Type>
685
- inline CUDA_CALLABLE void adj_vec_t(Type s, Type& adj_s, const vec_t<Length, Type>& adj_ret)
686
- {
687
- for( unsigned i=0; i < Length; ++i )
688
- {
689
- adj_s += adj_ret[i];
690
- }
691
- }
692
-
693
- // adjoint for the casting constructor
694
- template<unsigned Length, typename Type, typename OtherType>
695
- inline CUDA_CALLABLE void adj_vec_t(const vec_t<Length, OtherType>& other, vec_t<Length, OtherType>& adj_other, const vec_t<Length, Type>& adj_ret)
696
- {
697
- for( unsigned i=0; i < Length; ++i )
698
- {
699
- adj_other[i] += static_cast<OtherType>(adj_ret[i]);
700
- }
701
- }
702
-
703
- template<typename Type>
704
- CUDA_CALLABLE inline void adj_vec_t(const vec_t<3,Type>& w, const vec_t<3,Type>& v, vec_t<3,Type>& adj_w, vec_t<3,Type>& adj_v, const vec_t<6,Type>& adj_ret)
705
- {
706
- adj_w[0] += adj_ret[0];
707
- adj_w[1] += adj_ret[1];
708
- adj_w[2] += adj_ret[2];
709
- adj_v[0] += adj_ret[3];
710
- adj_v[1] += adj_ret[4];
711
- adj_v[2] += adj_ret[5];
712
- }
713
-
714
- template<unsigned Length, typename Type>
715
- inline CUDA_CALLABLE void adj_mul(vec_t<Length, Type> a, Type s, vec_t<Length, Type>& adj_a, Type& adj_s, const vec_t<Length, Type>& adj_ret)
716
- {
717
- for( unsigned i=0; i < Length; ++i )
718
- {
719
- adj_a[i] += s*adj_ret[i];
720
- }
721
-
722
- adj_s += dot(a, adj_ret);
723
-
724
- #if FP_CHECK
725
- if (!isfinite(a) || !isfinite(s) || !isfinite(adj_a) || !isfinite(adj_s) || !isfinite(adj_ret))
726
- {
727
- // \TODO: How shall we implement this error message?
728
- //printf("adj_mul((%f %f %f %f), %f, (%f %f %f %f), %f, (%f %f %f %f)\n", a.x, a.y, a.z, a.w, s, adj_a.x, adj_a.y, adj_a.z, adj_a.w, adj_s, adj_ret.x, adj_ret.y, adj_ret.z, adj_ret.w);
729
- assert(0);
730
- }
731
- #endif
732
- }
733
-
734
- template<unsigned Length, typename Type>
735
- inline CUDA_CALLABLE void adj_mul(Type s, vec_t<Length, Type> a, Type& adj_s, vec_t<Length, Type>& adj_a, const vec_t<Length, Type>& adj_ret)
736
- {
737
- adj_mul(a, s, adj_a, adj_s, adj_ret);
738
- }
739
-
740
- template<unsigned Length, typename Type>
741
- inline CUDA_CALLABLE void adj_cw_mul(vec_t<Length, Type> a, vec_t<Length, Type> b, vec_t<Length, Type>& adj_a, vec_t<Length, Type>& adj_b, const vec_t<Length, Type>& adj_ret)
742
- {
743
- adj_a += cw_mul(b, adj_ret);
744
- adj_b += cw_mul(a, adj_ret);
745
- }
746
-
747
- template<unsigned Length, typename Type>
748
- inline CUDA_CALLABLE void adj_div(vec_t<Length, Type> a, Type s, vec_t<Length, Type>& adj_a, Type& adj_s, const vec_t<Length, Type>& adj_ret)
749
- {
750
-
751
- adj_s -= dot(a , adj_ret)/ (s * s); // - a / s^2
752
-
753
- for( unsigned i=0; i < Length; ++i )
754
- {
755
- adj_a[i] += adj_ret[i] / s;
756
- }
757
-
758
- #if FP_CHECK
759
- if (!isfinite(a) || !isfinite(s) || !isfinite(adj_a) || !isfinite(adj_s) || !isfinite(adj_ret))
760
- {
761
- // \TODO: How shall we implement this error message?
762
- // printf("adj_div((%f %f %f %f), %f, (%f %f %f %f), %f, (%f %f %f %f)\n", a.x, a.y, a.z, a.w, s, adj_a.x, adj_a.y, adj_a.z, adj_a.w, adj_s, adj_ret.x, adj_ret.y, adj_ret.z, adj_ret.w);
763
- assert(0);
764
- }
765
- #endif
766
- }
767
-
768
- template<unsigned Length, typename Type>
769
- inline CUDA_CALLABLE void adj_div(Type s, vec_t<Length, Type> a, Type& adj_s, vec_t<Length, Type>& adj_a, const vec_t<Length, Type>& adj_ret)
770
- {
771
-
772
- adj_s -= dot(a , adj_ret)/ (s * s); // - a / s^2
773
-
774
- for( unsigned i=0; i < Length; ++i )
775
- {
776
- adj_a[i] += s / adj_ret[i];
777
- }
778
-
779
- #if FP_CHECK
780
- if (!isfinite(a) || !isfinite(s) || !isfinite(adj_a) || !isfinite(adj_s) || !isfinite(adj_ret))
781
- {
782
- // \TODO: How shall we implement this error message?
783
- // printf("adj_div((%f %f %f %f), %f, (%f %f %f %f), %f, (%f %f %f %f)\n", a.x, a.y, a.z, a.w, s, adj_a.x, adj_a.y, adj_a.z, adj_a.w, adj_s, adj_ret.x, adj_ret.y, adj_ret.z, adj_ret.w);
784
- assert(0);
785
- }
786
- #endif
787
- }
788
-
789
- template<unsigned Length, typename Type>
790
- inline CUDA_CALLABLE void adj_cw_div(vec_t<Length, Type> a, vec_t<Length, Type> b, vec_t<Length, Type>& ret, vec_t<Length, Type>& adj_a, vec_t<Length, Type>& adj_b, const vec_t<Length, Type>& adj_ret) {
791
- adj_a += cw_div(adj_ret, b);
792
- adj_b -= cw_mul(adj_ret, cw_div(ret, b));
793
- }
794
-
795
- template<unsigned Length, typename Type>
796
- inline CUDA_CALLABLE void adj_add(vec_t<Length, Type> a, vec_t<Length, Type> b, vec_t<Length, Type>& adj_a, vec_t<Length, Type>& adj_b, const vec_t<Length, Type>& adj_ret)
797
- {
798
- adj_a += adj_ret;
799
- adj_b += adj_ret;
800
- }
801
-
802
- template<typename Type>
803
- inline CUDA_CALLABLE void adj_add(vec_t<2, Type> a, vec_t<2, Type> b, vec_t<2, Type>& adj_a, vec_t<2, Type>& adj_b, const vec_t<2, Type>& adj_ret)
804
- {
805
- adj_a.c[0] += adj_ret.c[0];
806
- adj_a.c[1] += adj_ret.c[1];
807
- adj_b.c[0] += adj_ret.c[0];
808
- adj_b.c[1] += adj_ret.c[1];
809
- }
810
-
811
- template<typename Type>
812
- inline CUDA_CALLABLE void adj_add(vec_t<3, Type> a, vec_t<3, Type> b, vec_t<3, Type>& adj_a, vec_t<3, Type>& adj_b, const vec_t<3, Type>& adj_ret)
813
- {
814
- adj_a.c[0] += adj_ret.c[0];
815
- adj_a.c[1] += adj_ret.c[1];
816
- adj_a.c[2] += adj_ret.c[2];
817
- adj_b.c[0] += adj_ret.c[0];
818
- adj_b.c[1] += adj_ret.c[1];
819
- adj_b.c[2] += adj_ret.c[2];
820
- }
821
-
822
- template<unsigned Length, typename Type>
823
- inline CUDA_CALLABLE void adj_sub(vec_t<Length, Type> a, vec_t<Length, Type> b, vec_t<Length, Type>& adj_a, vec_t<Length, Type>& adj_b, const vec_t<Length, Type>& adj_ret)
824
- {
825
- adj_a += adj_ret;
826
- adj_b -= adj_ret;
827
- }
828
-
829
- template<typename Type>
830
- inline CUDA_CALLABLE void adj_sub(vec_t<2, Type> a, vec_t<2, Type> b, vec_t<2, Type>& adj_a, vec_t<2, Type>& adj_b, const vec_t<2, Type>& adj_ret)
831
- {
832
- adj_a.c[0] += adj_ret.c[0];
833
- adj_a.c[1] += adj_ret.c[1];
834
- adj_b.c[0] -= adj_ret.c[0];
835
- adj_b.c[1] -= adj_ret.c[1];
836
- }
837
-
838
- template<typename Type>
839
- inline CUDA_CALLABLE void adj_sub(vec_t<3, Type> a, vec_t<3, Type> b, vec_t<3, Type>& adj_a, vec_t<3, Type>& adj_b, const vec_t<3, Type>& adj_ret)
840
- {
841
- adj_a.c[0] += adj_ret.c[0];
842
- adj_a.c[1] += adj_ret.c[1];
843
- adj_a.c[2] += adj_ret.c[2];
844
- adj_b.c[0] -= adj_ret.c[0];
845
- adj_b.c[1] -= adj_ret.c[1];
846
- adj_b.c[2] -= adj_ret.c[2];
847
- }
848
-
849
- template<unsigned Length, typename Type>
850
- inline CUDA_CALLABLE void adj_dot(vec_t<Length, Type> a, vec_t<Length, Type> b, vec_t<Length, Type>& adj_a, vec_t<Length, Type>& adj_b, const Type adj_ret)
851
- {
852
- adj_a += b*adj_ret;
853
- adj_b += a*adj_ret;
854
-
855
- #if FP_CHECK
856
- if (!isfinite(a) || !isfinite(b) || !isfinite(adj_a) || !isfinite(adj_b) || !isfinite(adj_ret))
857
- {
858
- // \TODO: How shall we implement this error message?
859
- //printf("adj_dot((%f %f %f %f), (%f %f %f %f), (%f %f %f %f), (%f %f %f %f), %f)\n", a.x, a.y, a.z, a.w, b.x, b.y, b.z, b.w, adj_a.x, adj_a.y, adj_a.z, adj_a.w, adj_b.x, adj_b.y, adj_b.z, adj_b.w, adj_ret);
860
- assert(0);
861
- }
862
- #endif
863
- }
864
-
865
-
866
-
867
- template<typename Type>
868
- inline CUDA_CALLABLE void adj_dot(vec_t<2, Type> a, vec_t<2, Type> b, vec_t<2, Type>& adj_a, vec_t<2, Type>& adj_b, const Type adj_ret)
869
- {
870
- adj_a.c[0] += b.c[0]*adj_ret;
871
- adj_a.c[1] += b.c[1]*adj_ret;
872
-
873
- adj_b.c[0] += a.c[0]*adj_ret;
874
- adj_b.c[1] += a.c[1]*adj_ret;
875
- }
876
-
877
- template<typename Type>
878
- inline CUDA_CALLABLE void adj_dot(vec_t<3, Type> a, vec_t<3, Type> b, vec_t<3, Type>& adj_a, vec_t<3, Type>& adj_b, const Type adj_ret)
879
- {
880
- adj_a.c[0] += b.c[0]*adj_ret;
881
- adj_a.c[1] += b.c[1]*adj_ret;
882
- adj_a.c[2] += b.c[2]*adj_ret;
883
-
884
- adj_b.c[0] += a.c[0]*adj_ret;
885
- adj_b.c[1] += a.c[1]*adj_ret;
886
- adj_b.c[2] += a.c[2]*adj_ret;
887
- }
888
-
889
-
890
- template<unsigned Length, typename Type>
891
- inline CUDA_CALLABLE void adj_extract(const vec_t<Length, Type> & a, int idx, vec_t<Length, Type> & adj_a, int & adj_idx, Type & adj_ret)
892
- {
893
- #ifndef NDEBUG
894
- if (idx < 0 || idx > Length)
895
- {
896
- printf("Tvec2<Scalar> index %d out of bounds at %s %d\n", idx, __FILE__, __LINE__);
897
- assert(0);
898
- }
899
- #endif
900
-
901
- adj_a[idx] += adj_ret;
902
- }
903
-
904
- template<unsigned Length, typename Type>
905
- inline CUDA_CALLABLE void adj_length(vec_t<Length, Type> a, Type ret, vec_t<Length, Type>& adj_a, const Type adj_ret)
906
- {
907
- if (ret > Type(kEps))
908
- {
909
- adj_a += div(a, ret) * adj_ret;
910
- }
911
-
912
- #if FP_CHECK
913
- if (!isfinite(adj_a))
914
- {
915
- // \TODO: How shall we implement this error message?
916
- //printf("%s:%d - adj_length((%f %f %f %f), (%f %f %f %f), (%f))\n", __FILE__, __LINE__, a.x, a.y, a.z, a.w, adj_a.x, adj_a.y, adj_a.z, adj_a.w, adj_ret);
917
- assert(0);
918
- }
919
- #endif
920
- }
921
-
922
- template<unsigned Length, typename Type>
923
- inline CUDA_CALLABLE void adj_length_sq(vec_t<Length, Type> a, vec_t<Length, Type>& adj_a, const Type adj_ret)
924
- {
925
- adj_a += Type(2.0)*a*adj_ret;
926
-
927
- #if FP_CHECK
928
- if (!isfinite(adj_a))
929
- {
930
- // \TODO: How shall we implement this error message?
931
- //printf("%s:%d - adj_length((%f %f %f %f), (%f %f %f %f), (%f))\n", __FILE__, __LINE__, a.x, a.y, a.z, a.w, adj_a.x, adj_a.y, adj_a.z, adj_a.w, adj_ret);
932
- assert(0);
933
- }
934
- #endif
935
- }
936
-
937
- template<unsigned Length, typename Type>
938
- inline CUDA_CALLABLE void adj_normalize(vec_t<Length, Type> a, vec_t<Length, Type>& ret, vec_t<Length, Type>& adj_a, const vec_t<Length, Type>& adj_ret)
939
- {
940
- Type d = length(a);
941
-
942
- if (d > Type(kEps))
943
- {
944
- Type invd = Type(1.0f)/d;
945
-
946
- adj_a += (adj_ret*invd - ret*(dot(ret, adj_ret))*invd);
947
-
948
- #if FP_CHECK
949
- if (!isfinite(adj_a))
950
- {
951
- // \TODO: How shall we implement this error message?
952
- //printf("%s:%d - adj_normalize((%f %f %f %f), (%f %f %f %f), (%f, %f, %f, %f))\n", __FILE__, __LINE__, a.x, a.y, a.z, a.w, adj_a.x, adj_a.y, adj_a.z, adj_a.w, adj_ret.x, adj_ret.y, adj_ret.z, adj_ret.w);
953
- assert(0);
954
- }
955
- #endif
956
- }
957
- }
958
-
959
- template<typename Type>
960
- inline CUDA_CALLABLE void adj_cross(vec_t<3,Type> a, vec_t<3,Type> b, vec_t<3,Type>& adj_a, vec_t<3,Type>& adj_b, const vec_t<3,Type>& adj_ret)
961
- {
962
- // todo: sign check
963
- adj_a += cross(b, adj_ret);
964
- adj_b -= cross(a, adj_ret);
965
- }
966
-
967
- template<unsigned Length, typename Type>
968
- inline CUDA_CALLABLE void adj_min(const vec_t<Length,Type> &a, const vec_t<Length,Type> &b, vec_t<Length,Type>& adj_a, vec_t<Length,Type>& adj_b, const vec_t<Length,Type> &adj_ret)
969
- {
970
- for( unsigned i=0; i < Length; ++i )
971
- {
972
- if (a[i] < b[i])
973
- adj_a[i] += adj_ret[i];
974
- else
975
- adj_b[i] += adj_ret[i];
976
- }
977
- }
978
-
979
- template<unsigned Length, typename Type>
980
- inline CUDA_CALLABLE void adj_max(const vec_t<Length,Type> &a, const vec_t<Length,Type> &b, vec_t<Length,Type>& adj_a, vec_t<Length,Type>& adj_b, const vec_t<Length,Type> &adj_ret)
981
- {
982
- for( unsigned i=0; i < Length; ++i )
983
- {
984
- if (a[i] > b[i])
985
- adj_a[i] += adj_ret[i];
986
- else
987
- adj_b[i] += adj_ret[i];
988
- }
989
- }
990
-
991
- template<unsigned Length, typename Type>
992
- inline CUDA_CALLABLE void adj_min(const vec_t<Length,Type> &v, vec_t<Length,Type>& adj_v, const Type &adj_ret)
993
- {
994
- unsigned i = argmin(v);
995
- adj_v[i] += adj_ret;
996
- }
997
-
998
- template<unsigned Length, typename Type>
999
- inline CUDA_CALLABLE void adj_max(const vec_t<Length,Type> &v, vec_t<Length,Type>& adj_v, const Type &adj_ret)
1000
- {
1001
- unsigned i = argmax(v);
1002
- adj_v[i] += adj_ret;
1003
- }
1004
-
1005
- // Do I need to specialize these for different lengths?
1006
- template<unsigned Length, typename Type>
1007
- inline CUDA_CALLABLE vec_t<Length, Type> atomic_add(vec_t<Length, Type> * addr, vec_t<Length, Type> value)
1008
- {
1009
- vec_t<Length, Type> ret;
1010
- for( unsigned i=0; i < Length; ++i )
1011
- {
1012
- ret[i] = atomic_add(&(addr -> c[i]), value[i]);
1013
- }
1014
-
1015
- return ret;
1016
- }
1017
-
1018
- template<unsigned Length, typename Type>
1019
- inline CUDA_CALLABLE vec_t<Length, Type> atomic_min(vec_t<Length, Type> * addr, vec_t<Length, Type> value)
1020
- {
1021
- vec_t<Length, Type> ret;
1022
- for( unsigned i=0; i < Length; ++i )
1023
- {
1024
- ret[i] = atomic_min(&(addr -> c[i]), value[i]);
1025
- }
1026
-
1027
- return ret;
1028
- }
1029
-
1030
- template<unsigned Length, typename Type>
1031
- inline CUDA_CALLABLE vec_t<Length, Type> atomic_max(vec_t<Length, Type> * addr, vec_t<Length, Type> value)
1032
- {
1033
- vec_t<Length, Type> ret;
1034
- for( unsigned i=0; i < Length; ++i )
1035
- {
1036
- ret[i] = atomic_max(&(addr -> c[i]), value[i]);
1037
- }
1038
-
1039
- return ret;
1040
- }
1041
-
1042
- template<unsigned Length, typename Type>
1043
- inline CUDA_CALLABLE void adj_atomic_minmax(
1044
- vec_t<Length,Type> *addr,
1045
- vec_t<Length,Type> *adj_addr,
1046
- const vec_t<Length,Type> &value,
1047
- vec_t<Length,Type> &adj_value)
1048
- {
1049
- for (unsigned i=0; i < Length; ++i)
1050
- adj_atomic_minmax(&(addr->c[i]), &(adj_addr->c[i]), value[i], adj_value[i]);
1051
- }
1052
-
1053
- // ok, the original implementation of this didn't take the absolute values.
1054
- // I wouldn't consider this expected behavior. It looks like it's only
1055
- // being used for bounding boxes at the moment, where this doesn't matter,
1056
- // but you often use it for ray tracing where it does. Not sure if the
1057
- // fabs() incurs a performance hit...
1058
- template<unsigned Length, typename Type>
1059
- CUDA_CALLABLE inline int longest_axis(const vec_t<Length, Type>& v)
1060
- {
1061
- Type lmax = abs(v[0]);
1062
- int ret(0);
1063
- for( unsigned i=1; i < Length; ++i )
1064
- {
1065
- Type l = abs(v[i]);
1066
- if( l > lmax )
1067
- {
1068
- ret = i;
1069
- lmax = l;
1070
- }
1071
- }
1072
- return ret;
1073
- }
1074
-
1075
- template<unsigned Length, typename Type>
1076
- CUDA_CALLABLE inline vec_t<Length,Type> lerp(const vec_t<Length,Type>& a, const vec_t<Length,Type>& b, Type t)
1077
- {
1078
- return a*(Type(1)-t) + b*t;
1079
- }
1080
-
1081
- template<unsigned Length, typename Type>
1082
- CUDA_CALLABLE inline void adj_lerp(const vec_t<Length,Type>& a, const vec_t<Length,Type>& b, Type t, vec_t<Length,Type>& adj_a, vec_t<Length,Type>& adj_b, Type& adj_t, const vec_t<Length,Type>& adj_ret)
1083
- {
1084
- adj_a += adj_ret*(Type(1)-t);
1085
- adj_b += adj_ret*t;
1086
- adj_t += tensordot(b, adj_ret) - tensordot(a, adj_ret);
1087
- }
1088
-
1089
- // for integral types we do not accumulate gradients
1090
- template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_add(vec_t<Length, int8>* buf, const vec_t<Length, int8> &value) { }
1091
- template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_add(vec_t<Length, uint8>* buf, const vec_t<Length, uint8> &value) { }
1092
- template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_add(vec_t<Length, int16>* buf, const vec_t<Length, int16> &value) { }
1093
- template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_add(vec_t<Length, uint16>* buf, const vec_t<Length, uint16> &value) { }
1094
- template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_add(vec_t<Length, int32>* buf, const vec_t<Length, int32> &value) { }
1095
- template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_add(vec_t<Length, uint32>* buf, const vec_t<Length, uint32> &value) { }
1096
- template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_add(vec_t<Length, int64>* buf, const vec_t<Length, int64> &value) { }
1097
- template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_add(vec_t<Length, uint64>* buf, const vec_t<Length, uint64> &value) { }
1098
-
1099
-
1100
- // adjoints for some of the constructors, used in intersect.h
1101
- inline CUDA_CALLABLE void adj_vec2(float x, float y, float& adj_x, float& adj_y, const vec2& adj_ret)
1102
- {
1103
- adj_x += adj_ret[0];
1104
- adj_y += adj_ret[1];
1105
- }
1106
-
1107
- inline CUDA_CALLABLE void adj_vec3(float x, float y, float z, float& adj_x, float& adj_y, float& adj_z, const vec3& adj_ret)
1108
- {
1109
- adj_x += adj_ret[0];
1110
- adj_y += adj_ret[1];
1111
- adj_z += adj_ret[2];
1112
- }
1113
-
1114
- inline CUDA_CALLABLE void adj_vec4(float x, float y, float z, float w, float& adj_x, float& adj_y, float& adj_z, float& adj_w, const vec4& adj_ret)
1115
- {
1116
- adj_x += adj_ret[0];
1117
- adj_y += adj_ret[1];
1118
- adj_z += adj_ret[2];
1119
- adj_w += adj_ret[3];
1120
- }
1121
-
1122
- inline CUDA_CALLABLE void adj_vec3(float s, float& adj_s, const vec3& adj_ret)
1123
- {
1124
- adj_vec_t(s, adj_s, adj_ret);
1125
- }
1126
-
1127
- inline CUDA_CALLABLE void adj_vec4(float s, float& adj_s, const vec4& adj_ret)
1128
- {
1129
- adj_vec_t(s, adj_s, adj_ret);
1130
- }
1131
-
1132
-
1133
- } // namespace wp
1
+ /** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
2
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
3
+ * and proprietary rights in and to this software, related documentation
4
+ * and any modifications thereto. Any use, reproduction, disclosure or
5
+ * distribution of this software and related documentation without an express
6
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
7
+ */
8
+
9
+ #pragma once
10
+
11
+ #include "initializer_array.h"
12
+
13
+ namespace wp
14
+ {
15
+
16
+ template<unsigned Length, typename Type>
17
+ struct vec_t
18
+ {
19
+ Type c[Length];
20
+
21
+ inline CUDA_CALLABLE vec_t()
22
+ : c()
23
+ {}
24
+
25
+ inline CUDA_CALLABLE vec_t(Type s)
26
+ {
27
+ for( unsigned i=0; i < Length; ++i )
28
+ {
29
+ c[i] = s;
30
+ }
31
+ }
32
+
33
+ template <typename OtherType>
34
+ inline explicit CUDA_CALLABLE vec_t(const vec_t<Length, OtherType>& other)
35
+ {
36
+ for( unsigned i=0; i < Length; ++i )
37
+ {
38
+ c[i] = static_cast<Type>(other[i]);
39
+ }
40
+ }
41
+
42
+ inline CUDA_CALLABLE vec_t(Type x, Type y)
43
+ {
44
+ assert(Length == 2);
45
+ c[0]=x;
46
+ c[1]=y;
47
+ }
48
+
49
+ inline CUDA_CALLABLE vec_t(Type x, Type y, Type z)
50
+ {
51
+ assert(Length == 3);
52
+ c[0]=x;
53
+ c[1]=y;
54
+ c[2]=z;
55
+ }
56
+
57
+
58
+ inline CUDA_CALLABLE vec_t(Type x, Type y, Type z, Type w)
59
+ {
60
+ assert(Length == 4);
61
+ c[0]=x;
62
+ c[1]=y;
63
+ c[2]=z;
64
+ c[3]=w;
65
+ }
66
+
67
+ inline CUDA_CALLABLE vec_t(const initializer_array<Length, Type> &l)
68
+ {
69
+ for( unsigned i=0; i < Length; ++i )
70
+ {
71
+ c[i] = l[i];
72
+ }
73
+ }
74
+
75
+ // special screw vector constructor for spatial_vectors:
76
+ inline CUDA_CALLABLE vec_t(vec_t<3,Type> w, vec_t<3,Type> v)
77
+ {
78
+ c[0] = w[0];
79
+ c[1] = w[1];
80
+ c[2] = w[2];
81
+ c[3] = v[0];
82
+ c[4] = v[1];
83
+ c[5] = v[2];
84
+ }
85
+
86
+ inline CUDA_CALLABLE Type operator[](int index) const
87
+ {
88
+ assert(index < Length);
89
+ return c[index];
90
+ }
91
+
92
+ inline CUDA_CALLABLE Type& operator[](int index)
93
+ {
94
+ assert(index < Length);
95
+ return c[index];
96
+ }
97
+ };
98
+
99
+ using vec2b = vec_t<2,int8>;
100
+ using vec3b = vec_t<3,int8>;
101
+ using vec4b = vec_t<4,int8>;
102
+ using vec2ub = vec_t<2,uint8>;
103
+ using vec3ub = vec_t<3,uint8>;
104
+ using vec4ub = vec_t<4,uint8>;
105
+
106
+ using vec2s = vec_t<2,int16>;
107
+ using vec3s = vec_t<3,int16>;
108
+ using vec4s = vec_t<4,int16>;
109
+ using vec2us = vec_t<2,uint16>;
110
+ using vec3us = vec_t<3,uint16>;
111
+ using vec4us = vec_t<4,uint16>;
112
+
113
+ using vec2i = vec_t<2,int32>;
114
+ using vec3i = vec_t<3,int32>;
115
+ using vec4i = vec_t<4,int32>;
116
+ using vec2ui = vec_t<2,uint32>;
117
+ using vec3ui = vec_t<3,uint32>;
118
+ using vec4ui = vec_t<4,uint32>;
119
+
120
+ using vec2l = vec_t<2,int64>;
121
+ using vec3l = vec_t<3,int64>;
122
+ using vec4l = vec_t<4,int64>;
123
+ using vec2ul = vec_t<2,uint64>;
124
+ using vec3ul = vec_t<3,uint64>;
125
+ using vec4ul = vec_t<4,uint64>;
126
+
127
+ using vec2h = vec_t<2,half>;
128
+ using vec3h = vec_t<3,half>;
129
+ using vec4h = vec_t<4,half>;
130
+
131
+ using vec2 = vec_t<2,float>;
132
+ using vec3 = vec_t<3,float>;
133
+ using vec4 = vec_t<4,float>;
134
+
135
+ using vec2f = vec_t<2,float>;
136
+ using vec3f = vec_t<3,float>;
137
+ using vec4f = vec_t<4,float>;
138
+
139
+ using vec2d = vec_t<2,double>;
140
+ using vec3d = vec_t<3,double>;
141
+ using vec4d = vec_t<4,double>;
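// --- Editor's note: a minimal usage sketch of the constructors above
// (illustrative only; vec_ctor_example is a hypothetical name, not part
// of this release) ---
inline CUDA_CALLABLE void vec_ctor_example()
{
    vec3 zero;                                     // default: zero-initialized via c()
    vec3 ones(1.0f);                               // broadcast: every component set to 1
    vec3 xyz(1.0f, 2.0f, 3.0f);                    // component constructor (asserts Length == 3)
    vec3d as_double(xyz);                          // explicit element-type conversion
    vec_t<6, float> screw(vec3(0.0f, 0.0f, 1.0f),  // screw constructor: angular part w,
                          vec3(1.0f, 0.0f, 0.0f)); // followed by linear part v
    (void)zero; (void)ones; (void)as_double; (void)screw;
}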
142
+
143
+ //--------------
144
+ // vec<Length, Type> methods
145
+
146
+ // These functions take their arguments by value. Since everything is
147
+ // inlined, pass-by-value versus const reference is unlikely to matter,
148
+ // though the trade-off could depend on the vector Length.
149
+
150
+ // negation:
151
+ template<unsigned Length, typename Type>
152
+ inline CUDA_CALLABLE vec_t<Length, Type> operator - (vec_t<Length, Type> a)
153
+ {
154
+ // NB: this constructor will initialize all ret's components to 0, which is
155
+ // unnecessary...
156
+ vec_t<Length, Type> ret;
157
+ for( unsigned i=0; i < Length; ++i )
158
+ {
159
+ ret[i] = -a[i];
160
+ }
161
+
162
+ // The copy on return should be elided (RVO), especially since this is inlined.
163
+ return ret;
164
+ }
165
+
166
+ template<unsigned Length, typename Type>
167
+ CUDA_CALLABLE inline vec_t<Length, Type> pos(const vec_t<Length, Type>& x)
168
+ {
169
+ return x;
170
+ }
171
+
172
+ template<unsigned Length, typename Type>
173
+ CUDA_CALLABLE inline vec_t<Length, Type> neg(const vec_t<Length, Type>& x)
174
+ {
175
+ return -x;
176
+ }
177
+
178
+ template<typename Type>
179
+ CUDA_CALLABLE inline vec_t<3, Type> neg(const vec_t<3, Type>& x)
180
+ {
181
+ return vec_t<3, Type>(-x.c[0], -x.c[1], -x.c[2]);
182
+ }
183
+
184
+ template<typename Type>
185
+ CUDA_CALLABLE inline vec_t<2, Type> neg(const vec_t<2, Type>& x)
186
+ {
187
+ return vec_t<2, Type>(-x.c[0], -x.c[1]);
188
+ }
189
+
190
+ template<unsigned Length, typename Type>
191
+ CUDA_CALLABLE inline void adj_neg(const vec_t<Length, Type>& x, vec_t<Length, Type>& adj_x, const vec_t<Length, Type>& adj_ret)
192
+ {
193
+ adj_x -= adj_ret;
194
+ }
195
+
196
+ // equality:
197
+ template<unsigned Length, typename Type>
198
+ inline CUDA_CALLABLE bool operator ==(const vec_t<Length, Type>& a, const vec_t<Length, Type>& b)
199
+ {
200
+ for( unsigned i=0; i < Length; ++i )
201
+ {
202
+ if(a[i] != b[i])
203
+ {
204
+ return false;
205
+ }
206
+ }
207
+ return true;
208
+ }
209
+
210
+ // scalar multiplication:
211
+ template<unsigned Length, typename Type>
212
+ inline CUDA_CALLABLE vec_t<Length, Type> mul(vec_t<Length, Type> a, Type s)
213
+ {
214
+ vec_t<Length, Type> ret;
215
+ for( unsigned i=0; i < Length; ++i )
216
+ {
217
+ ret[i] = a[i] * s;
218
+ }
219
+ return ret;
220
+ }
221
+
222
+ template<typename Type>
223
+ inline CUDA_CALLABLE vec_t<3, Type> mul(vec_t<3, Type> a, Type s)
224
+ {
225
+ return vec_t<3, Type>(a.c[0]*s,a.c[1]*s,a.c[2]*s);
226
+ }
227
+
228
+ template<typename Type>
229
+ inline CUDA_CALLABLE vec_t<2, Type> mul(vec_t<2, Type> a, Type s)
230
+ {
231
+ return vec_t<2, Type>(a.c[0]*s,a.c[1]*s);
232
+ }
233
+
234
+ template<unsigned Length, typename Type>
235
+ inline CUDA_CALLABLE vec_t<Length, Type> mul(Type s, vec_t<Length, Type> a)
236
+ {
237
+ return mul(a, s);
238
+ }
239
+
240
+ template<unsigned Length, typename Type>
241
+ inline CUDA_CALLABLE vec_t<Length, Type> operator*(Type s, vec_t<Length, Type> a)
242
+ {
243
+ return mul(a, s);
244
+ }
245
+
246
+ template<unsigned Length, typename Type>
247
+ inline CUDA_CALLABLE vec_t<Length, Type> operator*(vec_t<Length, Type> a, Type s)
248
+ {
249
+ return mul(a, s);
250
+ }
251
+
252
+
253
+ // component wise multiplication:
254
+ template<unsigned Length, typename Type>
255
+ inline CUDA_CALLABLE vec_t<Length, Type> cw_mul(vec_t<Length, Type> a, vec_t<Length, Type> b)
256
+ {
257
+ vec_t<Length, Type> ret;
258
+ for( unsigned i=0; i < Length; ++i )
259
+ {
260
+ ret[i] = a[i] * b[i];
261
+ }
262
+ return ret;
263
+ }
264
+
265
+ // division
266
+ template<unsigned Length, typename Type>
267
+ inline CUDA_CALLABLE vec_t<Length, Type> div(vec_t<Length, Type> a, Type s)
268
+ {
269
+ vec_t<Length, Type> ret;
270
+ for( unsigned i=0; i < Length; ++i )
271
+ {
272
+ ret[i] = a[i] / s;
273
+ }
274
+ return ret;
275
+ }
276
+
277
+ template<typename Type>
278
+ inline CUDA_CALLABLE vec_t<3, Type> div(vec_t<3, Type> a, Type s)
279
+ {
280
+ return vec_t<3, Type>(a.c[0]/s,a.c[1]/s,a.c[2]/s);
281
+ }
282
+
283
+ template<typename Type>
284
+ inline CUDA_CALLABLE vec_t<2, Type> div(vec_t<2, Type> a, Type s)
285
+ {
286
+ return vec_t<2, Type>(a.c[0]/s,a.c[1]/s);
287
+ }
288
+
289
+ template<unsigned Length, typename Type>
290
+ inline CUDA_CALLABLE vec_t<Length, Type> div(Type s, vec_t<Length, Type> a)
291
+ {
292
+ vec_t<Length, Type> ret;
293
+ for (unsigned i=0; i < Length; ++i)
294
+ {
295
+ ret[i] = s / a[i];
296
+ }
297
+ return ret;
298
+ }
299
+
300
+ template<typename Type>
301
+ inline CUDA_CALLABLE vec_t<3, Type> div(Type s, vec_t<3, Type> a)
302
+ {
303
+ return vec_t<3, Type>(s/a.c[0],s/a.c[1],s/a.c[2]);
304
+ }
305
+
306
+ template<typename Type>
307
+ inline CUDA_CALLABLE vec_t<2, Type> div(Type s, vec_t<2, Type> a)
308
+ {
309
+ return vec_t<2, Type>(s/a.c[0],s/a.c[1]);
310
+ }
311
+
312
+ template<unsigned Length, typename Type>
313
+ inline CUDA_CALLABLE vec_t<Length, Type> operator / (vec_t<Length, Type> a, Type s)
314
+ {
315
+ return div(a,s);
316
+ }
317
+
318
+ template<unsigned Length, typename Type>
319
+ inline CUDA_CALLABLE vec_t<Length, Type> operator / (Type s, vec_t<Length, Type> a)
320
+ {
321
+ return div(s, a);
322
+ }
323
+
324
+ // component wise division
325
+ template<unsigned Length, typename Type>
326
+ inline CUDA_CALLABLE vec_t<Length, Type> cw_div(vec_t<Length, Type> a, vec_t<Length, Type> b)
327
+ {
328
+ vec_t<Length, Type> ret;
329
+ for( unsigned i=0; i < Length; ++i )
330
+ {
331
+ ret[i] = a[i] / b[i];
332
+ }
333
+ return ret;
334
+ }
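// --- Editor's note: a short sketch contrasting the scalar and
// component-wise operations above (illustrative only; cw_ops_example is
// a hypothetical name) ---
inline CUDA_CALLABLE void cw_ops_example()
{
    vec3 a(2.0f, 4.0f, 8.0f);
    vec3 b(1.0f, 2.0f, 4.0f);
    vec3 s  = mul(a, 0.5f);     // scalar multiply: (1, 2, 4), same as a * 0.5f
    vec3 m  = cw_mul(a, b);     // component-wise product: (2, 8, 32)
    vec3 d  = div(a, 2.0f);     // scalar divide: (1, 2, 4), same as a / 2.0f
    vec3 cd = cw_div(a, b);     // component-wise quotient: (2, 2, 2)
    // note: no vec * vec operator is defined above; use cw_mul() or dot()
    (void)s; (void)m; (void)d; (void)cd;
}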
335
+
336
+ // addition
337
+ template<unsigned Length, typename Type>
338
+ inline CUDA_CALLABLE vec_t<Length, Type> add(vec_t<Length, Type> a, vec_t<Length, Type> b)
339
+ {
340
+ vec_t<Length, Type> ret;
341
+ for( unsigned i=0; i < Length; ++i )
342
+ {
343
+ ret[i] = a[i] + b[i];
344
+ }
345
+ return ret;
346
+ }
347
+
348
+ template<typename Type>
349
+ inline CUDA_CALLABLE vec_t<2, Type> add(vec_t<2, Type> a, vec_t<2, Type> b)
350
+ {
351
+ return vec_t<2, Type>( a.c[0] + b.c[0], a.c[1] + b.c[1]);
352
+ }
353
+
354
+ template<typename Type>
355
+ inline CUDA_CALLABLE vec_t<3, Type> add(vec_t<3, Type> a, vec_t<3, Type> b)
356
+ {
357
+ return vec_t<3, Type>( a.c[0] + b.c[0], a.c[1] + b.c[1], a.c[2] + b.c[2]);
358
+ }
359
+
360
+ // subtraction
361
+ template<unsigned Length, typename Type>
362
+ inline CUDA_CALLABLE vec_t<Length, Type> sub(vec_t<Length, Type> a, vec_t<Length, Type> b)
363
+ {
364
+ vec_t<Length, Type> ret;
365
+ for( unsigned i=0; i < Length; ++i )
366
+ {
367
+ ret[i] = Type(a[i] - b[i]);
368
+ }
369
+ return ret;
370
+ }
371
+
372
+ template<typename Type>
373
+ inline CUDA_CALLABLE vec_t<2, Type> sub(vec_t<2, Type> a, vec_t<2, Type> b)
374
+ {
375
+ return vec_t<2, Type>( a.c[0] - b.c[0], a.c[1] - b.c[1]);
376
+ }
377
+
378
+ template<typename Type>
379
+ inline CUDA_CALLABLE vec_t<3, Type> sub(vec_t<3, Type> a, vec_t<3, Type> b)
380
+ {
381
+ return vec_t<3, Type>( a.c[0] - b.c[0], a.c[1] - b.c[1], a.c[2] - b.c[2]);
382
+ }
383
+
384
+ // dot product:
385
+ template<unsigned Length, typename Type>
386
+ inline CUDA_CALLABLE Type dot(vec_t<Length, Type> a, vec_t<Length, Type> b)
387
+ {
388
+ Type ret(0);
389
+ for( unsigned i=0; i < Length; ++i )
390
+ {
391
+ ret += a[i] * b[i];
392
+ }
393
+ return ret;
394
+ }
395
+
396
+ template<typename Type>
397
+ inline CUDA_CALLABLE Type dot(vec_t<2, Type> a, vec_t<2, Type> b)
398
+ {
399
+ return a.c[0] * b.c[0] + a.c[1] * b.c[1];
400
+ }
401
+
402
+ template<typename Type>
403
+ inline CUDA_CALLABLE Type dot(vec_t<3, Type> a, vec_t<3, Type> b)
404
+ {
405
+ return a.c[0] * b.c[0] + a.c[1] * b.c[1] + a.c[2] * b.c[2];
406
+ }
407
+
408
+ template<unsigned Length, typename Type>
409
+ inline CUDA_CALLABLE Type tensordot(vec_t<Length, Type> a, vec_t<Length, Type> b)
410
+ {
411
+ // corresponds to `np.tensordot()` with all axes being contracted
412
+ return dot(a, b);
413
+ }
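// Editor's note: for 1-D vectors tensordot(a, b) contracts the single
// axis, so it equals dot(a, b); e.g. tensordot on (1, 2) and (3, 4)
// yields 1*3 + 2*4 = 11, matching np.tensordot on 1-D arrays.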
414
+
415
+
416
+ template<unsigned Length, typename Type>
417
+ inline CUDA_CALLABLE Type extract(const vec_t<Length, Type> & a, int idx)
418
+ {
419
+ #ifndef NDEBUG
420
+ if (idx < 0 || idx >= Length)
421
+ {
422
+ printf("vec index %d out of bounds at %s %d\n", idx, __FILE__, __LINE__);
423
+ assert(0);
424
+ }
425
+ #endif
426
+
427
+ return a[idx];
428
+ }
429
+
430
+ template<unsigned Length, typename Type>
431
+ inline CUDA_CALLABLE Type* index(vec_t<Length, Type>& v, int idx)
432
+ {
433
+ #ifndef NDEBUG
434
+ if (idx < 0 || idx >= Length)
435
+ {
436
+ printf("vec index %d out of bounds at %s %d\n", idx, __FILE__, __LINE__);
437
+ assert(0);
438
+ }
439
+ #endif
440
+
441
+ return &v[idx];
442
+ }
443
+
444
+ template<unsigned Length, typename Type>
445
+ inline CUDA_CALLABLE Type* indexref(vec_t<Length, Type>* v, int idx)
446
+ {
447
+ #ifndef NDEBUG
448
+ if (idx < 0 || idx >= Length)
449
+ {
450
+ printf("vec store %d out of bounds at %s %d\n", idx, __FILE__, __LINE__);
451
+ assert(0);
452
+ }
453
+ #endif
454
+
455
+ return &((*v)[idx]);
456
+ }
457
+
458
+ template<unsigned Length, typename Type>
459
+ inline CUDA_CALLABLE void adj_index(vec_t<Length, Type>& v, int idx,
460
+ vec_t<Length, Type>& adj_v, int adj_idx, const Type& adj_value)
461
+ {
462
+ // nop
463
+ }
464
+
465
+
466
+ template<unsigned Length, typename Type>
467
+ inline CUDA_CALLABLE void adj_indexref(vec_t<Length, Type>* v, int idx,
468
+ vec_t<Length, Type>& adj_v, int adj_idx, const Type& adj_value)
469
+ {
470
+ // nop
471
+ }
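// Editor's note: the #ifndef NDEBUG blocks above mean out-of-bounds
// access through extract()/index()/indexref() is reported (printf plus
// assert) only in debug builds; release builds perform no bounds check.
// For example, extract(vec3(), 3) in a debug build prints
// "vec index 3 out of bounds ..." and asserts.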
472
+
473
+
474
+ template<unsigned Length, typename Type>
475
+ inline CUDA_CALLABLE Type length(vec_t<Length, Type> a)
476
+ {
477
+ return sqrt(dot(a, a));
478
+ }
479
+
480
+ template<unsigned Length, typename Type>
481
+ inline CUDA_CALLABLE Type length_sq(vec_t<Length, Type> a)
482
+ {
483
+ return dot(a, a);
484
+ }
485
+
486
+
487
+ template<typename Type>
488
+ inline CUDA_CALLABLE Type length(vec_t<2, Type> a)
489
+ {
490
+ return sqrt(a.c[0] * a.c[0] + a.c[1] * a.c[1]);
491
+ }
492
+
493
+ template<typename Type>
494
+ inline CUDA_CALLABLE Type length(vec_t<3, Type> a)
495
+ {
496
+ return sqrt(a.c[0] * a.c[0] + a.c[1] * a.c[1] + a.c[2] * a.c[2]);
497
+ }
498
+
499
+ template<unsigned Length, typename Type>
500
+ inline CUDA_CALLABLE vec_t<Length, Type> normalize(vec_t<Length, Type> a)
501
+ {
502
+ Type l = length(a);
503
+ if (l > Type(kEps))
504
+ return div(a,l);
505
+ else
506
+ return vec_t<Length, Type>();
507
+ }
508
+
509
+ template<typename Type>
510
+ inline CUDA_CALLABLE vec_t<2, Type> normalize(vec_t<2, Type> a)
511
+ {
512
+ Type l = sqrt(a.c[0] * a.c[0] + a.c[1] * a.c[1]);
513
+ if (l > Type(kEps))
514
+ return vec_t<2, Type>(a.c[0]/l,a.c[1]/l);
515
+ else
516
+ return vec_t<2, Type>();
517
+ }
518
+
519
+ template<typename Type>
520
+ inline CUDA_CALLABLE vec_t<3, Type> normalize(vec_t<3, Type> a)
521
+ {
522
+ Type l = sqrt(a.c[0] * a.c[0] + a.c[1] * a.c[1] + a.c[2] * a.c[2]);
523
+ if (l > Type(kEps))
524
+ return vec_t<3, Type>(a.c[0]/l,a.c[1]/l,a.c[2]/l);
525
+ else
526
+ return vec_t<3, Type>();
527
+ }
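// --- Editor's note: normalize() guards against (near-)zero length with
// kEps, returning the zero vector instead of dividing by zero. A caller
// that needs a guaranteed unit vector can supply a fallback; a minimal
// sketch (normalize_or_fallback is a hypothetical helper, not part of
// this release): ---
inline CUDA_CALLABLE vec3 normalize_or_fallback(const vec3& v, const vec3& fallback)
{
    return (length(v) > float(kEps)) ? normalize(v) : fallback;
}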
528
+
529
+
530
+ template<typename Type>
531
+ inline CUDA_CALLABLE vec_t<3,Type> cross(vec_t<3,Type> a, vec_t<3,Type> b)
532
+ {
533
+ return {
534
+ Type(a[1]*b[2] - a[2]*b[1]),
535
+ Type(a[2]*b[0] - a[0]*b[2]),
536
+ Type(a[0]*b[1] - a[1]*b[0])
537
+ };
538
+ }
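// Editor's note: cross() above follows the right-handed convention;
// e.g. cross(vec3(1,0,0), vec3(0,1,0)) evaluates to vec3(0,0,1).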
539
+
540
+
541
+ template<unsigned Length, typename Type>
542
+ inline bool CUDA_CALLABLE isfinite(vec_t<Length, Type> x)
543
+ {
544
+ for( unsigned i=0; i < Length; ++i )
545
+ {
546
+ if(!isfinite(x[i]))
547
+ {
548
+ return false;
549
+ }
550
+ }
551
+ return true;
552
+ }
553
+
554
+ template<unsigned Length, typename Type>
555
+ inline bool CUDA_CALLABLE isnan(vec_t<Length, Type> x)
556
+ {
557
+ for( unsigned i=0; i < Length; ++i )
558
+ {
559
+ if(isnan(x[i]))
560
+ {
561
+ return true;
562
+ }
563
+ }
564
+ return false;
565
+ }
566
+
567
+ template<unsigned Length, typename Type>
568
+ inline bool CUDA_CALLABLE isinf(vec_t<Length, Type> x)
569
+ {
570
+ for( unsigned i=0; i < Length; ++i )
571
+ {
572
+ if(isinf(x[i]))
573
+ {
574
+ return true;
575
+ }
576
+ }
577
+ return false;
578
+ }
579
+
580
+ // These two functions seem to compile very slowly
581
+ template<unsigned Length, typename Type>
582
+ inline CUDA_CALLABLE vec_t<Length,Type> min(vec_t<Length,Type> a, vec_t<Length,Type> b)
583
+ {
584
+ vec_t<Length,Type> ret;
585
+ for( unsigned i=0; i < Length; ++i )
586
+ {
587
+ ret[i] = a[i] < b[i] ? a[i] : b[i];
588
+ }
589
+ return ret;
590
+ }
591
+
592
+ template<unsigned Length, typename Type>
593
+ inline CUDA_CALLABLE vec_t<Length,Type> max(vec_t<Length,Type> a, vec_t<Length,Type> b)
594
+ {
595
+ vec_t<Length,Type> ret;
596
+ for( unsigned i=0; i < Length; ++i )
597
+ {
598
+ ret[i] = a[i] > b[i] ? a[i] : b[i];
599
+ }
600
+ return ret;
601
+ }
602
+
603
+ template<unsigned Length, typename Type>
604
+ inline CUDA_CALLABLE Type min(vec_t<Length,Type> v)
605
+ {
606
+ Type ret = v[0];
607
+ for( unsigned i=1; i < Length; ++i )
608
+ {
609
+ if (v[i] < ret)
610
+ ret = v[i];
611
+ }
612
+ return ret;
613
+ }
614
+
615
+ template<unsigned Length, typename Type>
616
+ inline CUDA_CALLABLE Type max(vec_t<Length,Type> v)
617
+ {
618
+ Type ret = v[0];
619
+ for( unsigned i=1; i < Length; ++i )
620
+ {
621
+ if (v[i] > ret)
622
+ ret = v[i];
623
+ }
624
+ return ret;
625
+ }
626
+
627
+ template<unsigned Length, typename Type>
628
+ inline CUDA_CALLABLE unsigned argmin(vec_t<Length,Type> v)
629
+ {
630
+ unsigned ret = 0;
631
+ for( unsigned i=1; i < Length; ++i )
632
+ {
633
+ if (v[i] < v[ret])
634
+ ret = i;
635
+ }
636
+ return ret;
637
+ }
638
+
639
+ template<unsigned Length, typename Type>
640
+ inline CUDA_CALLABLE unsigned argmax(vec_t<Length,Type> v)
641
+ {
642
+ unsigned ret = 0;
643
+ for( unsigned i=1; i < Length; ++i )
644
+ {
645
+ if (v[i] > v[ret])
646
+ ret = i;
647
+ }
648
+ return ret;
649
+ }
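// --- Editor's note: min/max above are overloaded by arity: the
// two-argument forms are component-wise, the one-argument forms reduce
// to a scalar. Sketch (illustrative only; minmax_example is a
// hypothetical name): ---
inline CUDA_CALLABLE void minmax_example()
{
    vec3 a(1.0f, 5.0f, 3.0f);
    vec3 b(4.0f, 2.0f, 6.0f);
    vec3 lo = min(a, b);     // component-wise: (1, 2, 3)
    vec3 hi = max(a, b);     // component-wise: (4, 5, 6)
    float m = min(a);        // reduction over components: 1
    unsigned k = argmax(a);  // index of the largest component: 1
    (void)lo; (void)hi; (void)m; (void)k;
}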
650
+
651
+ template<unsigned Length, typename Type>
652
+ inline CUDA_CALLABLE void expect_near(const vec_t<Length, Type>& actual, const vec_t<Length, Type>& expected, const Type& tolerance)
653
+ {
654
+ Type diff(0);
655
+ for(size_t i=0; i<Length; ++i)
656
+ {
657
+ diff = max(diff,abs(actual[i] - expected[i]));
658
+ }
659
+ if (diff > tolerance)
660
+ {
661
+ printf("Error, expect_near() failed with tolerance "); print(tolerance);
662
+ printf("\t Expected: "); print(expected);
663
+ printf("\t Actual: "); print(actual);
664
+ }
665
+ }
666
+
667
+ template<unsigned Length, typename Type>
668
+ inline CUDA_CALLABLE void adj_expect_near(const vec_t<Length, Type>& actual, const vec_t<Length, Type>& expected, Type tolerance, vec_t<Length, Type>& adj_actual, vec_t<Length, Type>& adj_expected, Type adj_tolerance)
669
+ {
670
+ // nop
671
+ }
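// Editor's note on the adj_* convention used throughout this file: for a
// forward function ret = f(args), the reverse-mode adjoint
// adj_f(args..., adj_args..., adj_ret) receives the incoming gradient
// adj_ret = dL/d(ret) and accumulates dL/d(arg) into each adj_arg with
// +=, so a variable feeding several operations sums its gradient
// contributions. Predicates such as isfinite() have empty ("nop")
// adjoints since they carry no gradient.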
672
+
673
+ // adjoint for the initializer_array constructor:
674
+ template<unsigned Length, typename Type>
675
+ inline CUDA_CALLABLE void adj_vec_t(const initializer_array<Length, Type> &cmps, const initializer_array<Length, Type*> &adj_cmps, const vec_t<Length, Type>& adj_ret)
676
+ {
677
+ for(unsigned i=0; i < Length; ++i)
678
+ {
679
+ *(adj_cmps[i]) += adj_ret[i];
680
+ }
681
+ }
682
+
683
+
684
+ // adjoint for the component constructors:
685
+ template<typename Type>
686
+ inline CUDA_CALLABLE void adj_vec_t(Type cmpx, Type cmpy, Type &adj_cmpx, Type &adj_cmpy, const vec_t<2, Type>& adj_ret)
687
+ {
688
+ adj_cmpx += adj_ret.c[0];
689
+ adj_cmpy += adj_ret.c[1];
690
+ }
691
+
692
+ template<typename Type>
693
+ inline CUDA_CALLABLE void adj_vec_t(Type cmpx, Type cmpy, Type cmpz, Type &adj_cmpx, Type &adj_cmpy, Type &adj_cmpz, const vec_t<3, Type>& adj_ret)
694
+ {
695
+ adj_cmpx += adj_ret.c[0];
696
+ adj_cmpy += adj_ret.c[1];
697
+ adj_cmpz += adj_ret.c[2];
698
+ }
699
+
700
+ template<typename Type>
701
+ inline CUDA_CALLABLE void adj_vec_t(Type cmpx, Type cmpy, Type cmpz, Type cmpw, Type &adj_cmpx, Type &adj_cmpy, Type &adj_cmpz, Type &adj_cmpw, const vec_t<4, Type>& adj_ret)
702
+ {
703
+ adj_cmpx += adj_ret.c[0];
704
+ adj_cmpy += adj_ret.c[1];
705
+ adj_cmpz += adj_ret.c[2];
706
+ adj_cmpw += adj_ret.c[3];
707
+ }
708
+
709
+ // adjoint for the constant constructor:
710
+ template<unsigned Length, typename Type>
711
+ inline CUDA_CALLABLE void adj_vec_t(Type s, Type& adj_s, const vec_t<Length, Type>& adj_ret)
712
+ {
713
+ for( unsigned i=0; i < Length; ++i )
714
+ {
715
+ adj_s += adj_ret[i];
716
+ }
717
+ }
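// Editor's worked example for the adjoint above: if v = vec3(s) then
// v[i] = s for every i, so dL/ds = sum_i dL/dv[i]; with
// adj_ret = (1, 2, 3) the loop accumulates adj_s += 1 + 2 + 3 = 6.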
718
+
719
+ // adjoint for the casting constructor
720
+ template<unsigned Length, typename Type, typename OtherType>
721
+ inline CUDA_CALLABLE void adj_vec_t(const vec_t<Length, OtherType>& other, vec_t<Length, OtherType>& adj_other, const vec_t<Length, Type>& adj_ret)
722
+ {
723
+ for( unsigned i=0; i < Length; ++i )
724
+ {
725
+ adj_other[i] += static_cast<OtherType>(adj_ret[i]);
726
+ }
727
+ }
728
+
729
+ template<typename Type>
730
+ CUDA_CALLABLE inline void adj_vec_t(const vec_t<3,Type>& w, const vec_t<3,Type>& v, vec_t<3,Type>& adj_w, vec_t<3,Type>& adj_v, const vec_t<6,Type>& adj_ret)
731
+ {
732
+ adj_w[0] += adj_ret[0];
733
+ adj_w[1] += adj_ret[1];
734
+ adj_w[2] += adj_ret[2];
735
+ adj_v[0] += adj_ret[3];
736
+ adj_v[1] += adj_ret[4];
737
+ adj_v[2] += adj_ret[5];
738
+ }
739
+
740
+ template<unsigned Length, typename Type>
741
+ inline CUDA_CALLABLE void adj_mul(vec_t<Length, Type> a, Type s, vec_t<Length, Type>& adj_a, Type& adj_s, const vec_t<Length, Type>& adj_ret)
742
+ {
743
+ for( unsigned i=0; i < Length; ++i )
744
+ {
745
+ adj_a[i] += s*adj_ret[i];
746
+ }
747
+
748
+ adj_s += dot(a, adj_ret);
749
+
750
+ #if FP_CHECK
751
+ if (!isfinite(a) || !isfinite(s) || !isfinite(adj_a) || !isfinite(adj_s) || !isfinite(adj_ret))
752
+ {
753
+ // TODO: How should we implement this error message?
754
+ //printf("adj_mul((%f %f %f %f), %f, (%f %f %f %f), %f, (%f %f %f %f)\n", a.x, a.y, a.z, a.w, s, adj_a.x, adj_a.y, adj_a.z, adj_a.w, adj_s, adj_ret.x, adj_ret.y, adj_ret.z, adj_ret.w);
755
+ assert(0);
756
+ }
757
+ #endif
758
+ }
759
+
760
+ template<unsigned Length, typename Type>
761
+ inline CUDA_CALLABLE void adj_mul(Type s, vec_t<Length, Type> a, Type& adj_s, vec_t<Length, Type>& adj_a, const vec_t<Length, Type>& adj_ret)
762
+ {
763
+ adj_mul(a, s, adj_a, adj_s, adj_ret);
764
+ }
765
+
766
+ template<unsigned Length, typename Type>
767
+ inline CUDA_CALLABLE void adj_cw_mul(vec_t<Length, Type> a, vec_t<Length, Type> b, vec_t<Length, Type>& adj_a, vec_t<Length, Type>& adj_b, const vec_t<Length, Type>& adj_ret)
768
+ {
769
+ adj_a += cw_mul(b, adj_ret);
770
+ adj_b += cw_mul(a, adj_ret);
771
+ }
772
+
773
+ template<unsigned Length, typename Type>
774
+ inline CUDA_CALLABLE void adj_div(vec_t<Length, Type> a, Type s, vec_t<Length, Type>& adj_a, Type& adj_s, const vec_t<Length, Type>& adj_ret)
775
+ {
776
+
777
+ adj_s -= dot(a , adj_ret)/ (s * s); // - a / s^2
778
+
779
+ for( unsigned i=0; i < Length; ++i )
780
+ {
781
+ adj_a[i] += adj_ret[i] / s;
782
+ }
783
+
784
+ #if FP_CHECK
785
+ if (!isfinite(a) || !isfinite(s) || !isfinite(adj_a) || !isfinite(adj_s) || !isfinite(adj_ret))
786
+ {
787
+ // TODO: How should we implement this error message?
788
+ // printf("adj_div((%f %f %f %f), %f, (%f %f %f %f), %f, (%f %f %f %f)\n", a.x, a.y, a.z, a.w, s, adj_a.x, adj_a.y, adj_a.z, adj_a.w, adj_s, adj_ret.x, adj_ret.y, adj_ret.z, adj_ret.w);
789
+ assert(0);
790
+ }
791
+ #endif
792
+ }
793
+
794
+ template<unsigned Length, typename Type>
795
+ inline CUDA_CALLABLE void adj_div(Type s, vec_t<Length, Type> a, Type& adj_s, vec_t<Length, Type>& adj_a, const vec_t<Length, Type>& adj_ret)
796
+ {
797
+
798
+ adj_s += dot(adj_ret, div(Type(1), a)); // d(s/a_i)/ds = 1/a_i
799
+
800
+ for( unsigned i=0; i < Length; ++i )
801
+ {
802
+ adj_a[i] -= s * adj_ret[i] / (a[i] * a[i]); // d(s/a_i)/da_i = -s/a_i^2
803
+ }
804
+
805
+ #if FP_CHECK
806
+ if (!isfinite(a) || !isfinite(s) || !isfinite(adj_a) || !isfinite(adj_s) || !isfinite(adj_ret))
807
+ {
808
+ // TODO: How should we implement this error message?
809
+ // printf("adj_div((%f %f %f %f), %f, (%f %f %f %f), %f, (%f %f %f %f)\n", a.x, a.y, a.z, a.w, s, adj_a.x, adj_a.y, adj_a.z, adj_a.w, adj_s, adj_ret.x, adj_ret.y, adj_ret.z, adj_ret.w);
810
+ assert(0);
811
+ }
812
+ #endif
813
+ }
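// --- Editor's note: a minimal finite-difference check for the adjoint
// above (host-side sketch; check_adj_div is a hypothetical test name,
// assuming assert/fabsf are available as elsewhere in this file) ---
inline void check_adj_div()
{
    const float s = 2.0f, eps = 1e-3f;
    vec3 a(1.0f, 2.0f, 4.0f);
    vec3 adj_ret(1.0f, 1.0f, 1.0f);        // upstream gradient of all ones

    float adj_s = 0.0f;
    vec3 adj_a;                            // zero-initialized by default ctor
    adj_div(s, a, adj_s, adj_a, adj_ret);  // analytic adjoint

    // numeric derivative of f(s) = dot(s / a, adj_ret) via central differences
    float fp = dot(div(s + eps, a), adj_ret);
    float fm = dot(div(s - eps, a), adj_ret);
    assert(fabsf(adj_s - (fp - fm) / (2.0f * eps)) < 1e-2f);
}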
814
+
815
+ template<unsigned Length, typename Type>
816
+ inline CUDA_CALLABLE void adj_cw_div(vec_t<Length, Type> a, vec_t<Length, Type> b, vec_t<Length, Type>& ret, vec_t<Length, Type>& adj_a, vec_t<Length, Type>& adj_b, const vec_t<Length, Type>& adj_ret) {
817
+ adj_a += cw_div(adj_ret, b);
818
+ adj_b -= cw_mul(adj_ret, cw_div(ret, b));
819
+ }
820
+
821
+ template<unsigned Length, typename Type>
822
+ inline CUDA_CALLABLE void adj_add(vec_t<Length, Type> a, vec_t<Length, Type> b, vec_t<Length, Type>& adj_a, vec_t<Length, Type>& adj_b, const vec_t<Length, Type>& adj_ret)
823
+ {
824
+ adj_a += adj_ret;
825
+ adj_b += adj_ret;
826
+ }
827
+
828
+ template<typename Type>
829
+ inline CUDA_CALLABLE void adj_add(vec_t<2, Type> a, vec_t<2, Type> b, vec_t<2, Type>& adj_a, vec_t<2, Type>& adj_b, const vec_t<2, Type>& adj_ret)
830
+ {
831
+ adj_a.c[0] += adj_ret.c[0];
832
+ adj_a.c[1] += adj_ret.c[1];
833
+ adj_b.c[0] += adj_ret.c[0];
834
+ adj_b.c[1] += adj_ret.c[1];
835
+ }
836
+
837
+ template<typename Type>
838
+ inline CUDA_CALLABLE void adj_add(vec_t<3, Type> a, vec_t<3, Type> b, vec_t<3, Type>& adj_a, vec_t<3, Type>& adj_b, const vec_t<3, Type>& adj_ret)
839
+ {
840
+ adj_a.c[0] += adj_ret.c[0];
841
+ adj_a.c[1] += adj_ret.c[1];
842
+ adj_a.c[2] += adj_ret.c[2];
843
+ adj_b.c[0] += adj_ret.c[0];
844
+ adj_b.c[1] += adj_ret.c[1];
845
+ adj_b.c[2] += adj_ret.c[2];
846
+ }
847
+
848
+ template<unsigned Length, typename Type>
849
+ inline CUDA_CALLABLE void adj_sub(vec_t<Length, Type> a, vec_t<Length, Type> b, vec_t<Length, Type>& adj_a, vec_t<Length, Type>& adj_b, const vec_t<Length, Type>& adj_ret)
850
+ {
851
+ adj_a += adj_ret;
852
+ adj_b -= adj_ret;
853
+ }
854
+
855
+ template<typename Type>
856
+ inline CUDA_CALLABLE void adj_sub(vec_t<2, Type> a, vec_t<2, Type> b, vec_t<2, Type>& adj_a, vec_t<2, Type>& adj_b, const vec_t<2, Type>& adj_ret)
857
+ {
858
+ adj_a.c[0] += adj_ret.c[0];
859
+ adj_a.c[1] += adj_ret.c[1];
860
+ adj_b.c[0] -= adj_ret.c[0];
861
+ adj_b.c[1] -= adj_ret.c[1];
862
+ }
863
+
864
+ template<typename Type>
865
+ inline CUDA_CALLABLE void adj_sub(vec_t<3, Type> a, vec_t<3, Type> b, vec_t<3, Type>& adj_a, vec_t<3, Type>& adj_b, const vec_t<3, Type>& adj_ret)
866
+ {
867
+ adj_a.c[0] += adj_ret.c[0];
868
+ adj_a.c[1] += adj_ret.c[1];
869
+ adj_a.c[2] += adj_ret.c[2];
870
+ adj_b.c[0] -= adj_ret.c[0];
871
+ adj_b.c[1] -= adj_ret.c[1];
872
+ adj_b.c[2] -= adj_ret.c[2];
873
+ }
874
+
875
+ template<unsigned Length, typename Type>
876
+ inline CUDA_CALLABLE void adj_dot(vec_t<Length, Type> a, vec_t<Length, Type> b, vec_t<Length, Type>& adj_a, vec_t<Length, Type>& adj_b, const Type adj_ret)
877
+ {
878
+ adj_a += b*adj_ret;
879
+ adj_b += a*adj_ret;
880
+
881
+ #if FP_CHECK
882
+ if (!isfinite(a) || !isfinite(b) || !isfinite(adj_a) || !isfinite(adj_b) || !isfinite(adj_ret))
883
+ {
884
+ // TODO: How should we implement this error message?
885
+ //printf("adj_dot((%f %f %f %f), (%f %f %f %f), (%f %f %f %f), (%f %f %f %f), %f)\n", a.x, a.y, a.z, a.w, b.x, b.y, b.z, b.w, adj_a.x, adj_a.y, adj_a.z, adj_a.w, adj_b.x, adj_b.y, adj_b.z, adj_b.w, adj_ret);
886
+ assert(0);
887
+ }
888
+ #endif
889
+ }
890
+
891
+
892
+
893
+ template<typename Type>
894
+ inline CUDA_CALLABLE void adj_dot(vec_t<2, Type> a, vec_t<2, Type> b, vec_t<2, Type>& adj_a, vec_t<2, Type>& adj_b, const Type adj_ret)
895
+ {
896
+ adj_a.c[0] += b.c[0]*adj_ret;
897
+ adj_a.c[1] += b.c[1]*adj_ret;
898
+
899
+ adj_b.c[0] += a.c[0]*adj_ret;
900
+ adj_b.c[1] += a.c[1]*adj_ret;
901
+ }
902
+
903
+ template<typename Type>
904
+ inline CUDA_CALLABLE void adj_dot(vec_t<3, Type> a, vec_t<3, Type> b, vec_t<3, Type>& adj_a, vec_t<3, Type>& adj_b, const Type adj_ret)
905
+ {
906
+ adj_a.c[0] += b.c[0]*adj_ret;
907
+ adj_a.c[1] += b.c[1]*adj_ret;
908
+ adj_a.c[2] += b.c[2]*adj_ret;
909
+
910
+ adj_b.c[0] += a.c[0]*adj_ret;
911
+ adj_b.c[1] += a.c[1]*adj_ret;
912
+ adj_b.c[2] += a.c[2]*adj_ret;
913
+ }
914
+
915
+
916
+ template<unsigned Length, typename Type>
917
+ inline CUDA_CALLABLE void adj_extract(const vec_t<Length, Type> & a, int idx, vec_t<Length, Type> & adj_a, int & adj_idx, Type & adj_ret)
918
+ {
919
+ #ifndef NDEBUG
920
+ if (idx < 0 || idx >= Length)
921
+ {
922
+ printf("Tvec2<Scalar> index %d out of bounds at %s %d\n", idx, __FILE__, __LINE__);
923
+ assert(0);
924
+ }
925
+ #endif
926
+
927
+ adj_a[idx] += adj_ret;
928
+ }
929
+
930
+ template<unsigned Length, typename Type>
931
+ inline CUDA_CALLABLE void adj_length(vec_t<Length, Type> a, Type ret, vec_t<Length, Type>& adj_a, const Type adj_ret)
932
+ {
933
+ if (ret > Type(kEps))
934
+ {
935
+ adj_a += div(a, ret) * adj_ret;
936
+ }
937
+
938
+ #if FP_CHECK
939
+ if (!isfinite(adj_a))
940
+ {
941
+ // TODO: How should we implement this error message?
942
+ //printf("%s:%d - adj_length((%f %f %f %f), (%f %f %f %f), (%f))\n", __FILE__, __LINE__, a.x, a.y, a.z, a.w, adj_a.x, adj_a.y, adj_a.z, adj_a.w, adj_ret);
943
+ assert(0);
944
+ }
945
+ #endif
946
+ }
947
+
948
+ template<unsigned Length, typename Type>
949
+ inline CUDA_CALLABLE void adj_length_sq(vec_t<Length, Type> a, vec_t<Length, Type>& adj_a, const Type adj_ret)
950
+ {
951
+ adj_a += Type(2.0)*a*adj_ret;
952
+
953
+ #if FP_CHECK
954
+ if (!isfinite(adj_a))
955
+ {
956
+ // TODO: How should we implement this error message?
957
+ //printf("%s:%d - adj_length((%f %f %f %f), (%f %f %f %f), (%f))\n", __FILE__, __LINE__, a.x, a.y, a.z, a.w, adj_a.x, adj_a.y, adj_a.z, adj_a.w, adj_ret);
958
+ assert(0);
959
+ }
960
+ #endif
961
+ }
962
+
963
+ template<unsigned Length, typename Type>
964
+ inline CUDA_CALLABLE void adj_normalize(vec_t<Length, Type> a, vec_t<Length, Type>& ret, vec_t<Length, Type>& adj_a, const vec_t<Length, Type>& adj_ret)
965
+ {
966
+ Type d = length(a);
967
+
968
+ if (d > Type(kEps))
969
+ {
970
+ Type invd = Type(1.0f)/d;
971
+
972
+ adj_a += (adj_ret*invd - ret*(dot(ret, adj_ret))*invd);
973
+
974
+ #if FP_CHECK
975
+ if (!isfinite(adj_a))
976
+ {
977
+ // TODO: How should we implement this error message?
978
+ //printf("%s:%d - adj_normalize((%f %f %f %f), (%f %f %f %f), (%f, %f, %f, %f))\n", __FILE__, __LINE__, a.x, a.y, a.z, a.w, adj_a.x, adj_a.y, adj_a.z, adj_a.w, adj_ret.x, adj_ret.y, adj_ret.z, adj_ret.w);
979
+ assert(0);
980
+ }
981
+ #endif
982
+ }
983
+ }
984
+
985
+ template<typename Type>
986
+ inline CUDA_CALLABLE void adj_cross(vec_t<3,Type> a, vec_t<3,Type> b, vec_t<3,Type>& adj_a, vec_t<3,Type>& adj_b, const vec_t<3,Type>& adj_ret)
987
+ {
988
+ // sign: adj_b accumulates cross(adj_ret, a) = -cross(a, adj_ret), hence the minus
989
+ adj_a += cross(b, adj_ret);
990
+ adj_b -= cross(a, adj_ret);
991
+ }
992
+
993
+ template<unsigned Length, typename Type>
994
+ inline CUDA_CALLABLE void adj_isfinite(const vec_t<Length, Type> &x, vec_t<Length,Type>& adj_x, const bool &adj_ret)
995
+ {
996
+
997
+ }
998
+
999
+ template<unsigned Length, typename Type>
1000
+ inline CUDA_CALLABLE void adj_isnan(const vec_t<Length, Type> &x, vec_t<Length,Type>& adj_x, const bool &adj_ret)
1001
+ {
1002
+
1003
+ }
1004
+
1005
+ template<unsigned Length, typename Type>
1006
+ inline CUDA_CALLABLE void adj_isinf(const vec_t<Length, Type> &x, vec_t<Length,Type>& adj_x, const bool &adj_ret)
1007
+ {
1008
+
1009
+ }
1010
+
1011
+ template<unsigned Length, typename Type>
1012
+ inline CUDA_CALLABLE void adj_min(const vec_t<Length,Type> &a, const vec_t<Length,Type> &b, vec_t<Length,Type>& adj_a, vec_t<Length,Type>& adj_b, const vec_t<Length,Type> &adj_ret)
1013
+ {
1014
+ for( unsigned i=0; i < Length; ++i )
1015
+ {
1016
+ if (a[i] < b[i])
1017
+ adj_a[i] += adj_ret[i];
1018
+ else
1019
+ adj_b[i] += adj_ret[i];
1020
+ }
1021
+ }
1022
+
1023
+ template<unsigned Length, typename Type>
1024
+ inline CUDA_CALLABLE void adj_max(const vec_t<Length,Type> &a, const vec_t<Length,Type> &b, vec_t<Length,Type>& adj_a, vec_t<Length,Type>& adj_b, const vec_t<Length,Type> &adj_ret)
1025
+ {
1026
+ for( unsigned i=0; i < Length; ++i )
1027
+ {
1028
+ if (a[i] > b[i])
1029
+ adj_a[i] += adj_ret[i];
1030
+ else
1031
+ adj_b[i] += adj_ret[i];
1032
+ }
1033
+ }
1034
+
1035
+ template<unsigned Length, typename Type>
1036
+ inline CUDA_CALLABLE void adj_min(const vec_t<Length,Type> &v, vec_t<Length,Type>& adj_v, const Type &adj_ret)
1037
+ {
1038
+ unsigned i = argmin(v);
1039
+ adj_v[i] += adj_ret;
1040
+ }
1041
+
1042
+ template<unsigned Length, typename Type>
1043
+ inline CUDA_CALLABLE void adj_max(const vec_t<Length,Type> &v, vec_t<Length,Type>& adj_v, const Type &adj_ret)
1044
+ {
1045
+ unsigned i = argmax(v);
1046
+ adj_v[i] += adj_ret;
1047
+ }
1048
+
1049
+ // These could be specialized for Length 2/3 like the arithmetic ops above, if profiling warrants it.
1050
+ template<unsigned Length, typename Type>
1051
+ inline CUDA_CALLABLE vec_t<Length, Type> atomic_add(vec_t<Length, Type> * addr, vec_t<Length, Type> value)
1052
+ {
1053
+ vec_t<Length, Type> ret;
1054
+ for( unsigned i=0; i < Length; ++i )
1055
+ {
1056
+ ret[i] = atomic_add(&(addr -> c[i]), value[i]);
1057
+ }
1058
+
1059
+ return ret;
1060
+ }
1061
+
1062
+ template<unsigned Length, typename Type>
1063
+ inline CUDA_CALLABLE vec_t<Length, Type> atomic_min(vec_t<Length, Type> * addr, vec_t<Length, Type> value)
1064
+ {
1065
+ vec_t<Length, Type> ret;
1066
+ for( unsigned i=0; i < Length; ++i )
1067
+ {
1068
+ ret[i] = atomic_min(&(addr -> c[i]), value[i]);
1069
+ }
1070
+
1071
+ return ret;
1072
+ }
1073
+
1074
+ template<unsigned Length, typename Type>
1075
+ inline CUDA_CALLABLE vec_t<Length, Type> atomic_max(vec_t<Length, Type> * addr, vec_t<Length, Type> value)
1076
+ {
1077
+ vec_t<Length, Type> ret;
1078
+ for( unsigned i=0; i < Length; ++i )
1079
+ {
1080
+ ret[i] = atomic_max(&(addr -> c[i]), value[i]);
1081
+ }
1082
+
1083
+ return ret;
1084
+ }
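// Editor's note: the vector atomics above apply the scalar atomic to each
// component independently and return the vector of previous per-component
// values; there is no atomicity across the components as a group.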
1085
+
1086
+ template<unsigned Length, typename Type>
1087
+ inline CUDA_CALLABLE void adj_atomic_minmax(
1088
+ vec_t<Length,Type> *addr,
1089
+ vec_t<Length,Type> *adj_addr,
1090
+ const vec_t<Length,Type> &value,
1091
+ vec_t<Length,Type> &adj_value)
1092
+ {
1093
+ for (unsigned i=0; i < Length; ++i)
1094
+ adj_atomic_minmax(&(addr->c[i]), &(adj_addr->c[i]), value[i], adj_value[i]);
1095
+ }
1096
+
1097
+ // Note: the original implementation did not take absolute values, which
1099
+ // is surprising behavior. That is harmless for its current use on
1100
+ // bounding-box extents (always non-negative), but matters for signed
1101
+ // inputs such as ray directions, so abs() is applied here. Whether the
1102
+ // abs() incurs a measurable performance cost is untested.
1102
+ template<unsigned Length, typename Type>
1103
+ CUDA_CALLABLE inline int longest_axis(const vec_t<Length, Type>& v)
1104
+ {
1105
+ Type lmax = abs(v[0]);
1106
+ int ret(0);
1107
+ for( unsigned i=1; i < Length; ++i )
1108
+ {
1109
+ Type l = abs(v[i]);
1110
+ if( l > lmax )
1111
+ {
1112
+ ret = i;
1113
+ lmax = l;
1114
+ }
1115
+ }
1116
+ return ret;
1117
+ }
1118
+
1119
+ template<unsigned Length, typename Type>
1120
+ CUDA_CALLABLE inline vec_t<Length,Type> lerp(const vec_t<Length,Type>& a, const vec_t<Length,Type>& b, Type t)
1121
+ {
1122
+ return a*(Type(1)-t) + b*t;
1123
+ }
1124
+
1125
+ template<unsigned Length, typename Type>
1126
+ CUDA_CALLABLE inline void adj_lerp(const vec_t<Length,Type>& a, const vec_t<Length,Type>& b, Type t, vec_t<Length,Type>& adj_a, vec_t<Length,Type>& adj_b, Type& adj_t, const vec_t<Length,Type>& adj_ret)
1127
+ {
1128
+ adj_a += adj_ret*(Type(1)-t);
1129
+ adj_b += adj_ret*t;
1130
+ adj_t += tensordot(b, adj_ret) - tensordot(a, adj_ret);
1131
+ }
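// Editor's note on adj_lerp above: lerp(a, b, t) = a*(1 - t) + b*t, so
// d(lerp)/dt = b - a and the scalar gradient is adj_t += dot(b - a, adj_ret),
// written above as tensordot(b, adj_ret) - tensordot(a, adj_ret).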
1132
+
1133
+ // for integral types we do not accumulate gradients
1134
+ template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_add(vec_t<Length, int8>* buf, const vec_t<Length, int8> &value) { }
1135
+ template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_add(vec_t<Length, uint8>* buf, const vec_t<Length, uint8> &value) { }
1136
+ template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_add(vec_t<Length, int16>* buf, const vec_t<Length, int16> &value) { }
1137
+ template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_add(vec_t<Length, uint16>* buf, const vec_t<Length, uint16> &value) { }
1138
+ template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_add(vec_t<Length, int32>* buf, const vec_t<Length, int32> &value) { }
1139
+ template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_add(vec_t<Length, uint32>* buf, const vec_t<Length, uint32> &value) { }
1140
+ template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_add(vec_t<Length, int64>* buf, const vec_t<Length, int64> &value) { }
1141
+ template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_add(vec_t<Length, uint64>* buf, const vec_t<Length, uint64> &value) { }
1142
+
1143
+
1144
+ // adjoints for some of the constructors, used in intersect.h
1145
+ inline CUDA_CALLABLE void adj_vec2(float x, float y, float& adj_x, float& adj_y, const vec2& adj_ret)
1146
+ {
1147
+ adj_x += adj_ret[0];
1148
+ adj_y += adj_ret[1];
1149
+ }
1150
+
1151
+ inline CUDA_CALLABLE void adj_vec3(float x, float y, float z, float& adj_x, float& adj_y, float& adj_z, const vec3& adj_ret)
1152
+ {
1153
+ adj_x += adj_ret[0];
1154
+ adj_y += adj_ret[1];
1155
+ adj_z += adj_ret[2];
1156
+ }
1157
+
1158
+ inline CUDA_CALLABLE void adj_vec4(float x, float y, float z, float w, float& adj_x, float& adj_y, float& adj_z, float& adj_w, const vec4& adj_ret)
1159
+ {
1160
+ adj_x += adj_ret[0];
1161
+ adj_y += adj_ret[1];
1162
+ adj_z += adj_ret[2];
1163
+ adj_w += adj_ret[3];
1164
+ }
1165
+
1166
+ inline CUDA_CALLABLE void adj_vec3(float s, float& adj_s, const vec3& adj_ret)
1167
+ {
1168
+ adj_vec_t(s, adj_s, adj_ret);
1169
+ }
1170
+
1171
+ inline CUDA_CALLABLE void adj_vec4(float s, float& adj_s, const vec4& adj_ret)
1172
+ {
1173
+ adj_vec_t(s, adj_s, adj_ret);
1174
+ }
1175
+
1176
+
1177
+ } // namespace wp