warp-lang 1.0.2-py3-none-win_amd64.whl → 1.2.0-py3-none-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of warp-lang might be problematic.

Files changed (356)
  1. warp/__init__.py +108 -97
  2. warp/__init__.pyi +1 -1
  3. warp/bin/warp-clang.dll +0 -0
  4. warp/bin/warp.dll +0 -0
  5. warp/build.py +88 -113
  6. warp/build_dll.py +383 -375
  7. warp/builtins.py +3693 -3354
  8. warp/codegen.py +2925 -2792
  9. warp/config.py +40 -36
  10. warp/constants.py +49 -45
  11. warp/context.py +5409 -5102
  12. warp/dlpack.py +442 -442
  13. warp/examples/__init__.py +16 -16
  14. warp/examples/assets/bear.usd +0 -0
  15. warp/examples/assets/bunny.usd +0 -0
  16. warp/examples/assets/cartpole.urdf +110 -110
  17. warp/examples/assets/crazyflie.usd +0 -0
  18. warp/examples/assets/cube.usd +0 -0
  19. warp/examples/assets/nv_ant.xml +92 -92
  20. warp/examples/assets/nv_humanoid.xml +183 -183
  21. warp/examples/assets/quadruped.urdf +267 -267
  22. warp/examples/assets/rocks.nvdb +0 -0
  23. warp/examples/assets/rocks.usd +0 -0
  24. warp/examples/assets/sphere.usd +0 -0
  25. warp/examples/benchmarks/benchmark_api.py +381 -383
  26. warp/examples/benchmarks/benchmark_cloth.py +278 -277
  27. warp/examples/benchmarks/benchmark_cloth_cupy.py +88 -88
  28. warp/examples/benchmarks/benchmark_cloth_jax.py +97 -100
  29. warp/examples/benchmarks/benchmark_cloth_numba.py +146 -142
  30. warp/examples/benchmarks/benchmark_cloth_numpy.py +77 -77
  31. warp/examples/benchmarks/benchmark_cloth_pytorch.py +86 -86
  32. warp/examples/benchmarks/benchmark_cloth_taichi.py +112 -112
  33. warp/examples/benchmarks/benchmark_cloth_warp.py +145 -146
  34. warp/examples/benchmarks/benchmark_launches.py +293 -295
  35. warp/examples/browse.py +29 -29
  36. warp/examples/core/example_dem.py +232 -219
  37. warp/examples/core/example_fluid.py +291 -267
  38. warp/examples/core/example_graph_capture.py +142 -126
  39. warp/examples/core/example_marching_cubes.py +186 -174
  40. warp/examples/core/example_mesh.py +172 -155
  41. warp/examples/core/example_mesh_intersect.py +203 -193
  42. warp/examples/core/example_nvdb.py +174 -170
  43. warp/examples/core/example_raycast.py +103 -90
  44. warp/examples/core/example_raymarch.py +197 -178
  45. warp/examples/core/example_render_opengl.py +183 -141
  46. warp/examples/core/example_sph.py +403 -387
  47. warp/examples/core/example_torch.py +219 -181
  48. warp/examples/core/example_wave.py +261 -248
  49. warp/examples/fem/bsr_utils.py +378 -380
  50. warp/examples/fem/example_apic_fluid.py +432 -389
  51. warp/examples/fem/example_burgers.py +262 -0
  52. warp/examples/fem/example_convection_diffusion.py +180 -168
  53. warp/examples/fem/example_convection_diffusion_dg.py +217 -209
  54. warp/examples/fem/example_deformed_geometry.py +175 -159
  55. warp/examples/fem/example_diffusion.py +199 -173
  56. warp/examples/fem/example_diffusion_3d.py +178 -152
  57. warp/examples/fem/example_diffusion_mgpu.py +219 -214
  58. warp/examples/fem/example_mixed_elasticity.py +242 -222
  59. warp/examples/fem/example_navier_stokes.py +257 -243
  60. warp/examples/fem/example_stokes.py +218 -192
  61. warp/examples/fem/example_stokes_transfer.py +263 -249
  62. warp/examples/fem/mesh_utils.py +133 -109
  63. warp/examples/fem/plot_utils.py +292 -287
  64. warp/examples/optim/example_bounce.py +258 -246
  65. warp/examples/optim/example_cloth_throw.py +220 -209
  66. warp/examples/optim/example_diffray.py +564 -536
  67. warp/examples/optim/example_drone.py +862 -835
  68. warp/examples/optim/example_inverse_kinematics.py +174 -168
  69. warp/examples/optim/example_inverse_kinematics_torch.py +183 -169
  70. warp/examples/optim/example_spring_cage.py +237 -231
  71. warp/examples/optim/example_trajectory.py +221 -199
  72. warp/examples/optim/example_walker.py +304 -293
  73. warp/examples/sim/example_cartpole.py +137 -129
  74. warp/examples/sim/example_cloth.py +194 -186
  75. warp/examples/sim/example_granular.py +122 -111
  76. warp/examples/sim/example_granular_collision_sdf.py +195 -186
  77. warp/examples/sim/example_jacobian_ik.py +234 -214
  78. warp/examples/sim/example_particle_chain.py +116 -105
  79. warp/examples/sim/example_quadruped.py +191 -180
  80. warp/examples/sim/example_rigid_chain.py +195 -187
  81. warp/examples/sim/example_rigid_contact.py +187 -177
  82. warp/examples/sim/example_rigid_force.py +125 -125
  83. warp/examples/sim/example_rigid_gyroscopic.py +107 -95
  84. warp/examples/sim/example_rigid_soft_contact.py +132 -122
  85. warp/examples/sim/example_soft_body.py +188 -177
  86. warp/fabric.py +337 -335
  87. warp/fem/__init__.py +61 -27
  88. warp/fem/cache.py +403 -388
  89. warp/fem/dirichlet.py +178 -179
  90. warp/fem/domain.py +262 -263
  91. warp/fem/field/__init__.py +100 -101
  92. warp/fem/field/field.py +148 -149
  93. warp/fem/field/nodal_field.py +298 -299
  94. warp/fem/field/restriction.py +22 -21
  95. warp/fem/field/test.py +180 -181
  96. warp/fem/field/trial.py +183 -183
  97. warp/fem/geometry/__init__.py +16 -19
  98. warp/fem/geometry/closest_point.py +69 -70
  99. warp/fem/geometry/deformed_geometry.py +270 -271
  100. warp/fem/geometry/element.py +748 -744
  101. warp/fem/geometry/geometry.py +184 -186
  102. warp/fem/geometry/grid_2d.py +380 -373
  103. warp/fem/geometry/grid_3d.py +437 -435
  104. warp/fem/geometry/hexmesh.py +953 -953
  105. warp/fem/geometry/nanogrid.py +455 -0
  106. warp/fem/geometry/partition.py +374 -376
  107. warp/fem/geometry/quadmesh_2d.py +532 -532
  108. warp/fem/geometry/tetmesh.py +840 -840
  109. warp/fem/geometry/trimesh_2d.py +577 -577
  110. warp/fem/integrate.py +1684 -1615
  111. warp/fem/operator.py +190 -191
  112. warp/fem/polynomial.py +214 -213
  113. warp/fem/quadrature/__init__.py +2 -2
  114. warp/fem/quadrature/pic_quadrature.py +243 -245
  115. warp/fem/quadrature/quadrature.py +295 -294
  116. warp/fem/space/__init__.py +179 -292
  117. warp/fem/space/basis_space.py +522 -489
  118. warp/fem/space/collocated_function_space.py +100 -105
  119. warp/fem/space/dof_mapper.py +236 -236
  120. warp/fem/space/function_space.py +148 -145
  121. warp/fem/space/grid_2d_function_space.py +148 -267
  122. warp/fem/space/grid_3d_function_space.py +167 -306
  123. warp/fem/space/hexmesh_function_space.py +253 -352
  124. warp/fem/space/nanogrid_function_space.py +202 -0
  125. warp/fem/space/partition.py +350 -350
  126. warp/fem/space/quadmesh_2d_function_space.py +261 -369
  127. warp/fem/space/restriction.py +161 -160
  128. warp/fem/space/shape/__init__.py +90 -15
  129. warp/fem/space/shape/cube_shape_function.py +728 -738
  130. warp/fem/space/shape/shape_function.py +102 -103
  131. warp/fem/space/shape/square_shape_function.py +611 -611
  132. warp/fem/space/shape/tet_shape_function.py +565 -567
  133. warp/fem/space/shape/triangle_shape_function.py +429 -429
  134. warp/fem/space/tetmesh_function_space.py +224 -292
  135. warp/fem/space/topology.py +297 -295
  136. warp/fem/space/trimesh_2d_function_space.py +153 -221
  137. warp/fem/types.py +77 -77
  138. warp/fem/utils.py +495 -495
  139. warp/jax.py +166 -141
  140. warp/jax_experimental.py +341 -339
  141. warp/native/array.h +1081 -1025
  142. warp/native/builtin.h +1603 -1560
  143. warp/native/bvh.cpp +402 -398
  144. warp/native/bvh.cu +533 -525
  145. warp/native/bvh.h +430 -429
  146. warp/native/clang/clang.cpp +496 -464
  147. warp/native/crt.cpp +42 -32
  148. warp/native/crt.h +352 -335
  149. warp/native/cuda_crt.h +1049 -1049
  150. warp/native/cuda_util.cpp +549 -540
  151. warp/native/cuda_util.h +288 -203
  152. warp/native/cutlass_gemm.cpp +34 -34
  153. warp/native/cutlass_gemm.cu +372 -372
  154. warp/native/error.cpp +66 -66
  155. warp/native/error.h +27 -27
  156. warp/native/exports.h +187 -0
  157. warp/native/fabric.h +228 -228
  158. warp/native/hashgrid.cpp +301 -278
  159. warp/native/hashgrid.cu +78 -77
  160. warp/native/hashgrid.h +227 -227
  161. warp/native/initializer_array.h +32 -32
  162. warp/native/intersect.h +1204 -1204
  163. warp/native/intersect_adj.h +365 -365
  164. warp/native/intersect_tri.h +322 -322
  165. warp/native/marching.cpp +2 -2
  166. warp/native/marching.cu +497 -497
  167. warp/native/marching.h +2 -2
  168. warp/native/mat.h +1545 -1498
  169. warp/native/matnn.h +333 -333
  170. warp/native/mesh.cpp +203 -203
  171. warp/native/mesh.cu +292 -293
  172. warp/native/mesh.h +1887 -1887
  173. warp/native/nanovdb/GridHandle.h +366 -0
  174. warp/native/nanovdb/HostBuffer.h +590 -0
  175. warp/native/nanovdb/NanoVDB.h +6624 -4782
  176. warp/native/nanovdb/PNanoVDB.h +3390 -2553
  177. warp/native/noise.h +850 -850
  178. warp/native/quat.h +1112 -1085
  179. warp/native/rand.h +303 -299
  180. warp/native/range.h +108 -108
  181. warp/native/reduce.cpp +156 -156
  182. warp/native/reduce.cu +348 -348
  183. warp/native/runlength_encode.cpp +61 -61
  184. warp/native/runlength_encode.cu +46 -46
  185. warp/native/scan.cpp +30 -30
  186. warp/native/scan.cu +36 -36
  187. warp/native/scan.h +7 -7
  188. warp/native/solid_angle.h +442 -442
  189. warp/native/sort.cpp +94 -94
  190. warp/native/sort.cu +97 -97
  191. warp/native/sort.h +14 -14
  192. warp/native/sparse.cpp +337 -337
  193. warp/native/sparse.cu +544 -544
  194. warp/native/spatial.h +630 -630
  195. warp/native/svd.h +562 -562
  196. warp/native/temp_buffer.h +30 -30
  197. warp/native/vec.h +1177 -1133
  198. warp/native/volume.cpp +529 -297
  199. warp/native/volume.cu +58 -32
  200. warp/native/volume.h +960 -538
  201. warp/native/volume_builder.cu +446 -425
  202. warp/native/volume_builder.h +34 -19
  203. warp/native/volume_impl.h +61 -0
  204. warp/native/warp.cpp +1057 -1052
  205. warp/native/warp.cu +2949 -2828
  206. warp/native/warp.h +321 -305
  207. warp/optim/__init__.py +9 -9
  208. warp/optim/adam.py +120 -120
  209. warp/optim/linear.py +1104 -939
  210. warp/optim/sgd.py +104 -92
  211. warp/render/__init__.py +10 -10
  212. warp/render/render_opengl.py +3356 -3204
  213. warp/render/render_usd.py +768 -749
  214. warp/render/utils.py +152 -150
  215. warp/sim/__init__.py +52 -59
  216. warp/sim/articulation.py +685 -685
  217. warp/sim/collide.py +1594 -1590
  218. warp/sim/import_mjcf.py +489 -481
  219. warp/sim/import_snu.py +220 -221
  220. warp/sim/import_urdf.py +536 -516
  221. warp/sim/import_usd.py +887 -881
  222. warp/sim/inertia.py +316 -317
  223. warp/sim/integrator.py +234 -233
  224. warp/sim/integrator_euler.py +1956 -1956
  225. warp/sim/integrator_featherstone.py +1917 -1991
  226. warp/sim/integrator_xpbd.py +3288 -3312
  227. warp/sim/model.py +4473 -4314
  228. warp/sim/particles.py +113 -112
  229. warp/sim/render.py +417 -403
  230. warp/sim/utils.py +413 -410
  231. warp/sparse.py +1289 -1227
  232. warp/stubs.py +2192 -2469
  233. warp/tape.py +1162 -225
  234. warp/tests/__init__.py +1 -1
  235. warp/tests/__main__.py +4 -4
  236. warp/tests/assets/test_index_grid.nvdb +0 -0
  237. warp/tests/assets/torus.usda +105 -105
  238. warp/tests/aux_test_class_kernel.py +26 -26
  239. warp/tests/aux_test_compile_consts_dummy.py +10 -10
  240. warp/tests/aux_test_conditional_unequal_types_kernels.py +21 -21
  241. warp/tests/aux_test_dependent.py +20 -22
  242. warp/tests/aux_test_grad_customs.py +21 -23
  243. warp/tests/aux_test_reference.py +9 -11
  244. warp/tests/aux_test_reference_reference.py +8 -10
  245. warp/tests/aux_test_square.py +15 -17
  246. warp/tests/aux_test_unresolved_func.py +14 -14
  247. warp/tests/aux_test_unresolved_symbol.py +14 -14
  248. warp/tests/disabled_kinematics.py +237 -239
  249. warp/tests/run_coverage_serial.py +31 -31
  250. warp/tests/test_adam.py +155 -157
  251. warp/tests/test_arithmetic.py +1088 -1124
  252. warp/tests/test_array.py +2415 -2326
  253. warp/tests/test_array_reduce.py +148 -150
  254. warp/tests/test_async.py +666 -656
  255. warp/tests/test_atomic.py +139 -141
  256. warp/tests/test_bool.py +212 -149
  257. warp/tests/test_builtins_resolution.py +1290 -1292
  258. warp/tests/test_bvh.py +162 -171
  259. warp/tests/test_closest_point_edge_edge.py +227 -228
  260. warp/tests/test_codegen.py +562 -553
  261. warp/tests/test_compile_consts.py +217 -101
  262. warp/tests/test_conditional.py +244 -246
  263. warp/tests/test_copy.py +230 -215
  264. warp/tests/test_ctypes.py +630 -632
  265. warp/tests/test_dense.py +65 -67
  266. warp/tests/test_devices.py +89 -98
  267. warp/tests/test_dlpack.py +528 -529
  268. warp/tests/test_examples.py +403 -378
  269. warp/tests/test_fabricarray.py +952 -955
  270. warp/tests/test_fast_math.py +60 -54
  271. warp/tests/test_fem.py +1298 -1278
  272. warp/tests/test_fp16.py +128 -130
  273. warp/tests/test_func.py +336 -337
  274. warp/tests/test_generics.py +596 -571
  275. warp/tests/test_grad.py +885 -640
  276. warp/tests/test_grad_customs.py +331 -336
  277. warp/tests/test_hash_grid.py +208 -164
  278. warp/tests/test_import.py +37 -39
  279. warp/tests/test_indexedarray.py +1132 -1134
  280. warp/tests/test_intersect.py +65 -67
  281. warp/tests/test_jax.py +305 -307
  282. warp/tests/test_large.py +169 -164
  283. warp/tests/test_launch.py +352 -354
  284. warp/tests/test_lerp.py +217 -261
  285. warp/tests/test_linear_solvers.py +189 -171
  286. warp/tests/test_lvalue.py +419 -493
  287. warp/tests/test_marching_cubes.py +63 -65
  288. warp/tests/test_mat.py +1799 -1827
  289. warp/tests/test_mat_lite.py +113 -115
  290. warp/tests/test_mat_scalar_ops.py +2905 -2889
  291. warp/tests/test_math.py +124 -193
  292. warp/tests/test_matmul.py +498 -499
  293. warp/tests/test_matmul_lite.py +408 -410
  294. warp/tests/test_mempool.py +186 -190
  295. warp/tests/test_mesh.py +281 -324
  296. warp/tests/test_mesh_query_aabb.py +226 -241
  297. warp/tests/test_mesh_query_point.py +690 -702
  298. warp/tests/test_mesh_query_ray.py +290 -303
  299. warp/tests/test_mlp.py +274 -276
  300. warp/tests/test_model.py +108 -110
  301. warp/tests/test_module_hashing.py +111 -0
  302. warp/tests/test_modules_lite.py +36 -39
  303. warp/tests/test_multigpu.py +161 -163
  304. warp/tests/test_noise.py +244 -248
  305. warp/tests/test_operators.py +248 -250
  306. warp/tests/test_options.py +121 -125
  307. warp/tests/test_peer.py +131 -137
  308. warp/tests/test_pinned.py +76 -78
  309. warp/tests/test_print.py +52 -54
  310. warp/tests/test_quat.py +2084 -2086
  311. warp/tests/test_rand.py +324 -288
  312. warp/tests/test_reload.py +207 -217
  313. warp/tests/test_rounding.py +177 -179
  314. warp/tests/test_runlength_encode.py +188 -190
  315. warp/tests/test_sim_grad.py +241 -0
  316. warp/tests/test_sim_kinematics.py +89 -97
  317. warp/tests/test_smoothstep.py +166 -168
  318. warp/tests/test_snippet.py +303 -266
  319. warp/tests/test_sparse.py +466 -460
  320. warp/tests/test_spatial.py +2146 -2148
  321. warp/tests/test_special_values.py +362 -0
  322. warp/tests/test_streams.py +484 -473
  323. warp/tests/test_struct.py +708 -675
  324. warp/tests/test_tape.py +171 -148
  325. warp/tests/test_torch.py +741 -743
  326. warp/tests/test_transient_module.py +85 -87
  327. warp/tests/test_types.py +554 -659
  328. warp/tests/test_utils.py +488 -499
  329. warp/tests/test_vec.py +1262 -1268
  330. warp/tests/test_vec_lite.py +71 -73
  331. warp/tests/test_vec_scalar_ops.py +2097 -2099
  332. warp/tests/test_verify_fp.py +92 -94
  333. warp/tests/test_volume.py +961 -736
  334. warp/tests/test_volume_write.py +338 -265
  335. warp/tests/unittest_serial.py +38 -37
  336. warp/tests/unittest_suites.py +367 -359
  337. warp/tests/unittest_utils.py +434 -578
  338. warp/tests/unused_test_misc.py +69 -71
  339. warp/tests/walkthrough_debug.py +85 -85
  340. warp/thirdparty/appdirs.py +598 -598
  341. warp/thirdparty/dlpack.py +143 -143
  342. warp/thirdparty/unittest_parallel.py +563 -561
  343. warp/torch.py +321 -295
  344. warp/types.py +4941 -4450
  345. warp/utils.py +1008 -821
  346. {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/LICENSE.md +126 -126
  347. {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/METADATA +365 -400
  348. warp_lang-1.2.0.dist-info/RECORD +359 -0
  349. warp/examples/assets/cube.usda +0 -42
  350. warp/examples/assets/sphere.usda +0 -56
  351. warp/examples/assets/torus.usda +0 -105
  352. warp/examples/fem/example_convection_diffusion_dg0.py +0 -194
  353. warp/native/nanovdb/PNanoVDBWrite.h +0 -295
  354. warp_lang-1.0.2.dist-info/RECORD +0 -352
  355. {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/WHEEL +0 -0
  356. {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/top_level.txt +0 -0
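
The registry's rendering of the per-file diff for warp/native/array.h follows below. As a rough, hypothetical sketch of how such a comparison can be reproduced locally (assuming both wheels have already been downloaded, for example with "pip download warp-lang==1.0.2 --no-deps"), note that a .whl is a standard zip archive whose members can be read and diffed with Python's standard library; the wheel filenames and the member path used here are illustrative only:

import difflib
import zipfile

def read_member(wheel_path: str, member: str) -> list[str]:
    # A .whl file is a zip archive, so an individual member can be read directly.
    with zipfile.ZipFile(wheel_path) as wheel:
        return wheel.read(member).decode("utf-8", errors="replace").splitlines()

# Filenames are illustrative; actual names depend on the build and platform tag.
old = read_member("warp_lang-1.0.2-py3-none-win_amd64.whl", "warp/native/array.h")
new = read_member("warp_lang-1.2.0-py3-none-win_amd64.whl", "warp/native/array.h")

print("\n".join(difflib.unified_diff(old, new, "1.0.2/array.h", "1.2.0/array.h", lineterm="")))
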
warp/native/array.h CHANGED
@@ -1,1025 +1,1081 @@
1
- #pragma once
2
-
3
- #include "builtin.h"
4
-
5
- namespace wp
6
- {
7
-
8
- #if FP_CHECK
9
-
10
- #define FP_ASSERT_FWD(value) \
11
- print(value); \
12
- printf(")\n"); \
13
- assert(0); \
14
-
15
- #define FP_ASSERT_ADJ(value, adj_value) \
16
- print(value); \
17
- printf(", "); \
18
- print(adj_value); \
19
- printf(")\n"); \
20
- assert(0); \
21
-
22
- #define FP_VERIFY_FWD(value) \
23
- if (!isfinite(value)) { \
24
- printf("%s:%d - %s(addr", __FILE__, __LINE__, __FUNCTION__); \
25
- FP_ASSERT_FWD(value) \
26
- } \
27
-
28
- #define FP_VERIFY_FWD_1(value) \
29
- if (!isfinite(value)) { \
30
- printf("%s:%d - %s(arr, %d) ", __FILE__, __LINE__, __FUNCTION__, i); \
31
- FP_ASSERT_FWD(value) \
32
- } \
33
-
34
- #define FP_VERIFY_FWD_2(value) \
35
- if (!isfinite(value)) { \
36
- printf("%s:%d - %s(arr, %d, %d) ", __FILE__, __LINE__, __FUNCTION__, i, j); \
37
- FP_ASSERT_FWD(value) \
38
- } \
39
-
40
- #define FP_VERIFY_FWD_3(value) \
41
- if (!isfinite(value)) { \
42
- printf("%s:%d - %s(arr, %d, %d, %d) ", __FILE__, __LINE__, __FUNCTION__, i, j, k); \
43
- FP_ASSERT_FWD(value) \
44
- } \
45
-
46
- #define FP_VERIFY_FWD_4(value) \
47
- if (!isfinite(value)) { \
48
- printf("%s:%d - %s(arr, %d, %d, %d, %d) ", __FILE__, __LINE__, __FUNCTION__, i, j, k, l); \
49
- FP_ASSERT_FWD(value) \
50
- } \
51
-
52
- #define FP_VERIFY_ADJ(value, adj_value) \
53
- if (!isfinite(value) || !isfinite(adj_value)) \
54
- { \
55
- printf("%s:%d - %s(addr", __FILE__, __LINE__, __FUNCTION__); \
56
- FP_ASSERT_ADJ(value, adj_value); \
57
- } \
58
-
59
- #define FP_VERIFY_ADJ_1(value, adj_value) \
60
- if (!isfinite(value) || !isfinite(adj_value)) \
61
- { \
62
- printf("%s:%d - %s(arr, %d) ", __FILE__, __LINE__, __FUNCTION__, i); \
63
- FP_ASSERT_ADJ(value, adj_value); \
64
- } \
65
-
66
- #define FP_VERIFY_ADJ_2(value, adj_value) \
67
- if (!isfinite(value) || !isfinite(adj_value)) \
68
- { \
69
- printf("%s:%d - %s(arr, %d, %d) ", __FILE__, __LINE__, __FUNCTION__, i, j); \
70
- FP_ASSERT_ADJ(value, adj_value); \
71
- } \
72
-
73
- #define FP_VERIFY_ADJ_3(value, adj_value) \
74
- if (!isfinite(value) || !isfinite(adj_value)) \
75
- { \
76
- printf("%s:%d - %s(arr, %d, %d, %d) ", __FILE__, __LINE__, __FUNCTION__, i, j, k); \
77
- FP_ASSERT_ADJ(value, adj_value); \
78
- } \
79
-
80
- #define FP_VERIFY_ADJ_4(value, adj_value) \
81
- if (!isfinite(value) || !isfinite(adj_value)) \
82
- { \
83
- printf("%s:%d - %s(arr, %d, %d, %d, %d) ", __FILE__, __LINE__, __FUNCTION__, i, j, k, l); \
84
- FP_ASSERT_ADJ(value, adj_value); \
85
- } \
86
-
87
-
88
- #else
89
-
90
- #define FP_VERIFY_FWD(value) {}
91
- #define FP_VERIFY_FWD_1(value) {}
92
- #define FP_VERIFY_FWD_2(value) {}
93
- #define FP_VERIFY_FWD_3(value) {}
94
- #define FP_VERIFY_FWD_4(value) {}
95
-
96
- #define FP_VERIFY_ADJ(value, adj_value) {}
97
- #define FP_VERIFY_ADJ_1(value, adj_value) {}
98
- #define FP_VERIFY_ADJ_2(value, adj_value) {}
99
- #define FP_VERIFY_ADJ_3(value, adj_value) {}
100
- #define FP_VERIFY_ADJ_4(value, adj_value) {}
101
-
102
- #endif // WP_FP_CHECK
103
-
104
- const int ARRAY_MAX_DIMS = 4; // must match constant in types.py
105
-
106
- // must match constants in types.py
107
- const int ARRAY_TYPE_REGULAR = 0;
108
- const int ARRAY_TYPE_INDEXED = 1;
109
- const int ARRAY_TYPE_FABRIC = 2;
110
- const int ARRAY_TYPE_FABRIC_INDEXED = 3;
111
-
112
- struct shape_t
113
- {
114
- int dims[ARRAY_MAX_DIMS];
115
-
116
- CUDA_CALLABLE inline shape_t()
117
- : dims()
118
- {}
119
-
120
- CUDA_CALLABLE inline int operator[](int i) const
121
- {
122
- assert(i < ARRAY_MAX_DIMS);
123
- return dims[i];
124
- }
125
-
126
- CUDA_CALLABLE inline int& operator[](int i)
127
- {
128
- assert(i < ARRAY_MAX_DIMS);
129
- return dims[i];
130
- }
131
- };
132
-
133
- CUDA_CALLABLE inline int extract(const shape_t& s, int i)
134
- {
135
- return s.dims[i];
136
- }
137
-
138
- CUDA_CALLABLE inline void adj_extract(const shape_t& s, int i, const shape_t& adj_s, int adj_i, int adj_ret) {}
139
-
140
- inline CUDA_CALLABLE void print(shape_t s)
141
- {
142
- // todo: only print valid dims, currently shape has a fixed size
143
- // but we don't know how many dims are valid (e.g.: 1d, 2d, etc)
144
- // should probably store ndim with shape
145
- printf("(%d, %d, %d, %d)\n", s.dims[0], s.dims[1], s.dims[2], s.dims[3]);
146
- }
147
- inline CUDA_CALLABLE void adj_print(shape_t s, shape_t& shape_t) {}
148
-
149
-
150
- template <typename T>
151
- struct array_t
152
- {
153
- CUDA_CALLABLE inline array_t()
154
- : data(nullptr),
155
- grad(nullptr),
156
- shape(),
157
- strides(),
158
- ndim(0)
159
- {}
160
-
161
- CUDA_CALLABLE array_t(T* data, int size, T* grad=nullptr) : data(data), grad(grad) {
162
- // constructor for 1d array
163
- shape.dims[0] = size;
164
- shape.dims[1] = 0;
165
- shape.dims[2] = 0;
166
- shape.dims[3] = 0;
167
- ndim = 1;
168
- strides[0] = sizeof(T);
169
- strides[1] = 0;
170
- strides[2] = 0;
171
- strides[3] = 0;
172
- }
173
- CUDA_CALLABLE array_t(T* data, int dim0, int dim1, T* grad=nullptr) : data(data), grad(grad) {
174
- // constructor for 2d array
175
- shape.dims[0] = dim0;
176
- shape.dims[1] = dim1;
177
- shape.dims[2] = 0;
178
- shape.dims[3] = 0;
179
- ndim = 2;
180
- strides[0] = dim1 * sizeof(T);
181
- strides[1] = sizeof(T);
182
- strides[2] = 0;
183
- strides[3] = 0;
184
- }
185
- CUDA_CALLABLE array_t(T* data, int dim0, int dim1, int dim2, T* grad=nullptr) : data(data), grad(grad) {
186
- // constructor for 3d array
187
- shape.dims[0] = dim0;
188
- shape.dims[1] = dim1;
189
- shape.dims[2] = dim2;
190
- shape.dims[3] = 0;
191
- ndim = 3;
192
- strides[0] = dim1 * dim2 * sizeof(T);
193
- strides[1] = dim2 * sizeof(T);
194
- strides[2] = sizeof(T);
195
- strides[3] = 0;
196
- }
197
- CUDA_CALLABLE array_t(T* data, int dim0, int dim1, int dim2, int dim3, T* grad=nullptr) : data(data), grad(grad) {
198
- // constructor for 4d array
199
- shape.dims[0] = dim0;
200
- shape.dims[1] = dim1;
201
- shape.dims[2] = dim2;
202
- shape.dims[3] = dim3;
203
- ndim = 4;
204
- strides[0] = dim1 * dim2 * dim3 * sizeof(T);
205
- strides[1] = dim2 * dim3 * sizeof(T);
206
- strides[2] = dim3 * sizeof(T);
207
- strides[3] = sizeof(T);
208
- }
209
-
210
- CUDA_CALLABLE inline bool empty() const { return !data; }
211
-
212
- T* data;
213
- T* grad;
214
- shape_t shape;
215
- int strides[ARRAY_MAX_DIMS];
216
- int ndim;
217
-
218
- CUDA_CALLABLE inline operator T*() const { return data; }
219
- };
220
-
221
-
222
- // TODO:
223
- // - templated index type?
224
- // - templated dimensionality? (also for array_t to save space when passing arrays to kernels)
225
- template <typename T>
226
- struct indexedarray_t
227
- {
228
- CUDA_CALLABLE inline indexedarray_t()
229
- : arr(),
230
- indices(),
231
- shape()
232
- {}
233
-
234
- CUDA_CALLABLE inline bool empty() const { return !arr.data; }
235
-
236
- array_t<T> arr;
237
- int* indices[ARRAY_MAX_DIMS]; // index array per dimension (can be NULL)
238
- shape_t shape; // element count per dimension (num. indices if indexed, array dim if not)
239
- };
240
-
241
-
242
- // return stride (in bytes) of the given index
243
- template <typename T>
244
- CUDA_CALLABLE inline size_t stride(const array_t<T>& a, int dim)
245
- {
246
- return size_t(a.strides[dim]);
247
- }
248
-
249
- template <typename T>
250
- CUDA_CALLABLE inline T* data_at_byte_offset(const array_t<T>& a, size_t byte_offset)
251
- {
252
- return reinterpret_cast<T*>(reinterpret_cast<char*>(a.data) + byte_offset);
253
- }
254
-
255
- template <typename T>
256
- CUDA_CALLABLE inline T* grad_at_byte_offset(const array_t<T>& a, size_t byte_offset)
257
- {
258
- return reinterpret_cast<T*>(reinterpret_cast<char*>(a.grad) + byte_offset);
259
- }
260
-
261
- template <typename T>
262
- CUDA_CALLABLE inline size_t byte_offset(const array_t<T>& arr, int i)
263
- {
264
- assert(i >= 0 && i < arr.shape[0]);
265
-
266
- return i*stride(arr, 0);
267
- }
268
-
269
- template <typename T>
270
- CUDA_CALLABLE inline size_t byte_offset(const array_t<T>& arr, int i, int j)
271
- {
272
- assert(i >= 0 && i < arr.shape[0]);
273
- assert(j >= 0 && j < arr.shape[1]);
274
-
275
- return i*stride(arr, 0) + j*stride(arr, 1);
276
- }
277
-
278
- template <typename T>
279
- CUDA_CALLABLE inline size_t byte_offset(const array_t<T>& arr, int i, int j, int k)
280
- {
281
- assert(i >= 0 && i < arr.shape[0]);
282
- assert(j >= 0 && j < arr.shape[1]);
283
- assert(k >= 0 && k < arr.shape[2]);
284
-
285
- return i*stride(arr, 0) + j*stride(arr, 1) + k*stride(arr, 2);
286
- }
287
-
288
- template <typename T>
289
- CUDA_CALLABLE inline size_t byte_offset(const array_t<T>& arr, int i, int j, int k, int l)
290
- {
291
- assert(i >= 0 && i < arr.shape[0]);
292
- assert(j >= 0 && j < arr.shape[1]);
293
- assert(k >= 0 && k < arr.shape[2]);
294
- assert(l >= 0 && l < arr.shape[3]);
295
-
296
- return i*stride(arr, 0) + j*stride(arr, 1) + k*stride(arr, 2) + l*stride(arr, 3);
297
- }
298
-
299
- template <typename T>
300
- CUDA_CALLABLE inline T& index(const array_t<T>& arr, int i)
301
- {
302
- assert(arr.ndim == 1);
303
- T& result = *data_at_byte_offset(arr, byte_offset(arr, i));
304
- FP_VERIFY_FWD_1(result)
305
-
306
- return result;
307
- }
308
-
309
- template <typename T>
310
- CUDA_CALLABLE inline T& index(const array_t<T>& arr, int i, int j)
311
- {
312
- assert(arr.ndim == 2);
313
- T& result = *data_at_byte_offset(arr, byte_offset(arr, i, j));
314
- FP_VERIFY_FWD_2(result)
315
-
316
- return result;
317
- }
318
-
319
- template <typename T>
320
- CUDA_CALLABLE inline T& index(const array_t<T>& arr, int i, int j, int k)
321
- {
322
- assert(arr.ndim == 3);
323
- T& result = *data_at_byte_offset(arr, byte_offset(arr, i, j, k));
324
- FP_VERIFY_FWD_3(result)
325
-
326
- return result;
327
- }
328
-
329
- template <typename T>
330
- CUDA_CALLABLE inline T& index(const array_t<T>& arr, int i, int j, int k, int l)
331
- {
332
- assert(arr.ndim == 4);
333
- T& result = *data_at_byte_offset(arr, byte_offset(arr, i, j, k, l));
334
- FP_VERIFY_FWD_4(result)
335
-
336
- return result;
337
- }
338
-
339
- template <typename T>
340
- CUDA_CALLABLE inline T& index_grad(const array_t<T>& arr, int i)
341
- {
342
- T& result = *grad_at_byte_offset(arr, byte_offset(arr, i));
343
- FP_VERIFY_FWD_1(result)
344
-
345
- return result;
346
- }
347
-
348
- template <typename T>
349
- CUDA_CALLABLE inline T& index_grad(const array_t<T>& arr, int i, int j)
350
- {
351
- T& result = *grad_at_byte_offset(arr, byte_offset(arr, i, j));
352
- FP_VERIFY_FWD_2(result)
353
-
354
- return result;
355
- }
356
-
357
- template <typename T>
358
- CUDA_CALLABLE inline T& index_grad(const array_t<T>& arr, int i, int j, int k)
359
- {
360
- T& result = *grad_at_byte_offset(arr, byte_offset(arr, i, j, k));
361
- FP_VERIFY_FWD_3(result)
362
-
363
- return result;
364
- }
365
-
366
- template <typename T>
367
- CUDA_CALLABLE inline T& index_grad(const array_t<T>& arr, int i, int j, int k, int l)
368
- {
369
- T& result = *grad_at_byte_offset(arr, byte_offset(arr, i, j, k, l));
370
- FP_VERIFY_FWD_4(result)
371
-
372
- return result;
373
- }
374
-
375
-
376
- template <typename T>
377
- CUDA_CALLABLE inline T& index(const indexedarray_t<T>& iarr, int i)
378
- {
379
- assert(iarr.arr.ndim == 1);
380
- assert(i >= 0 && i < iarr.shape[0]);
381
-
382
- if (iarr.indices[0])
383
- {
384
- i = iarr.indices[0][i];
385
- assert(i >= 0 && i < iarr.arr.shape[0]);
386
- }
387
-
388
- T& result = *data_at_byte_offset(iarr.arr, byte_offset(iarr.arr, i));
389
- FP_VERIFY_FWD_1(result)
390
-
391
- return result;
392
- }
393
-
394
- template <typename T>
395
- CUDA_CALLABLE inline T& index(const indexedarray_t<T>& iarr, int i, int j)
396
- {
397
- assert(iarr.arr.ndim == 2);
398
- assert(i >= 0 && i < iarr.shape[0]);
399
- assert(j >= 0 && j < iarr.shape[1]);
400
-
401
- if (iarr.indices[0])
402
- {
403
- i = iarr.indices[0][i];
404
- assert(i >= 0 && i < iarr.arr.shape[0]);
405
- }
406
- if (iarr.indices[1])
407
- {
408
- j = iarr.indices[1][j];
409
- assert(j >= 0 && j < iarr.arr.shape[1]);
410
- }
411
-
412
- T& result = *data_at_byte_offset(iarr.arr, byte_offset(iarr.arr, i, j));
413
- FP_VERIFY_FWD_1(result)
414
-
415
- return result;
416
- }
417
-
418
- template <typename T>
419
- CUDA_CALLABLE inline T& index(const indexedarray_t<T>& iarr, int i, int j, int k)
420
- {
421
- assert(iarr.arr.ndim == 3);
422
- assert(i >= 0 && i < iarr.shape[0]);
423
- assert(j >= 0 && j < iarr.shape[1]);
424
- assert(k >= 0 && k < iarr.shape[2]);
425
-
426
- if (iarr.indices[0])
427
- {
428
- i = iarr.indices[0][i];
429
- assert(i >= 0 && i < iarr.arr.shape[0]);
430
- }
431
- if (iarr.indices[1])
432
- {
433
- j = iarr.indices[1][j];
434
- assert(j >= 0 && j < iarr.arr.shape[1]);
435
- }
436
- if (iarr.indices[2])
437
- {
438
- k = iarr.indices[2][k];
439
- assert(k >= 0 && k < iarr.arr.shape[2]);
440
- }
441
-
442
- T& result = *data_at_byte_offset(iarr.arr, byte_offset(iarr.arr, i, j, k));
443
- FP_VERIFY_FWD_1(result)
444
-
445
- return result;
446
- }
447
-
448
- template <typename T>
449
- CUDA_CALLABLE inline T& index(const indexedarray_t<T>& iarr, int i, int j, int k, int l)
450
- {
451
- assert(iarr.arr.ndim == 4);
452
- assert(i >= 0 && i < iarr.shape[0]);
453
- assert(j >= 0 && j < iarr.shape[1]);
454
- assert(k >= 0 && k < iarr.shape[2]);
455
- assert(l >= 0 && l < iarr.shape[3]);
456
-
457
- if (iarr.indices[0])
458
- {
459
- i = iarr.indices[0][i];
460
- assert(i >= 0 && i < iarr.arr.shape[0]);
461
- }
462
- if (iarr.indices[1])
463
- {
464
- j = iarr.indices[1][j];
465
- assert(j >= 0 && j < iarr.arr.shape[1]);
466
- }
467
- if (iarr.indices[2])
468
- {
469
- k = iarr.indices[2][k];
470
- assert(k >= 0 && k < iarr.arr.shape[2]);
471
- }
472
- if (iarr.indices[3])
473
- {
474
- l = iarr.indices[3][l];
475
- assert(l >= 0 && l < iarr.arr.shape[3]);
476
- }
477
-
478
- T& result = *data_at_byte_offset(iarr.arr, byte_offset(iarr.arr, i, j, k, l));
479
- FP_VERIFY_FWD_1(result)
480
-
481
- return result;
482
- }
483
-
484
-
485
- template <typename T>
486
- CUDA_CALLABLE inline array_t<T> view(array_t<T>& src, int i)
487
- {
488
- assert(src.ndim > 1);
489
- assert(i >= 0 && i < src.shape[0]);
490
-
491
- array_t<T> a;
492
- a.data = data_at_byte_offset(src, byte_offset(src, i));
493
- a.shape[0] = src.shape[1];
494
- a.shape[1] = src.shape[2];
495
- a.shape[2] = src.shape[3];
496
- a.strides[0] = src.strides[1];
497
- a.strides[1] = src.strides[2];
498
- a.strides[2] = src.strides[3];
499
- a.ndim = src.ndim-1;
500
-
501
- return a;
502
- }
503
-
504
- template <typename T>
505
- CUDA_CALLABLE inline array_t<T> view(array_t<T>& src, int i, int j)
506
- {
507
- assert(src.ndim > 2);
508
- assert(i >= 0 && i < src.shape[0]);
509
- assert(j >= 0 && j < src.shape[1]);
510
-
511
- array_t<T> a;
512
- a.data = data_at_byte_offset(src, byte_offset(src, i, j));
513
- a.shape[0] = src.shape[2];
514
- a.shape[1] = src.shape[3];
515
- a.strides[0] = src.strides[2];
516
- a.strides[1] = src.strides[3];
517
- a.ndim = src.ndim-2;
518
-
519
- return a;
520
- }
521
-
522
- template <typename T>
523
- CUDA_CALLABLE inline array_t<T> view(array_t<T>& src, int i, int j, int k)
524
- {
525
- assert(src.ndim > 3);
526
- assert(i >= 0 && i < src.shape[0]);
527
- assert(j >= 0 && j < src.shape[1]);
528
- assert(k >= 0 && k < src.shape[2]);
529
-
530
- array_t<T> a;
531
- a.data = data_at_byte_offset(src, byte_offset(src, i, j, k));
532
- a.shape[0] = src.shape[3];
533
- a.strides[0] = src.strides[3];
534
- a.ndim = src.ndim-3;
535
-
536
- return a;
537
- }
538
-
539
-
540
- template <typename T>
541
- CUDA_CALLABLE inline indexedarray_t<T> view(indexedarray_t<T>& src, int i)
542
- {
543
- assert(src.arr.ndim > 1);
544
-
545
- if (src.indices[0])
546
- {
547
- assert(i >= 0 && i < src.shape[0]);
548
- i = src.indices[0][i];
549
- }
550
-
551
- indexedarray_t<T> a;
552
- a.arr = view(src.arr, i);
553
- a.indices[0] = src.indices[1];
554
- a.indices[1] = src.indices[2];
555
- a.indices[2] = src.indices[3];
556
- a.shape[0] = src.shape[1];
557
- a.shape[1] = src.shape[2];
558
- a.shape[2] = src.shape[3];
559
-
560
- return a;
561
- }
562
-
563
- template <typename T>
564
- CUDA_CALLABLE inline indexedarray_t<T> view(indexedarray_t<T>& src, int i, int j)
565
- {
566
- assert(src.arr.ndim > 2);
567
-
568
- if (src.indices[0])
569
- {
570
- assert(i >= 0 && i < src.shape[0]);
571
- i = src.indices[0][i];
572
- }
573
- if (src.indices[1])
574
- {
575
- assert(j >= 0 && j < src.shape[1]);
576
- j = src.indices[1][j];
577
- }
578
-
579
- indexedarray_t<T> a;
580
- a.arr = view(src.arr, i, j);
581
- a.indices[0] = src.indices[2];
582
- a.indices[1] = src.indices[3];
583
- a.shape[0] = src.shape[2];
584
- a.shape[1] = src.shape[3];
585
-
586
- return a;
587
- }
588
-
589
- template <typename T>
590
- CUDA_CALLABLE inline indexedarray_t<T> view(indexedarray_t<T>& src, int i, int j, int k)
591
- {
592
- assert(src.arr.ndim > 3);
593
-
594
- if (src.indices[0])
595
- {
596
- assert(i >= 0 && i < src.shape[0]);
597
- i = src.indices[0][i];
598
- }
599
- if (src.indices[1])
600
- {
601
- assert(j >= 0 && j < src.shape[1]);
602
- j = src.indices[1][j];
603
- }
604
- if (src.indices[2])
605
- {
606
- assert(k >= 0 && k < src.shape[2]);
607
- k = src.indices[2][k];
608
- }
609
-
610
- indexedarray_t<T> a;
611
- a.arr = view(src.arr, i, j, k);
612
- a.indices[0] = src.indices[3];
613
- a.shape[0] = src.shape[3];
614
-
615
- return a;
616
- }
617
-
618
- template<template<typename> class A1, template<typename> class A2, template<typename> class A3, typename T>
619
- inline CUDA_CALLABLE void adj_view(A1<T>& src, int i, A2<T>& adj_src, int adj_i, A3<T> adj_ret) {}
620
- template<template<typename> class A1, template<typename> class A2, template<typename> class A3, typename T>
621
- inline CUDA_CALLABLE void adj_view(A1<T>& src, int i, int j, A2<T>& adj_src, int adj_i, int adj_j, A3<T> adj_ret) {}
622
- template<template<typename> class A1, template<typename> class A2, template<typename> class A3, typename T>
623
- inline CUDA_CALLABLE void adj_view(A1<T>& src, int i, int j, int k, A2<T>& adj_src, int adj_i, int adj_j, int adj_k, A3<T> adj_ret) {}
624
-
625
- // TODO: lower_bound() for indexed arrays?
626
-
627
- template <typename T>
628
- CUDA_CALLABLE inline int lower_bound(const array_t<T>& arr, int arr_begin, int arr_end, T value)
629
- {
630
- assert(arr.ndim == 1);
631
-
632
- int lower = arr_begin;
633
- int upper = arr_end - 1;
634
-
635
- while(lower < upper)
636
- {
637
- int mid = lower + (upper - lower) / 2;
638
-
639
- if (arr[mid] < value)
640
- {
641
- lower = mid + 1;
642
- }
643
- else
644
- {
645
- upper = mid;
646
- }
647
- }
648
-
649
- return lower;
650
- }
651
-
652
- template <typename T>
653
- CUDA_CALLABLE inline int lower_bound(const array_t<T>& arr, T value)
654
- {
655
- return lower_bound(arr, 0, arr.shape[0], value);
656
- }
657
-
658
- template <typename T> inline CUDA_CALLABLE void adj_lower_bound(const array_t<T>& arr, T value, array_t<T> adj_arr, T adj_value, int adj_ret) {}
659
- template <typename T> inline CUDA_CALLABLE void adj_lower_bound(const array_t<T>& arr, int arr_begin, int arr_end, T value, array_t<T> adj_arr, int adj_arr_begin, int adj_arr_end, T adj_value, int adj_ret) {}
660
-
661
- template<template<typename> class A, typename T>
662
- inline CUDA_CALLABLE T atomic_add(const A<T>& buf, int i, T value) { return atomic_add(&index(buf, i), value); }
663
- template<template<typename> class A, typename T>
664
- inline CUDA_CALLABLE T atomic_add(const A<T>& buf, int i, int j, T value) { return atomic_add(&index(buf, i, j), value); }
665
- template<template<typename> class A, typename T>
666
- inline CUDA_CALLABLE T atomic_add(const A<T>& buf, int i, int j, int k, T value) { return atomic_add(&index(buf, i, j, k), value); }
667
- template<template<typename> class A, typename T>
668
- inline CUDA_CALLABLE T atomic_add(const A<T>& buf, int i, int j, int k, int l, T value) { return atomic_add(&index(buf, i, j, k, l), value); }
669
-
670
- template<template<typename> class A, typename T>
671
- inline CUDA_CALLABLE T atomic_sub(const A<T>& buf, int i, T value) { return atomic_add(&index(buf, i), -value); }
672
- template<template<typename> class A, typename T>
673
- inline CUDA_CALLABLE T atomic_sub(const A<T>& buf, int i, int j, T value) { return atomic_add(&index(buf, i, j), -value); }
674
- template<template<typename> class A, typename T>
675
- inline CUDA_CALLABLE T atomic_sub(const A<T>& buf, int i, int j, int k, T value) { return atomic_add(&index(buf, i, j, k), -value); }
676
- template<template<typename> class A, typename T>
677
- inline CUDA_CALLABLE T atomic_sub(const A<T>& buf, int i, int j, int k, int l, T value) { return atomic_add(&index(buf, i, j, k, l), -value); }
678
-
679
- template<template<typename> class A, typename T>
680
- inline CUDA_CALLABLE T atomic_min(const A<T>& buf, int i, T value) { return atomic_min(&index(buf, i), value); }
681
- template<template<typename> class A, typename T>
682
- inline CUDA_CALLABLE T atomic_min(const A<T>& buf, int i, int j, T value) { return atomic_min(&index(buf, i, j), value); }
683
- template<template<typename> class A, typename T>
684
- inline CUDA_CALLABLE T atomic_min(const A<T>& buf, int i, int j, int k, T value) { return atomic_min(&index(buf, i, j, k), value); }
685
- template<template<typename> class A, typename T>
686
- inline CUDA_CALLABLE T atomic_min(const A<T>& buf, int i, int j, int k, int l, T value) { return atomic_min(&index(buf, i, j, k, l), value); }
687
-
688
- template<template<typename> class A, typename T>
689
- inline CUDA_CALLABLE T atomic_max(const A<T>& buf, int i, T value) { return atomic_max(&index(buf, i), value); }
690
- template<template<typename> class A, typename T>
691
- inline CUDA_CALLABLE T atomic_max(const A<T>& buf, int i, int j, T value) { return atomic_max(&index(buf, i, j), value); }
692
- template<template<typename> class A, typename T>
693
- inline CUDA_CALLABLE T atomic_max(const A<T>& buf, int i, int j, int k, T value) { return atomic_max(&index(buf, i, j, k), value); }
694
- template<template<typename> class A, typename T>
695
- inline CUDA_CALLABLE T atomic_max(const A<T>& buf, int i, int j, int k, int l, T value) { return atomic_max(&index(buf, i, j, k, l), value); }
696
-
697
- template<template<typename> class A, typename T>
698
- inline CUDA_CALLABLE T* address(const A<T>& buf, int i) { return &index(buf, i); }
699
- template<template<typename> class A, typename T>
700
- inline CUDA_CALLABLE T* address(const A<T>& buf, int i, int j) { return &index(buf, i, j); }
701
- template<template<typename> class A, typename T>
702
- inline CUDA_CALLABLE T* address(const A<T>& buf, int i, int j, int k) { return &index(buf, i, j, k); }
703
- template<template<typename> class A, typename T>
704
- inline CUDA_CALLABLE T* address(const A<T>& buf, int i, int j, int k, int l) { return &index(buf, i, j, k, l); }
705
-
706
- template<template<typename> class A, typename T>
707
- inline CUDA_CALLABLE void array_store(const A<T>& buf, int i, T value)
708
- {
709
- FP_VERIFY_FWD_1(value)
710
-
711
- index(buf, i) = value;
712
- }
713
- template<template<typename> class A, typename T>
714
- inline CUDA_CALLABLE void array_store(const A<T>& buf, int i, int j, T value)
715
- {
716
- FP_VERIFY_FWD_2(value)
717
-
718
- index(buf, i, j) = value;
719
- }
720
- template<template<typename> class A, typename T>
721
- inline CUDA_CALLABLE void array_store(const A<T>& buf, int i, int j, int k, T value)
722
- {
723
- FP_VERIFY_FWD_3(value)
724
-
725
- index(buf, i, j, k) = value;
726
- }
727
- template<template<typename> class A, typename T>
728
- inline CUDA_CALLABLE void array_store(const A<T>& buf, int i, int j, int k, int l, T value)
729
- {
730
- FP_VERIFY_FWD_4(value)
731
-
732
- index(buf, i, j, k, l) = value;
733
- }
734
-
735
- template<typename T>
736
- inline CUDA_CALLABLE void store(T* address, T value)
737
- {
738
- FP_VERIFY_FWD(value)
739
-
740
- *address = value;
741
- }
742
-
743
- template<typename T>
744
- inline CUDA_CALLABLE T load(T* address)
745
- {
746
- T value = *address;
747
- FP_VERIFY_FWD(value)
748
-
749
- return value;
750
- }
751
-
752
- // select operator to check for array being null
753
- template <typename T1, typename T2>
754
- CUDA_CALLABLE inline T2 select(const array_t<T1>& arr, const T2& a, const T2& b) { return arr.data?b:a; }
755
-
756
- template <typename T1, typename T2>
757
- CUDA_CALLABLE inline void adj_select(const array_t<T1>& arr, const T2& a, const T2& b, const array_t<T1>& adj_cond, T2& adj_a, T2& adj_b, const T2& adj_ret)
758
- {
759
- if (arr.data)
760
- adj_b += adj_ret;
761
- else
762
- adj_a += adj_ret;
763
- }
764
-
765
- // stub for the case where we have an nested array inside a struct and
766
- // atomic add the whole struct onto an array (e.g.: during backwards pass)
767
- template <typename T>
768
- CUDA_CALLABLE inline void atomic_add(array_t<T>*, array_t<T>) {}
769
-
770
- // for float and vector types this is just an alias for an atomic add
771
- template <typename T>
772
- CUDA_CALLABLE inline void adj_atomic_add(T* buf, T value) { atomic_add(buf, value); }
773
-
774
-
775
- // for integral types we do not accumulate gradients
776
- CUDA_CALLABLE inline void adj_atomic_add(int8* buf, int8 value) { }
777
- CUDA_CALLABLE inline void adj_atomic_add(uint8* buf, uint8 value) { }
778
- CUDA_CALLABLE inline void adj_atomic_add(int16* buf, int16 value) { }
779
- CUDA_CALLABLE inline void adj_atomic_add(uint16* buf, uint16 value) { }
780
- CUDA_CALLABLE inline void adj_atomic_add(int32* buf, int32 value) { }
781
- CUDA_CALLABLE inline void adj_atomic_add(uint32* buf, uint32 value) { }
782
- CUDA_CALLABLE inline void adj_atomic_add(int64* buf, int64 value) { }
783
- CUDA_CALLABLE inline void adj_atomic_add(uint64* buf, uint64 value) { }
784
-
785
- CUDA_CALLABLE inline void adj_atomic_add(bool* buf, bool value) { }
786
-
787
- // only generate gradients for T types
788
- template<typename T>
789
- inline CUDA_CALLABLE void adj_address(const array_t<T>& buf, int i, const array_t<T>& adj_buf, int& adj_i, const T& adj_output)
790
- {
791
- if (buf.grad)
792
- adj_atomic_add(&index_grad(buf, i), adj_output);
793
- }
794
- template<typename T>
795
- inline CUDA_CALLABLE void adj_address(const array_t<T>& buf, int i, int j, const array_t<T>& adj_buf, int& adj_i, int& adj_j, const T& adj_output)
796
- {
797
- if (buf.grad)
798
- adj_atomic_add(&index_grad(buf, i, j), adj_output);
799
- }
800
- template<typename T>
801
- inline CUDA_CALLABLE void adj_address(const array_t<T>& buf, int i, int j, int k, const array_t<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, const T& adj_output)
802
- {
803
- if (buf.grad)
804
- adj_atomic_add(&index_grad(buf, i, j, k), adj_output);
805
- }
806
- template<typename T>
807
- inline CUDA_CALLABLE void adj_address(const array_t<T>& buf, int i, int j, int k, int l, const array_t<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, int& adj_l, const T& adj_output)
808
- {
809
- if (buf.grad)
810
- adj_atomic_add(&index_grad(buf, i, j, k, l), adj_output);
811
- }
812
-
813
- template<typename T>
814
- inline CUDA_CALLABLE void adj_array_store(const array_t<T>& buf, int i, T value, const array_t<T>& adj_buf, int& adj_i, T& adj_value)
815
- {
816
- if (buf.grad)
817
- adj_value += index_grad(buf, i);
818
-
819
- FP_VERIFY_ADJ_1(value, adj_value)
820
- }
821
- template<typename T>
822
- inline CUDA_CALLABLE void adj_array_store(const array_t<T>& buf, int i, int j, T value, const array_t<T>& adj_buf, int& adj_i, int& adj_j, T& adj_value)
823
- {
824
- if (buf.grad)
825
- adj_value += index_grad(buf, i, j);
826
-
827
- FP_VERIFY_ADJ_2(value, adj_value)
828
-
829
- }
830
- template<typename T>
831
- inline CUDA_CALLABLE void adj_array_store(const array_t<T>& buf, int i, int j, int k, T value, const array_t<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, T& adj_value)
832
- {
833
- if (buf.grad)
834
- adj_value += index_grad(buf, i, j, k);
835
-
836
- FP_VERIFY_ADJ_3(value, adj_value)
837
- }
838
- template<typename T>
839
- inline CUDA_CALLABLE void adj_array_store(const array_t<T>& buf, int i, int j, int k, int l, T value, const array_t<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, int& adj_l, T& adj_value)
840
- {
841
- if (buf.grad)
842
- adj_value += index_grad(buf, i, j, k, l);
843
-
844
- FP_VERIFY_ADJ_4(value, adj_value)
845
- }
846
-
847
- template<typename T>
848
- inline CUDA_CALLABLE void adj_store(const T* address, T value, const T& adj_address, T& adj_value)
849
- {
850
- // nop; generic store() operations are not differentiable, only array_store() is
851
- FP_VERIFY_ADJ(value, adj_value)
852
- }
853
-
854
- template<typename T>
855
- inline CUDA_CALLABLE void adj_load(const T* address, const T& adj_address, T& adj_value)
856
- {
857
- // nop; generic load() operations are not differentiable
858
- }
859
-
860
- template<typename T>
861
- inline CUDA_CALLABLE void adj_atomic_add(const array_t<T>& buf, int i, T value, const array_t<T>& adj_buf, int& adj_i, T& adj_value, const T& adj_ret)
862
- {
863
- if (buf.grad)
864
- adj_value += index_grad(buf, i);
865
-
866
- FP_VERIFY_ADJ_1(value, adj_value)
867
- }
868
- template<typename T>
869
- inline CUDA_CALLABLE void adj_atomic_add(const array_t<T>& buf, int i, int j, T value, const array_t<T>& adj_buf, int& adj_i, int& adj_j, T& adj_value, const T& adj_ret)
870
- {
871
- if (buf.grad)
872
- adj_value += index_grad(buf, i, j);
873
-
874
- FP_VERIFY_ADJ_2(value, adj_value)
875
- }
876
- template<typename T>
877
- inline CUDA_CALLABLE void adj_atomic_add(const array_t<T>& buf, int i, int j, int k, T value, const array_t<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, T& adj_value, const T& adj_ret)
878
- {
879
- if (buf.grad)
880
- adj_value += index_grad(buf, i, j, k);
881
-
882
- FP_VERIFY_ADJ_3(value, adj_value)
883
- }
884
- template<typename T>
885
- inline CUDA_CALLABLE void adj_atomic_add(const array_t<T>& buf, int i, int j, int k, int l, T value, const array_t<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, int& adj_l, T& adj_value, const T& adj_ret)
886
- {
887
- if (buf.grad)
888
- adj_value += index_grad(buf, i, j, k, l);
889
-
890
- FP_VERIFY_ADJ_4(value, adj_value)
891
- }
892
-
893
-
894
- template<typename T>
895
- inline CUDA_CALLABLE void adj_atomic_sub(const array_t<T>& buf, int i, T value, const array_t<T>& adj_buf, int& adj_i, T& adj_value, const T& adj_ret)
896
- {
897
- if (buf.grad)
898
- adj_value -= index_grad(buf, i);
899
-
900
- FP_VERIFY_ADJ_1(value, adj_value)
901
- }
902
- template<typename T>
903
- inline CUDA_CALLABLE void adj_atomic_sub(const array_t<T>& buf, int i, int j, T value, const array_t<T>& adj_buf, int& adj_i, int& adj_j, T& adj_value, const T& adj_ret)
904
- {
905
- if (buf.grad)
906
- adj_value -= index_grad(buf, i, j);
907
-
908
- FP_VERIFY_ADJ_2(value, adj_value)
909
- }
910
- template<typename T>
911
- inline CUDA_CALLABLE void adj_atomic_sub(const array_t<T>& buf, int i, int j, int k, T value, const array_t<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, T& adj_value, const T& adj_ret)
912
- {
913
- if (buf.grad)
914
- adj_value -= index_grad(buf, i, j, k);
915
-
916
- FP_VERIFY_ADJ_3(value, adj_value)
917
- }
918
- template<typename T>
919
- inline CUDA_CALLABLE void adj_atomic_sub(const array_t<T>& buf, int i, int j, int k, int l, T value, const array_t<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, int& adj_l, T& adj_value, const T& adj_ret)
920
- {
921
- if (buf.grad)
922
- adj_value -= index_grad(buf, i, j, k, l);
923
-
924
- FP_VERIFY_ADJ_4(value, adj_value)
925
- }
926
-
927
- // generic array types that do not support gradient computation (indexedarray, etc.)
928
- template<template<typename> class A1, template<typename> class A2, typename T>
929
- inline CUDA_CALLABLE void adj_address(const A1<T>& buf, int i, const A2<T>& adj_buf, int& adj_i, const T& adj_output) {}
930
- template<template<typename> class A1, template<typename> class A2, typename T>
931
- inline CUDA_CALLABLE void adj_address(const A1<T>& buf, int i, int j, const A2<T>& adj_buf, int& adj_i, int& adj_j, const T& adj_output) {}
932
- template<template<typename> class A1, template<typename> class A2, typename T>
933
- inline CUDA_CALLABLE void adj_address(const A1<T>& buf, int i, int j, int k, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, const T& adj_output) {}
934
- template<template<typename> class A1, template<typename> class A2, typename T>
935
- inline CUDA_CALLABLE void adj_address(const A1<T>& buf, int i, int j, int k, int l, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, int& adj_l, const T& adj_output) {}
936
-
937
- template<template<typename> class A1, template<typename> class A2, typename T>
938
- inline CUDA_CALLABLE void adj_array_store(const A1<T>& buf, int i, T value, const A2<T>& adj_buf, int& adj_i, T& adj_value) {}
939
- template<template<typename> class A1, template<typename> class A2, typename T>
940
- inline CUDA_CALLABLE void adj_array_store(const A1<T>& buf, int i, int j, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, T& adj_value) {}
941
- template<template<typename> class A1, template<typename> class A2, typename T>
942
- inline CUDA_CALLABLE void adj_array_store(const A1<T>& buf, int i, int j, int k, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, T& adj_value) {}
943
- template<template<typename> class A1, template<typename> class A2, typename T>
944
- inline CUDA_CALLABLE void adj_array_store(const A1<T>& buf, int i, int j, int k, int l, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, int& adj_l, T& adj_value) {}
945
-
946
- template<template<typename> class A1, template<typename> class A2, typename T>
947
- inline CUDA_CALLABLE void adj_atomic_add(const A1<T>& buf, int i, T value, const A2<T>& adj_buf, int& adj_i, T& adj_value, const T& adj_ret) {}
948
- template<template<typename> class A1, template<typename> class A2, typename T>
949
- inline CUDA_CALLABLE void adj_atomic_add(const A1<T>& buf, int i, int j, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, T& adj_value, const T& adj_ret) {}
950
- template<template<typename> class A1, template<typename> class A2, typename T>
951
- inline CUDA_CALLABLE void adj_atomic_add(const A1<T>& buf, int i, int j, int k, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, T& adj_value, const T& adj_ret) {}
952
- template<template<typename> class A1, template<typename> class A2, typename T>
953
- inline CUDA_CALLABLE void adj_atomic_add(const A1<T>& buf, int i, int j, int k, int l, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, int& adj_l, T& adj_value, const T& adj_ret) {}
954
-
955
- template<template<typename> class A1, template<typename> class A2, typename T>
956
- inline CUDA_CALLABLE void adj_atomic_sub(const A1<T>& buf, int i, T value, const A2<T>& adj_buf, int& adj_i, T& adj_value, const T& adj_ret) {}
957
- template<template<typename> class A1, template<typename> class A2, typename T>
958
- inline CUDA_CALLABLE void adj_atomic_sub(const A1<T>& buf, int i, int j, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, T& adj_value, const T& adj_ret) {}
959
- template<template<typename> class A1, template<typename> class A2, typename T>
960
- inline CUDA_CALLABLE void adj_atomic_sub(const A1<T>& buf, int i, int j, int k, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, T& adj_value, const T& adj_ret) {}
961
- template<template<typename> class A1, template<typename> class A2, typename T>
962
- inline CUDA_CALLABLE void adj_atomic_sub(const A1<T>& buf, int i, int j, int k, int l, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, int& adj_l, T& adj_value, const T& adj_ret) {}
963
-
964
- // generic handler for scalar values
965
- template<template<typename> class A1, template<typename> class A2, typename T>
966
- inline CUDA_CALLABLE void adj_atomic_min(const A1<T>& buf, int i, T value, const A2<T>& adj_buf, int& adj_i, T& adj_value, const T& adj_ret) {
967
- if (buf.grad)
968
- adj_atomic_minmax(&index(buf, i), &index_grad(buf, i), value, adj_value);
969
-
970
- FP_VERIFY_ADJ_1(value, adj_value)
971
- }
972
- template<template<typename> class A1, template<typename> class A2, typename T>
973
- inline CUDA_CALLABLE void adj_atomic_min(const A1<T>& buf, int i, int j, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, T& adj_value, const T& adj_ret) {
974
- if (buf.grad)
975
- adj_atomic_minmax(&index(buf, i, j), &index_grad(buf, i, j), value, adj_value);
976
-
977
- FP_VERIFY_ADJ_2(value, adj_value)
978
- }
979
- template<template<typename> class A1, template<typename> class A2, typename T>
980
- inline CUDA_CALLABLE void adj_atomic_min(const A1<T>& buf, int i, int j, int k, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, T& adj_value, const T& adj_ret) {
981
- if (buf.grad)
982
- adj_atomic_minmax(&index(buf, i, j, k), &index_grad(buf, i, j, k), value, adj_value);
983
-
984
- FP_VERIFY_ADJ_3(value, adj_value)
985
- }
986
- template<template<typename> class A1, template<typename> class A2, typename T>
987
- inline CUDA_CALLABLE void adj_atomic_min(const A1<T>& buf, int i, int j, int k, int l, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, int& adj_l, T& adj_value, const T& adj_ret) {
988
- if (buf.grad)
989
- adj_atomic_minmax(&index(buf, i, j, k, l), &index_grad(buf, i, j, k, l), value, adj_value);
990
-
991
- FP_VERIFY_ADJ_4(value, adj_value)
992
- }
993
-
994
- template<template<typename> class A1, template<typename> class A2, typename T>
995
- inline CUDA_CALLABLE void adj_atomic_max(const A1<T>& buf, int i, T value, const A2<T>& adj_buf, int& adj_i, T& adj_value, const T& adj_ret) {
996
- if (buf.grad)
997
- adj_atomic_minmax(&index(buf, i), &index_grad(buf, i), value, adj_value);
998
-
999
- FP_VERIFY_ADJ_1(value, adj_value)
1000
- }
1001
- template<template<typename> class A1, template<typename> class A2, typename T>
1002
- inline CUDA_CALLABLE void adj_atomic_max(const A1<T>& buf, int i, int j, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, T& adj_value, const T& adj_ret) {
1003
- if (buf.grad)
1004
- adj_atomic_minmax(&index(buf, i, j), &index_grad(buf, i, j), value, adj_value);
1005
-
1006
- FP_VERIFY_ADJ_2(value, adj_value)
1007
- }
1008
- template<template<typename> class A1, template<typename> class A2, typename T>
1009
- inline CUDA_CALLABLE void adj_atomic_max(const A1<T>& buf, int i, int j, int k, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, T& adj_value, const T& adj_ret) {
1010
- if (buf.grad)
1011
- adj_atomic_minmax(&index(buf, i, j, k), &index_grad(buf, i, j, k), value, adj_value);
1012
-
1013
- FP_VERIFY_ADJ_3(value, adj_value)
1014
- }
1015
- template<template<typename> class A1, template<typename> class A2, typename T>
1016
- inline CUDA_CALLABLE void adj_atomic_max(const A1<T>& buf, int i, int j, int k, int l, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, int& adj_l, T& adj_value, const T& adj_ret) {
1017
- if (buf.grad)
1018
- adj_atomic_minmax(&index(buf, i, j, k, l), &index_grad(buf, i, j, k, l), value, adj_value);
1019
-
1020
- FP_VERIFY_ADJ_4(value, adj_value)
1021
- }
1022
-
1023
- } // namespace wp
1024
-
1025
- #include "fabric.h"
1
+ #pragma once
2
+
3
+ #include "builtin.h"
4
+
5
+ namespace wp
6
+ {
7
+
8
+ #if FP_CHECK
9
+
10
+ #define FP_ASSERT_FWD(value) \
11
+ print(value); \
12
+ printf(")\n"); \
13
+ assert(0); \
14
+
15
+ #define FP_ASSERT_ADJ(value, adj_value) \
16
+ print(value); \
17
+ printf(", "); \
18
+ print(adj_value); \
19
+ printf(")\n"); \
20
+ assert(0); \
21
+
22
+ #define FP_VERIFY_FWD(value) \
23
+ if (!isfinite(value)) { \
24
+ printf("%s:%d - %s(addr", __FILE__, __LINE__, __FUNCTION__); \
25
+ FP_ASSERT_FWD(value) \
26
+ } \
27
+
28
+ #define FP_VERIFY_FWD_1(value) \
29
+ if (!isfinite(value)) { \
30
+ printf("%s:%d - %s(arr, %d) ", __FILE__, __LINE__, __FUNCTION__, i); \
31
+ FP_ASSERT_FWD(value) \
32
+ } \
33
+
34
+ #define FP_VERIFY_FWD_2(value) \
35
+ if (!isfinite(value)) { \
36
+ printf("%s:%d - %s(arr, %d, %d) ", __FILE__, __LINE__, __FUNCTION__, i, j); \
37
+ FP_ASSERT_FWD(value) \
38
+ } \
39
+
40
+ #define FP_VERIFY_FWD_3(value) \
41
+ if (!isfinite(value)) { \
42
+ printf("%s:%d - %s(arr, %d, %d, %d) ", __FILE__, __LINE__, __FUNCTION__, i, j, k); \
43
+ FP_ASSERT_FWD(value) \
44
+ } \
45
+
46
+ #define FP_VERIFY_FWD_4(value) \
47
+ if (!isfinite(value)) { \
48
+ printf("%s:%d - %s(arr, %d, %d, %d, %d) ", __FILE__, __LINE__, __FUNCTION__, i, j, k, l); \
49
+ FP_ASSERT_FWD(value) \
50
+ } \
51
+
52
+ #define FP_VERIFY_ADJ(value, adj_value) \
53
+ if (!isfinite(value) || !isfinite(adj_value)) \
54
+ { \
55
+ printf("%s:%d - %s(addr", __FILE__, __LINE__, __FUNCTION__); \
56
+ FP_ASSERT_ADJ(value, adj_value); \
57
+ } \
58
+
59
+ #define FP_VERIFY_ADJ_1(value, adj_value) \
60
+ if (!isfinite(value) || !isfinite(adj_value)) \
61
+ { \
62
+ printf("%s:%d - %s(arr, %d) ", __FILE__, __LINE__, __FUNCTION__, i); \
63
+ FP_ASSERT_ADJ(value, adj_value); \
64
+ } \
65
+
66
+ #define FP_VERIFY_ADJ_2(value, adj_value) \
67
+ if (!isfinite(value) || !isfinite(adj_value)) \
68
+ { \
69
+ printf("%s:%d - %s(arr, %d, %d) ", __FILE__, __LINE__, __FUNCTION__, i, j); \
70
+ FP_ASSERT_ADJ(value, adj_value); \
71
+ } \
72
+
73
+ #define FP_VERIFY_ADJ_3(value, adj_value) \
74
+ if (!isfinite(value) || !isfinite(adj_value)) \
75
+ { \
76
+ printf("%s:%d - %s(arr, %d, %d, %d) ", __FILE__, __LINE__, __FUNCTION__, i, j, k); \
77
+ FP_ASSERT_ADJ(value, adj_value); \
78
+ } \
79
+
80
+ #define FP_VERIFY_ADJ_4(value, adj_value) \
81
+ if (!isfinite(value) || !isfinite(adj_value)) \
82
+ { \
83
+ printf("%s:%d - %s(arr, %d, %d, %d, %d) ", __FILE__, __LINE__, __FUNCTION__, i, j, k, l); \
84
+ FP_ASSERT_ADJ(value, adj_value); \
85
+ } \
86
+
87
+
88
+ #else
89
+
90
+ #define FP_VERIFY_FWD(value) {}
91
+ #define FP_VERIFY_FWD_1(value) {}
92
+ #define FP_VERIFY_FWD_2(value) {}
93
+ #define FP_VERIFY_FWD_3(value) {}
94
+ #define FP_VERIFY_FWD_4(value) {}
95
+
96
+ #define FP_VERIFY_ADJ(value, adj_value) {}
97
+ #define FP_VERIFY_ADJ_1(value, adj_value) {}
98
+ #define FP_VERIFY_ADJ_2(value, adj_value) {}
99
+ #define FP_VERIFY_ADJ_3(value, adj_value) {}
100
+ #define FP_VERIFY_ADJ_4(value, adj_value) {}
101
+
102
+ #endif // FP_CHECK
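
For orientation only (this is not part of the package sources): when FP_CHECK is enabled, every checked load/store goes through one of the FP_VERIFY_* macros above, which print the call site and the offending indices for a non-finite value and then assert. A minimal standalone C++ sketch of the same pattern, using hypothetical names rather than warp's:

    // Illustrative sketch of the FP_VERIFY_FWD_1 pattern above; the macro
    // and function names here are hypothetical, not warp's.
    #include <cassert>
    #include <cmath>
    #include <cstdio>

    #define SKETCH_FP_VERIFY_1(value, i)                                   \
        if (!std::isfinite(value)) {                                       \
            printf("%s:%d - %s(arr, %d) (%f)\n",                           \
                   __FILE__, __LINE__, __FUNCTION__, (i), double(value));  \
            assert(0);                                                     \
        }

    float checked_read(const float* arr, int i)
    {
        float v = arr[i];
        SKETCH_FP_VERIFY_1(v, i)   // aborts with a diagnostic if arr[i] is NaN or +/-inf
        return v;
    }
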
103
+
104
+ const int ARRAY_MAX_DIMS = 4; // must match constant in types.py
105
+
106
+ // must match constants in types.py
107
+ const int ARRAY_TYPE_REGULAR = 0;
108
+ const int ARRAY_TYPE_INDEXED = 1;
109
+ const int ARRAY_TYPE_FABRIC = 2;
110
+ const int ARRAY_TYPE_FABRIC_INDEXED = 3;
111
+
112
+ struct shape_t
113
+ {
114
+ int dims[ARRAY_MAX_DIMS];
115
+
116
+ CUDA_CALLABLE inline shape_t()
117
+ : dims()
118
+ {}
119
+
120
+ CUDA_CALLABLE inline int operator[](int i) const
121
+ {
122
+ assert(i < ARRAY_MAX_DIMS);
123
+ return dims[i];
124
+ }
125
+
126
+ CUDA_CALLABLE inline int& operator[](int i)
127
+ {
128
+ assert(i < ARRAY_MAX_DIMS);
129
+ return dims[i];
130
+ }
131
+ };
132
+
133
+ CUDA_CALLABLE inline int extract(const shape_t& s, int i)
134
+ {
135
+ return s.dims[i];
136
+ }
137
+
138
+ CUDA_CALLABLE inline void adj_extract(const shape_t& s, int i, const shape_t& adj_s, int adj_i, int adj_ret) {}
139
+
140
+ inline CUDA_CALLABLE void print(shape_t s)
141
+ {
142
+ // todo: only print valid dims, currently shape has a fixed size
143
+ // but we don't know how many dims are valid (e.g.: 1d, 2d, etc)
144
+ // should probably store ndim with shape
145
+ printf("(%d, %d, %d, %d)\n", s.dims[0], s.dims[1], s.dims[2], s.dims[3]);
146
+ }
147
+ inline CUDA_CALLABLE void adj_print(shape_t s, shape_t& adj_s) {}
148
+
149
+
150
+ template <typename T>
151
+ struct array_t
152
+ {
153
+ CUDA_CALLABLE inline array_t()
154
+ : data(nullptr),
155
+ grad(nullptr),
156
+ shape(),
157
+ strides(),
158
+ ndim(0)
159
+ {}
160
+
161
+ CUDA_CALLABLE array_t(T* data, int size, T* grad=nullptr) : data(data), grad(grad) {
162
+ // constructor for 1d array
163
+ shape.dims[0] = size;
164
+ shape.dims[1] = 0;
165
+ shape.dims[2] = 0;
166
+ shape.dims[3] = 0;
167
+ ndim = 1;
168
+ strides[0] = sizeof(T);
169
+ strides[1] = 0;
170
+ strides[2] = 0;
171
+ strides[3] = 0;
172
+ }
173
+ CUDA_CALLABLE array_t(T* data, int dim0, int dim1, T* grad=nullptr) : data(data), grad(grad) {
174
+ // constructor for 2d array
175
+ shape.dims[0] = dim0;
176
+ shape.dims[1] = dim1;
177
+ shape.dims[2] = 0;
178
+ shape.dims[3] = 0;
179
+ ndim = 2;
180
+ strides[0] = dim1 * sizeof(T);
181
+ strides[1] = sizeof(T);
182
+ strides[2] = 0;
183
+ strides[3] = 0;
184
+ }
185
+ CUDA_CALLABLE array_t(T* data, int dim0, int dim1, int dim2, T* grad=nullptr) : data(data), grad(grad) {
186
+ // constructor for 3d array
187
+ shape.dims[0] = dim0;
188
+ shape.dims[1] = dim1;
189
+ shape.dims[2] = dim2;
190
+ shape.dims[3] = 0;
191
+ ndim = 3;
192
+ strides[0] = dim1 * dim2 * sizeof(T);
193
+ strides[1] = dim2 * sizeof(T);
194
+ strides[2] = sizeof(T);
195
+ strides[3] = 0;
196
+ }
197
+ CUDA_CALLABLE array_t(T* data, int dim0, int dim1, int dim2, int dim3, T* grad=nullptr) : data(data), grad(grad) {
198
+ // constructor for 4d array
199
+ shape.dims[0] = dim0;
200
+ shape.dims[1] = dim1;
201
+ shape.dims[2] = dim2;
202
+ shape.dims[3] = dim3;
203
+ ndim = 4;
204
+ strides[0] = dim1 * dim2 * dim3 * sizeof(T);
205
+ strides[1] = dim2 * dim3 * sizeof(T);
206
+ strides[2] = dim3 * sizeof(T);
207
+ strides[3] = sizeof(T);
208
+ }
209
+
210
+ CUDA_CALLABLE inline bool empty() const { return !data; }
211
+
212
+ T* data;
213
+ T* grad;
214
+ shape_t shape;
215
+ int strides[ARRAY_MAX_DIMS];
216
+ int ndim;
217
+
218
+ CUDA_CALLABLE inline operator T*() const { return data; }
219
+ };
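
As a quick check on the stride convention used by these constructors (row-major layout, strides stored in bytes), here is a small standalone sketch, illustrative only, that reproduces the 2-d offset arithmetic:

    // Illustrative sketch: byte offset of element (i, j) in a dim0 x dim1
    // row-major array of T, matching strides[0] = dim1*sizeof(T) and
    // strides[1] = sizeof(T) as set up by the 2-d array_t constructor.
    #include <cstddef>
    #include <cstdio>

    template <typename T>
    size_t offset_2d(int dim1, int i, int j)
    {
        size_t stride0 = size_t(dim1) * sizeof(T);
        size_t stride1 = sizeof(T);
        return size_t(i) * stride0 + size_t(j) * stride1;
    }

    int main()
    {
        // element (2, 3) of a 4 x 5 float array starts 2*20 + 3*4 = 52 bytes in
        printf("%zu\n", offset_2d<float>(5, 2, 3));   // prints 52
        return 0;
    }
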
220
+
221
+
222
+ // TODO:
223
+ // - templated index type?
224
+ // - templated dimensionality? (also for array_t to save space when passing arrays to kernels)
225
+ template <typename T>
226
+ struct indexedarray_t
227
+ {
228
+ CUDA_CALLABLE inline indexedarray_t()
229
+ : arr(),
230
+ indices(),
231
+ shape()
232
+ {}
233
+
234
+ CUDA_CALLABLE inline bool empty() const { return !arr.data; }
235
+
236
+ array_t<T> arr;
237
+ int* indices[ARRAY_MAX_DIMS]; // index array per dimension (can be NULL)
238
+ shape_t shape; // element count per dimension (num. indices if indexed, array dim if not)
239
+ };
240
+
241
+
242
+ // return stride (in bytes) of the given dimension
243
+ template <typename T>
244
+ CUDA_CALLABLE inline size_t stride(const array_t<T>& a, int dim)
245
+ {
246
+ return size_t(a.strides[dim]);
247
+ }
248
+
249
+ template <typename T>
250
+ CUDA_CALLABLE inline T* data_at_byte_offset(const array_t<T>& a, size_t byte_offset)
251
+ {
252
+ return reinterpret_cast<T*>(reinterpret_cast<char*>(a.data) + byte_offset);
253
+ }
254
+
255
+ template <typename T>
256
+ CUDA_CALLABLE inline T* grad_at_byte_offset(const array_t<T>& a, size_t byte_offset)
257
+ {
258
+ return reinterpret_cast<T*>(reinterpret_cast<char*>(a.grad) + byte_offset);
259
+ }
260
+
261
+ template <typename T>
262
+ CUDA_CALLABLE inline size_t byte_offset(const array_t<T>& arr, int i)
263
+ {
264
+ assert(i >= 0 && i < arr.shape[0]);
265
+
266
+ return i*stride(arr, 0);
267
+ }
268
+
269
+ template <typename T>
270
+ CUDA_CALLABLE inline size_t byte_offset(const array_t<T>& arr, int i, int j)
271
+ {
272
+ assert(i >= 0 && i < arr.shape[0]);
273
+ assert(j >= 0 && j < arr.shape[1]);
274
+
275
+ return i*stride(arr, 0) + j*stride(arr, 1);
276
+ }
277
+
278
+ template <typename T>
279
+ CUDA_CALLABLE inline size_t byte_offset(const array_t<T>& arr, int i, int j, int k)
280
+ {
281
+ assert(i >= 0 && i < arr.shape[0]);
282
+ assert(j >= 0 && j < arr.shape[1]);
283
+ assert(k >= 0 && k < arr.shape[2]);
284
+
285
+ return i*stride(arr, 0) + j*stride(arr, 1) + k*stride(arr, 2);
286
+ }
287
+
288
+ template <typename T>
289
+ CUDA_CALLABLE inline size_t byte_offset(const array_t<T>& arr, int i, int j, int k, int l)
290
+ {
291
+ assert(i >= 0 && i < arr.shape[0]);
292
+ assert(j >= 0 && j < arr.shape[1]);
293
+ assert(k >= 0 && k < arr.shape[2]);
294
+ assert(l >= 0 && l < arr.shape[3]);
295
+
296
+ return i*stride(arr, 0) + j*stride(arr, 1) + k*stride(arr, 2) + l*stride(arr, 3);
297
+ }
298
+
299
+ template <typename T>
300
+ CUDA_CALLABLE inline T& index(const array_t<T>& arr, int i)
301
+ {
302
+ assert(arr.ndim == 1);
303
+ T& result = *data_at_byte_offset(arr, byte_offset(arr, i));
304
+ FP_VERIFY_FWD_1(result)
305
+
306
+ return result;
307
+ }
308
+
309
+ template <typename T>
310
+ CUDA_CALLABLE inline T& index(const array_t<T>& arr, int i, int j)
311
+ {
312
+ assert(arr.ndim == 2);
313
+ T& result = *data_at_byte_offset(arr, byte_offset(arr, i, j));
314
+ FP_VERIFY_FWD_2(result)
315
+
316
+ return result;
317
+ }
318
+
319
+ template <typename T>
320
+ CUDA_CALLABLE inline T& index(const array_t<T>& arr, int i, int j, int k)
321
+ {
322
+ assert(arr.ndim == 3);
323
+ T& result = *data_at_byte_offset(arr, byte_offset(arr, i, j, k));
324
+ FP_VERIFY_FWD_3(result)
325
+
326
+ return result;
327
+ }
328
+
329
+ template <typename T>
330
+ CUDA_CALLABLE inline T& index(const array_t<T>& arr, int i, int j, int k, int l)
331
+ {
332
+ assert(arr.ndim == 4);
333
+ T& result = *data_at_byte_offset(arr, byte_offset(arr, i, j, k, l));
334
+ FP_VERIFY_FWD_4(result)
335
+
336
+ return result;
337
+ }
338
+
339
+ template <typename T>
340
+ CUDA_CALLABLE inline T& index_grad(const array_t<T>& arr, int i)
341
+ {
342
+ T& result = *grad_at_byte_offset(arr, byte_offset(arr, i));
343
+ FP_VERIFY_FWD_1(result)
344
+
345
+ return result;
346
+ }
347
+
348
+ template <typename T>
349
+ CUDA_CALLABLE inline T& index_grad(const array_t<T>& arr, int i, int j)
350
+ {
351
+ T& result = *grad_at_byte_offset(arr, byte_offset(arr, i, j));
352
+ FP_VERIFY_FWD_2(result)
353
+
354
+ return result;
355
+ }
356
+
357
+ template <typename T>
358
+ CUDA_CALLABLE inline T& index_grad(const array_t<T>& arr, int i, int j, int k)
359
+ {
360
+ T& result = *grad_at_byte_offset(arr, byte_offset(arr, i, j, k));
361
+ FP_VERIFY_FWD_3(result)
362
+
363
+ return result;
364
+ }
365
+
366
+ template <typename T>
367
+ CUDA_CALLABLE inline T& index_grad(const array_t<T>& arr, int i, int j, int k, int l)
368
+ {
369
+ T& result = *grad_at_byte_offset(arr, byte_offset(arr, i, j, k, l));
370
+ FP_VERIFY_FWD_4(result)
371
+
372
+ return result;
373
+ }
374
+
375
+
376
+ template <typename T>
377
+ CUDA_CALLABLE inline T& index(const indexedarray_t<T>& iarr, int i)
378
+ {
379
+ assert(iarr.arr.ndim == 1);
380
+ assert(i >= 0 && i < iarr.shape[0]);
381
+
382
+ if (iarr.indices[0])
383
+ {
384
+ i = iarr.indices[0][i];
385
+ assert(i >= 0 && i < iarr.arr.shape[0]);
386
+ }
387
+
388
+ T& result = *data_at_byte_offset(iarr.arr, byte_offset(iarr.arr, i));
389
+ FP_VERIFY_FWD_1(result)
390
+
391
+ return result;
392
+ }
393
+
394
+ template <typename T>
395
+ CUDA_CALLABLE inline T& index(const indexedarray_t<T>& iarr, int i, int j)
396
+ {
397
+ assert(iarr.arr.ndim == 2);
398
+ assert(i >= 0 && i < iarr.shape[0]);
399
+ assert(j >= 0 && j < iarr.shape[1]);
400
+
401
+ if (iarr.indices[0])
402
+ {
403
+ i = iarr.indices[0][i];
404
+ assert(i >= 0 && i < iarr.arr.shape[0]);
405
+ }
406
+ if (iarr.indices[1])
407
+ {
408
+ j = iarr.indices[1][j];
409
+ assert(j >= 0 && j < iarr.arr.shape[1]);
410
+ }
411
+
412
+ T& result = *data_at_byte_offset(iarr.arr, byte_offset(iarr.arr, i, j));
413
+ FP_VERIFY_FWD_2(result)
414
+
415
+ return result;
416
+ }
417
+
418
+ template <typename T>
419
+ CUDA_CALLABLE inline T& index(const indexedarray_t<T>& iarr, int i, int j, int k)
420
+ {
421
+ assert(iarr.arr.ndim == 3);
422
+ assert(i >= 0 && i < iarr.shape[0]);
423
+ assert(j >= 0 && j < iarr.shape[1]);
424
+ assert(k >= 0 && k < iarr.shape[2]);
425
+
426
+ if (iarr.indices[0])
427
+ {
428
+ i = iarr.indices[0][i];
429
+ assert(i >= 0 && i < iarr.arr.shape[0]);
430
+ }
431
+ if (iarr.indices[1])
432
+ {
433
+ j = iarr.indices[1][j];
434
+ assert(j >= 0 && j < iarr.arr.shape[1]);
435
+ }
436
+ if (iarr.indices[2])
437
+ {
438
+ k = iarr.indices[2][k];
439
+ assert(k >= 0 && k < iarr.arr.shape[2]);
440
+ }
441
+
442
+ T& result = *data_at_byte_offset(iarr.arr, byte_offset(iarr.arr, i, j, k));
443
+ FP_VERIFY_FWD_3(result)
444
+
445
+ return result;
446
+ }
447
+
448
+ template <typename T>
449
+ CUDA_CALLABLE inline T& index(const indexedarray_t<T>& iarr, int i, int j, int k, int l)
450
+ {
451
+ assert(iarr.arr.ndim == 4);
452
+ assert(i >= 0 && i < iarr.shape[0]);
453
+ assert(j >= 0 && j < iarr.shape[1]);
454
+ assert(k >= 0 && k < iarr.shape[2]);
455
+ assert(l >= 0 && l < iarr.shape[3]);
456
+
457
+ if (iarr.indices[0])
458
+ {
459
+ i = iarr.indices[0][i];
460
+ assert(i >= 0 && i < iarr.arr.shape[0]);
461
+ }
462
+ if (iarr.indices[1])
463
+ {
464
+ j = iarr.indices[1][j];
465
+ assert(j >= 0 && j < iarr.arr.shape[1]);
466
+ }
467
+ if (iarr.indices[2])
468
+ {
469
+ k = iarr.indices[2][k];
470
+ assert(k >= 0 && k < iarr.arr.shape[2]);
471
+ }
472
+ if (iarr.indices[3])
473
+ {
474
+ l = iarr.indices[3][l];
475
+ assert(l >= 0 && l < iarr.arr.shape[3]);
476
+ }
477
+
478
+ T& result = *data_at_byte_offset(iarr.arr, byte_offset(iarr.arr, i, j, k, l));
479
+ FP_VERIFY_FWD_4(result)
480
+
481
+ return result;
482
+ }
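
All of these indexed lookups follow the same per-dimension rule: a logical coordinate is first translated through that dimension's optional index array, then used to address the dense backing array. A 1-d standalone sketch (illustrative only, not warp code):

    // Illustrative 1-d sketch of the indexedarray_t lookup above:
    // if an index map is present, remap the logical index first.
    #include <cassert>

    float indexed_load(const float* data, const int* indices,
                       int logical_size, int dense_size, int i)
    {
        assert(i >= 0 && i < logical_size);
        if (indices)
        {
            i = indices[i];              // remap logical -> physical index
            assert(i >= 0 && i < dense_size);
        }
        return data[i];
    }
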
483
+
484
+
485
+ template <typename T>
486
+ CUDA_CALLABLE inline array_t<T> view(array_t<T>& src, int i)
487
+ {
488
+ assert(src.ndim > 1);
489
+ assert(i >= 0 && i < src.shape[0]);
490
+
491
+ array_t<T> a;
492
+ size_t offset = byte_offset(src, i);
493
+ a.data = data_at_byte_offset(src, offset);
494
+ if (src.grad)
495
+ a.grad = grad_at_byte_offset(src, offset);
496
+ a.shape[0] = src.shape[1];
497
+ a.shape[1] = src.shape[2];
498
+ a.shape[2] = src.shape[3];
499
+ a.strides[0] = src.strides[1];
500
+ a.strides[1] = src.strides[2];
501
+ a.strides[2] = src.strides[3];
502
+ a.ndim = src.ndim-1;
503
+
504
+ return a;
505
+ }
506
+
507
+ template <typename T>
508
+ CUDA_CALLABLE inline array_t<T> view(array_t<T>& src, int i, int j)
509
+ {
510
+ assert(src.ndim > 2);
511
+ assert(i >= 0 && i < src.shape[0]);
512
+ assert(j >= 0 && j < src.shape[1]);
513
+
514
+ array_t<T> a;
515
+ size_t offset = byte_offset(src, i, j);
516
+ a.data = data_at_byte_offset(src, offset);
517
+ if (src.grad)
518
+ a.grad = grad_at_byte_offset(src, offset);
519
+ a.shape[0] = src.shape[2];
520
+ a.shape[1] = src.shape[3];
521
+ a.strides[0] = src.strides[2];
522
+ a.strides[1] = src.strides[3];
523
+ a.ndim = src.ndim-2;
524
+
525
+ return a;
526
+ }
527
+
528
+ template <typename T>
529
+ CUDA_CALLABLE inline array_t<T> view(array_t<T>& src, int i, int j, int k)
530
+ {
531
+ assert(src.ndim > 3);
532
+ assert(i >= 0 && i < src.shape[0]);
533
+ assert(j >= 0 && j < src.shape[1]);
534
+ assert(k >= 0 && k < src.shape[2]);
535
+
536
+ array_t<T> a;
537
+ size_t offset = byte_offset(src, i, j, k);
538
+ a.data = data_at_byte_offset(src, offset);
539
+ if (src.grad)
540
+ a.grad = grad_at_byte_offset(src, offset);
541
+ a.shape[0] = src.shape[3];
542
+ a.strides[0] = src.strides[3];
543
+ a.ndim = src.ndim-3;
544
+
545
+ return a;
546
+ }
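
Note that a view never copies data: it re-points data (and grad, if present) at the selected sub-block and shifts the remaining shape and stride entries down by the number of indices supplied. A standalone sketch of the single-index case on a 2-d row-major array (illustrative only):

    // Illustrative sketch: a "row view" is just a pointer offset plus the
    // trailing extent; the storage is shared with the parent array.
    #include <cassert>

    struct row_view { float* data; int size; };

    row_view view_row(float* data, int dim0, int dim1, int i)
    {
        assert(i >= 0 && i < dim0);
        row_view v;
        v.data = data + i * dim1;   // offset to the start of row i
        v.size = dim1;              // remaining (trailing) dimension
        return v;
    }
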
547
+
548
+
549
+ template <typename T>
550
+ CUDA_CALLABLE inline indexedarray_t<T> view(indexedarray_t<T>& src, int i)
551
+ {
552
+ assert(src.arr.ndim > 1);
553
+
554
+ if (src.indices[0])
555
+ {
556
+ assert(i >= 0 && i < src.shape[0]);
557
+ i = src.indices[0][i];
558
+ }
559
+
560
+ indexedarray_t<T> a;
561
+ a.arr = view(src.arr, i);
562
+ a.indices[0] = src.indices[1];
563
+ a.indices[1] = src.indices[2];
564
+ a.indices[2] = src.indices[3];
565
+ a.shape[0] = src.shape[1];
566
+ a.shape[1] = src.shape[2];
567
+ a.shape[2] = src.shape[3];
568
+
569
+ return a;
570
+ }
571
+
572
+ template <typename T>
573
+ CUDA_CALLABLE inline indexedarray_t<T> view(indexedarray_t<T>& src, int i, int j)
574
+ {
575
+ assert(src.arr.ndim > 2);
576
+
577
+ if (src.indices[0])
578
+ {
579
+ assert(i >= 0 && i < src.shape[0]);
580
+ i = src.indices[0][i];
581
+ }
582
+ if (src.indices[1])
583
+ {
584
+ assert(j >= 0 && j < src.shape[1]);
585
+ j = src.indices[1][j];
586
+ }
587
+
588
+ indexedarray_t<T> a;
589
+ a.arr = view(src.arr, i, j);
590
+ a.indices[0] = src.indices[2];
591
+ a.indices[1] = src.indices[3];
592
+ a.shape[0] = src.shape[2];
593
+ a.shape[1] = src.shape[3];
594
+
595
+ return a;
596
+ }
597
+
598
+ template <typename T>
599
+ CUDA_CALLABLE inline indexedarray_t<T> view(indexedarray_t<T>& src, int i, int j, int k)
600
+ {
601
+ assert(src.arr.ndim > 3);
602
+
603
+ if (src.indices[0])
604
+ {
605
+ assert(i >= 0 && i < src.shape[0]);
606
+ i = src.indices[0][i];
607
+ }
608
+ if (src.indices[1])
609
+ {
610
+ assert(j >= 0 && j < src.shape[1]);
611
+ j = src.indices[1][j];
612
+ }
613
+ if (src.indices[2])
614
+ {
615
+ assert(k >= 0 && k < src.shape[2]);
616
+ k = src.indices[2][k];
617
+ }
618
+
619
+ indexedarray_t<T> a;
620
+ a.arr = view(src.arr, i, j, k);
621
+ a.indices[0] = src.indices[3];
622
+ a.shape[0] = src.shape[3];
623
+
624
+ return a;
625
+ }
626
+
627
+ template<template<typename> class A1, template<typename> class A2, template<typename> class A3, typename T>
628
+ inline CUDA_CALLABLE void adj_view(A1<T>& src, int i, A2<T>& adj_src, int adj_i, A3<T> adj_ret) {}
629
+ template<template<typename> class A1, template<typename> class A2, template<typename> class A3, typename T>
630
+ inline CUDA_CALLABLE void adj_view(A1<T>& src, int i, int j, A2<T>& adj_src, int adj_i, int adj_j, A3<T> adj_ret) {}
631
+ template<template<typename> class A1, template<typename> class A2, template<typename> class A3, typename T>
632
+ inline CUDA_CALLABLE void adj_view(A1<T>& src, int i, int j, int k, A2<T>& adj_src, int adj_i, int adj_j, int adj_k, A3<T> adj_ret) {}
633
+
634
+ // TODO: lower_bound() for indexed arrays?
635
+
636
+ template <typename T>
637
+ CUDA_CALLABLE inline int lower_bound(const array_t<T>& arr, int arr_begin, int arr_end, T value)
638
+ {
639
+ assert(arr.ndim == 1);
640
+
641
+ int lower = arr_begin;
642
+ int upper = arr_end - 1;
643
+
644
+ while(lower < upper)
645
+ {
646
+ int mid = lower + (upper - lower) / 2;
647
+
648
+ if (arr[mid] < value)
649
+ {
650
+ lower = mid + 1;
651
+ }
652
+ else
653
+ {
654
+ upper = mid;
655
+ }
656
+ }
657
+
658
+ return lower;
659
+ }
660
+
661
+ template <typename T>
662
+ CUDA_CALLABLE inline int lower_bound(const array_t<T>& arr, T value)
663
+ {
664
+ return lower_bound(arr, 0, arr.shape[0], value);
665
+ }
666
+
667
+ template <typename T> inline CUDA_CALLABLE void adj_lower_bound(const array_t<T>& arr, T value, array_t<T> adj_arr, T adj_value, int adj_ret) {}
668
+ template <typename T> inline CUDA_CALLABLE void adj_lower_bound(const array_t<T>& arr, int arr_begin, int arr_end, T value, array_t<T> adj_arr, int adj_arr_begin, int adj_arr_end, T adj_value, int adj_ret) {}
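
lower_bound() above is a standard binary search for the first position in [arr_begin, arr_end) whose element does not compare less than value; because upper starts at arr_end - 1, it returns arr_end - 1 (rather than arr_end) when every element is smaller than value. A standalone sketch with a tiny usage example (illustrative only):

    // Illustrative sketch mirroring the binary search above, in plain C++.
    #include <cstdio>

    int lower_bound_sketch(const float* arr, int begin, int end, float value)
    {
        int lower = begin;
        int upper = end - 1;
        while (lower < upper)
        {
            int mid = lower + (upper - lower) / 2;
            if (arr[mid] < value)
                lower = mid + 1;
            else
                upper = mid;
        }
        return lower;
    }

    int main()
    {
        const float a[] = {1.0f, 3.0f, 3.0f, 7.0f};
        printf("%d\n", lower_bound_sketch(a, 0, 4, 3.0f));   // prints 1
        printf("%d\n", lower_bound_sketch(a, 0, 4, 9.0f));   // prints 3 (== end - 1)
        return 0;
    }
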
669
+
670
+ template<template<typename> class A, typename T>
671
+ inline CUDA_CALLABLE T atomic_add(const A<T>& buf, int i, T value) { return atomic_add(&index(buf, i), value); }
672
+ template<template<typename> class A, typename T>
673
+ inline CUDA_CALLABLE T atomic_add(const A<T>& buf, int i, int j, T value) { return atomic_add(&index(buf, i, j), value); }
674
+ template<template<typename> class A, typename T>
675
+ inline CUDA_CALLABLE T atomic_add(const A<T>& buf, int i, int j, int k, T value) { return atomic_add(&index(buf, i, j, k), value); }
676
+ template<template<typename> class A, typename T>
677
+ inline CUDA_CALLABLE T atomic_add(const A<T>& buf, int i, int j, int k, int l, T value) { return atomic_add(&index(buf, i, j, k, l), value); }
678
+
679
+ template<template<typename> class A, typename T>
680
+ inline CUDA_CALLABLE T atomic_sub(const A<T>& buf, int i, T value) { return atomic_add(&index(buf, i), -value); }
681
+ template<template<typename> class A, typename T>
682
+ inline CUDA_CALLABLE T atomic_sub(const A<T>& buf, int i, int j, T value) { return atomic_add(&index(buf, i, j), -value); }
683
+ template<template<typename> class A, typename T>
684
+ inline CUDA_CALLABLE T atomic_sub(const A<T>& buf, int i, int j, int k, T value) { return atomic_add(&index(buf, i, j, k), -value); }
685
+ template<template<typename> class A, typename T>
686
+ inline CUDA_CALLABLE T atomic_sub(const A<T>& buf, int i, int j, int k, int l, T value) { return atomic_add(&index(buf, i, j, k, l), -value); }
687
+
688
+ template<template<typename> class A, typename T>
689
+ inline CUDA_CALLABLE T atomic_min(const A<T>& buf, int i, T value) { return atomic_min(&index(buf, i), value); }
690
+ template<template<typename> class A, typename T>
691
+ inline CUDA_CALLABLE T atomic_min(const A<T>& buf, int i, int j, T value) { return atomic_min(&index(buf, i, j), value); }
692
+ template<template<typename> class A, typename T>
693
+ inline CUDA_CALLABLE T atomic_min(const A<T>& buf, int i, int j, int k, T value) { return atomic_min(&index(buf, i, j, k), value); }
694
+ template<template<typename> class A, typename T>
695
+ inline CUDA_CALLABLE T atomic_min(const A<T>& buf, int i, int j, int k, int l, T value) { return atomic_min(&index(buf, i, j, k, l), value); }
696
+
697
+ template<template<typename> class A, typename T>
698
+ inline CUDA_CALLABLE T atomic_max(const A<T>& buf, int i, T value) { return atomic_max(&index(buf, i), value); }
699
+ template<template<typename> class A, typename T>
700
+ inline CUDA_CALLABLE T atomic_max(const A<T>& buf, int i, int j, T value) { return atomic_max(&index(buf, i, j), value); }
701
+ template<template<typename> class A, typename T>
702
+ inline CUDA_CALLABLE T atomic_max(const A<T>& buf, int i, int j, int k, T value) { return atomic_max(&index(buf, i, j, k), value); }
703
+ template<template<typename> class A, typename T>
704
+ inline CUDA_CALLABLE T atomic_max(const A<T>& buf, int i, int j, int k, int l, T value) { return atomic_max(&index(buf, i, j, k, l), value); }
705
+
706
+ template<template<typename> class A, typename T>
707
+ inline CUDA_CALLABLE T* address(const A<T>& buf, int i) { return &index(buf, i); }
708
+ template<template<typename> class A, typename T>
709
+ inline CUDA_CALLABLE T* address(const A<T>& buf, int i, int j) { return &index(buf, i, j); }
710
+ template<template<typename> class A, typename T>
711
+ inline CUDA_CALLABLE T* address(const A<T>& buf, int i, int j, int k) { return &index(buf, i, j, k); }
712
+ template<template<typename> class A, typename T>
713
+ inline CUDA_CALLABLE T* address(const A<T>& buf, int i, int j, int k, int l) { return &index(buf, i, j, k, l); }
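
These wrappers only resolve the element address through index() and forward to the scalar atomic overloads defined elsewhere (builtin.h is included at the top of this header). For comparison, a standalone sketch of the same read-modify-write shape using the C++ standard library (illustrative only; the warp overloads are built on CUDA/compiler intrinsics rather than std::atomic):

    // Illustrative only: fetch-and-add on a std::atomic counter, returning
    // the value held before the addition.
    #include <atomic>
    #include <cstdio>

    int main()
    {
        std::atomic<int> counter{10};
        int old = counter.fetch_add(5);          // value before the add
        printf("%d %d\n", old, counter.load());  // prints "10 15"
        return 0;
    }
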
714
+
715
+ template<template<typename> class A, typename T>
716
+ inline CUDA_CALLABLE void array_store(const A<T>& buf, int i, T value)
717
+ {
718
+ FP_VERIFY_FWD_1(value)
719
+
720
+ index(buf, i) = value;
721
+ }
722
+ template<template<typename> class A, typename T>
723
+ inline CUDA_CALLABLE void array_store(const A<T>& buf, int i, int j, T value)
724
+ {
725
+ FP_VERIFY_FWD_2(value)
726
+
727
+ index(buf, i, j) = value;
728
+ }
729
+ template<template<typename> class A, typename T>
730
+ inline CUDA_CALLABLE void array_store(const A<T>& buf, int i, int j, int k, T value)
731
+ {
732
+ FP_VERIFY_FWD_3(value)
733
+
734
+ index(buf, i, j, k) = value;
735
+ }
736
+ template<template<typename> class A, typename T>
737
+ inline CUDA_CALLABLE void array_store(const A<T>& buf, int i, int j, int k, int l, T value)
738
+ {
739
+ FP_VERIFY_FWD_4(value)
740
+
741
+ index(buf, i, j, k, l) = value;
742
+ }
743
+
744
+ template<typename T>
745
+ inline CUDA_CALLABLE void store(T* address, T value)
746
+ {
747
+ FP_VERIFY_FWD(value)
748
+
749
+ *address = value;
750
+ }
751
+
752
+ template<typename T>
753
+ inline CUDA_CALLABLE T load(T* address)
754
+ {
755
+ T value = *address;
756
+ FP_VERIFY_FWD(value)
757
+
758
+ return value;
759
+ }
760
+
761
+ // select operator to check for array being null
762
+ template <typename T1, typename T2>
763
+ CUDA_CALLABLE inline T2 select(const array_t<T1>& arr, const T2& a, const T2& b) { return arr.data?b:a; }
764
+
765
+ template <typename T1, typename T2>
766
+ CUDA_CALLABLE inline void adj_select(const array_t<T1>& arr, const T2& a, const T2& b, const array_t<T1>& adj_cond, T2& adj_a, T2& adj_b, const T2& adj_ret)
767
+ {
768
+ if (arr.data)
769
+ adj_b += adj_ret;
770
+ else
771
+ adj_a += adj_ret;
772
+ }
773
+
774
+ // stub for the case where we have a nested array inside a struct and
775
+ // atomic add the whole struct onto an array (e.g.: during backwards pass)
776
+ template <typename T>
777
+ CUDA_CALLABLE inline void atomic_add(array_t<T>*, array_t<T>) {}
778
+
779
+ // for float and vector types this is just an alias for an atomic add
780
+ template <typename T>
781
+ CUDA_CALLABLE inline void adj_atomic_add(T* buf, T value) { atomic_add(buf, value); }
782
+
783
+
784
+ // for integral types we do not accumulate gradients
785
+ CUDA_CALLABLE inline void adj_atomic_add(int8* buf, int8 value) { }
786
+ CUDA_CALLABLE inline void adj_atomic_add(uint8* buf, uint8 value) { }
787
+ CUDA_CALLABLE inline void adj_atomic_add(int16* buf, int16 value) { }
788
+ CUDA_CALLABLE inline void adj_atomic_add(uint16* buf, uint16 value) { }
789
+ CUDA_CALLABLE inline void adj_atomic_add(int32* buf, int32 value) { }
790
+ CUDA_CALLABLE inline void adj_atomic_add(uint32* buf, uint32 value) { }
791
+ CUDA_CALLABLE inline void adj_atomic_add(int64* buf, int64 value) { }
792
+ CUDA_CALLABLE inline void adj_atomic_add(uint64* buf, uint64 value) { }
793
+
794
+ CUDA_CALLABLE inline void adj_atomic_add(bool* buf, bool value) { }
795
+
796
+ // only generate gradients for dense array_t arrays; generic overloads for other array types (below) are no-ops
797
+ template<typename T>
798
+ inline CUDA_CALLABLE void adj_address(const array_t<T>& buf, int i, const array_t<T>& adj_buf, int& adj_i, const T& adj_output)
799
+ {
800
+ if (adj_buf.data)
801
+ adj_atomic_add(&index(adj_buf, i), adj_output);
802
+ else if (buf.grad)
803
+ adj_atomic_add(&index_grad(buf, i), adj_output);
804
+ }
805
+ template<typename T>
806
+ inline CUDA_CALLABLE void adj_address(const array_t<T>& buf, int i, int j, const array_t<T>& adj_buf, int& adj_i, int& adj_j, const T& adj_output)
807
+ {
808
+ if (adj_buf.data)
809
+ adj_atomic_add(&index(adj_buf, i, j), adj_output);
810
+ else if (buf.grad)
811
+ adj_atomic_add(&index_grad(buf, i, j), adj_output);
812
+ }
813
+ template<typename T>
814
+ inline CUDA_CALLABLE void adj_address(const array_t<T>& buf, int i, int j, int k, const array_t<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, const T& adj_output)
815
+ {
816
+ if (adj_buf.data)
817
+ adj_atomic_add(&index(adj_buf, i, j, k), adj_output);
818
+ else if (buf.grad)
819
+ adj_atomic_add(&index_grad(buf, i, j, k), adj_output);
820
+ }
821
+ template<typename T>
822
+ inline CUDA_CALLABLE void adj_address(const array_t<T>& buf, int i, int j, int k, int l, const array_t<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, int& adj_l, const T& adj_output)
823
+ {
824
+ if (adj_buf.data)
825
+ adj_atomic_add(&index(adj_buf, i, j, k, l), adj_output);
826
+ else if (buf.grad)
827
+ adj_atomic_add(&index_grad(buf, i, j, k, l), adj_output);
828
+ }
829
+
830
+ template<typename T>
831
+ inline CUDA_CALLABLE void adj_array_store(const array_t<T>& buf, int i, T value, const array_t<T>& adj_buf, int& adj_i, T& adj_value)
832
+ {
833
+ if (adj_buf.data)
834
+ adj_value += index(adj_buf, i);
835
+ else if (buf.grad)
836
+ adj_value += index_grad(buf, i);
837
+
838
+ FP_VERIFY_ADJ_1(value, adj_value)
839
+ }
840
+ template<typename T>
841
+ inline CUDA_CALLABLE void adj_array_store(const array_t<T>& buf, int i, int j, T value, const array_t<T>& adj_buf, int& adj_i, int& adj_j, T& adj_value)
842
+ {
843
+ if (adj_buf.data)
844
+ adj_value += index(adj_buf, i, j);
845
+ else if (buf.grad)
846
+ adj_value += index_grad(buf, i, j);
847
+
848
+ FP_VERIFY_ADJ_2(value, adj_value)
849
+ }
850
+ template<typename T>
851
+ inline CUDA_CALLABLE void adj_array_store(const array_t<T>& buf, int i, int j, int k, T value, const array_t<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, T& adj_value)
852
+ {
853
+ if (adj_buf.data)
854
+ adj_value += index(adj_buf, i, j, k);
855
+ else if (buf.grad)
856
+ adj_value += index_grad(buf, i, j, k);
857
+
858
+ FP_VERIFY_ADJ_3(value, adj_value)
859
+ }
860
+ template<typename T>
861
+ inline CUDA_CALLABLE void adj_array_store(const array_t<T>& buf, int i, int j, int k, int l, T value, const array_t<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, int& adj_l, T& adj_value)
862
+ {
863
+ if (adj_buf.data)
864
+ adj_value += index(adj_buf, i, j, k, l);
865
+ else if (buf.grad)
866
+ adj_value += index_grad(buf, i, j, k, l);
867
+
868
+ FP_VERIFY_ADJ_4(value, adj_value)
869
+ }
870
+
871
+ template<typename T>
872
+ inline CUDA_CALLABLE void adj_store(const T* address, T value, const T& adj_address, T& adj_value)
873
+ {
874
+ // nop; generic store() operations are not differentiable, only array_store() is
875
+ FP_VERIFY_ADJ(value, adj_value)
876
+ }
877
+
878
+ template<typename T>
879
+ inline CUDA_CALLABLE void adj_load(const T* address, const T& adj_address, T& adj_value)
880
+ {
881
+ // nop; generic load() operations are not differentiable
882
+ }
883
+
884
+ template<typename T>
885
+ inline CUDA_CALLABLE void adj_atomic_add(const array_t<T>& buf, int i, T value, const array_t<T>& adj_buf, int& adj_i, T& adj_value, const T& adj_ret)
886
+ {
887
+ if (adj_buf.data)
888
+ adj_value += index(adj_buf, i);
889
+ else if (buf.grad)
890
+ adj_value += index_grad(buf, i);
891
+
892
+ FP_VERIFY_ADJ_1(value, adj_value)
893
+ }
894
+ template<typename T>
895
+ inline CUDA_CALLABLE void adj_atomic_add(const array_t<T>& buf, int i, int j, T value, const array_t<T>& adj_buf, int& adj_i, int& adj_j, T& adj_value, const T& adj_ret)
896
+ {
897
+ if (adj_buf.data)
898
+ adj_value += index(adj_buf, i, j);
899
+ else if (buf.grad)
900
+ adj_value += index_grad(buf, i, j);
901
+
902
+ FP_VERIFY_ADJ_2(value, adj_value)
903
+ }
904
+ template<typename T>
905
+ inline CUDA_CALLABLE void adj_atomic_add(const array_t<T>& buf, int i, int j, int k, T value, const array_t<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, T& adj_value, const T& adj_ret)
906
+ {
907
+ if (adj_buf.data)
908
+ adj_value += index(adj_buf, i, j, k);
909
+ else if (buf.grad)
910
+ adj_value += index_grad(buf, i, j, k);
911
+
912
+ FP_VERIFY_ADJ_3(value, adj_value)
913
+ }
914
+ template<typename T>
915
+ inline CUDA_CALLABLE void adj_atomic_add(const array_t<T>& buf, int i, int j, int k, int l, T value, const array_t<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, int& adj_l, T& adj_value, const T& adj_ret)
916
+ {
917
+ if (adj_buf.data)
918
+ adj_value += index(adj_buf, i, j, k, l);
919
+ else if (buf.grad)
920
+ adj_value += index_grad(buf, i, j, k, l);
921
+
922
+ FP_VERIFY_ADJ_4(value, adj_value)
923
+ }
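
The adj_address, adj_array_store, and adj_atomic_add overloads above all apply the same rule at every arity: the adjoint of the stored/added value is accumulated from an explicitly supplied adjoint array when one is present, otherwise from the .grad buffer attached to the primal array. A condensed 1-d standalone sketch (illustrative only, not warp code):

    // Illustrative sketch of the gradient-accumulation rule used above.
    struct arr1d { float* data; float* grad; };

    void adj_atomic_add_sketch(const arr1d& buf, int i, const arr1d& adj_buf,
                               float& adj_value)
    {
        if (adj_buf.data)            // explicit adjoint array supplied
            adj_value += adj_buf.data[i];
        else if (buf.grad)           // fall back to the gradient attached to buf
            adj_value += buf.grad[i];
    }
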
924
+
925
+
926
+ template<typename T>
927
+ inline CUDA_CALLABLE void adj_atomic_sub(const array_t<T>& buf, int i, T value, const array_t<T>& adj_buf, int& adj_i, T& adj_value, const T& adj_ret)
928
+ {
929
+ if (adj_buf.data)
930
+ adj_value -= index(adj_buf, i);
931
+ else if (buf.grad)
932
+ adj_value -= index_grad(buf, i);
933
+
934
+ FP_VERIFY_ADJ_1(value, adj_value)
935
+ }
936
+ template<typename T>
937
+ inline CUDA_CALLABLE void adj_atomic_sub(const array_t<T>& buf, int i, int j, T value, const array_t<T>& adj_buf, int& adj_i, int& adj_j, T& adj_value, const T& adj_ret)
938
+ {
939
+ if (adj_buf.data)
940
+ adj_value -= index(adj_buf, i, j);
941
+ else if (buf.grad)
942
+ adj_value -= index_grad(buf, i, j);
943
+
944
+ FP_VERIFY_ADJ_2(value, adj_value)
945
+ }
946
+ template<typename T>
947
+ inline CUDA_CALLABLE void adj_atomic_sub(const array_t<T>& buf, int i, int j, int k, T value, const array_t<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, T& adj_value, const T& adj_ret)
948
+ {
949
+ if (adj_buf.data)
950
+ adj_value -= index(adj_buf, i, j, k);
951
+ else if (buf.grad)
952
+ adj_value -= index_grad(buf, i, j, k);
953
+
954
+ FP_VERIFY_ADJ_3(value, adj_value)
955
+ }
956
+ template<typename T>
957
+ inline CUDA_CALLABLE void adj_atomic_sub(const array_t<T>& buf, int i, int j, int k, int l, T value, const array_t<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, int& adj_l, T& adj_value, const T& adj_ret)
958
+ {
959
+ if (adj_buf.data)
960
+ adj_value -= index(adj_buf, i, j, k, l);
961
+ else if (buf.grad)
962
+ adj_value -= index_grad(buf, i, j, k, l);
963
+
964
+ FP_VERIFY_ADJ_4(value, adj_value)
965
+ }
966
+
967
+ // generic array types that do not support gradient computation (indexedarray, etc.)
968
+ template<template<typename> class A1, template<typename> class A2, typename T>
969
+ inline CUDA_CALLABLE void adj_address(const A1<T>& buf, int i, const A2<T>& adj_buf, int& adj_i, const T& adj_output) {}
970
+ template<template<typename> class A1, template<typename> class A2, typename T>
971
+ inline CUDA_CALLABLE void adj_address(const A1<T>& buf, int i, int j, const A2<T>& adj_buf, int& adj_i, int& adj_j, const T& adj_output) {}
972
+ template<template<typename> class A1, template<typename> class A2, typename T>
973
+ inline CUDA_CALLABLE void adj_address(const A1<T>& buf, int i, int j, int k, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, const T& adj_output) {}
974
+ template<template<typename> class A1, template<typename> class A2, typename T>
975
+ inline CUDA_CALLABLE void adj_address(const A1<T>& buf, int i, int j, int k, int l, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, int& adj_l, const T& adj_output) {}
976
+
977
+ template<template<typename> class A1, template<typename> class A2, typename T>
978
+ inline CUDA_CALLABLE void adj_array_store(const A1<T>& buf, int i, T value, const A2<T>& adj_buf, int& adj_i, T& adj_value) {}
979
+ template<template<typename> class A1, template<typename> class A2, typename T>
980
+ inline CUDA_CALLABLE void adj_array_store(const A1<T>& buf, int i, int j, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, T& adj_value) {}
981
+ template<template<typename> class A1, template<typename> class A2, typename T>
982
+ inline CUDA_CALLABLE void adj_array_store(const A1<T>& buf, int i, int j, int k, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, T& adj_value) {}
983
+ template<template<typename> class A1, template<typename> class A2, typename T>
984
+ inline CUDA_CALLABLE void adj_array_store(const A1<T>& buf, int i, int j, int k, int l, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, int& adj_l, T& adj_value) {}
985
+
986
+ template<template<typename> class A1, template<typename> class A2, typename T>
987
+ inline CUDA_CALLABLE void adj_atomic_add(const A1<T>& buf, int i, T value, const A2<T>& adj_buf, int& adj_i, T& adj_value, const T& adj_ret) {}
988
+ template<template<typename> class A1, template<typename> class A2, typename T>
989
+ inline CUDA_CALLABLE void adj_atomic_add(const A1<T>& buf, int i, int j, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, T& adj_value, const T& adj_ret) {}
990
+ template<template<typename> class A1, template<typename> class A2, typename T>
991
+ inline CUDA_CALLABLE void adj_atomic_add(const A1<T>& buf, int i, int j, int k, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, T& adj_value, const T& adj_ret) {}
992
+ template<template<typename> class A1, template<typename> class A2, typename T>
993
+ inline CUDA_CALLABLE void adj_atomic_add(const A1<T>& buf, int i, int j, int k, int l, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, int& adj_l, T& adj_value, const T& adj_ret) {}
994
+
995
+ template<template<typename> class A1, template<typename> class A2, typename T>
996
+ inline CUDA_CALLABLE void adj_atomic_sub(const A1<T>& buf, int i, T value, const A2<T>& adj_buf, int& adj_i, T& adj_value, const T& adj_ret) {}
997
+ template<template<typename> class A1, template<typename> class A2, typename T>
998
+ inline CUDA_CALLABLE void adj_atomic_sub(const A1<T>& buf, int i, int j, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, T& adj_value, const T& adj_ret) {}
999
+ template<template<typename> class A1, template<typename> class A2, typename T>
1000
+ inline CUDA_CALLABLE void adj_atomic_sub(const A1<T>& buf, int i, int j, int k, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, T& adj_value, const T& adj_ret) {}
1001
+ template<template<typename> class A1, template<typename> class A2, typename T>
1002
+ inline CUDA_CALLABLE void adj_atomic_sub(const A1<T>& buf, int i, int j, int k, int l, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, int& adj_l, T& adj_value, const T& adj_ret) {}
1003
+
1004
+ // generic handler for scalar values
1005
+ template<template<typename> class A1, template<typename> class A2, typename T>
1006
+ inline CUDA_CALLABLE void adj_atomic_min(const A1<T>& buf, int i, T value, const A2<T>& adj_buf, int& adj_i, T& adj_value, const T& adj_ret) {
1007
+ if (adj_buf.data)
1008
+ adj_atomic_minmax(&index(buf, i), &index(adj_buf, i), value, adj_value);
1009
+ else if (buf.grad)
1010
+ adj_atomic_minmax(&index(buf, i), &index_grad(buf, i), value, adj_value);
1011
+
1012
+ FP_VERIFY_ADJ_1(value, adj_value)
1013
+ }
1014
+ template<template<typename> class A1, template<typename> class A2, typename T>
1015
+ inline CUDA_CALLABLE void adj_atomic_min(const A1<T>& buf, int i, int j, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, T& adj_value, const T& adj_ret) {
1016
+ if (adj_buf.data)
1017
+ adj_atomic_minmax(&index(buf, i, j), &index(adj_buf, i, j), value, adj_value);
1018
+ else if (buf.grad)
1019
+ adj_atomic_minmax(&index(buf, i, j), &index_grad(buf, i, j), value, adj_value);
1020
+
1021
+ FP_VERIFY_ADJ_2(value, adj_value)
1022
+ }
1023
+ template<template<typename> class A1, template<typename> class A2, typename T>
1024
+ inline CUDA_CALLABLE void adj_atomic_min(const A1<T>& buf, int i, int j, int k, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, T& adj_value, const T& adj_ret) {
1025
+ if (adj_buf.data)
1026
+ adj_atomic_minmax(&index(buf, i, j, k), &index(adj_buf, i, j, k), value, adj_value);
1027
+ else if (buf.grad)
1028
+ adj_atomic_minmax(&index(buf, i, j, k), &index_grad(buf, i, j, k), value, adj_value);
1029
+
1030
+ FP_VERIFY_ADJ_3(value, adj_value)
1031
+ }
1032
+ template<template<typename> class A1, template<typename> class A2, typename T>
1033
+ inline CUDA_CALLABLE void adj_atomic_min(const A1<T>& buf, int i, int j, int k, int l, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, int& adj_l, T& adj_value, const T& adj_ret) {
1034
+ if (adj_buf.data)
1035
+ adj_atomic_minmax(&index(buf, i, j, k, l), &index(adj_buf, i, j, k, l), value, adj_value);
1036
+ else if (buf.grad)
1037
+ adj_atomic_minmax(&index(buf, i, j, k, l), &index_grad(buf, i, j, k, l), value, adj_value);
1038
+
1039
+ FP_VERIFY_ADJ_4(value, adj_value)
1040
+ }
1041
+
1042
+ template<template<typename> class A1, template<typename> class A2, typename T>
1043
+ inline CUDA_CALLABLE void adj_atomic_max(const A1<T>& buf, int i, T value, const A2<T>& adj_buf, int& adj_i, T& adj_value, const T& adj_ret) {
1044
+ if (adj_buf.data)
1045
+ adj_atomic_minmax(&index(buf, i), &index(adj_buf, i), value, adj_value);
1046
+ else if (buf.grad)
1047
+ adj_atomic_minmax(&index(buf, i), &index_grad(buf, i), value, adj_value);
1048
+
1049
+ FP_VERIFY_ADJ_1(value, adj_value)
1050
+ }
1051
+ template<template<typename> class A1, template<typename> class A2, typename T>
1052
+ inline CUDA_CALLABLE void adj_atomic_max(const A1<T>& buf, int i, int j, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, T& adj_value, const T& adj_ret) {
1053
+ if (adj_buf.data)
1054
+ adj_atomic_minmax(&index(buf, i, j), &index(adj_buf, i, j), value, adj_value);
1055
+ else if (buf.grad)
1056
+ adj_atomic_minmax(&index(buf, i, j), &index_grad(buf, i, j), value, adj_value);
1057
+
1058
+ FP_VERIFY_ADJ_2(value, adj_value)
1059
+ }
1060
+ template<template<typename> class A1, template<typename> class A2, typename T>
1061
+ inline CUDA_CALLABLE void adj_atomic_max(const A1<T>& buf, int i, int j, int k, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, T& adj_value, const T& adj_ret) {
1062
+ if (adj_buf.data)
1063
+ adj_atomic_minmax(&index(buf, i, j, k), &index(adj_buf, i, j, k), value, adj_value);
1064
+ else if (buf.grad)
1065
+ adj_atomic_minmax(&index(buf, i, j, k), &index_grad(buf, i, j, k), value, adj_value);
1066
+
1067
+ FP_VERIFY_ADJ_3(value, adj_value)
1068
+ }
1069
+ template<template<typename> class A1, template<typename> class A2, typename T>
1070
+ inline CUDA_CALLABLE void adj_atomic_max(const A1<T>& buf, int i, int j, int k, int l, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, int& adj_l, T& adj_value, const T& adj_ret) {
1071
+ if (adj_buf.data)
1072
+ adj_atomic_minmax(&index(buf, i, j, k, l), &index(adj_buf, i, j, k, l), value, adj_value);
1073
+ else if (buf.grad)
1074
+ adj_atomic_minmax(&index(buf, i, j, k, l), &index_grad(buf, i, j, k, l), value, adj_value);
1075
+
1076
+ FP_VERIFY_ADJ_4(value, adj_value)
1077
+ }
1078
+
1079
+ } // namespace wp
1080
+
1081
+ #include "fabric.h"