warp-lang 1.0.2-py3-none-manylinux2014_x86_64.whl → 1.2.0-py3-none-manylinux2014_x86_64.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.

Files changed (356)
  1. warp/__init__.py +108 -97
  2. warp/__init__.pyi +1 -1
  3. warp/bin/warp-clang.so +0 -0
  4. warp/bin/warp.so +0 -0
  5. warp/build.py +88 -113
  6. warp/build_dll.py +383 -375
  7. warp/builtins.py +3693 -3354
  8. warp/codegen.py +2925 -2792
  9. warp/config.py +40 -36
  10. warp/constants.py +49 -45
  11. warp/context.py +5409 -5102
  12. warp/dlpack.py +442 -442
  13. warp/examples/__init__.py +16 -16
  14. warp/examples/assets/bear.usd +0 -0
  15. warp/examples/assets/bunny.usd +0 -0
  16. warp/examples/assets/cartpole.urdf +110 -110
  17. warp/examples/assets/crazyflie.usd +0 -0
  18. warp/examples/assets/cube.usd +0 -0
  19. warp/examples/assets/nv_ant.xml +92 -92
  20. warp/examples/assets/nv_humanoid.xml +183 -183
  21. warp/examples/assets/quadruped.urdf +267 -267
  22. warp/examples/assets/rocks.nvdb +0 -0
  23. warp/examples/assets/rocks.usd +0 -0
  24. warp/examples/assets/sphere.usd +0 -0
  25. warp/examples/benchmarks/benchmark_api.py +381 -383
  26. warp/examples/benchmarks/benchmark_cloth.py +278 -277
  27. warp/examples/benchmarks/benchmark_cloth_cupy.py +88 -88
  28. warp/examples/benchmarks/benchmark_cloth_jax.py +97 -100
  29. warp/examples/benchmarks/benchmark_cloth_numba.py +146 -142
  30. warp/examples/benchmarks/benchmark_cloth_numpy.py +77 -77
  31. warp/examples/benchmarks/benchmark_cloth_pytorch.py +86 -86
  32. warp/examples/benchmarks/benchmark_cloth_taichi.py +112 -112
  33. warp/examples/benchmarks/benchmark_cloth_warp.py +145 -146
  34. warp/examples/benchmarks/benchmark_launches.py +293 -295
  35. warp/examples/browse.py +29 -29
  36. warp/examples/core/example_dem.py +232 -219
  37. warp/examples/core/example_fluid.py +291 -267
  38. warp/examples/core/example_graph_capture.py +142 -126
  39. warp/examples/core/example_marching_cubes.py +186 -174
  40. warp/examples/core/example_mesh.py +172 -155
  41. warp/examples/core/example_mesh_intersect.py +203 -193
  42. warp/examples/core/example_nvdb.py +174 -170
  43. warp/examples/core/example_raycast.py +103 -90
  44. warp/examples/core/example_raymarch.py +197 -178
  45. warp/examples/core/example_render_opengl.py +183 -141
  46. warp/examples/core/example_sph.py +403 -387
  47. warp/examples/core/example_torch.py +219 -181
  48. warp/examples/core/example_wave.py +261 -248
  49. warp/examples/fem/bsr_utils.py +378 -380
  50. warp/examples/fem/example_apic_fluid.py +432 -389
  51. warp/examples/fem/example_burgers.py +262 -0
  52. warp/examples/fem/example_convection_diffusion.py +180 -168
  53. warp/examples/fem/example_convection_diffusion_dg.py +217 -209
  54. warp/examples/fem/example_deformed_geometry.py +175 -159
  55. warp/examples/fem/example_diffusion.py +199 -173
  56. warp/examples/fem/example_diffusion_3d.py +178 -152
  57. warp/examples/fem/example_diffusion_mgpu.py +219 -214
  58. warp/examples/fem/example_mixed_elasticity.py +242 -222
  59. warp/examples/fem/example_navier_stokes.py +257 -243
  60. warp/examples/fem/example_stokes.py +218 -192
  61. warp/examples/fem/example_stokes_transfer.py +263 -249
  62. warp/examples/fem/mesh_utils.py +133 -109
  63. warp/examples/fem/plot_utils.py +292 -287
  64. warp/examples/optim/example_bounce.py +258 -246
  65. warp/examples/optim/example_cloth_throw.py +220 -209
  66. warp/examples/optim/example_diffray.py +564 -536
  67. warp/examples/optim/example_drone.py +862 -835
  68. warp/examples/optim/example_inverse_kinematics.py +174 -168
  69. warp/examples/optim/example_inverse_kinematics_torch.py +183 -169
  70. warp/examples/optim/example_spring_cage.py +237 -231
  71. warp/examples/optim/example_trajectory.py +221 -199
  72. warp/examples/optim/example_walker.py +304 -293
  73. warp/examples/sim/example_cartpole.py +137 -129
  74. warp/examples/sim/example_cloth.py +194 -186
  75. warp/examples/sim/example_granular.py +122 -111
  76. warp/examples/sim/example_granular_collision_sdf.py +195 -186
  77. warp/examples/sim/example_jacobian_ik.py +234 -214
  78. warp/examples/sim/example_particle_chain.py +116 -105
  79. warp/examples/sim/example_quadruped.py +191 -180
  80. warp/examples/sim/example_rigid_chain.py +195 -187
  81. warp/examples/sim/example_rigid_contact.py +187 -177
  82. warp/examples/sim/example_rigid_force.py +125 -125
  83. warp/examples/sim/example_rigid_gyroscopic.py +107 -95
  84. warp/examples/sim/example_rigid_soft_contact.py +132 -122
  85. warp/examples/sim/example_soft_body.py +188 -177
  86. warp/fabric.py +337 -335
  87. warp/fem/__init__.py +61 -27
  88. warp/fem/cache.py +403 -388
  89. warp/fem/dirichlet.py +178 -179
  90. warp/fem/domain.py +262 -263
  91. warp/fem/field/__init__.py +100 -101
  92. warp/fem/field/field.py +148 -149
  93. warp/fem/field/nodal_field.py +298 -299
  94. warp/fem/field/restriction.py +22 -21
  95. warp/fem/field/test.py +180 -181
  96. warp/fem/field/trial.py +183 -183
  97. warp/fem/geometry/__init__.py +16 -19
  98. warp/fem/geometry/closest_point.py +69 -70
  99. warp/fem/geometry/deformed_geometry.py +270 -271
  100. warp/fem/geometry/element.py +748 -744
  101. warp/fem/geometry/geometry.py +184 -186
  102. warp/fem/geometry/grid_2d.py +380 -373
  103. warp/fem/geometry/grid_3d.py +437 -435
  104. warp/fem/geometry/hexmesh.py +953 -953
  105. warp/fem/geometry/nanogrid.py +455 -0
  106. warp/fem/geometry/partition.py +374 -376
  107. warp/fem/geometry/quadmesh_2d.py +532 -532
  108. warp/fem/geometry/tetmesh.py +840 -840
  109. warp/fem/geometry/trimesh_2d.py +577 -577
  110. warp/fem/integrate.py +1684 -1615
  111. warp/fem/operator.py +190 -191
  112. warp/fem/polynomial.py +214 -213
  113. warp/fem/quadrature/__init__.py +2 -2
  114. warp/fem/quadrature/pic_quadrature.py +243 -245
  115. warp/fem/quadrature/quadrature.py +295 -294
  116. warp/fem/space/__init__.py +179 -292
  117. warp/fem/space/basis_space.py +522 -489
  118. warp/fem/space/collocated_function_space.py +100 -105
  119. warp/fem/space/dof_mapper.py +236 -236
  120. warp/fem/space/function_space.py +148 -145
  121. warp/fem/space/grid_2d_function_space.py +148 -267
  122. warp/fem/space/grid_3d_function_space.py +167 -306
  123. warp/fem/space/hexmesh_function_space.py +253 -352
  124. warp/fem/space/nanogrid_function_space.py +202 -0
  125. warp/fem/space/partition.py +350 -350
  126. warp/fem/space/quadmesh_2d_function_space.py +261 -369
  127. warp/fem/space/restriction.py +161 -160
  128. warp/fem/space/shape/__init__.py +90 -15
  129. warp/fem/space/shape/cube_shape_function.py +728 -738
  130. warp/fem/space/shape/shape_function.py +102 -103
  131. warp/fem/space/shape/square_shape_function.py +611 -611
  132. warp/fem/space/shape/tet_shape_function.py +565 -567
  133. warp/fem/space/shape/triangle_shape_function.py +429 -429
  134. warp/fem/space/tetmesh_function_space.py +224 -292
  135. warp/fem/space/topology.py +297 -295
  136. warp/fem/space/trimesh_2d_function_space.py +153 -221
  137. warp/fem/types.py +77 -77
  138. warp/fem/utils.py +495 -495
  139. warp/jax.py +166 -141
  140. warp/jax_experimental.py +341 -339
  141. warp/native/array.h +1081 -1025
  142. warp/native/builtin.h +1603 -1560
  143. warp/native/bvh.cpp +402 -398
  144. warp/native/bvh.cu +533 -525
  145. warp/native/bvh.h +430 -429
  146. warp/native/clang/clang.cpp +496 -464
  147. warp/native/crt.cpp +42 -32
  148. warp/native/crt.h +352 -335
  149. warp/native/cuda_crt.h +1049 -1049
  150. warp/native/cuda_util.cpp +549 -540
  151. warp/native/cuda_util.h +288 -203
  152. warp/native/cutlass_gemm.cpp +34 -34
  153. warp/native/cutlass_gemm.cu +372 -372
  154. warp/native/error.cpp +66 -66
  155. warp/native/error.h +27 -27
  156. warp/native/exports.h +187 -0
  157. warp/native/fabric.h +228 -228
  158. warp/native/hashgrid.cpp +301 -278
  159. warp/native/hashgrid.cu +78 -77
  160. warp/native/hashgrid.h +227 -227
  161. warp/native/initializer_array.h +32 -32
  162. warp/native/intersect.h +1204 -1204
  163. warp/native/intersect_adj.h +365 -365
  164. warp/native/intersect_tri.h +322 -322
  165. warp/native/marching.cpp +2 -2
  166. warp/native/marching.cu +497 -497
  167. warp/native/marching.h +2 -2
  168. warp/native/mat.h +1545 -1498
  169. warp/native/matnn.h +333 -333
  170. warp/native/mesh.cpp +203 -203
  171. warp/native/mesh.cu +292 -293
  172. warp/native/mesh.h +1887 -1887
  173. warp/native/nanovdb/GridHandle.h +366 -0
  174. warp/native/nanovdb/HostBuffer.h +590 -0
  175. warp/native/nanovdb/NanoVDB.h +6624 -4782
  176. warp/native/nanovdb/PNanoVDB.h +3390 -2553
  177. warp/native/noise.h +850 -850
  178. warp/native/quat.h +1112 -1085
  179. warp/native/rand.h +303 -299
  180. warp/native/range.h +108 -108
  181. warp/native/reduce.cpp +156 -156
  182. warp/native/reduce.cu +348 -348
  183. warp/native/runlength_encode.cpp +61 -61
  184. warp/native/runlength_encode.cu +46 -46
  185. warp/native/scan.cpp +30 -30
  186. warp/native/scan.cu +36 -36
  187. warp/native/scan.h +7 -7
  188. warp/native/solid_angle.h +442 -442
  189. warp/native/sort.cpp +94 -94
  190. warp/native/sort.cu +97 -97
  191. warp/native/sort.h +14 -14
  192. warp/native/sparse.cpp +337 -337
  193. warp/native/sparse.cu +544 -544
  194. warp/native/spatial.h +630 -630
  195. warp/native/svd.h +562 -562
  196. warp/native/temp_buffer.h +30 -30
  197. warp/native/vec.h +1177 -1133
  198. warp/native/volume.cpp +529 -297
  199. warp/native/volume.cu +58 -32
  200. warp/native/volume.h +960 -538
  201. warp/native/volume_builder.cu +446 -425
  202. warp/native/volume_builder.h +34 -19
  203. warp/native/volume_impl.h +61 -0
  204. warp/native/warp.cpp +1057 -1052
  205. warp/native/warp.cu +2949 -2828
  206. warp/native/warp.h +321 -305
  207. warp/optim/__init__.py +9 -9
  208. warp/optim/adam.py +120 -120
  209. warp/optim/linear.py +1104 -939
  210. warp/optim/sgd.py +104 -92
  211. warp/render/__init__.py +10 -10
  212. warp/render/render_opengl.py +3356 -3204
  213. warp/render/render_usd.py +768 -749
  214. warp/render/utils.py +152 -150
  215. warp/sim/__init__.py +52 -59
  216. warp/sim/articulation.py +685 -685
  217. warp/sim/collide.py +1594 -1590
  218. warp/sim/import_mjcf.py +489 -481
  219. warp/sim/import_snu.py +220 -221
  220. warp/sim/import_urdf.py +536 -516
  221. warp/sim/import_usd.py +887 -881
  222. warp/sim/inertia.py +316 -317
  223. warp/sim/integrator.py +234 -233
  224. warp/sim/integrator_euler.py +1956 -1956
  225. warp/sim/integrator_featherstone.py +1917 -1991
  226. warp/sim/integrator_xpbd.py +3288 -3312
  227. warp/sim/model.py +4473 -4314
  228. warp/sim/particles.py +113 -112
  229. warp/sim/render.py +417 -403
  230. warp/sim/utils.py +413 -410
  231. warp/sparse.py +1289 -1227
  232. warp/stubs.py +2192 -2469
  233. warp/tape.py +1162 -225
  234. warp/tests/__init__.py +1 -1
  235. warp/tests/__main__.py +4 -4
  236. warp/tests/assets/test_index_grid.nvdb +0 -0
  237. warp/tests/assets/torus.usda +105 -105
  238. warp/tests/aux_test_class_kernel.py +26 -26
  239. warp/tests/aux_test_compile_consts_dummy.py +10 -10
  240. warp/tests/aux_test_conditional_unequal_types_kernels.py +21 -21
  241. warp/tests/aux_test_dependent.py +20 -22
  242. warp/tests/aux_test_grad_customs.py +21 -23
  243. warp/tests/aux_test_reference.py +9 -11
  244. warp/tests/aux_test_reference_reference.py +8 -10
  245. warp/tests/aux_test_square.py +15 -17
  246. warp/tests/aux_test_unresolved_func.py +14 -14
  247. warp/tests/aux_test_unresolved_symbol.py +14 -14
  248. warp/tests/disabled_kinematics.py +237 -239
  249. warp/tests/run_coverage_serial.py +31 -31
  250. warp/tests/test_adam.py +155 -157
  251. warp/tests/test_arithmetic.py +1088 -1124
  252. warp/tests/test_array.py +2415 -2326
  253. warp/tests/test_array_reduce.py +148 -150
  254. warp/tests/test_async.py +666 -656
  255. warp/tests/test_atomic.py +139 -141
  256. warp/tests/test_bool.py +212 -149
  257. warp/tests/test_builtins_resolution.py +1290 -1292
  258. warp/tests/test_bvh.py +162 -171
  259. warp/tests/test_closest_point_edge_edge.py +227 -228
  260. warp/tests/test_codegen.py +562 -553
  261. warp/tests/test_compile_consts.py +217 -101
  262. warp/tests/test_conditional.py +244 -246
  263. warp/tests/test_copy.py +230 -215
  264. warp/tests/test_ctypes.py +630 -632
  265. warp/tests/test_dense.py +65 -67
  266. warp/tests/test_devices.py +89 -98
  267. warp/tests/test_dlpack.py +528 -529
  268. warp/tests/test_examples.py +403 -378
  269. warp/tests/test_fabricarray.py +952 -955
  270. warp/tests/test_fast_math.py +60 -54
  271. warp/tests/test_fem.py +1298 -1278
  272. warp/tests/test_fp16.py +128 -130
  273. warp/tests/test_func.py +336 -337
  274. warp/tests/test_generics.py +596 -571
  275. warp/tests/test_grad.py +885 -640
  276. warp/tests/test_grad_customs.py +331 -336
  277. warp/tests/test_hash_grid.py +208 -164
  278. warp/tests/test_import.py +37 -39
  279. warp/tests/test_indexedarray.py +1132 -1134
  280. warp/tests/test_intersect.py +65 -67
  281. warp/tests/test_jax.py +305 -307
  282. warp/tests/test_large.py +169 -164
  283. warp/tests/test_launch.py +352 -354
  284. warp/tests/test_lerp.py +217 -261
  285. warp/tests/test_linear_solvers.py +189 -171
  286. warp/tests/test_lvalue.py +419 -493
  287. warp/tests/test_marching_cubes.py +63 -65
  288. warp/tests/test_mat.py +1799 -1827
  289. warp/tests/test_mat_lite.py +113 -115
  290. warp/tests/test_mat_scalar_ops.py +2905 -2889
  291. warp/tests/test_math.py +124 -193
  292. warp/tests/test_matmul.py +498 -499
  293. warp/tests/test_matmul_lite.py +408 -410
  294. warp/tests/test_mempool.py +186 -190
  295. warp/tests/test_mesh.py +281 -324
  296. warp/tests/test_mesh_query_aabb.py +226 -241
  297. warp/tests/test_mesh_query_point.py +690 -702
  298. warp/tests/test_mesh_query_ray.py +290 -303
  299. warp/tests/test_mlp.py +274 -276
  300. warp/tests/test_model.py +108 -110
  301. warp/tests/test_module_hashing.py +111 -0
  302. warp/tests/test_modules_lite.py +36 -39
  303. warp/tests/test_multigpu.py +161 -163
  304. warp/tests/test_noise.py +244 -248
  305. warp/tests/test_operators.py +248 -250
  306. warp/tests/test_options.py +121 -125
  307. warp/tests/test_peer.py +131 -137
  308. warp/tests/test_pinned.py +76 -78
  309. warp/tests/test_print.py +52 -54
  310. warp/tests/test_quat.py +2084 -2086
  311. warp/tests/test_rand.py +324 -288
  312. warp/tests/test_reload.py +207 -217
  313. warp/tests/test_rounding.py +177 -179
  314. warp/tests/test_runlength_encode.py +188 -190
  315. warp/tests/test_sim_grad.py +241 -0
  316. warp/tests/test_sim_kinematics.py +89 -97
  317. warp/tests/test_smoothstep.py +166 -168
  318. warp/tests/test_snippet.py +303 -266
  319. warp/tests/test_sparse.py +466 -460
  320. warp/tests/test_spatial.py +2146 -2148
  321. warp/tests/test_special_values.py +362 -0
  322. warp/tests/test_streams.py +484 -473
  323. warp/tests/test_struct.py +708 -675
  324. warp/tests/test_tape.py +171 -148
  325. warp/tests/test_torch.py +741 -743
  326. warp/tests/test_transient_module.py +85 -87
  327. warp/tests/test_types.py +554 -659
  328. warp/tests/test_utils.py +488 -499
  329. warp/tests/test_vec.py +1262 -1268
  330. warp/tests/test_vec_lite.py +71 -73
  331. warp/tests/test_vec_scalar_ops.py +2097 -2099
  332. warp/tests/test_verify_fp.py +92 -94
  333. warp/tests/test_volume.py +961 -736
  334. warp/tests/test_volume_write.py +338 -265
  335. warp/tests/unittest_serial.py +38 -37
  336. warp/tests/unittest_suites.py +367 -359
  337. warp/tests/unittest_utils.py +434 -578
  338. warp/tests/unused_test_misc.py +69 -71
  339. warp/tests/walkthrough_debug.py +85 -85
  340. warp/thirdparty/appdirs.py +598 -598
  341. warp/thirdparty/dlpack.py +143 -143
  342. warp/thirdparty/unittest_parallel.py +563 -561
  343. warp/torch.py +321 -295
  344. warp/types.py +4941 -4450
  345. warp/utils.py +1008 -821
  346. {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/LICENSE.md +126 -126
  347. {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/METADATA +365 -400
  348. warp_lang-1.2.0.dist-info/RECORD +359 -0
  349. warp/examples/assets/cube.usda +0 -42
  350. warp/examples/assets/sphere.usda +0 -56
  351. warp/examples/assets/torus.usda +0 -105
  352. warp/examples/fem/example_convection_diffusion_dg0.py +0 -194
  353. warp/native/nanovdb/PNanoVDBWrite.h +0 -295
  354. warp_lang-1.0.2.dist-info/RECORD +0 -352
  355. {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/WHEEL +0 -0
  356. {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/top_level.txt +0 -0
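
The largest change below is warp/tests/test_grad.py. Its gradcheck helper validates the gradients computed by Warp's tape against central finite differences, f'(x) ~ (f(x + eps) - f(x - eps)) / (2 * eps). The following is a minimal NumPy-only sketch of that technique for reference; the function name and tolerances are illustrative and not part of the package:

    import numpy as np

    def central_difference_grad(f, x, eps=1e-4):
        # Perturb one element at a time and difference the scalar output,
        # mirroring the numerical half of gradcheck in test_grad.py.
        grad = np.zeros_like(x)
        flat_x = x.reshape(-1)  # view onto x, so edits below perturb x in place
        flat_g = grad.reshape(-1)
        for i in range(flat_x.size):
            flat_x[i] += eps
            y1 = f(x)
            flat_x[i] -= 2.0 * eps
            y2 = f(x)
            flat_x[i] += eps  # restore the original value
            flat_g[i] = (y1 - y2) / (2.0 * eps)
        return grad

    # Example: the numerical gradient of sum(x**2) should match 2*x.
    x = np.array([1.0, -2.0, 3.0], dtype=np.float64)
    numerical = central_difference_grad(lambda v: float(np.sum(v * v)), x)
    assert np.allclose(numerical, 2.0 * x, atol=1e-6)
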
warp/tests/test_grad.py CHANGED
@@ -1,640 +1,885 @@
1
- # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
2
- # NVIDIA CORPORATION and its licensors retain all intellectual property
3
- # and proprietary rights in and to this software, related documentation
4
- # and any modifications thereto. Any use, reproduction, disclosure or
5
- # distribution of this software and related documentation without an express
6
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
7
-
8
- import unittest
9
- from typing import Any
10
-
11
- import numpy as np
12
-
13
- import warp as wp
14
- from warp.tests.unittest_utils import *
15
-
16
- wp.init()
17
-
18
-
19
- @wp.kernel
20
- def scalar_grad(x: wp.array(dtype=float), y: wp.array(dtype=float)):
21
- y[0] = x[0] ** 2.0
22
-
23
-
24
- def test_scalar_grad(test, device):
25
- x = wp.array([3.0], dtype=float, device=device, requires_grad=True)
26
- y = wp.zeros_like(x)
27
-
28
- tape = wp.Tape()
29
- with tape:
30
- wp.launch(scalar_grad, dim=1, inputs=[x, y], device=device)
31
-
32
- tape.backward(y)
33
-
34
- assert_np_equal(tape.gradients[x].numpy(), np.array(6.0))
35
-
36
-
37
- @wp.kernel
38
- def for_loop_grad(n: int, x: wp.array(dtype=float), s: wp.array(dtype=float)):
39
- sum = float(0.0)
40
-
41
- for i in range(n):
42
- sum = sum + x[i] * 2.0
43
-
44
- s[0] = sum
45
-
46
-
47
- def test_for_loop_grad(test, device):
48
- n = 32
49
- val = np.ones(n, dtype=np.float32)
50
-
51
- x = wp.array(val, device=device, requires_grad=True)
52
- sum = wp.zeros(1, dtype=wp.float32, device=device, requires_grad=True)
53
-
54
- tape = wp.Tape()
55
- with tape:
56
- wp.launch(for_loop_grad, dim=1, inputs=[n, x, sum], device=device)
57
-
58
- # ensure forward pass outputs correct
59
- assert_np_equal(sum.numpy(), 2.0 * np.sum(x.numpy()))
60
-
61
- tape.backward(loss=sum)
62
-
63
- # ensure forward pass outputs persist
64
- assert_np_equal(sum.numpy(), 2.0 * np.sum(x.numpy()))
65
- # ensure gradients correct
66
- assert_np_equal(tape.gradients[x].numpy(), 2.0 * val)
67
-
68
-
69
- def test_for_loop_graph_grad(test, device):
70
- wp.load_module(device=device)
71
-
72
- n = 32
73
- val = np.ones(n, dtype=np.float32)
74
-
75
- x = wp.array(val, device=device, requires_grad=True)
76
- sum = wp.zeros(1, dtype=wp.float32, device=device, requires_grad=True)
77
-
78
- wp.capture_begin(device, force_module_load=False)
79
- try:
80
- tape = wp.Tape()
81
- with tape:
82
- wp.launch(for_loop_grad, dim=1, inputs=[n, x, sum], device=device)
83
-
84
- tape.backward(loss=sum)
85
- finally:
86
- graph = wp.capture_end(device)
87
-
88
- wp.capture_launch(graph)
89
- wp.synchronize_device(device)
90
-
91
- # ensure forward pass outputs persist
92
- assert_np_equal(sum.numpy(), 2.0 * np.sum(x.numpy()))
93
- # ensure gradients correct
94
- assert_np_equal(x.grad.numpy(), 2.0 * val)
95
-
96
- wp.capture_launch(graph)
97
- wp.synchronize_device(device)
98
-
99
-
100
- @wp.kernel
101
- def for_loop_nested_if_grad(n: int, x: wp.array(dtype=float), s: wp.array(dtype=float)):
102
- sum = float(0.0)
103
-
104
- for i in range(n):
105
- if i < 16:
106
- if i < 8:
107
- sum = sum + x[i] * 2.0
108
- else:
109
- sum = sum + x[i] * 4.0
110
- else:
111
- if i < 24:
112
- sum = sum + x[i] * 6.0
113
- else:
114
- sum = sum + x[i] * 8.0
115
-
116
- s[0] = sum
117
-
118
-
119
- def test_for_loop_nested_if_grad(test, device):
120
- n = 32
121
- val = np.ones(n, dtype=np.float32)
122
- # fmt: off
123
- expected_val = [
124
- 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
125
- 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0,
126
- 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0,
127
- 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0,
128
- ]
129
- expected_grad = [
130
- 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
131
- 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0,
132
- 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0,
133
- 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0,
134
- ]
135
- # fmt: on
136
-
137
- x = wp.array(val, device=device, requires_grad=True)
138
- sum = wp.zeros(1, dtype=wp.float32, device=device, requires_grad=True)
139
-
140
- tape = wp.Tape()
141
- with tape:
142
- wp.launch(for_loop_nested_if_grad, dim=1, inputs=[n, x, sum], device=device)
143
-
144
- assert_np_equal(sum.numpy(), np.sum(expected_val))
145
-
146
- tape.backward(loss=sum)
147
-
148
- assert_np_equal(sum.numpy(), np.sum(expected_val))
149
- assert_np_equal(tape.gradients[x].numpy(), np.array(expected_grad))
150
-
151
-
152
- @wp.kernel
153
- def for_loop_grad_nested(n: int, x: wp.array(dtype=float), s: wp.array(dtype=float)):
154
- sum = float(0.0)
155
-
156
- for i in range(n):
157
- for j in range(n):
158
- sum = sum + x[i * n + j] * float(i * n + j) + 1.0
159
-
160
- s[0] = sum
161
-
162
-
163
- def test_for_loop_nested_for_grad(test, device):
164
- x = wp.zeros(9, dtype=float, device=device, requires_grad=True)
165
- s = wp.zeros(1, dtype=float, device=device, requires_grad=True)
166
-
167
- tape = wp.Tape()
168
- with tape:
169
- wp.launch(for_loop_grad_nested, dim=1, inputs=[3, x, s], device=device)
170
-
171
- tape.backward(s)
172
-
173
- assert_np_equal(s.numpy(), np.array([9.0]))
174
- assert_np_equal(tape.gradients[x].numpy(), np.arange(0.0, 9.0, 1.0))
175
-
176
-
177
- # differentiating through most while loops is not supported
178
- # since doing things like i = i + 1 breaks adjointing
179
-
180
- # @wp.kernel
181
- # def while_loop_grad(n: int,
182
- # x: wp.array(dtype=float),
183
- # c: wp.array(dtype=int),
184
- # s: wp.array(dtype=float)):
185
-
186
- # tid = wp.tid()
187
-
188
- # while i < n:
189
- # s[0] = s[0] + x[i]*2.0
190
- # i = i + 1
191
-
192
-
193
- # def test_while_loop_grad(test, device):
194
-
195
- # n = 32
196
- # x = wp.array(np.ones(n, dtype=np.float32), device=device, requires_grad=True)
197
- # c = wp.zeros(1, dtype=int, device=device)
198
- # sum = wp.zeros(1, dtype=wp.float32, device=device)
199
-
200
- # tape = wp.Tape()
201
- # with tape:
202
- # wp.launch(while_loop_grad, dim=1, inputs=[n, x, c, sum], device=device)
203
-
204
- # tape.backward(loss=sum)
205
-
206
- # assert_np_equal(sum.numpy(), 2.0*np.sum(x.numpy()))
207
- # assert_np_equal(tape.gradients[x].numpy(), 2.0*np.ones_like(x.numpy()))
208
-
209
-
210
- @wp.kernel
211
- def preserve_outputs(
212
- n: int, x: wp.array(dtype=float), c: wp.array(dtype=float), s1: wp.array(dtype=float), s2: wp.array(dtype=float)
213
- ):
214
- tid = wp.tid()
215
-
216
- # plain store
217
- c[tid] = x[tid] * 2.0
218
-
219
- # atomic stores
220
- wp.atomic_add(s1, 0, x[tid] * 3.0)
221
- wp.atomic_sub(s2, 0, x[tid] * 2.0)
222
-
223
-
224
- # tests that outputs from the forward pass are
225
- # preserved by the backward pass, i.e.: stores
226
- # are omitted during the forward replay
227
- def test_preserve_outputs_grad(test, device):
228
- n = 32
229
-
230
- val = np.ones(n, dtype=np.float32)
231
-
232
- x = wp.array(val, device=device, requires_grad=True)
233
- c = wp.zeros_like(x)
234
-
235
- s1 = wp.zeros(1, dtype=wp.float32, device=device, requires_grad=True)
236
- s2 = wp.zeros(1, dtype=wp.float32, device=device, requires_grad=True)
237
-
238
- tape = wp.Tape()
239
- with tape:
240
- wp.launch(preserve_outputs, dim=n, inputs=[n, x, c, s1, s2], device=device)
241
-
242
- # ensure forward pass results are correct
243
- assert_np_equal(x.numpy(), val)
244
- assert_np_equal(c.numpy(), val * 2.0)
245
- assert_np_equal(s1.numpy(), np.array(3.0 * n))
246
- assert_np_equal(s2.numpy(), np.array(-2.0 * n))
247
-
248
- # run backward on first loss
249
- tape.backward(loss=s1)
250
-
251
- # ensure inputs, copy and sum are unchanged by backwards pass
252
- assert_np_equal(x.numpy(), val)
253
- assert_np_equal(c.numpy(), val * 2.0)
254
- assert_np_equal(s1.numpy(), np.array(3.0 * n))
255
- assert_np_equal(s2.numpy(), np.array(-2.0 * n))
256
-
257
- # ensure gradients are correct
258
- assert_np_equal(tape.gradients[x].numpy(), 3.0 * val)
259
-
260
- # run backward on second loss
261
- tape.zero()
262
- tape.backward(loss=s2)
263
-
264
- assert_np_equal(x.numpy(), val)
265
- assert_np_equal(c.numpy(), val * 2.0)
266
- assert_np_equal(s1.numpy(), np.array(3.0 * n))
267
- assert_np_equal(s2.numpy(), np.array(-2.0 * n))
268
-
269
- # ensure gradients are correct
270
- assert_np_equal(tape.gradients[x].numpy(), -2.0 * val)
271
-
272
-
273
- def gradcheck(func, func_name, inputs, device, eps=1e-4, tol=1e-2):
274
- """
275
- Checks that the gradient of the Warp kernel is correct by comparing it to the
276
- numerical gradient computed using finite differences.
277
- """
278
-
279
- kernel = wp.Kernel(func=func, key=func_name)
280
-
281
- def f(xs):
282
- # call the kernel without taping for finite differences
283
- wp_xs = [wp.array(xs[i], ndim=1, dtype=inputs[i].dtype, device=device) for i in range(len(inputs))]
284
- output = wp.zeros(1, dtype=wp.float32, device=device)
285
- wp.launch(kernel, dim=1, inputs=wp_xs, outputs=[output], device=device)
286
- return output.numpy()[0]
287
-
288
- # compute numerical gradient
289
- numerical_grad = []
290
- np_xs = []
291
- for i in range(len(inputs)):
292
- np_xs.append(inputs[i].numpy().flatten().copy())
293
- numerical_grad.append(np.zeros_like(np_xs[-1]))
294
- inputs[i].requires_grad = True
295
-
296
- for i in range(len(np_xs)):
297
- for j in range(len(np_xs[i])):
298
- np_xs[i][j] += eps
299
- y1 = f(np_xs)
300
- np_xs[i][j] -= 2 * eps
301
- y2 = f(np_xs)
302
- np_xs[i][j] += eps
303
- numerical_grad[i][j] = (y1 - y2) / (2 * eps)
304
-
305
- # compute analytical gradient
306
- tape = wp.Tape()
307
- output = wp.zeros(1, dtype=wp.float32, device=device, requires_grad=True)
308
- with tape:
309
- wp.launch(kernel, dim=1, inputs=inputs, outputs=[output], device=device)
310
-
311
- tape.backward(loss=output)
312
-
313
- # compare gradients
314
- for i in range(len(inputs)):
315
- grad = tape.gradients[inputs[i]]
316
- assert_np_equal(grad.numpy(), numerical_grad[i], tol=tol)
317
-
318
- tape.zero()
319
-
320
-
321
- def test_vector_math_grad(test, device):
322
- rng = np.random.default_rng(123)
323
-
324
- # test unary operations
325
- for dim, vec_type in [(2, wp.vec2), (3, wp.vec3), (4, wp.vec4), (4, wp.quat)]:
326
-
327
- def check_length(vs: wp.array(dtype=vec_type), out: wp.array(dtype=float)):
328
- out[0] = wp.length(vs[0])
329
-
330
- def check_length_sq(vs: wp.array(dtype=vec_type), out: wp.array(dtype=float)):
331
- out[0] = wp.length_sq(vs[0])
332
-
333
- def check_normalize(vs: wp.array(dtype=vec_type), out: wp.array(dtype=float)):
334
- out[0] = wp.length_sq(wp.normalize(vs[0])) # compress to scalar output
335
-
336
- # run the tests with 5 different random inputs
337
- for _ in range(5):
338
- x = wp.array(rng.random(size=(1, dim), dtype=np.float32), dtype=vec_type, device=device)
339
- gradcheck(check_length, f"check_length_{vec_type.__name__}", [x], device)
340
- gradcheck(check_length_sq, f"check_length_sq_{vec_type.__name__}", [x], device)
341
- gradcheck(check_normalize, f"check_normalize_{vec_type.__name__}", [x], device)
342
-
343
-
344
- def test_matrix_math_grad(test, device):
345
- rng = np.random.default_rng(123)
346
-
347
- # test unary operations
348
- for dim, mat_type in [(2, wp.mat22), (3, wp.mat33), (4, wp.mat44)]:
349
-
350
- def check_determinant(vs: wp.array(dtype=mat_type), out: wp.array(dtype=float)):
351
- out[0] = wp.determinant(vs[0])
352
-
353
- def check_trace(vs: wp.array(dtype=mat_type), out: wp.array(dtype=float)):
354
- out[0] = wp.trace(vs[0])
355
-
356
- # run the tests with 5 different random inputs
357
- for _ in range(5):
358
- x = wp.array(rng.random(size=(1, dim, dim), dtype=np.float32), ndim=1, dtype=mat_type, device=device)
359
- gradcheck(check_determinant, f"check_length_{mat_type.__name__}", [x], device)
360
- gradcheck(check_trace, f"check_length_sq_{mat_type.__name__}", [x], device)
361
-
362
-
363
- def test_3d_math_grad(test, device):
364
- rng = np.random.default_rng(123)
365
-
366
- # test binary operations
367
- def check_cross(vs: wp.array(dtype=wp.vec3), out: wp.array(dtype=float)):
368
- out[0] = wp.length(wp.cross(vs[0], vs[1]))
369
-
370
- def check_dot(vs: wp.array(dtype=wp.vec3), out: wp.array(dtype=float)):
371
- out[0] = wp.dot(vs[0], vs[1])
372
-
373
- def check_mat33(vs: wp.array(dtype=wp.vec3), out: wp.array(dtype=float)):
374
- a = vs[0]
375
- b = vs[1]
376
- c = wp.cross(a, b)
377
- m = wp.mat33(a[0], b[0], c[0], a[1], b[1], c[1], a[2], b[2], c[2])
378
- out[0] = wp.determinant(m)
379
-
380
- def check_trace_diagonal(vs: wp.array(dtype=wp.vec3), out: wp.array(dtype=float)):
381
- a = vs[0]
382
- b = vs[1]
383
- c = wp.cross(a, b)
384
- m = wp.mat33(
385
- 1.0 / (a[0] + 10.0),
386
- 0.0,
387
- 0.0,
388
- 0.0,
389
- 1.0 / (b[1] + 10.0),
390
- 0.0,
391
- 0.0,
392
- 0.0,
393
- 1.0 / (c[2] + 10.0),
394
- )
395
- out[0] = wp.trace(m)
396
-
397
- def check_rot_rpy(vs: wp.array(dtype=wp.vec3), out: wp.array(dtype=float)):
398
- v = vs[0]
399
- q = wp.quat_rpy(v[0], v[1], v[2])
400
- out[0] = wp.length(wp.quat_rotate(q, vs[1]))
401
-
402
- def check_rot_axis_angle(vs: wp.array(dtype=wp.vec3), out: wp.array(dtype=float)):
403
- v = wp.normalize(vs[0])
404
- q = wp.quat_from_axis_angle(v, 0.5)
405
- out[0] = wp.length(wp.quat_rotate(q, vs[1]))
406
-
407
- def check_rot_quat_inv(vs: wp.array(dtype=wp.vec3), out: wp.array(dtype=float)):
408
- v = vs[0]
409
- q = wp.normalize(wp.quat(v[0], v[1], v[2], 1.0))
410
- out[0] = wp.length(wp.quat_rotate_inv(q, vs[1]))
411
-
412
- # run the tests with 5 different random inputs
413
- for _ in range(5):
414
- x = wp.array(
415
- rng.standard_normal(size=(2, 3), dtype=np.float32), dtype=wp.vec3, device=device, requires_grad=True
416
- )
417
- gradcheck(check_cross, "check_cross_3d", [x], device)
418
- gradcheck(check_dot, "check_dot_3d", [x], device)
419
- gradcheck(check_mat33, "check_mat33_3d", [x], device, eps=2e-2)
420
- gradcheck(check_trace_diagonal, "check_trace_diagonal_3d", [x], device)
421
- gradcheck(check_rot_rpy, "check_rot_rpy_3d", [x], device)
422
- gradcheck(check_rot_axis_angle, "check_rot_axis_angle_3d", [x], device)
423
- gradcheck(check_rot_quat_inv, "check_rot_quat_inv_3d", [x], device)
424
-
425
-
426
- def test_multi_valued_function_grad(test, device):
427
- rng = np.random.default_rng(123)
428
-
429
- @wp.func
430
- def multi_valued(x: float, y: float, z: float):
431
- return wp.sin(x), wp.cos(y) * z, wp.sqrt(wp.abs(z)) / wp.abs(x)
432
-
433
- # test multi-valued functions
434
- def check_multi_valued(vs: wp.array(dtype=wp.vec3), out: wp.array(dtype=float)):
435
- tid = wp.tid()
436
- v = vs[tid]
437
- a, b, c = multi_valued(v[0], v[1], v[2])
438
- out[tid] = a + b + c
439
-
440
- # run the tests with 5 different random inputs
441
- for _ in range(5):
442
- x = wp.array(
443
- rng.standard_normal(size=(2, 3), dtype=np.float32), dtype=wp.vec3, device=device, requires_grad=True
444
- )
445
- gradcheck(check_multi_valued, "check_multi_valued_3d", [x], device)
446
-
447
-
448
- def test_mesh_grad(test, device):
449
- pos = wp.array(
450
- [
451
- [0.0, 0.0, 0.0],
452
- [1.0, 0.0, 0.0],
453
- [0.0, 1.0, 0.0],
454
- [0.0, 0.0, 1.0],
455
- ],
456
- dtype=wp.vec3,
457
- device=device,
458
- requires_grad=True,
459
- )
460
- indices = wp.array(
461
- [0, 1, 2, 0, 2, 3, 0, 3, 1, 1, 3, 2],
462
- dtype=wp.int32,
463
- device=device,
464
- )
465
-
466
- mesh = wp.Mesh(points=pos, indices=indices)
467
-
468
- @wp.func
469
- def compute_triangle_area(mesh_id: wp.uint64, tri_id: int):
470
- mesh = wp.mesh_get(mesh_id)
471
- i, j, k = mesh.indices[tri_id * 3 + 0], mesh.indices[tri_id * 3 + 1], mesh.indices[tri_id * 3 + 2]
472
- a = mesh.points[i]
473
- b = mesh.points[j]
474
- c = mesh.points[k]
475
- return wp.length(wp.cross(b - a, c - a)) * 0.5
476
-
477
- @wp.kernel
478
- def compute_area(mesh_id: wp.uint64, out: wp.array(dtype=wp.float32)):
479
- wp.atomic_add(out, 0, compute_triangle_area(mesh_id, wp.tid()))
480
-
481
- num_tris = int(len(indices) / 3)
482
-
483
- # compute analytical gradient
484
- tape = wp.Tape()
485
- output = wp.zeros(1, dtype=wp.float32, device=device, requires_grad=True)
486
- with tape:
487
- wp.launch(compute_area, dim=num_tris, inputs=[mesh.id], outputs=[output], device=device)
488
-
489
- tape.backward(loss=output)
490
-
491
- ad_grad = mesh.points.grad.numpy()
492
-
493
- # compute finite differences
494
- eps = 1e-3
495
- pos_np = pos.numpy()
496
- fd_grad = np.zeros_like(ad_grad)
497
-
498
- for i in range(len(pos)):
499
- for j in range(3):
500
- pos_np[i, j] += eps
501
- pos = wp.array(pos_np, dtype=wp.vec3, device=device)
502
- mesh = wp.Mesh(points=pos, indices=indices)
503
- output.zero_()
504
- wp.launch(compute_area, dim=num_tris, inputs=[mesh.id], outputs=[output], device=device)
505
- f1 = output.numpy()[0]
506
- pos_np[i, j] -= 2 * eps
507
- pos = wp.array(pos_np, dtype=wp.vec3, device=device)
508
- mesh = wp.Mesh(points=pos, indices=indices)
509
- output.zero_()
510
- wp.launch(compute_area, dim=num_tris, inputs=[mesh.id], outputs=[output], device=device)
511
- f2 = output.numpy()[0]
512
- pos_np[i, j] += eps
513
- fd_grad[i, j] = (f1 - f2) / (2 * eps)
514
-
515
- assert np.allclose(ad_grad, fd_grad, atol=1e-3)
516
-
517
-
518
- @wp.func
519
- def name_clash(a: float, b: float) -> float:
520
- return a + b
521
-
522
-
523
- @wp.func_grad(name_clash)
524
- def adj_name_clash(a: float, b: float, adj_ret: float):
525
- # names `adj_a` and `adj_b` must not clash with function args of generated function
526
- adj_a = 0.0
527
- adj_b = 0.0
528
- if a < 0.0:
529
- adj_a = adj_ret
530
- if b > 0.0:
531
- adj_b = adj_ret
532
-
533
- wp.adjoint[a] += adj_a
534
- wp.adjoint[b] += adj_b
535
-
536
-
537
- @wp.kernel
538
- def name_clash_kernel(
539
- input_a: wp.array(dtype=float),
540
- input_b: wp.array(dtype=float),
541
- output: wp.array(dtype=float),
542
- ):
543
- tid = wp.tid()
544
- output[tid] = name_clash(input_a[tid], input_b[tid])
545
-
546
-
547
- def test_name_clash(test, device):
548
- # tests that no name clashes occur when variable names such as `adj_a` are used in custom gradient code
549
- with wp.ScopedDevice(device):
550
- input_a = wp.array([1.0, -2.0, 3.0], dtype=wp.float32, requires_grad=True)
551
- input_b = wp.array([4.0, 5.0, -6.0], dtype=wp.float32, requires_grad=True)
552
- output = wp.zeros(3, dtype=wp.float32, requires_grad=True)
553
-
554
- tape = wp.Tape()
555
- with tape:
556
- wp.launch(name_clash_kernel, dim=len(input_a), inputs=[input_a, input_b], outputs=[output])
557
-
558
- tape.backward(grads={output: wp.array(np.ones(len(input_a), dtype=np.float32))})
559
-
560
- assert_np_equal(input_a.grad.numpy(), np.array([0.0, 1.0, 0.0]))
561
- assert_np_equal(input_b.grad.numpy(), np.array([1.0, 1.0, 0.0]))
562
-
563
-
564
- @wp.struct
565
- class NestedStruct:
566
- v: wp.vec2
567
-
568
-
569
- @wp.struct
570
- class ParentStruct:
571
- a: float
572
- n: NestedStruct
573
-
574
-
575
- @wp.func
576
- def noop(a: Any):
577
- pass
578
-
579
-
580
- @wp.func
581
- def sum2(v: wp.vec2):
582
- return v[0] + v[1]
583
-
584
-
585
- @wp.kernel
586
- def test_struct_attribute_gradient_kernel(src: wp.array(dtype=float), res: wp.array(dtype=float)):
587
- tid = wp.tid()
588
-
589
- p = ParentStruct(src[tid], NestedStruct(wp.vec2(2.0 * src[tid])))
590
-
591
- # test that we are not losing gradients when accessing attributes
592
- noop(p.a)
593
- noop(p.n)
594
- noop(p.n.v)
595
-
596
- res[tid] = p.a + sum2(p.n.v)
597
-
598
-
599
- def test_struct_attribute_gradient(test_case, device):
600
- src = wp.array([1], dtype=float, requires_grad=True)
601
- res = wp.empty_like(src)
602
-
603
- tape = wp.Tape()
604
- with tape:
605
- wp.launch(test_struct_attribute_gradient_kernel, dim=1, inputs=[src, res])
606
-
607
- res.grad.fill_(1.0)
608
- tape.backward()
609
-
610
- test_case.assertEqual(src.grad.numpy()[0], 5.0)
611
-
612
-
613
- devices = get_test_devices()
614
-
615
-
616
- class TestGrad(unittest.TestCase):
617
- pass
618
-
619
-
620
- # add_function_test(TestGrad, "test_while_loop_grad", test_while_loop_grad, devices=devices)
621
- add_function_test(TestGrad, "test_for_loop_nested_for_grad", test_for_loop_nested_for_grad, devices=devices)
622
- add_function_test(TestGrad, "test_scalar_grad", test_scalar_grad, devices=devices)
623
- add_function_test(TestGrad, "test_for_loop_grad", test_for_loop_grad, devices=devices)
624
- add_function_test(
625
- TestGrad, "test_for_loop_graph_grad", test_for_loop_graph_grad, devices=get_unique_cuda_test_devices()
626
- )
627
- add_function_test(TestGrad, "test_for_loop_nested_if_grad", test_for_loop_nested_if_grad, devices=devices)
628
- add_function_test(TestGrad, "test_preserve_outputs_grad", test_preserve_outputs_grad, devices=devices)
629
- add_function_test(TestGrad, "test_vector_math_grad", test_vector_math_grad, devices=devices)
630
- add_function_test(TestGrad, "test_matrix_math_grad", test_matrix_math_grad, devices=devices)
631
- add_function_test(TestGrad, "test_3d_math_grad", test_3d_math_grad, devices=devices)
632
- add_function_test(TestGrad, "test_multi_valued_function_grad", test_multi_valued_function_grad, devices=devices)
633
- add_function_test(TestGrad, "test_mesh_grad", test_mesh_grad, devices=devices)
634
- add_function_test(TestGrad, "test_name_clash", test_name_clash, devices=devices)
635
- add_function_test(TestGrad, "test_struct_attribute_gradient", test_struct_attribute_gradient, devices=devices)
636
-
637
-
638
- if __name__ == "__main__":
639
- wp.build.clear_kernel_cache()
640
- unittest.main(verbosity=2, failfast=False)
1
+ # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
2
+ # NVIDIA CORPORATION and its licensors retain all intellectual property
3
+ # and proprietary rights in and to this software, related documentation
4
+ # and any modifications thereto. Any use, reproduction, disclosure or
5
+ # distribution of this software and related documentation without an express
6
+ # license agreement from NVIDIA CORPORATION is strictly prohibited.
7
+
8
+ import unittest
9
+ from typing import Any
10
+
11
+ import numpy as np
12
+
13
+ import warp as wp
14
+ from warp.tests.unittest_utils import *
15
+
16
+
17
+ @wp.kernel
18
+ def scalar_grad(x: wp.array(dtype=float), y: wp.array(dtype=float)):
19
+ y[0] = x[0] ** 2.0
20
+
21
+
22
+ def test_scalar_grad(test, device):
23
+ x = wp.array([3.0], dtype=float, device=device, requires_grad=True)
24
+ y = wp.zeros_like(x)
25
+
26
+ tape = wp.Tape()
27
+ with tape:
28
+ wp.launch(scalar_grad, dim=1, inputs=[x, y], device=device)
29
+
30
+ tape.backward(y)
31
+
32
+ assert_np_equal(tape.gradients[x].numpy(), np.array(6.0))
33
+
34
+
35
+ @wp.kernel
36
+ def for_loop_grad(n: int, x: wp.array(dtype=float), s: wp.array(dtype=float)):
37
+ sum = float(0.0)
38
+
39
+ for i in range(n):
40
+ sum = sum + x[i] * 2.0
41
+
42
+ s[0] = sum
43
+
44
+
45
+ def test_for_loop_grad(test, device):
46
+ n = 32
47
+ val = np.ones(n, dtype=np.float32)
48
+
49
+ x = wp.array(val, device=device, requires_grad=True)
50
+ sum = wp.zeros(1, dtype=wp.float32, device=device, requires_grad=True)
51
+
52
+ tape = wp.Tape()
53
+ with tape:
54
+ wp.launch(for_loop_grad, dim=1, inputs=[n, x, sum], device=device)
55
+
56
+ # ensure forward pass outputs correct
57
+ assert_np_equal(sum.numpy(), 2.0 * np.sum(x.numpy()))
58
+
59
+ tape.backward(loss=sum)
60
+
61
+ # ensure forward pass outputs persist
62
+ assert_np_equal(sum.numpy(), 2.0 * np.sum(x.numpy()))
63
+ # ensure gradients correct
64
+ assert_np_equal(tape.gradients[x].numpy(), 2.0 * val)
65
+
66
+
67
+ def test_for_loop_graph_grad(test, device):
68
+ wp.load_module(device=device)
69
+
70
+ n = 32
71
+ val = np.ones(n, dtype=np.float32)
72
+
73
+ x = wp.array(val, device=device, requires_grad=True)
74
+ sum = wp.zeros(1, dtype=wp.float32, device=device, requires_grad=True)
75
+
76
+ wp.capture_begin(device, force_module_load=False)
77
+ try:
78
+ tape = wp.Tape()
79
+ with tape:
80
+ wp.launch(for_loop_grad, dim=1, inputs=[n, x, sum], device=device)
81
+
82
+ tape.backward(loss=sum)
83
+ finally:
84
+ graph = wp.capture_end(device)
85
+
86
+ wp.capture_launch(graph)
87
+ wp.synchronize_device(device)
88
+
89
+ # ensure forward pass outputs persist
90
+ assert_np_equal(sum.numpy(), 2.0 * np.sum(x.numpy()))
91
+ # ensure gradients correct
92
+ assert_np_equal(x.grad.numpy(), 2.0 * val)
93
+
94
+ wp.capture_launch(graph)
95
+ wp.synchronize_device(device)
96
+
97
+
98
+ @wp.kernel
99
+ def for_loop_nested_if_grad(n: int, x: wp.array(dtype=float), s: wp.array(dtype=float)):
100
+ sum = float(0.0)
101
+
102
+ for i in range(n):
103
+ if i < 16:
104
+ if i < 8:
105
+ sum = sum + x[i] * 2.0
106
+ else:
107
+ sum = sum + x[i] * 4.0
108
+ else:
109
+ if i < 24:
110
+ sum = sum + x[i] * 6.0
111
+ else:
112
+ sum = sum + x[i] * 8.0
113
+
114
+ s[0] = sum
115
+
116
+
117
+ def test_for_loop_nested_if_grad(test, device):
118
+ n = 32
119
+ val = np.ones(n, dtype=np.float32)
120
+ # fmt: off
121
+ expected_val = [
122
+ 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
123
+ 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0,
124
+ 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0,
125
+ 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0,
126
+ ]
127
+ expected_grad = [
128
+ 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
129
+ 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0,
130
+ 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0,
131
+ 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0,
132
+ ]
133
+ # fmt: on
134
+
135
+ x = wp.array(val, device=device, requires_grad=True)
136
+ sum = wp.zeros(1, dtype=wp.float32, device=device, requires_grad=True)
137
+
138
+ tape = wp.Tape()
139
+ with tape:
140
+ wp.launch(for_loop_nested_if_grad, dim=1, inputs=[n, x, sum], device=device)
141
+
142
+ assert_np_equal(sum.numpy(), np.sum(expected_val))
143
+
144
+ tape.backward(loss=sum)
145
+
146
+ assert_np_equal(sum.numpy(), np.sum(expected_val))
147
+ assert_np_equal(tape.gradients[x].numpy(), np.array(expected_grad))
148
+
149
+
150
+ @wp.kernel
151
+ def for_loop_grad_nested(n: int, x: wp.array(dtype=float), s: wp.array(dtype=float)):
152
+ sum = float(0.0)
153
+
154
+ for i in range(n):
155
+ for j in range(n):
156
+ sum = sum + x[i * n + j] * float(i * n + j) + 1.0
157
+
158
+ s[0] = sum
159
+
160
+
161
+ def test_for_loop_nested_for_grad(test, device):
162
+ x = wp.zeros(9, dtype=float, device=device, requires_grad=True)
163
+ s = wp.zeros(1, dtype=float, device=device, requires_grad=True)
164
+
165
+ tape = wp.Tape()
166
+ with tape:
167
+ wp.launch(for_loop_grad_nested, dim=1, inputs=[3, x, s], device=device)
168
+
169
+ tape.backward(s)
170
+
171
+ assert_np_equal(s.numpy(), np.array([9.0]))
172
+ assert_np_equal(tape.gradients[x].numpy(), np.arange(0.0, 9.0, 1.0))
173
+
174
+
175
+ # differentiating thought most while loops is not supported
176
+ # since doing things like i = i + 1 breaks adjointing
177
+
178
+ # @wp.kernel
179
+ # def while_loop_grad(n: int,
180
+ # x: wp.array(dtype=float),
181
+ # c: wp.array(dtype=int),
182
+ # s: wp.array(dtype=float)):
183
+
184
+ # tid = wp.tid()
185
+
186
+ # while i < n:
187
+ # s[0] = s[0] + x[i]*2.0
188
+ # i = i + 1
189
+
190
+
191
+ # def test_while_loop_grad(test, device):
192
+
193
+ # n = 32
194
+ # x = wp.array(np.ones(n, dtype=np.float32), device=device, requires_grad=True)
195
+ # c = wp.zeros(1, dtype=int, device=device)
196
+ # sum = wp.zeros(1, dtype=wp.float32, device=device)
197
+
198
+ # tape = wp.Tape()
199
+ # with tape:
200
+ # wp.launch(while_loop_grad, dim=1, inputs=[n, x, c, sum], device=device)
201
+
202
+ # tape.backward(loss=sum)
203
+
204
+ # assert_np_equal(sum.numpy(), 2.0*np.sum(x.numpy()))
205
+ # assert_np_equal(tape.gradients[x].numpy(), 2.0*np.ones_like(x.numpy()))
206
+
207
+
208
+ @wp.kernel
209
+ def preserve_outputs(
210
+ n: int, x: wp.array(dtype=float), c: wp.array(dtype=float), s1: wp.array(dtype=float), s2: wp.array(dtype=float)
211
+ ):
212
+ tid = wp.tid()
213
+
214
+ # plain store
215
+ c[tid] = x[tid] * 2.0
216
+
217
+ # atomic stores
218
+ wp.atomic_add(s1, 0, x[tid] * 3.0)
219
+ wp.atomic_sub(s2, 0, x[tid] * 2.0)
220
+
221
+
222
+ # tests that outputs from the forward pass are
223
+ # preserved by the backward pass, i.e.: stores
224
+ # are omitted during the forward reply
225
+ def test_preserve_outputs_grad(test, device):
226
+ n = 32
227
+
228
+ val = np.ones(n, dtype=np.float32)
229
+
230
+ x = wp.array(val, device=device, requires_grad=True)
231
+ c = wp.zeros_like(x)
232
+
233
+ s1 = wp.zeros(1, dtype=wp.float32, device=device, requires_grad=True)
234
+ s2 = wp.zeros(1, dtype=wp.float32, device=device, requires_grad=True)
235
+
236
+ tape = wp.Tape()
237
+ with tape:
238
+ wp.launch(preserve_outputs, dim=n, inputs=[n, x, c, s1, s2], device=device)
239
+
240
+ # ensure forward pass results are correct
241
+ assert_np_equal(x.numpy(), val)
242
+ assert_np_equal(c.numpy(), val * 2.0)
243
+ assert_np_equal(s1.numpy(), np.array(3.0 * n))
244
+ assert_np_equal(s2.numpy(), np.array(-2.0 * n))
245
+
246
+ # run backward on first loss
247
+ tape.backward(loss=s1)
248
+
249
+ # ensure inputs, copy and sum are unchanged by backwards pass
250
+ assert_np_equal(x.numpy(), val)
251
+ assert_np_equal(c.numpy(), val * 2.0)
252
+ assert_np_equal(s1.numpy(), np.array(3.0 * n))
253
+ assert_np_equal(s2.numpy(), np.array(-2.0 * n))
254
+
255
+ # ensure gradients are correct
256
+ assert_np_equal(tape.gradients[x].numpy(), 3.0 * val)
257
+
258
+ # run backward on second loss
259
+ tape.zero()
260
+ tape.backward(loss=s2)
261
+
262
+ assert_np_equal(x.numpy(), val)
263
+ assert_np_equal(c.numpy(), val * 2.0)
264
+ assert_np_equal(s1.numpy(), np.array(3.0 * n))
265
+ assert_np_equal(s2.numpy(), np.array(-2.0 * n))
266
+
267
+ # ensure gradients are correct
268
+ assert_np_equal(tape.gradients[x].numpy(), -2.0 * val)
269
+
270
+
271
+ def gradcheck(func, func_name, inputs, device, eps=1e-4, tol=1e-2):
272
+ """
273
+ Checks that the gradient of the Warp kernel is correct by comparing it to the
274
+ numerical gradient computed using finite differences.
275
+ """
276
+
277
+ kernel = wp.Kernel(func=func, key=func_name)
278
+
279
+ def f(xs):
280
+ # call the kernel without taping for finite differences
281
+ wp_xs = [wp.array(xs[i], ndim=1, dtype=inputs[i].dtype, device=device) for i in range(len(inputs))]
282
+ output = wp.zeros(1, dtype=wp.float32, device=device)
283
+ wp.launch(kernel, dim=1, inputs=wp_xs, outputs=[output], device=device)
284
+ return output.numpy()[0]
285
+
286
+ # compute numerical gradient
287
+ numerical_grad = []
288
+ np_xs = []
289
+ for i in range(len(inputs)):
290
+ np_xs.append(inputs[i].numpy().flatten().copy())
291
+ numerical_grad.append(np.zeros_like(np_xs[-1]))
292
+ inputs[i].requires_grad = True
293
+
294
+ for i in range(len(np_xs)):
295
+ for j in range(len(np_xs[i])):
296
+ np_xs[i][j] += eps
297
+ y1 = f(np_xs)
298
+ np_xs[i][j] -= 2 * eps
299
+ y2 = f(np_xs)
300
+ np_xs[i][j] += eps
301
+ numerical_grad[i][j] = (y1 - y2) / (2 * eps)
302
+
303
+ # compute analytical gradient
304
+ tape = wp.Tape()
305
+ output = wp.zeros(1, dtype=wp.float32, device=device, requires_grad=True)
306
+ with tape:
307
+ wp.launch(kernel, dim=1, inputs=inputs, outputs=[output], device=device)
308
+
309
+ tape.backward(loss=output)
310
+
311
+ # compare gradients
312
+ for i in range(len(inputs)):
313
+ grad = tape.gradients[inputs[i]]
314
+ assert_np_equal(grad.numpy(), numerical_grad[i], tol=tol)
315
+
316
+ tape.zero()
317
+
318
+
319
+ def test_vector_math_grad(test, device):
320
+ rng = np.random.default_rng(123)
321
+
322
+ # test unary operations
323
+ for dim, vec_type in [(2, wp.vec2), (3, wp.vec3), (4, wp.vec4), (4, wp.quat)]:
324
+
325
+ def check_length(vs: wp.array(dtype=vec_type), out: wp.array(dtype=float)):
326
+ out[0] = wp.length(vs[0])
327
+
328
+ def check_length_sq(vs: wp.array(dtype=vec_type), out: wp.array(dtype=float)):
329
+ out[0] = wp.length_sq(vs[0])
330
+
331
+ def check_normalize(vs: wp.array(dtype=vec_type), out: wp.array(dtype=float)):
332
+ out[0] = wp.length_sq(wp.normalize(vs[0])) # compress to scalar output
333
+
334
+ # run the tests with 5 different random inputs
335
+ for _ in range(5):
336
+ x = wp.array(rng.random(size=(1, dim), dtype=np.float32), dtype=vec_type, device=device)
337
+ gradcheck(check_length, f"check_length_{vec_type.__name__}", [x], device)
338
+ gradcheck(check_length_sq, f"check_length_sq_{vec_type.__name__}", [x], device)
339
+ gradcheck(check_normalize, f"check_normalize_{vec_type.__name__}", [x], device)
340
+
341
+
342
+ def test_matrix_math_grad(test, device):
343
+ rng = np.random.default_rng(123)
344
+
345
+ # test unary operations
346
+ for dim, mat_type in [(2, wp.mat22), (3, wp.mat33), (4, wp.mat44)]:
347
+
348
+ def check_determinant(vs: wp.array(dtype=mat_type), out: wp.array(dtype=float)):
349
+ out[0] = wp.determinant(vs[0])
350
+
351
+ def check_trace(vs: wp.array(dtype=mat_type), out: wp.array(dtype=float)):
352
+ out[0] = wp.trace(vs[0])
353
+
354
+ # run the tests with 5 different random inputs
355
+ for _ in range(5):
356
+ x = wp.array(rng.random(size=(1, dim, dim), dtype=np.float32), ndim=1, dtype=mat_type, device=device)
357
+ gradcheck(check_determinant, f"check_length_{mat_type.__name__}", [x], device)
358
+ gradcheck(check_trace, f"check_length_sq_{mat_type.__name__}", [x], device)
359
+
360
+
361
+ def test_3d_math_grad(test, device):
362
+ rng = np.random.default_rng(123)
363
+
364
+ # test binary operations
365
+ def check_cross(vs: wp.array(dtype=wp.vec3), out: wp.array(dtype=float)):
366
+ out[0] = wp.length(wp.cross(vs[0], vs[1]))
367
+
368
+ def check_dot(vs: wp.array(dtype=wp.vec3), out: wp.array(dtype=float)):
369
+ out[0] = wp.dot(vs[0], vs[1])
370
+
371
+ def check_mat33(vs: wp.array(dtype=wp.vec3), out: wp.array(dtype=float)):
372
+ a = vs[0]
373
+ b = vs[1]
374
+ c = wp.cross(a, b)
375
+ m = wp.mat33(a[0], b[0], c[0], a[1], b[1], c[1], a[2], b[2], c[2])
376
+ out[0] = wp.determinant(m)
377
+
378
+ def check_trace_diagonal(vs: wp.array(dtype=wp.vec3), out: wp.array(dtype=float)):
379
+ a = vs[0]
380
+ b = vs[1]
381
+ c = wp.cross(a, b)
382
+ m = wp.mat33(
383
+ 1.0 / (a[0] + 10.0),
384
+ 0.0,
385
+ 0.0,
386
+ 0.0,
387
+ 1.0 / (b[1] + 10.0),
388
+ 0.0,
389
+ 0.0,
390
+ 0.0,
391
+ 1.0 / (c[2] + 10.0),
392
+ )
393
+ out[0] = wp.trace(m)
394
+
395
+ def check_rot_rpy(vs: wp.array(dtype=wp.vec3), out: wp.array(dtype=float)):
396
+ v = vs[0]
397
+ q = wp.quat_rpy(v[0], v[1], v[2])
398
+ out[0] = wp.length(wp.quat_rotate(q, vs[1]))
399
+
400
+ def check_rot_axis_angle(vs: wp.array(dtype=wp.vec3), out: wp.array(dtype=float)):
401
+ v = wp.normalize(vs[0])
402
+ q = wp.quat_from_axis_angle(v, 0.5)
403
+ out[0] = wp.length(wp.quat_rotate(q, vs[1]))
404
+
405
+ def check_rot_quat_inv(vs: wp.array(dtype=wp.vec3), out: wp.array(dtype=float)):
406
+ v = vs[0]
407
+ q = wp.normalize(wp.quat(v[0], v[1], v[2], 1.0))
408
+ out[0] = wp.length(wp.quat_rotate_inv(q, vs[1]))
409
+
410
+ # run the tests with 5 different random inputs
411
+ for _ in range(5):
412
+ x = wp.array(
413
+ rng.standard_normal(size=(2, 3), dtype=np.float32), dtype=wp.vec3, device=device, requires_grad=True
414
+ )
415
+ gradcheck(check_cross, "check_cross_3d", [x], device)
416
+ gradcheck(check_dot, "check_dot_3d", [x], device)
417
+ gradcheck(check_mat33, "check_mat33_3d", [x], device, eps=2e-2)
418
+ gradcheck(check_trace_diagonal, "check_trace_diagonal_3d", [x], device)
419
+ gradcheck(check_rot_rpy, "check_rot_rpy_3d", [x], device)
420
+ gradcheck(check_rot_axis_angle, "check_rot_axis_angle_3d", [x], device)
421
+ gradcheck(check_rot_quat_inv, "check_rot_quat_inv_3d", [x], device)
422
+
423
+
424
+ def test_multi_valued_function_grad(test, device):
425
+ rng = np.random.default_rng(123)
426
+
427
+ @wp.func
428
+ def multi_valued(x: float, y: float, z: float):
429
+ return wp.sin(x), wp.cos(y) * z, wp.sqrt(wp.abs(z)) / wp.abs(x)
430
+
431
+ # test multi-valued functions
432
+ def check_multi_valued(vs: wp.array(dtype=wp.vec3), out: wp.array(dtype=float)):
433
+ tid = wp.tid()
434
+ v = vs[tid]
435
+ a, b, c = multi_valued(v[0], v[1], v[2])
436
+ out[tid] = a + b + c
437
+
438
+ # run the tests with 5 different random inputs
439
+ for _ in range(5):
440
+ x = wp.array(
441
+ rng.standard_normal(size=(2, 3), dtype=np.float32), dtype=wp.vec3, device=device, requires_grad=True
442
+ )
443
+ gradcheck(check_multi_valued, "check_multi_valued_3d", [x], device)
444
+
445
+
446
+ def test_mesh_grad(test, device):
447
+ pos = wp.array(
448
+ [
449
+ [0.0, 0.0, 0.0],
450
+ [1.0, 0.0, 0.0],
451
+ [0.0, 1.0, 0.0],
452
+ [0.0, 0.0, 1.0],
453
+ ],
454
+ dtype=wp.vec3,
455
+ device=device,
456
+ requires_grad=True,
457
+ )
458
+ indices = wp.array(
459
+ [0, 1, 2, 0, 2, 3, 0, 3, 1, 1, 3, 2],
460
+ dtype=wp.int32,
461
+ device=device,
462
+ )
463
+
464
+ mesh = wp.Mesh(points=pos, indices=indices)
465
+
466
+ @wp.func
467
+ def compute_triangle_area(mesh_id: wp.uint64, tri_id: int):
468
+ mesh = wp.mesh_get(mesh_id)
469
+ i, j, k = mesh.indices[tri_id * 3 + 0], mesh.indices[tri_id * 3 + 1], mesh.indices[tri_id * 3 + 2]
470
+ a = mesh.points[i]
471
+ b = mesh.points[j]
472
+ c = mesh.points[k]
473
+ return wp.length(wp.cross(b - a, c - a)) * 0.5
474
+
475
+ @wp.kernel
476
+ def compute_area(mesh_id: wp.uint64, out: wp.array(dtype=wp.float32)):
477
+ wp.atomic_add(out, 0, compute_triangle_area(mesh_id, wp.tid()))
478
+
479
+ num_tris = int(len(indices) / 3)
480
+
481
+ # compute analytical gradient
482
+ tape = wp.Tape()
483
+ output = wp.zeros(1, dtype=wp.float32, device=device, requires_grad=True)
484
+ with tape:
485
+ wp.launch(compute_area, dim=num_tris, inputs=[mesh.id], outputs=[output], device=device)
486
+
487
+ tape.backward(loss=output)
488
+
489
+ ad_grad = mesh.points.grad.numpy()
490
+
491
+ # compute finite differences
492
+ eps = 1e-3
493
+ pos_np = pos.numpy()
494
+ fd_grad = np.zeros_like(ad_grad)
495
+
496
+ for i in range(len(pos)):
497
+ for j in range(3):
498
+ pos_np[i, j] += eps
499
+ pos = wp.array(pos_np, dtype=wp.vec3, device=device)
500
+ mesh = wp.Mesh(points=pos, indices=indices)
501
+ output.zero_()
502
+ wp.launch(compute_area, dim=num_tris, inputs=[mesh.id], outputs=[output], device=device)
503
+ f1 = output.numpy()[0]
504
+ pos_np[i, j] -= 2 * eps
505
+ pos = wp.array(pos_np, dtype=wp.vec3, device=device)
506
+ mesh = wp.Mesh(points=pos, indices=indices)
507
+ output.zero_()
508
+ wp.launch(compute_area, dim=num_tris, inputs=[mesh.id], outputs=[output], device=device)
509
+ f2 = output.numpy()[0]
510
+ pos_np[i, j] += eps
511
+ fd_grad[i, j] = (f1 - f2) / (2 * eps)
512
+
513
+ assert np.allclose(ad_grad, fd_grad, atol=1e-3)
514
+
515
+
516
+ @wp.func
517
+ def name_clash(a: float, b: float) -> float:
518
+ return a + b
519
+
520
+
521
+ @wp.func_grad(name_clash)
522
+ def adj_name_clash(a: float, b: float, adj_ret: float):
523
+ # names `adj_a` and `adj_b` must not clash with function args of generated function
524
+ adj_a = 0.0
525
+ adj_b = 0.0
526
+ if a < 0.0:
527
+ adj_a = adj_ret
528
+ if b > 0.0:
529
+ adj_b = adj_ret
530
+
531
+ wp.adjoint[a] += adj_a
532
+ wp.adjoint[b] += adj_b
533
+
534
+
535
+ @wp.kernel
536
+ def name_clash_kernel(
537
+ input_a: wp.array(dtype=float),
538
+ input_b: wp.array(dtype=float),
539
+ output: wp.array(dtype=float),
540
+ ):
541
+ tid = wp.tid()
542
+ output[tid] = name_clash(input_a[tid], input_b[tid])
543
+
544
+
545
+ def test_name_clash(test, device):
+     # tests that no name clashes occur when variable names such as `adj_a` are used in custom gradient code
+     with wp.ScopedDevice(device):
+         input_a = wp.array([1.0, -2.0, 3.0], dtype=wp.float32, requires_grad=True)
+         input_b = wp.array([4.0, 5.0, -6.0], dtype=wp.float32, requires_grad=True)
+         output = wp.zeros(3, dtype=wp.float32, requires_grad=True)
+
+         tape = wp.Tape()
+         with tape:
+             wp.launch(name_clash_kernel, dim=len(input_a), inputs=[input_a, input_b], outputs=[output])
+
+         tape.backward(grads={output: wp.array(np.ones(len(input_a), dtype=np.float32))})
+
+         # adj_name_clash emits a nonzero adjoint only where a < 0 (index 1) and where b > 0 (indices 0, 1)
+         assert_np_equal(input_a.grad.numpy(), np.array([0.0, 1.0, 0.0]))
+         assert_np_equal(input_b.grad.numpy(), np.array([1.0, 1.0, 0.0]))
+
+
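`wp.Tape` records the kernel launches issued inside its context and replays their adjoints in reverse order on `backward()`. Two seeding styles appear in these tests: `tape.backward(loss=output)` for a single scalar output, and `tape.backward(grads={array: grad_array})` to seed arbitrary output adjoints explicitly. A minimal sketch of the second form (kernel and values illustrative):

    import warp as wp

    @wp.kernel
    def triple(x: wp.array(dtype=float), y: wp.array(dtype=float)):
        tid = wp.tid()
        y[tid] = 3.0 * x[tid]

    x = wp.array([1.0, 2.0], dtype=float, requires_grad=True)
    y = wp.zeros(2, dtype=float, requires_grad=True)

    tape = wp.Tape()
    with tape:
        wp.launch(triple, dim=2, inputs=[x], outputs=[y])

    # seed dL/dy = [1, 1] and propagate back to x
    tape.backward(grads={y: wp.array([1.0, 1.0], dtype=float)})
    print(x.grad.numpy())  # expected: [3. 3.]
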
+ @wp.struct
+ class NestedStruct:
+     v: wp.vec2
+
+
+ @wp.struct
+ class ParentStruct:
+     a: float
+     n: NestedStruct
+
+
+ @wp.func
+ def noop(a: Any):
+     pass
+
+
+ @wp.func
+ def sum2(v: wp.vec2):
+     return v[0] + v[1]
+
+
+ @wp.kernel
+ def test_struct_attribute_gradient_kernel(src: wp.array(dtype=float), res: wp.array(dtype=float)):
+     tid = wp.tid()
+
+     p = ParentStruct(src[tid], NestedStruct(wp.vec2(2.0 * src[tid])))
+
+     # test that we are not losing gradients when accessing attributes
+     noop(p.a)
+     noop(p.n)
+     noop(p.n.v)
+
+     res[tid] = p.a + sum2(p.n.v)
+
+
+ def test_struct_attribute_gradient(test, device):
+     with wp.ScopedDevice(device):
+         src = wp.array([1], dtype=float, requires_grad=True)
+         res = wp.empty_like(src)
+
+         tape = wp.Tape()
+         with tape:
+             wp.launch(test_struct_attribute_gradient_kernel, dim=1, inputs=[src, res])
+
+         res.grad.fill_(1.0)
+         tape.backward()
+
+         # res = s + 2s + 2s = 5s, so d(res)/d(src) = 5
+         test.assertEqual(src.grad.numpy()[0], 5.0)
+
+
+ @wp.kernel
+ def copy_kernel(a: wp.array(dtype=wp.float32), b: wp.array(dtype=wp.float32)):
+     tid = wp.tid()
+     ai = a[tid]
+     bi = ai
+     b[tid] = bi
+
+
+ def test_copy(test, device):
+     with wp.ScopedDevice(device):
+         a = wp.array([-1.0, 2.0, 3.0], dtype=wp.float32, requires_grad=True)
+         b = wp.array([0.0, 0.0, 0.0], dtype=wp.float32, requires_grad=True)
+
+         wp.launch(copy_kernel, a.shape[0], inputs=[a, b])
+
+         b.grad = wp.array([1.0, 1.0, 1.0], dtype=wp.float32)
+         wp.launch(copy_kernel, a.shape[0], inputs=[a, b], adjoint=True, adj_inputs=[None, None])
+
+         assert_np_equal(a.grad.numpy(), np.array([1.0, 1.0, 1.0]))
+
+
+ @wp.kernel
+ def aliasing_kernel(a: wp.array(dtype=wp.float32), b: wp.array(dtype=wp.float32)):
+     tid = wp.tid()
+     x = a[tid]
+
+     y = x
+     if y > 0.0:
+         y = x * x
+     else:
+         y = x * x * x
+
+     b[tid] = y
+
+
+ def test_aliasing(test, device):
+     with wp.ScopedDevice(device):
+         a = wp.array([-1.0, 2.0, 3.0], dtype=wp.float32, requires_grad=True)
+         b = wp.array([0.0, 0.0, 0.0], dtype=wp.float32, requires_grad=True)
+
+         wp.launch(aliasing_kernel, a.shape[0], inputs=[a, b])
+
+         b.grad = wp.array([1.0, 1.0, 1.0], dtype=wp.float32)
+         wp.launch(aliasing_kernel, a.shape[0], inputs=[a, b], adjoint=True, adj_inputs=[None, None])
+
+         # x = -1 takes the cubic branch (3x^2 = 3); x = 2 and x = 3 take the quadratic branch (2x = 4, 6)
+         assert_np_equal(a.grad.numpy(), np.array([3.0, 4.0, 6.0]))
+
+
+ @wp.kernel
+ def square_kernel(x: wp.array(dtype=float), y: wp.array(dtype=float)):
+     tid = wp.tid()
+     y[tid] = x[tid] ** 2.0
+
+
+ @wp.kernel
+ def square_slice_2d_kernel(x: wp.array2d(dtype=float), y: wp.array2d(dtype=float), row_idx: int):
+     tid = wp.tid()
+     x_slice = x[row_idx]
+     y_slice = y[row_idx]
+     y_slice[tid] = x_slice[tid] ** 2.0
+
+
+ @wp.kernel
+ def square_slice_3d_1d_kernel(x: wp.array3d(dtype=float), y: wp.array3d(dtype=float), slice_idx: int):
+     i, j = wp.tid()
+     x_slice = x[slice_idx]
+     y_slice = y[slice_idx]
+     y_slice[i, j] = x_slice[i, j] ** 2.0
+
+
+ @wp.kernel
+ def square_slice_3d_2d_kernel(x: wp.array3d(dtype=float), y: wp.array3d(dtype=float), slice_i: int, slice_j: int):
+     tid = wp.tid()
+     x_slice = x[slice_i, slice_j]
+     y_slice = y[slice_i, slice_j]
+     y_slice[tid] = x_slice[tid] ** 2.0
+
+
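Inside a kernel, indexing a multi-dimensional array with fewer indices than it has dimensions yields a lower-dimensional view rather than a copy; reads, writes, and adjoints through the view address the parent array's memory, which is what the test_gradient_slice_* tests further down verify. A minimal sketch of the same pattern (kernel name illustrative):

    import warp as wp

    @wp.kernel
    def row_sum(m: wp.array2d(dtype=float), out: wp.array(dtype=float), row: int):
        tid = wp.tid()
        r = m[row]  # 1D view of one row of m
        wp.atomic_add(out, 0, r[tid])
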
+ def test_gradient_internal(test, device):
+     with wp.ScopedDevice(device):
+         a = wp.array([1.0, 2.0, 3.0], dtype=float, requires_grad=True)
+         b = wp.array([0.0, 0.0, 0.0], dtype=float, requires_grad=True)
+
+         wp.launch(square_kernel, dim=a.size, inputs=[a, b])
+
+         # use internal gradients (.grad), adj_inputs are None
+         b.grad = wp.array([1.0, 1.0, 1.0], dtype=float)
+         wp.launch(square_kernel, dim=a.size, inputs=[a, b], adjoint=True, adj_inputs=[None, None])
+
+         assert_np_equal(a.grad.numpy(), np.array([2.0, 4.0, 6.0]))
+
+
+ def test_gradient_external(test, device):
+     with wp.ScopedDevice(device):
+         a = wp.array([1.0, 2.0, 3.0], dtype=float, requires_grad=False)
+         b = wp.array([0.0, 0.0, 0.0], dtype=float, requires_grad=False)
+
+         wp.launch(square_kernel, dim=a.size, inputs=[a, b])
+
+         # use external gradients passed in adj_inputs
+         a_grad = wp.array([0.0, 0.0, 0.0], dtype=float)
+         b_grad = wp.array([1.0, 1.0, 1.0], dtype=float)
+         wp.launch(square_kernel, dim=a.size, inputs=[a, b], adjoint=True, adj_inputs=[a_grad, b_grad])
+
+         assert_np_equal(a_grad.numpy(), np.array([2.0, 4.0, 6.0]))
+
+
+ def test_gradient_precedence(test, device):
+     with wp.ScopedDevice(device):
+         a = wp.array([1.0, 2.0, 3.0], dtype=float, requires_grad=True)
+         b = wp.array([0.0, 0.0, 0.0], dtype=float, requires_grad=True)
+
+         wp.launch(square_kernel, dim=a.size, inputs=[a, b])
+
+         # if both internal and external gradients are present, the external one takes precedence,
+         # because it is explicitly passed by the user in adj_inputs
+         a_grad = wp.array([0.0, 0.0, 0.0], dtype=float)
+         b_grad = wp.array([1.0, 1.0, 1.0], dtype=float)
+         wp.launch(square_kernel, dim=a.size, inputs=[a, b], adjoint=True, adj_inputs=[a_grad, b_grad])
+
+         assert_np_equal(a_grad.numpy(), np.array([2.0, 4.0, 6.0]))  # used
+         assert_np_equal(a.grad.numpy(), np.array([0.0, 0.0, 0.0]))  # unused
+
+
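Taken together, the three tests above pin down the adjoint-launch contract: `wp.launch(..., adjoint=True)` runs the kernel's adjoint directly, reading each array's incoming adjoint from `adj_inputs` when an array is supplied there and falling back to the internal `.grad` buffer when the corresponding entry is `None`, with an explicit `adj_inputs` array taking precedence when both exist. In every case the seed of ones on the output of y = x ** 2 yields dy/dx = 2x = [2, 4, 6].
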
+ def test_gradient_slice_2d(test, device):
+     with wp.ScopedDevice(device):
+         a = wp.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=float, requires_grad=True)
+         b = wp.zeros_like(a, requires_grad=False)
+         b.grad = wp.ones_like(a, requires_grad=False)
+
+         wp.launch(square_slice_2d_kernel, dim=a.shape[1], inputs=[a, b, 1])
+
+         # use internal gradients (.grad), adj_inputs are None
+         wp.launch(square_slice_2d_kernel, dim=a.shape[1], inputs=[a, b, 1], adjoint=True, adj_inputs=[None, None, 1])
+
+         # only row 1 was touched, so only its entries receive gradients (2x = [6, 8])
+         assert_np_equal(a.grad.numpy(), np.array([[0.0, 0.0], [6.0, 8.0], [0.0, 0.0]]))
+
+
+ def test_gradient_slice_3d_1d(test, device):
+     with wp.ScopedDevice(device):
+         data = [
+             [
+                 [1, 2, 3],
+                 [4, 5, 6],
+                 [7, 8, 9],
+             ],
+             [
+                 [11, 12, 13],
+                 [14, 15, 16],
+                 [17, 18, 19],
+             ],
+             [
+                 [21, 22, 23],
+                 [24, 25, 26],
+                 [27, 28, 29],
+             ],
+         ]
+         a = wp.array(data, dtype=float, requires_grad=True)
+         b = wp.zeros_like(a, requires_grad=False)
+         b.grad = wp.ones_like(a, requires_grad=False)
+
+         wp.launch(square_slice_3d_1d_kernel, dim=a.shape[1:], inputs=[a, b, 1])
+
+         # use internal gradients (.grad), adj_inputs are None
+         wp.launch(
+             square_slice_3d_1d_kernel, dim=a.shape[1:], inputs=[a, b, 1], adjoint=True, adj_inputs=[None, None, 1]
+         )
+
+         expected_grad = [
+             [
+                 [0, 0, 0],
+                 [0, 0, 0],
+                 [0, 0, 0],
+             ],
+             [
+                 [11 * 2, 12 * 2, 13 * 2],
+                 [14 * 2, 15 * 2, 16 * 2],
+                 [17 * 2, 18 * 2, 19 * 2],
+             ],
+             [
+                 [0, 0, 0],
+                 [0, 0, 0],
+                 [0, 0, 0],
+             ],
+         ]
+         assert_np_equal(a.grad.numpy(), np.array(expected_grad))
+
+
+ def test_gradient_slice_3d_2d(test, device):
+     with wp.ScopedDevice(device):
+         data = [
+             [
+                 [1, 2, 3],
+                 [4, 5, 6],
+                 [7, 8, 9],
+             ],
+             [
+                 [11, 12, 13],
+                 [14, 15, 16],
+                 [17, 18, 19],
+             ],
+             [
+                 [21, 22, 23],
+                 [24, 25, 26],
+                 [27, 28, 29],
+             ],
+         ]
+         a = wp.array(data, dtype=float, requires_grad=True)
+         b = wp.zeros_like(a, requires_grad=False)
+         b.grad = wp.ones_like(a, requires_grad=False)
+
+         wp.launch(square_slice_3d_2d_kernel, dim=a.shape[2], inputs=[a, b, 1, 1])
+
+         # use internal gradients (.grad), adj_inputs are None
+         wp.launch(
+             square_slice_3d_2d_kernel, dim=a.shape[2], inputs=[a, b, 1, 1], adjoint=True, adj_inputs=[None, None, 1, 1]
+         )
+
+         expected_grad = [
+             [
+                 [0, 0, 0],
+                 [0, 0, 0],
+                 [0, 0, 0],
+             ],
+             [
+                 [0, 0, 0],
+                 [14 * 2, 15 * 2, 16 * 2],
+                 [0, 0, 0],
+             ],
+             [
+                 [0, 0, 0],
+                 [0, 0, 0],
+                 [0, 0, 0],
+             ],
+         ]
+         assert_np_equal(a.grad.numpy(), np.array(expected_grad))
+
+
+ devices = get_test_devices()
+
+
+ class TestGrad(unittest.TestCase):
+     pass
+
+
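`add_function_test` (from the shared test utilities) attaches one generated `unittest` method per requested device to the test class, so each test function above runs once per device. A rough sketch of the pattern, assuming a simplified helper (the real utility also handles device naming and extra keyword arguments):

    def add_function_test(cls, name, func, devices=None):
        # attach one generated unittest method per device to the test class
        for device in devices or [None]:
            def make_test(d):
                def test_method(self):
                    func(self, d)
                return test_method
            setattr(cls, f"{name}_{device}", make_test(device))
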
+ # add_function_test(TestGrad, "test_while_loop_grad", test_while_loop_grad, devices=devices)
+ add_function_test(TestGrad, "test_for_loop_nested_for_grad", test_for_loop_nested_for_grad, devices=devices)
+ add_function_test(TestGrad, "test_scalar_grad", test_scalar_grad, devices=devices)
+ add_function_test(TestGrad, "test_for_loop_grad", test_for_loop_grad, devices=devices)
+ add_function_test(
+     TestGrad, "test_for_loop_graph_grad", test_for_loop_graph_grad, devices=get_selected_cuda_test_devices()
+ )
+ add_function_test(TestGrad, "test_for_loop_nested_if_grad", test_for_loop_nested_if_grad, devices=devices)
+ add_function_test(TestGrad, "test_preserve_outputs_grad", test_preserve_outputs_grad, devices=devices)
+ add_function_test(TestGrad, "test_vector_math_grad", test_vector_math_grad, devices=devices)
+ add_function_test(TestGrad, "test_matrix_math_grad", test_matrix_math_grad, devices=devices)
+ add_function_test(TestGrad, "test_3d_math_grad", test_3d_math_grad, devices=devices)
+ add_function_test(TestGrad, "test_multi_valued_function_grad", test_multi_valued_function_grad, devices=devices)
+ add_function_test(TestGrad, "test_mesh_grad", test_mesh_grad, devices=devices)
+ add_function_test(TestGrad, "test_name_clash", test_name_clash, devices=devices)
+ add_function_test(TestGrad, "test_struct_attribute_gradient", test_struct_attribute_gradient, devices=devices)
+ add_function_test(TestGrad, "test_copy", test_copy, devices=devices)
+ add_function_test(TestGrad, "test_aliasing", test_aliasing, devices=devices)
+ add_function_test(TestGrad, "test_gradient_internal", test_gradient_internal, devices=devices)
+ add_function_test(TestGrad, "test_gradient_external", test_gradient_external, devices=devices)
+ add_function_test(TestGrad, "test_gradient_precedence", test_gradient_precedence, devices=devices)
+ add_function_test(TestGrad, "test_gradient_slice_2d", test_gradient_slice_2d, devices=devices)
+ add_function_test(TestGrad, "test_gradient_slice_3d_1d", test_gradient_slice_3d_1d, devices=devices)
+ add_function_test(TestGrad, "test_gradient_slice_3d_2d", test_gradient_slice_3d_2d, devices=devices)
+
+
+ if __name__ == "__main__":
+     wp.build.clear_kernel_cache()
+     unittest.main(verbosity=2, failfast=False)