warp-lang 1.0.2-py3-none-manylinux2014_x86_64.whl → 1.2.0-py3-none-manylinux2014_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (356)
  1. warp/__init__.py +108 -97
  2. warp/__init__.pyi +1 -1
  3. warp/bin/warp-clang.so +0 -0
  4. warp/bin/warp.so +0 -0
  5. warp/build.py +88 -113
  6. warp/build_dll.py +383 -375
  7. warp/builtins.py +3693 -3354
  8. warp/codegen.py +2925 -2792
  9. warp/config.py +40 -36
  10. warp/constants.py +49 -45
  11. warp/context.py +5409 -5102
  12. warp/dlpack.py +442 -442
  13. warp/examples/__init__.py +16 -16
  14. warp/examples/assets/bear.usd +0 -0
  15. warp/examples/assets/bunny.usd +0 -0
  16. warp/examples/assets/cartpole.urdf +110 -110
  17. warp/examples/assets/crazyflie.usd +0 -0
  18. warp/examples/assets/cube.usd +0 -0
  19. warp/examples/assets/nv_ant.xml +92 -92
  20. warp/examples/assets/nv_humanoid.xml +183 -183
  21. warp/examples/assets/quadruped.urdf +267 -267
  22. warp/examples/assets/rocks.nvdb +0 -0
  23. warp/examples/assets/rocks.usd +0 -0
  24. warp/examples/assets/sphere.usd +0 -0
  25. warp/examples/benchmarks/benchmark_api.py +381 -383
  26. warp/examples/benchmarks/benchmark_cloth.py +278 -277
  27. warp/examples/benchmarks/benchmark_cloth_cupy.py +88 -88
  28. warp/examples/benchmarks/benchmark_cloth_jax.py +97 -100
  29. warp/examples/benchmarks/benchmark_cloth_numba.py +146 -142
  30. warp/examples/benchmarks/benchmark_cloth_numpy.py +77 -77
  31. warp/examples/benchmarks/benchmark_cloth_pytorch.py +86 -86
  32. warp/examples/benchmarks/benchmark_cloth_taichi.py +112 -112
  33. warp/examples/benchmarks/benchmark_cloth_warp.py +145 -146
  34. warp/examples/benchmarks/benchmark_launches.py +293 -295
  35. warp/examples/browse.py +29 -29
  36. warp/examples/core/example_dem.py +232 -219
  37. warp/examples/core/example_fluid.py +291 -267
  38. warp/examples/core/example_graph_capture.py +142 -126
  39. warp/examples/core/example_marching_cubes.py +186 -174
  40. warp/examples/core/example_mesh.py +172 -155
  41. warp/examples/core/example_mesh_intersect.py +203 -193
  42. warp/examples/core/example_nvdb.py +174 -170
  43. warp/examples/core/example_raycast.py +103 -90
  44. warp/examples/core/example_raymarch.py +197 -178
  45. warp/examples/core/example_render_opengl.py +183 -141
  46. warp/examples/core/example_sph.py +403 -387
  47. warp/examples/core/example_torch.py +219 -181
  48. warp/examples/core/example_wave.py +261 -248
  49. warp/examples/fem/bsr_utils.py +378 -380
  50. warp/examples/fem/example_apic_fluid.py +432 -389
  51. warp/examples/fem/example_burgers.py +262 -0
  52. warp/examples/fem/example_convection_diffusion.py +180 -168
  53. warp/examples/fem/example_convection_diffusion_dg.py +217 -209
  54. warp/examples/fem/example_deformed_geometry.py +175 -159
  55. warp/examples/fem/example_diffusion.py +199 -173
  56. warp/examples/fem/example_diffusion_3d.py +178 -152
  57. warp/examples/fem/example_diffusion_mgpu.py +219 -214
  58. warp/examples/fem/example_mixed_elasticity.py +242 -222
  59. warp/examples/fem/example_navier_stokes.py +257 -243
  60. warp/examples/fem/example_stokes.py +218 -192
  61. warp/examples/fem/example_stokes_transfer.py +263 -249
  62. warp/examples/fem/mesh_utils.py +133 -109
  63. warp/examples/fem/plot_utils.py +292 -287
  64. warp/examples/optim/example_bounce.py +258 -246
  65. warp/examples/optim/example_cloth_throw.py +220 -209
  66. warp/examples/optim/example_diffray.py +564 -536
  67. warp/examples/optim/example_drone.py +862 -835
  68. warp/examples/optim/example_inverse_kinematics.py +174 -168
  69. warp/examples/optim/example_inverse_kinematics_torch.py +183 -169
  70. warp/examples/optim/example_spring_cage.py +237 -231
  71. warp/examples/optim/example_trajectory.py +221 -199
  72. warp/examples/optim/example_walker.py +304 -293
  73. warp/examples/sim/example_cartpole.py +137 -129
  74. warp/examples/sim/example_cloth.py +194 -186
  75. warp/examples/sim/example_granular.py +122 -111
  76. warp/examples/sim/example_granular_collision_sdf.py +195 -186
  77. warp/examples/sim/example_jacobian_ik.py +234 -214
  78. warp/examples/sim/example_particle_chain.py +116 -105
  79. warp/examples/sim/example_quadruped.py +191 -180
  80. warp/examples/sim/example_rigid_chain.py +195 -187
  81. warp/examples/sim/example_rigid_contact.py +187 -177
  82. warp/examples/sim/example_rigid_force.py +125 -125
  83. warp/examples/sim/example_rigid_gyroscopic.py +107 -95
  84. warp/examples/sim/example_rigid_soft_contact.py +132 -122
  85. warp/examples/sim/example_soft_body.py +188 -177
  86. warp/fabric.py +337 -335
  87. warp/fem/__init__.py +61 -27
  88. warp/fem/cache.py +403 -388
  89. warp/fem/dirichlet.py +178 -179
  90. warp/fem/domain.py +262 -263
  91. warp/fem/field/__init__.py +100 -101
  92. warp/fem/field/field.py +148 -149
  93. warp/fem/field/nodal_field.py +298 -299
  94. warp/fem/field/restriction.py +22 -21
  95. warp/fem/field/test.py +180 -181
  96. warp/fem/field/trial.py +183 -183
  97. warp/fem/geometry/__init__.py +16 -19
  98. warp/fem/geometry/closest_point.py +69 -70
  99. warp/fem/geometry/deformed_geometry.py +270 -271
  100. warp/fem/geometry/element.py +748 -744
  101. warp/fem/geometry/geometry.py +184 -186
  102. warp/fem/geometry/grid_2d.py +380 -373
  103. warp/fem/geometry/grid_3d.py +437 -435
  104. warp/fem/geometry/hexmesh.py +953 -953
  105. warp/fem/geometry/nanogrid.py +455 -0
  106. warp/fem/geometry/partition.py +374 -376
  107. warp/fem/geometry/quadmesh_2d.py +532 -532
  108. warp/fem/geometry/tetmesh.py +840 -840
  109. warp/fem/geometry/trimesh_2d.py +577 -577
  110. warp/fem/integrate.py +1684 -1615
  111. warp/fem/operator.py +190 -191
  112. warp/fem/polynomial.py +214 -213
  113. warp/fem/quadrature/__init__.py +2 -2
  114. warp/fem/quadrature/pic_quadrature.py +243 -245
  115. warp/fem/quadrature/quadrature.py +295 -294
  116. warp/fem/space/__init__.py +179 -292
  117. warp/fem/space/basis_space.py +522 -489
  118. warp/fem/space/collocated_function_space.py +100 -105
  119. warp/fem/space/dof_mapper.py +236 -236
  120. warp/fem/space/function_space.py +148 -145
  121. warp/fem/space/grid_2d_function_space.py +148 -267
  122. warp/fem/space/grid_3d_function_space.py +167 -306
  123. warp/fem/space/hexmesh_function_space.py +253 -352
  124. warp/fem/space/nanogrid_function_space.py +202 -0
  125. warp/fem/space/partition.py +350 -350
  126. warp/fem/space/quadmesh_2d_function_space.py +261 -369
  127. warp/fem/space/restriction.py +161 -160
  128. warp/fem/space/shape/__init__.py +90 -15
  129. warp/fem/space/shape/cube_shape_function.py +728 -738
  130. warp/fem/space/shape/shape_function.py +102 -103
  131. warp/fem/space/shape/square_shape_function.py +611 -611
  132. warp/fem/space/shape/tet_shape_function.py +565 -567
  133. warp/fem/space/shape/triangle_shape_function.py +429 -429
  134. warp/fem/space/tetmesh_function_space.py +224 -292
  135. warp/fem/space/topology.py +297 -295
  136. warp/fem/space/trimesh_2d_function_space.py +153 -221
  137. warp/fem/types.py +77 -77
  138. warp/fem/utils.py +495 -495
  139. warp/jax.py +166 -141
  140. warp/jax_experimental.py +341 -339
  141. warp/native/array.h +1081 -1025
  142. warp/native/builtin.h +1603 -1560
  143. warp/native/bvh.cpp +402 -398
  144. warp/native/bvh.cu +533 -525
  145. warp/native/bvh.h +430 -429
  146. warp/native/clang/clang.cpp +496 -464
  147. warp/native/crt.cpp +42 -32
  148. warp/native/crt.h +352 -335
  149. warp/native/cuda_crt.h +1049 -1049
  150. warp/native/cuda_util.cpp +549 -540
  151. warp/native/cuda_util.h +288 -203
  152. warp/native/cutlass_gemm.cpp +34 -34
  153. warp/native/cutlass_gemm.cu +372 -372
  154. warp/native/error.cpp +66 -66
  155. warp/native/error.h +27 -27
  156. warp/native/exports.h +187 -0
  157. warp/native/fabric.h +228 -228
  158. warp/native/hashgrid.cpp +301 -278
  159. warp/native/hashgrid.cu +78 -77
  160. warp/native/hashgrid.h +227 -227
  161. warp/native/initializer_array.h +32 -32
  162. warp/native/intersect.h +1204 -1204
  163. warp/native/intersect_adj.h +365 -365
  164. warp/native/intersect_tri.h +322 -322
  165. warp/native/marching.cpp +2 -2
  166. warp/native/marching.cu +497 -497
  167. warp/native/marching.h +2 -2
  168. warp/native/mat.h +1545 -1498
  169. warp/native/matnn.h +333 -333
  170. warp/native/mesh.cpp +203 -203
  171. warp/native/mesh.cu +292 -293
  172. warp/native/mesh.h +1887 -1887
  173. warp/native/nanovdb/GridHandle.h +366 -0
  174. warp/native/nanovdb/HostBuffer.h +590 -0
  175. warp/native/nanovdb/NanoVDB.h +6624 -4782
  176. warp/native/nanovdb/PNanoVDB.h +3390 -2553
  177. warp/native/noise.h +850 -850
  178. warp/native/quat.h +1112 -1085
  179. warp/native/rand.h +303 -299
  180. warp/native/range.h +108 -108
  181. warp/native/reduce.cpp +156 -156
  182. warp/native/reduce.cu +348 -348
  183. warp/native/runlength_encode.cpp +61 -61
  184. warp/native/runlength_encode.cu +46 -46
  185. warp/native/scan.cpp +30 -30
  186. warp/native/scan.cu +36 -36
  187. warp/native/scan.h +7 -7
  188. warp/native/solid_angle.h +442 -442
  189. warp/native/sort.cpp +94 -94
  190. warp/native/sort.cu +97 -97
  191. warp/native/sort.h +14 -14
  192. warp/native/sparse.cpp +337 -337
  193. warp/native/sparse.cu +544 -544
  194. warp/native/spatial.h +630 -630
  195. warp/native/svd.h +562 -562
  196. warp/native/temp_buffer.h +30 -30
  197. warp/native/vec.h +1177 -1133
  198. warp/native/volume.cpp +529 -297
  199. warp/native/volume.cu +58 -32
  200. warp/native/volume.h +960 -538
  201. warp/native/volume_builder.cu +446 -425
  202. warp/native/volume_builder.h +34 -19
  203. warp/native/volume_impl.h +61 -0
  204. warp/native/warp.cpp +1057 -1052
  205. warp/native/warp.cu +2949 -2828
  206. warp/native/warp.h +321 -305
  207. warp/optim/__init__.py +9 -9
  208. warp/optim/adam.py +120 -120
  209. warp/optim/linear.py +1104 -939
  210. warp/optim/sgd.py +104 -92
  211. warp/render/__init__.py +10 -10
  212. warp/render/render_opengl.py +3356 -3204
  213. warp/render/render_usd.py +768 -749
  214. warp/render/utils.py +152 -150
  215. warp/sim/__init__.py +52 -59
  216. warp/sim/articulation.py +685 -685
  217. warp/sim/collide.py +1594 -1590
  218. warp/sim/import_mjcf.py +489 -481
  219. warp/sim/import_snu.py +220 -221
  220. warp/sim/import_urdf.py +536 -516
  221. warp/sim/import_usd.py +887 -881
  222. warp/sim/inertia.py +316 -317
  223. warp/sim/integrator.py +234 -233
  224. warp/sim/integrator_euler.py +1956 -1956
  225. warp/sim/integrator_featherstone.py +1917 -1991
  226. warp/sim/integrator_xpbd.py +3288 -3312
  227. warp/sim/model.py +4473 -4314
  228. warp/sim/particles.py +113 -112
  229. warp/sim/render.py +417 -403
  230. warp/sim/utils.py +413 -410
  231. warp/sparse.py +1289 -1227
  232. warp/stubs.py +2192 -2469
  233. warp/tape.py +1162 -225
  234. warp/tests/__init__.py +1 -1
  235. warp/tests/__main__.py +4 -4
  236. warp/tests/assets/test_index_grid.nvdb +0 -0
  237. warp/tests/assets/torus.usda +105 -105
  238. warp/tests/aux_test_class_kernel.py +26 -26
  239. warp/tests/aux_test_compile_consts_dummy.py +10 -10
  240. warp/tests/aux_test_conditional_unequal_types_kernels.py +21 -21
  241. warp/tests/aux_test_dependent.py +20 -22
  242. warp/tests/aux_test_grad_customs.py +21 -23
  243. warp/tests/aux_test_reference.py +9 -11
  244. warp/tests/aux_test_reference_reference.py +8 -10
  245. warp/tests/aux_test_square.py +15 -17
  246. warp/tests/aux_test_unresolved_func.py +14 -14
  247. warp/tests/aux_test_unresolved_symbol.py +14 -14
  248. warp/tests/disabled_kinematics.py +237 -239
  249. warp/tests/run_coverage_serial.py +31 -31
  250. warp/tests/test_adam.py +155 -157
  251. warp/tests/test_arithmetic.py +1088 -1124
  252. warp/tests/test_array.py +2415 -2326
  253. warp/tests/test_array_reduce.py +148 -150
  254. warp/tests/test_async.py +666 -656
  255. warp/tests/test_atomic.py +139 -141
  256. warp/tests/test_bool.py +212 -149
  257. warp/tests/test_builtins_resolution.py +1290 -1292
  258. warp/tests/test_bvh.py +162 -171
  259. warp/tests/test_closest_point_edge_edge.py +227 -228
  260. warp/tests/test_codegen.py +562 -553
  261. warp/tests/test_compile_consts.py +217 -101
  262. warp/tests/test_conditional.py +244 -246
  263. warp/tests/test_copy.py +230 -215
  264. warp/tests/test_ctypes.py +630 -632
  265. warp/tests/test_dense.py +65 -67
  266. warp/tests/test_devices.py +89 -98
  267. warp/tests/test_dlpack.py +528 -529
  268. warp/tests/test_examples.py +403 -378
  269. warp/tests/test_fabricarray.py +952 -955
  270. warp/tests/test_fast_math.py +60 -54
  271. warp/tests/test_fem.py +1298 -1278
  272. warp/tests/test_fp16.py +128 -130
  273. warp/tests/test_func.py +336 -337
  274. warp/tests/test_generics.py +596 -571
  275. warp/tests/test_grad.py +885 -640
  276. warp/tests/test_grad_customs.py +331 -336
  277. warp/tests/test_hash_grid.py +208 -164
  278. warp/tests/test_import.py +37 -39
  279. warp/tests/test_indexedarray.py +1132 -1134
  280. warp/tests/test_intersect.py +65 -67
  281. warp/tests/test_jax.py +305 -307
  282. warp/tests/test_large.py +169 -164
  283. warp/tests/test_launch.py +352 -354
  284. warp/tests/test_lerp.py +217 -261
  285. warp/tests/test_linear_solvers.py +189 -171
  286. warp/tests/test_lvalue.py +419 -493
  287. warp/tests/test_marching_cubes.py +63 -65
  288. warp/tests/test_mat.py +1799 -1827
  289. warp/tests/test_mat_lite.py +113 -115
  290. warp/tests/test_mat_scalar_ops.py +2905 -2889
  291. warp/tests/test_math.py +124 -193
  292. warp/tests/test_matmul.py +498 -499
  293. warp/tests/test_matmul_lite.py +408 -410
  294. warp/tests/test_mempool.py +186 -190
  295. warp/tests/test_mesh.py +281 -324
  296. warp/tests/test_mesh_query_aabb.py +226 -241
  297. warp/tests/test_mesh_query_point.py +690 -702
  298. warp/tests/test_mesh_query_ray.py +290 -303
  299. warp/tests/test_mlp.py +274 -276
  300. warp/tests/test_model.py +108 -110
  301. warp/tests/test_module_hashing.py +111 -0
  302. warp/tests/test_modules_lite.py +36 -39
  303. warp/tests/test_multigpu.py +161 -163
  304. warp/tests/test_noise.py +244 -248
  305. warp/tests/test_operators.py +248 -250
  306. warp/tests/test_options.py +121 -125
  307. warp/tests/test_peer.py +131 -137
  308. warp/tests/test_pinned.py +76 -78
  309. warp/tests/test_print.py +52 -54
  310. warp/tests/test_quat.py +2084 -2086
  311. warp/tests/test_rand.py +324 -288
  312. warp/tests/test_reload.py +207 -217
  313. warp/tests/test_rounding.py +177 -179
  314. warp/tests/test_runlength_encode.py +188 -190
  315. warp/tests/test_sim_grad.py +241 -0
  316. warp/tests/test_sim_kinematics.py +89 -97
  317. warp/tests/test_smoothstep.py +166 -168
  318. warp/tests/test_snippet.py +303 -266
  319. warp/tests/test_sparse.py +466 -460
  320. warp/tests/test_spatial.py +2146 -2148
  321. warp/tests/test_special_values.py +362 -0
  322. warp/tests/test_streams.py +484 -473
  323. warp/tests/test_struct.py +708 -675
  324. warp/tests/test_tape.py +171 -148
  325. warp/tests/test_torch.py +741 -743
  326. warp/tests/test_transient_module.py +85 -87
  327. warp/tests/test_types.py +554 -659
  328. warp/tests/test_utils.py +488 -499
  329. warp/tests/test_vec.py +1262 -1268
  330. warp/tests/test_vec_lite.py +71 -73
  331. warp/tests/test_vec_scalar_ops.py +2097 -2099
  332. warp/tests/test_verify_fp.py +92 -94
  333. warp/tests/test_volume.py +961 -736
  334. warp/tests/test_volume_write.py +338 -265
  335. warp/tests/unittest_serial.py +38 -37
  336. warp/tests/unittest_suites.py +367 -359
  337. warp/tests/unittest_utils.py +434 -578
  338. warp/tests/unused_test_misc.py +69 -71
  339. warp/tests/walkthrough_debug.py +85 -85
  340. warp/thirdparty/appdirs.py +598 -598
  341. warp/thirdparty/dlpack.py +143 -143
  342. warp/thirdparty/unittest_parallel.py +563 -561
  343. warp/torch.py +321 -295
  344. warp/types.py +4941 -4450
  345. warp/utils.py +1008 -821
  346. {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/LICENSE.md +126 -126
  347. {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/METADATA +365 -400
  348. warp_lang-1.2.0.dist-info/RECORD +359 -0
  349. warp/examples/assets/cube.usda +0 -42
  350. warp/examples/assets/sphere.usda +0 -56
  351. warp/examples/assets/torus.usda +0 -105
  352. warp/examples/fem/example_convection_diffusion_dg0.py +0 -194
  353. warp/native/nanovdb/PNanoVDBWrite.h +0 -295
  354. warp_lang-1.0.2.dist-info/RECORD +0 -352
  355. {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/WHEEL +0 -0
  356. {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/top_level.txt +0 -0
warp/tests/test_async.py CHANGED
@@ -1,656 +1,666 @@
1
- # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
2
- # NVIDIA CORPORATION and its licensors retain all intellectual property
3
- # and proprietary rights in and to this software, related documentation
4
- # and any modifications thereto. Any use, reproduction, disclosure or
5
- # distribution of this software and related documentation without an express
6
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
7
-
8
- import unittest
9
-
10
- import numpy as np
11
-
12
- import warp as wp
13
- from warp.utils import check_iommu
14
- from warp.tests.unittest_utils import *
15
-
16
- wp.init()
17
-
18
-
19
- class Capturable:
20
- def __init__(self, use_graph=True, stream=None):
21
- self.use_graph = use_graph
22
- self.stream = stream
23
-
24
- def __enter__(self):
25
- if self.use_graph:
26
- wp.capture_begin(stream=self.stream)
27
-
28
- def __exit__(self, exc_type, exc_value, traceback):
29
- if self.use_graph:
30
- try:
31
- # need to call capture_end() to terminate the CUDA stream capture
32
- graph = wp.capture_end(stream=self.stream)
33
- except:
34
- # capture_end() will raise if there was an error during capture, but we squash it here
35
- # if we already had an exception so that the original exception percolates to the caller
36
- if exc_type is None:
37
- raise
38
- else:
39
- # capture can succeed despite some errors during capture (e.g. cudaInvalidValue during copy)
40
- # but if we had an exception during capture, don't launch the graph
41
- if exc_type is None:
42
- wp.capture_launch(graph, stream=self.stream)
43
-
44
-
45
- @wp.kernel
46
- def inc(a: wp.array(dtype=float)):
47
- tid = wp.tid()
48
- a[tid] = a[tid] + 1.0
49
-
50
-
51
- def test_async_empty(test, device, use_mempools, use_graph):
52
- with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
53
- n = 100
54
-
55
- with Capturable(use_graph):
56
- a = wp.empty(n, dtype=float)
57
-
58
- test.assertIsInstance(a, wp.array)
59
- test.assertIsNotNone(a.ptr)
60
- test.assertEqual(a.size, n)
61
- test.assertEqual(a.dtype, wp.float32)
62
- test.assertEqual(a.device, device)
63
-
64
-
65
- def test_async_zeros(test, device, use_mempools, use_graph):
66
- with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
67
- n = 100
68
-
69
- with Capturable(use_graph):
70
- a = wp.zeros(n, dtype=float)
71
-
72
- assert_np_equal(a.numpy(), np.zeros(n, dtype=np.float32))
73
-
74
-
75
- def test_async_zero_v1(test, device, use_mempools, use_graph):
76
- with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
77
- n = 100
78
-
79
- with Capturable(use_graph):
80
- a = wp.empty(n, dtype=float)
81
- a.zero_()
82
-
83
- assert_np_equal(a.numpy(), np.zeros(n, dtype=np.float32))
84
-
85
-
86
- def test_async_zero_v2(test, device, use_mempools, use_graph):
87
- with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
88
- n = 100
89
-
90
- a = wp.empty(n, dtype=float)
91
-
92
- with Capturable(use_graph):
93
- a.zero_()
94
-
95
- assert_np_equal(a.numpy(), np.zeros(n, dtype=np.float32))
96
-
97
-
98
- def test_async_full(test, device, use_mempools, use_graph):
99
- with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
100
- n = 100
101
- value = 42
102
-
103
- with Capturable(use_graph):
104
- a = wp.full(n, value, dtype=float)
105
-
106
- assert_np_equal(a.numpy(), np.full(n, value, dtype=np.float32))
107
-
108
-
109
- def test_async_fill_v1(test, device, use_mempools, use_graph):
110
- with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
111
- n = 100
112
- value = 17
113
-
114
- with Capturable(use_graph):
115
- a = wp.empty(n, dtype=float)
116
- a.fill_(value)
117
-
118
- assert_np_equal(a.numpy(), np.full(n, value, dtype=np.float32))
119
-
120
-
121
- def test_async_fill_v2(test, device, use_mempools, use_graph):
122
- with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
123
- n = 100
124
- value = 17
125
-
126
- a = wp.empty(n, dtype=float)
127
-
128
- with Capturable(use_graph):
129
- a.fill_(value)
130
-
131
- assert_np_equal(a.numpy(), np.full(n, value, dtype=np.float32))
132
-
133
-
134
- def test_async_kernels_v1(test, device, use_mempools, use_graph):
135
- with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
136
- n = 100
137
- num_iters = 10
138
-
139
- with Capturable(use_graph):
140
- a = wp.zeros(n, dtype=float)
141
- for i in range(num_iters):
142
- wp.launch(inc, dim=a.size, inputs=[a])
143
-
144
- assert_np_equal(a.numpy(), np.full(n, num_iters, dtype=np.float32))
145
-
146
-
147
- def test_async_kernels_v2(test, device, use_mempools, use_graph):
148
- with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
149
- n = 100
150
- num_iters = 10
151
-
152
- a = wp.zeros(n, dtype=float)
153
-
154
- with Capturable(use_graph):
155
- for i in range(num_iters):
156
- wp.launch(inc, dim=a.size, inputs=[a])
157
-
158
- assert_np_equal(a.numpy(), np.full(n, num_iters, dtype=np.float32))
159
-
160
-
161
- class TestAsync(unittest.TestCase):
162
- pass
163
-
164
-
165
- # get all CUDA devices
166
- cuda_devices = wp.get_cuda_devices()
167
-
168
- # get CUDA devices that support mempools
169
- cuda_devices_with_mempools = []
170
- for d in cuda_devices:
171
- if d.is_mempool_supported:
172
- cuda_devices_with_mempools.append(d)
173
-
174
- # get a pair of CUDA devices that support mempool access
175
- cuda_devices_with_mempool_access = []
176
- for target_device in cuda_devices_with_mempools:
177
- for peer_device in cuda_devices_with_mempools:
178
- if peer_device != target_device:
179
- if wp.is_mempool_access_supported(target_device, peer_device):
180
- cuda_devices_with_mempool_access = [target_device, peer_device]
181
- break
182
- if cuda_devices_with_mempool_access:
183
- break
184
-
185
- def add_test_variants(
186
- func,
187
- device_count=1,
188
- graph_allocs=False,
189
- requires_mempool_access_with_graph=False,
190
- ):
191
-
192
- # test that works with default allocators
193
- if not graph_allocs and device_count <= len(cuda_devices):
194
- devices = cuda_devices[:device_count]
195
- func1 = lambda t, d: func(t, *devices, False, False)
196
- func2 = lambda t, d: func(t, *devices, False, True)
197
- name1 = f"{func.__name__}_DefaultAlloc_NoGraph"
198
- name2 = f"{func.__name__}_DefaultAlloc_WithGraph"
199
- if device_count == 1:
200
- add_function_test(TestAsync, name1, func1, devices=devices)
201
- add_function_test(TestAsync, name2, func2, devices=devices)
202
- else:
203
- add_function_test(TestAsync, name1, func1)
204
- add_function_test(TestAsync, name2, func2)
205
-
206
- # test that works with mempool allocators
207
- if device_count <= len(cuda_devices_with_mempools):
208
- devices = cuda_devices_with_mempools[:device_count]
209
- func3 = lambda t, d: func(t, *devices, True, False)
210
- name3 = f"{func.__name__}_MempoolAlloc_NoGraph"
211
- if device_count == 1:
212
- add_function_test(TestAsync, name3, func3, devices=devices)
213
- else:
214
- add_function_test(TestAsync, name3, func3)
215
-
216
- # test that requires devices with mutual mempool access during graph capture (e.g., p2p memcpy limitation)
217
- if requires_mempool_access_with_graph:
218
- suitable_devices = cuda_devices_with_mempool_access
219
- else:
220
- suitable_devices = cuda_devices_with_mempools
221
-
222
- if device_count <= len(suitable_devices):
223
- devices = suitable_devices[:device_count]
224
- func4 = lambda t, d: func(t, *devices, True, True)
225
- name4 = f"{func.__name__}_MempoolAlloc_WithGraph"
226
- if device_count == 1:
227
- add_function_test(TestAsync, name4, func4, devices=devices)
228
- else:
229
- add_function_test(TestAsync, name4, func4)
230
-
231
- add_test_variants(test_async_empty, graph_allocs=True)
232
- add_test_variants(test_async_zeros, graph_allocs=True)
233
- add_test_variants(test_async_zero_v1, graph_allocs=True)
234
- add_test_variants(test_async_zero_v2, graph_allocs=False)
235
- add_test_variants(test_async_full, graph_allocs=True)
236
- add_test_variants(test_async_fill_v1, graph_allocs=True)
237
- add_test_variants(test_async_fill_v2, graph_allocs=False)
238
- add_test_variants(test_async_kernels_v1, graph_allocs=True)
239
- add_test_variants(test_async_kernels_v2, graph_allocs=False)
240
-
241
-
242
- #=================================================================================
243
- # wp.copy() tests
244
- #=================================================================================
245
-
246
- def as_contiguous_array(data, device=None, grad_data=None):
247
- a = wp.array(data=data, device=device, copy=True)
248
- if grad_data is not None:
249
- a.grad = as_contiguous_array(grad_data, device=device)
250
- return a
251
-
252
-
253
- def as_strided_array(data, device=None, grad_data=None):
254
- a = wp.array(data=data, device=device)
255
- # make a copy with non-contiguous strides
256
- strides = (*a.strides[:-1], 2 * a.strides[-1])
257
- strided_a = wp.zeros(shape=a.shape, strides=strides, dtype=a.dtype, device=device)
258
- wp.copy(strided_a, a)
259
- if grad_data is not None:
260
- strided_a.grad = as_strided_array(grad_data, device=device)
261
- return strided_a
262
-
263
-
264
- def as_indexed_array(data, device=None, **kwargs):
265
- a = wp.array(data=data, device=device)
266
- # allocate double the elements so we can index half of them
267
- shape = (*a.shape[:-1], 2 * a.shape[-1])
268
- big_a = wp.zeros(shape=shape, dtype=a.dtype, device=device)
269
- indices = wp.array(data=np.arange(0, shape[-1], 2, dtype=np.int32), device=device)
270
- indexed_a = big_a[indices]
271
- wp.copy(indexed_a, a)
272
- return indexed_a
273
-
274
-
275
- def as_fabric_array(data, device=None, **kwargs):
276
- from warp.tests.test_fabricarray import _create_fabric_array_interface
277
- a = wp.array(data=data, device=device)
278
- iface = _create_fabric_array_interface(a, "foo")
279
- fa = wp.fabricarray(data=iface, attrib="foo")
280
- fa._iface = iface # save data reference
281
- return fa
282
-
283
-
284
- def as_indexed_fabric_array(data, device=None, **kwargs):
285
- from warp.tests.test_fabricarray import _create_fabric_array_interface
286
- a = wp.array(data=data, device=device)
287
- shape = (*a.shape[:-1], 2 * a.shape[-1])
288
- # allocate double the elements so we can index half of them
289
- big_a = wp.zeros(shape=shape, dtype=a.dtype, device=device)
290
- indices = wp.array(data=np.arange(0, shape[-1], 2, dtype=np.int32), device=device)
291
- iface = _create_fabric_array_interface(big_a, "foo", copy=True)
292
- fa = wp.fabricarray(data=iface, attrib="foo")
293
- fa._iface = iface # save data reference
294
- indexed_fa = fa[indices]
295
- wp.copy(indexed_fa, a)
296
- return indexed_fa
297
-
298
-
299
- class CopyParams:
300
- def __init__(self,
301
- with_grad=False, # whether to use arrays with gradients (contiguous and strided only)
302
- src_use_mempool=False, # whether to enable memory pool on source device
303
- dst_use_mempool=False, # whether to enable memory pool on destination device
304
- access_dst_src=False, # whether destination device has access to the source mempool
305
- access_src_dst=False, # whether source device has access to the destination mempool
306
- stream_device=None, # the device for the stream (None for default behaviour)
307
- use_graph=False, # whether to use a graph
308
- value_offset=0, # unique offset for generated data values per test
309
- ):
310
- self.with_grad = with_grad
311
- self.src_use_mempool = src_use_mempool
312
- self.dst_use_mempool = dst_use_mempool
313
- self.access_dst_src = access_dst_src
314
- self.access_src_dst = access_src_dst
315
- self.stream_device = stream_device
316
- self.use_graph = use_graph
317
- self.value_offset = value_offset
318
-
319
-
320
- def copy_template(
321
- test,
322
- src_ctor,
323
- dst_ctor,
324
- src_device,
325
- dst_device,
326
- n,
327
- params: CopyParams
328
- ):
329
-
330
- # activate the given memory pool configuration
331
- with wp.ScopedMempool(src_device, params.src_use_mempool), \
332
- wp.ScopedMempool(dst_device, params.dst_use_mempool), \
333
- wp.ScopedMempoolAccess(dst_device, src_device, params.access_dst_src), \
334
- wp.ScopedMempoolAccess(src_device, dst_device, params.access_src_dst):
335
-
336
- # make sure the data are different between tests by adding a unique offset
337
- # this avoids aliasing issues with older memory
338
- src_data = np.arange(params.value_offset, params.value_offset + n, dtype=np.float32)
339
- dst_data = np.zeros(n, dtype=np.float32)
340
-
341
- if params.with_grad:
342
- src_grad_data = -np.arange(params.value_offset, params.value_offset + n, dtype=np.float32)
343
- dst_grad_data = np.zeros(n, dtype=np.float32)
344
- else:
345
- src_grad_data = None
346
- dst_grad_data = None
347
-
348
- # create Warp arrays for the copy
349
- src = src_ctor(src_data, device=src_device, grad_data=src_grad_data)
350
- dst = dst_ctor(dst_data, device=dst_device, grad_data=dst_grad_data)
351
-
352
- # determine the stream argument to pass to wp.copy()
353
- if params.stream_device is not None:
354
- stream_arg = wp.Stream(params.stream_device)
355
- else:
356
- stream_arg = None
357
-
358
- # determine the actual stream used for the copy
359
- if stream_arg is not None:
360
- stream = stream_arg
361
- else:
362
- if dst_device.is_cuda:
363
- stream = dst_device.stream
364
- elif src_device.is_cuda:
365
- stream = src_device.stream
366
- else:
367
- stream = None
368
-
369
- # check if an exception is expected given the arguments and system configuration
370
- expected_error_type = None
371
- expected_error_regex = None
372
-
373
- # restrictions on copying between different devices during graph capture
374
- if params.use_graph and src_device != dst_device:
375
-
376
- # errors with allocating staging buffer on source device
377
- if not src.is_contiguous:
378
- if src_device.is_cuda and not src_device.is_mempool_enabled:
379
- # can't allocate staging buffer using default CUDA allocator during capture
380
- expected_error_type, expected_error_regex = RuntimeError, r"^Failed to allocate"
381
- elif src_device.is_cpu:
382
- # can't allocate CPU staging buffer during capture
383
- expected_error_type, expected_error_regex = RuntimeError, r"^Failed to allocate"
384
-
385
- # errors with allocating staging buffer on destination device
386
- if expected_error_type is None:
387
- if not dst.is_contiguous:
388
- if dst_device.is_cuda and not dst_device.is_mempool_enabled:
389
- # can't allocate staging buffer using default CUDA allocator during capture
390
- expected_error_type, expected_error_regex = RuntimeError, r"^Failed to allocate"
391
- elif dst_device.is_cpu and src_device.is_cuda:
392
- # can't allocate CPU staging buffer during capture
393
- expected_error_type, expected_error_regex = RuntimeError, r"^Failed to allocate"
394
-
395
- # p2p copies and mempool access
396
- if expected_error_type is None and src_device.is_cuda and dst_device.is_cuda:
397
-
398
- # If the source is a contiguous mempool allocation or a non-contiguous array
399
- # AND the destination is a contiguous mempool allocation or a non-contiguous array,
400
- # then memory pool access needs to be enabled EITHER from src_device to dst_device
401
- # OR from dst_device to src_device.
402
- if (((src.is_contiguous and params.src_use_mempool) or not src.is_contiguous) and
403
- ((dst.is_contiguous and params.dst_use_mempool) or not dst.is_contiguous) and
404
- not wp.is_mempool_access_enabled(src_device, dst_device) and
405
- not wp.is_mempool_access_enabled(dst_device, src_device)
406
- ):
407
- expected_error_type, expected_error_regex = RuntimeError, r"^Warp copy error"
408
-
409
- # synchronize before test
410
- wp.synchronize()
411
-
412
- if expected_error_type is not None:
413
- # disable error output from Warp if we expect an exception
414
- try:
415
- saved_error_output_enabled = wp.context.runtime.core.is_error_output_enabled()
416
- wp.context.runtime.core.set_error_output_enabled(False)
417
- with test.assertRaisesRegex(expected_error_type, expected_error_regex):
418
- with Capturable(use_graph=params.use_graph, stream=stream):
419
- wp.copy(dst, src, stream=stream_arg)
420
- finally:
421
- wp.context.runtime.core.set_error_output_enabled(saved_error_output_enabled)
422
- wp.synchronize()
423
-
424
- # print(f"SUCCESSFUL ERROR PREDICTION: {expected_error_regex}")
425
-
426
- else:
427
- with Capturable(use_graph=params.use_graph, stream=stream):
428
- wp.copy(dst, src, stream=stream_arg)
429
-
430
- # synchronize the stream where the copy was running (None for h2h copies)
431
- if stream is not None:
432
- wp.synchronize_stream(stream)
433
-
434
- assert_np_equal(dst.numpy(), src.numpy())
435
-
436
- if params.with_grad:
437
- assert_np_equal(dst.grad.numpy(), src.grad.numpy())
438
-
439
- # print("SUCCESSFUL COPY")
440
-
441
-
442
- array_constructors = {
443
- "contiguous": as_contiguous_array,
444
- "strided": as_strided_array,
445
- "indexed": as_indexed_array,
446
- "fabric": as_fabric_array,
447
- "indexedfabric": as_indexed_fabric_array,
448
- }
449
-
450
- array_type_codes = {
451
- "contiguous": "c",
452
- "strided": "s",
453
- "indexed": "i",
454
- "fabric": "f",
455
- "indexedfabric": "fi",
456
- }
457
-
458
- device_pairs = {}
459
- cpu = None
460
- cuda0 = None
461
- cuda1 = None
462
- cuda2 = None
463
- if wp.is_cpu_available():
464
- cpu = wp.get_device("cpu")
465
- device_pairs["h2h"] = (cpu, cpu)
466
- if wp.is_cuda_available():
467
- cuda0 = wp.get_device("cuda:0")
468
- device_pairs["d2d"] = (cuda0, cuda0)
469
- if wp.is_cpu_available():
470
- device_pairs["h2d"] = (cpu, cuda0)
471
- device_pairs["d2h"] = (cuda0, cpu)
472
- if wp.get_cuda_device_count() > 1:
473
- cuda1 = wp.get_device("cuda:1")
474
- device_pairs["p2p"] = (cuda0, cuda1)
475
- if wp.get_cuda_device_count() > 2:
476
- cuda2 = wp.get_device("cuda:2")
477
-
478
- num_copy_elems = 1000000
479
- num_copy_tests = 0
480
-
481
-
482
- def add_copy_test(test_name, src_ctor, dst_ctor, src_device, dst_device, n, params):
483
- test_func = \
484
- lambda test, device, src_ctor=src_ctor, dst_ctor=dst_ctor, src_device=src_device, dst_device=dst_device, n=n, params=params: \
485
- copy_template(test, src_ctor, dst_ctor, src_device, dst_device, n, params)
486
- add_function_test(TestAsync, test_name, test_func, check_output=False)
487
-
488
-
489
- # Procedurally add tests with argument combinations supported by the system.
490
- for src_type, src_ctor in array_constructors.items():
491
- for dst_type, dst_ctor in array_constructors.items():
492
-
493
- copy_type = f"{array_type_codes[src_type]}2{array_type_codes[dst_type]}"
494
-
495
- for transfer_type, device_pair in device_pairs.items():
496
-
497
- # skip p2p tests if IOMMU is enabled on Linux
498
- if transfer_type == "p2p" and not check_iommu():
499
- continue
500
-
501
- src_device = device_pair[0]
502
- dst_device = device_pair[1]
503
-
504
- # basic copy arguments
505
- copy_args = (src_ctor, dst_ctor, src_device, dst_device, num_copy_elems)
506
-
507
- if src_device.is_cuda and src_device.is_mempool_supported:
508
- src_mempool_flags = [False, True]
509
- else:
510
- src_mempool_flags = [False]
511
-
512
- if dst_device.is_cuda and dst_device.is_mempool_supported:
513
- dst_mempool_flags = [False, True]
514
- else:
515
- dst_mempool_flags = [False]
516
-
517
- # stream options
518
- if src_device.is_cuda:
519
- if dst_device.is_cuda:
520
- if src_device == dst_device:
521
- # d2d
522
- assert src_device == cuda0 and dst_device == cuda0
523
- if cuda1 is not None:
524
- stream_devices = [None, cuda0, cuda1]
525
- else:
526
- stream_devices = [None, cuda0]
527
- else:
528
- # p2p
529
- assert src_device == cuda0 and dst_device == cuda1
530
- if cuda2 is not None:
531
- stream_devices = [None, cuda0, cuda1, cuda2]
532
- else:
533
- stream_devices = [None, cuda0, cuda1]
534
- else:
535
- # d2h
536
- assert src_device == cuda0
537
- if cuda1 is not None:
538
- stream_devices = [None, cuda0, cuda1]
539
- else:
540
- stream_devices = [None, cuda0]
541
- else:
542
- if dst_device.is_cuda:
543
- # h2d
544
- assert dst_device == cuda0
545
- if cuda1 is not None:
546
- stream_devices = [None, cuda0, cuda1]
547
- else:
548
- stream_devices = [None, cuda0]
549
- else:
550
- # h2h
551
- stream_devices = [None]
552
-
553
- # gradient options (only supported with contiguous and strided arrays)
554
- if src_type in ("contiguous", "strided") and dst_type in ("contiguous", "strided"):
555
- grad_flags = [False, True]
556
- else:
557
- grad_flags = [False]
558
-
559
- # graph capture options (only supported with CUDA devices)
560
- if src_device.is_cuda or dst_device.is_cuda:
561
- graph_flags = [False, True]
562
- else:
563
- graph_flags = [False]
564
-
565
- # access from destination device to source mempool
566
- if wp.is_mempool_access_supported(dst_device, src_device):
567
- access_dst_src_flags = [False, True]
568
- else:
569
- access_dst_src_flags = [False]
570
-
571
- # access from source device to destination mempool
572
- if wp.is_mempool_access_supported(src_device, dst_device):
573
- access_src_dst_flags = [False, True]
574
- else:
575
- access_src_dst_flags = [False]
576
-
577
- for src_use_mempool in src_mempool_flags:
578
- for dst_use_mempool in dst_mempool_flags:
579
- for stream_device in stream_devices:
580
- for access_dst_src in access_dst_src_flags:
581
- for access_src_dst in access_src_dst_flags:
582
- for with_grad in grad_flags:
583
- for use_graph in graph_flags:
584
-
585
- test_name = f"test_copy_{copy_type}_{transfer_type}"
586
-
587
- if src_use_mempool:
588
- test_name += "_SrcPoolOn"
589
- else:
590
- test_name += "_SrcPoolOff"
591
-
592
- if dst_use_mempool:
593
- test_name += "_DstPoolOn"
594
- else:
595
- test_name += "_DstPoolOff"
596
-
597
- if stream_device is None:
598
- test_name += "_NoStream"
599
- elif stream_device == cuda0:
600
- test_name += "_Stream0"
601
- elif stream_device == cuda1:
602
- test_name += "_Stream1"
603
- elif stream_device == cuda2:
604
- test_name += "_Stream2"
605
- else:
606
- assert False
607
-
608
- if with_grad:
609
- test_name += "_Grad"
610
- else:
611
- test_name += "_NoGrad"
612
-
613
- if use_graph:
614
- test_name += "_Graph"
615
- else:
616
- test_name += "_NoGraph"
617
-
618
- if access_dst_src and access_src_dst:
619
- test_name += "_AccessBoth"
620
- elif access_dst_src and not access_src_dst:
621
- test_name += "_AccessDstSrc"
622
- elif not access_dst_src and access_src_dst:
623
- test_name += "_AccessSrcDst"
624
- else:
625
- test_name += "_AccessNone"
626
-
627
- copy_params = CopyParams(
628
- src_use_mempool=src_use_mempool,
629
- dst_use_mempool=dst_use_mempool,
630
- access_dst_src=access_dst_src,
631
- access_src_dst=access_src_dst,
632
- stream_device=stream_device,
633
- with_grad=with_grad,
634
- use_graph=use_graph,
635
- value_offset=num_copy_tests,
636
- )
637
-
638
- add_copy_test(test_name, *copy_args, copy_params)
639
-
640
- num_copy_tests += 1
641
-
642
- # Specify individual test(s) for debugging purposes
643
- # add_copy_test("test_a", as_contiguous_array, as_strided_array, cuda0, cuda1, num_copy_elems,
644
- # CopyParams(
645
- # src_use_mempool=True,
646
- # dst_use_mempool=True,
647
- # access_dst_src=False,
648
- # access_src_dst=False,
649
- # stream_device=cuda0,
650
- # with_grad=False,
651
- # use_graph=True,
652
- # value_offset=0))
653
-
654
- if __name__ == "__main__":
655
- wp.build.clear_kernel_cache()
656
- unittest.main(verbosity=2)
1
+ # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
2
+ # NVIDIA CORPORATION and its licensors retain all intellectual property
3
+ # and proprietary rights in and to this software, related documentation
4
+ # and any modifications thereto. Any use, reproduction, disclosure or
5
+ # distribution of this software and related documentation without an express
6
+ # license agreement from NVIDIA CORPORATION is strictly prohibited.
7
+
8
+ import unittest
9
+
10
+ import numpy as np
11
+
12
+ import warp as wp
13
+ from warp.tests.unittest_utils import *
14
+ from warp.utils import check_iommu
15
+
16
+
17
+ class Capturable:
18
+ def __init__(self, use_graph=True, stream=None):
19
+ self.use_graph = use_graph
20
+ self.stream = stream
21
+
22
+ def __enter__(self):
23
+ if self.use_graph:
24
+ wp.capture_begin(stream=self.stream)
25
+
26
+ def __exit__(self, exc_type, exc_value, traceback):
27
+ if self.use_graph:
28
+ try:
29
+ # need to call capture_end() to terminate the CUDA stream capture
30
+ graph = wp.capture_end(stream=self.stream)
31
+ except Exception:
32
+ # capture_end() will raise if there was an error during capture, but we squash it here
33
+ # if we already had an exception so that the original exception percolates to the caller
34
+ if exc_type is None:
35
+ raise
36
+ else:
37
+ # capture can succeed despite some errors during capture (e.g. cudaInvalidValue during copy)
38
+ # but if we had an exception during capture, don't launch the graph
39
+ if exc_type is None:
40
+ wp.capture_launch(graph, stream=self.stream)
41
+
42
+
43
+ @wp.kernel
44
+ def inc(a: wp.array(dtype=float)):
45
+ tid = wp.tid()
46
+ a[tid] = a[tid] + 1.0
47
+
48
+
49
+ def test_async_empty(test, device, use_mempools, use_graph):
50
+ with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
51
+ n = 100
52
+
53
+ with Capturable(use_graph):
54
+ a = wp.empty(n, dtype=float)
55
+
56
+ test.assertIsInstance(a, wp.array)
57
+ test.assertIsNotNone(a.ptr)
58
+ test.assertEqual(a.size, n)
59
+ test.assertEqual(a.dtype, wp.float32)
60
+ test.assertEqual(a.device, device)
61
+
62
+
63
+ def test_async_zeros(test, device, use_mempools, use_graph):
64
+ with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
65
+ n = 100
66
+
67
+ with Capturable(use_graph):
68
+ a = wp.zeros(n, dtype=float)
69
+
70
+ assert_np_equal(a.numpy(), np.zeros(n, dtype=np.float32))
71
+
72
+
73
+ def test_async_zero_v1(test, device, use_mempools, use_graph):
74
+ with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
75
+ n = 100
76
+
77
+ with Capturable(use_graph):
78
+ a = wp.empty(n, dtype=float)
79
+ a.zero_()
80
+
81
+ assert_np_equal(a.numpy(), np.zeros(n, dtype=np.float32))
82
+
83
+
84
+ def test_async_zero_v2(test, device, use_mempools, use_graph):
85
+ with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
86
+ n = 100
87
+
88
+ a = wp.empty(n, dtype=float)
89
+
90
+ with Capturable(use_graph):
91
+ a.zero_()
92
+
93
+ assert_np_equal(a.numpy(), np.zeros(n, dtype=np.float32))
94
+
95
+
96
+ def test_async_full(test, device, use_mempools, use_graph):
97
+ with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
98
+ n = 100
99
+ value = 42
100
+
101
+ with Capturable(use_graph):
102
+ a = wp.full(n, value, dtype=float)
103
+
104
+ assert_np_equal(a.numpy(), np.full(n, value, dtype=np.float32))
105
+
106
+
107
+ def test_async_fill_v1(test, device, use_mempools, use_graph):
108
+ with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
109
+ n = 100
110
+ value = 17
111
+
112
+ with Capturable(use_graph):
113
+ a = wp.empty(n, dtype=float)
114
+ a.fill_(value)
115
+
116
+ assert_np_equal(a.numpy(), np.full(n, value, dtype=np.float32))
117
+
118
+
119
+ def test_async_fill_v2(test, device, use_mempools, use_graph):
120
+ with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
121
+ n = 100
122
+ value = 17
123
+
124
+ a = wp.empty(n, dtype=float)
125
+
126
+ with Capturable(use_graph):
127
+ a.fill_(value)
128
+
129
+ assert_np_equal(a.numpy(), np.full(n, value, dtype=np.float32))
130
+
131
+
132
+ def test_async_kernels_v1(test, device, use_mempools, use_graph):
133
+ with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
134
+ n = 100
135
+ num_iters = 10
136
+
137
+ with Capturable(use_graph):
138
+ a = wp.zeros(n, dtype=float)
139
+ for _i in range(num_iters):
140
+ wp.launch(inc, dim=a.size, inputs=[a])
141
+
142
+ assert_np_equal(a.numpy(), np.full(n, num_iters, dtype=np.float32))
143
+
144
+
145
+ def test_async_kernels_v2(test, device, use_mempools, use_graph):
146
+ with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
147
+ n = 100
148
+ num_iters = 10
149
+
150
+ a = wp.zeros(n, dtype=float)
151
+
152
+ with Capturable(use_graph):
153
+ for _i in range(num_iters):
154
+ wp.launch(inc, dim=a.size, inputs=[a])
155
+
156
+ assert_np_equal(a.numpy(), np.full(n, num_iters, dtype=np.float32))
157
+
158
+
159
+ class TestAsync(unittest.TestCase):
160
+ pass
161
+
162
+
163
+ # get all CUDA devices
164
+ cuda_devices = wp.get_cuda_devices()
165
+
166
+ # get CUDA devices that support mempools
167
+ cuda_devices_with_mempools = []
168
+ for d in cuda_devices:
169
+ if d.is_mempool_supported:
170
+ cuda_devices_with_mempools.append(d)
171
+
172
+ # get a pair of CUDA devices that support mempool access
173
+ cuda_devices_with_mempool_access = []
174
+ for target_device in cuda_devices_with_mempools:
175
+ for peer_device in cuda_devices_with_mempools:
176
+ if peer_device != target_device:
177
+ if wp.is_mempool_access_supported(target_device, peer_device):
178
+ cuda_devices_with_mempool_access = [target_device, peer_device]
179
+ break
180
+ if cuda_devices_with_mempool_access:
181
+ break
182
+
183
+
184
+ def add_test_variants(
185
+ func,
186
+ device_count=1,
187
+ graph_allocs=False,
188
+ requires_mempool_access_with_graph=False,
189
+ ):
190
+ # test that works with default allocators
191
+ if not graph_allocs and device_count <= len(cuda_devices):
192
+ devices = cuda_devices[:device_count]
193
+
194
+ def func1(t, d):
195
+ return func(t, *devices, False, False)
196
+
197
+ def func2(t, d):
198
+ return func(t, *devices, False, True)
199
+
200
+ name1 = f"{func.__name__}_DefaultAlloc_NoGraph"
201
+ name2 = f"{func.__name__}_DefaultAlloc_WithGraph"
202
+ if device_count == 1:
203
+ add_function_test(TestAsync, name1, func1, devices=devices)
204
+ add_function_test(TestAsync, name2, func2, devices=devices)
205
+ else:
206
+ add_function_test(TestAsync, name1, func1)
207
+ add_function_test(TestAsync, name2, func2)
208
+
209
+ # test that works with mempool allocators
210
+ if device_count <= len(cuda_devices_with_mempools):
211
+ devices = cuda_devices_with_mempools[:device_count]
212
+
213
+ def func3(t, d):
214
+ return func(t, *devices, True, False)
215
+
216
+ name3 = f"{func.__name__}_MempoolAlloc_NoGraph"
217
+ if device_count == 1:
218
+ add_function_test(TestAsync, name3, func3, devices=devices)
219
+ else:
220
+ add_function_test(TestAsync, name3, func3)
221
+
222
+ # test that requires devices with mutual mempool access during graph capture (e.g., p2p memcpy limitation)
223
+ if requires_mempool_access_with_graph:
224
+ suitable_devices = cuda_devices_with_mempool_access
225
+ else:
226
+ suitable_devices = cuda_devices_with_mempools
227
+
228
+ if device_count <= len(suitable_devices):
229
+ devices = suitable_devices[:device_count]
230
+
231
+ def func4(t, d):
232
+ return func(t, *devices, True, True)
233
+
234
+ name4 = f"{func.__name__}_MempoolAlloc_WithGraph"
235
+ if device_count == 1:
236
+ add_function_test(TestAsync, name4, func4, devices=devices)
237
+ else:
238
+ add_function_test(TestAsync, name4, func4)
239
+
240
+
241
+ add_test_variants(test_async_empty, graph_allocs=True)
242
+ add_test_variants(test_async_zeros, graph_allocs=True)
243
+ add_test_variants(test_async_zero_v1, graph_allocs=True)
244
+ add_test_variants(test_async_zero_v2, graph_allocs=False)
245
+ add_test_variants(test_async_full, graph_allocs=True)
246
+ add_test_variants(test_async_fill_v1, graph_allocs=True)
247
+ add_test_variants(test_async_fill_v2, graph_allocs=False)
248
+ add_test_variants(test_async_kernels_v1, graph_allocs=True)
249
+ add_test_variants(test_async_kernels_v2, graph_allocs=False)
250
+
251
+
252
+ # =================================================================================
253
+ # wp.copy() tests
254
+ # =================================================================================
255
+
256
+
257
+ def as_contiguous_array(data, device=None, grad_data=None):
258
+ a = wp.array(data=data, device=device, copy=True)
259
+ if grad_data is not None:
260
+ a.grad = as_contiguous_array(grad_data, device=device)
261
+ return a
262
+
263
+
264
+ def as_strided_array(data, device=None, grad_data=None):
265
+ a = wp.array(data=data, device=device)
266
+ # make a copy with non-contiguous strides
267
+ strides = (*a.strides[:-1], 2 * a.strides[-1])
268
+ strided_a = wp.zeros(shape=a.shape, strides=strides, dtype=a.dtype, device=device)
269
+ wp.copy(strided_a, a)
270
+ if grad_data is not None:
271
+ strided_a.grad = as_strided_array(grad_data, device=device)
272
+ return strided_a
273
+
274
+
275
+ def as_indexed_array(data, device=None, **kwargs):
276
+ a = wp.array(data=data, device=device)
277
+ # allocate double the elements so we can index half of them
278
+ shape = (*a.shape[:-1], 2 * a.shape[-1])
279
+ big_a = wp.zeros(shape=shape, dtype=a.dtype, device=device)
280
+ indices = wp.array(data=np.arange(0, shape[-1], 2, dtype=np.int32), device=device)
281
+ indexed_a = big_a[indices]
282
+ wp.copy(indexed_a, a)
283
+ return indexed_a
284
+
285
+
286
+ def as_fabric_array(data, device=None, **kwargs):
287
+ from warp.tests.test_fabricarray import _create_fabric_array_interface
288
+
289
+ a = wp.array(data=data, device=device)
290
+ iface = _create_fabric_array_interface(a, "foo")
291
+ fa = wp.fabricarray(data=iface, attrib="foo")
292
+ fa._iface = iface # save data reference
293
+ return fa
294
+
295
+
296
+ def as_indexed_fabric_array(data, device=None, **kwargs):
297
+ from warp.tests.test_fabricarray import _create_fabric_array_interface
298
+
299
+ a = wp.array(data=data, device=device)
300
+ shape = (*a.shape[:-1], 2 * a.shape[-1])
301
+ # allocate double the elements so we can index half of them
302
+ big_a = wp.zeros(shape=shape, dtype=a.dtype, device=device)
303
+ indices = wp.array(data=np.arange(0, shape[-1], 2, dtype=np.int32), device=device)
304
+ iface = _create_fabric_array_interface(big_a, "foo", copy=True)
305
+ fa = wp.fabricarray(data=iface, attrib="foo")
306
+ fa._iface = iface # save data reference
307
+ indexed_fa = fa[indices]
308
+ wp.copy(indexed_fa, a)
309
+ return indexed_fa
310
+
311
+
312
+ class CopyParams:
313
+ def __init__(
314
+ self,
315
+ with_grad=False, # whether to use arrays with gradients (contiguous and strided only)
316
+ src_use_mempool=False, # whether to enable memory pool on source device
317
+ dst_use_mempool=False, # whether to enable memory pool on destination device
318
+ access_dst_src=False, # whether destination device has access to the source mempool
319
+ access_src_dst=False, # whether source device has access to the destination mempool
320
+ stream_device=None, # the device for the stream (None for default behaviour)
321
+ use_graph=False, # whether to use a graph
322
+ value_offset=0, # unique offset for generated data values per test
323
+ ):
324
+ self.with_grad = with_grad
325
+ self.src_use_mempool = src_use_mempool
326
+ self.dst_use_mempool = dst_use_mempool
327
+ self.access_dst_src = access_dst_src
328
+ self.access_src_dst = access_src_dst
329
+ self.stream_device = stream_device
330
+ self.use_graph = use_graph
331
+ self.value_offset = value_offset
332
+
333
+
334
+ def copy_template(test, src_ctor, dst_ctor, src_device, dst_device, n, params: CopyParams):
335
+ # activate the given memory pool configuration
336
+ with wp.ScopedMempool(src_device, params.src_use_mempool), wp.ScopedMempool(
337
+ dst_device, params.dst_use_mempool
338
+ ), wp.ScopedMempoolAccess(dst_device, src_device, params.access_dst_src), wp.ScopedMempoolAccess(
339
+ src_device, dst_device, params.access_src_dst
340
+ ):
341
+ # make sure the data are different between tests by adding a unique offset
342
+ # this avoids aliasing issues with older memory
343
+ src_data = np.arange(params.value_offset, params.value_offset + n, dtype=np.float32)
344
+ dst_data = np.zeros(n, dtype=np.float32)
345
+
346
+ if params.with_grad:
347
+ src_grad_data = -np.arange(params.value_offset, params.value_offset + n, dtype=np.float32)
348
+ dst_grad_data = np.zeros(n, dtype=np.float32)
349
+ else:
350
+ src_grad_data = None
351
+ dst_grad_data = None
352
+
353
+ # create Warp arrays for the copy
354
+ src = src_ctor(src_data, device=src_device, grad_data=src_grad_data)
355
+ dst = dst_ctor(dst_data, device=dst_device, grad_data=dst_grad_data)
356
+
357
+ # determine the stream argument to pass to wp.copy()
358
+ if params.stream_device is not None:
359
+ stream_arg = wp.Stream(params.stream_device)
360
+ else:
361
+ stream_arg = None
362
+
363
+ # determine the actual stream used for the copy
364
+ if stream_arg is not None:
365
+ stream = stream_arg
366
+ else:
367
+ if dst_device.is_cuda:
368
+ stream = dst_device.stream
369
+ elif src_device.is_cuda:
370
+ stream = src_device.stream
371
+ else:
372
+ stream = None
373
+
374
+ # check if an exception is expected given the arguments and system configuration
375
+ expected_error_type = None
376
+ expected_error_regex = None
377
+
378
+ # restrictions on copying between different devices during graph capture
379
+ if params.use_graph and src_device != dst_device:
380
+ # errors with allocating staging buffer on source device
381
+ if not src.is_contiguous:
382
+ if src_device.is_cuda and not src_device.is_mempool_enabled:
383
+ # can't allocate staging buffer using default CUDA allocator during capture
384
+ expected_error_type, expected_error_regex = RuntimeError, r"^Failed to allocate"
385
+ elif src_device.is_cpu:
386
+ # can't allocate CPU staging buffer during capture
387
+ expected_error_type, expected_error_regex = RuntimeError, r"^Failed to allocate"
388
+
389
+ # errors with allocating staging buffer on destination device
390
+ if expected_error_type is None:
391
+ if not dst.is_contiguous:
392
+ if dst_device.is_cuda and not dst_device.is_mempool_enabled:
393
+ # can't allocate staging buffer using default CUDA allocator during capture
394
+ expected_error_type, expected_error_regex = RuntimeError, r"^Failed to allocate"
395
+ elif dst_device.is_cpu and src_device.is_cuda:
396
+ # can't allocate CPU staging buffer during capture
397
+ expected_error_type, expected_error_regex = RuntimeError, r"^Failed to allocate"
398
+
399
+ # p2p copies and mempool access
400
+ if expected_error_type is None and src_device.is_cuda and dst_device.is_cuda:
401
+ # If the source is a contiguous mempool allocation or a non-contiguous array
402
+ # AND the destination is a contiguous mempool allocation or a non-contiguous array,
403
+ # then memory pool access needs to be enabled EITHER from src_device to dst_device
404
+ # OR from dst_device to src_device.
405
+ if (
406
+ ((src.is_contiguous and params.src_use_mempool) or not src.is_contiguous)
407
+ and ((dst.is_contiguous and params.dst_use_mempool) or not dst.is_contiguous)
408
+ and not wp.is_mempool_access_enabled(src_device, dst_device)
409
+ and not wp.is_mempool_access_enabled(dst_device, src_device)
410
+ ):
411
+ expected_error_type, expected_error_regex = RuntimeError, r"^Warp copy error"
412
+
+     # synchronize before test
+     wp.synchronize()
+
+     if expected_error_type is not None:
+         # disable error output from Warp if we expect an exception
+         try:
+             saved_error_output_enabled = wp.context.runtime.core.is_error_output_enabled()
+             wp.context.runtime.core.set_error_output_enabled(False)
+             with test.assertRaisesRegex(expected_error_type, expected_error_regex):
+                 with Capturable(use_graph=params.use_graph, stream=stream):
+                     wp.copy(dst, src, stream=stream_arg)
+         finally:
+             wp.context.runtime.core.set_error_output_enabled(saved_error_output_enabled)
+             wp.synchronize()
+
+         # print(f"SUCCESSFUL ERROR PREDICTION: {expected_error_regex}")
+
+     else:
+         with Capturable(use_graph=params.use_graph, stream=stream):
+             wp.copy(dst, src, stream=stream_arg)
+
+         # synchronize the stream where the copy was running (None for h2h copies)
+         if stream is not None:
+             wp.synchronize_stream(stream)
+
+         assert_np_equal(dst.numpy(), src.numpy())
+
+         if params.with_grad:
+             assert_np_equal(dst.grad.numpy(), src.grad.numpy())
+
+         # print("SUCCESSFUL COPY")
+
+
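For context, a minimal sketch of the kind of transfer the template above exercises in its hardest configuration: a peer-to-peer copy recorded into a CUDA graph with memory-pool access enabled between the two devices. The sketch is illustrative only and not part of the package diff; it assumes two CUDA devices are present and that wp.set_mempool_access_enabled() takes its device arguments in the same order as the wp.is_mempool_access_* queries used in this file.

import warp as wp

wp.init()
cuda0 = wp.get_device("cuda:0")
cuda1 = wp.get_device("cuda:1")

# contiguous arrays allocated up front, before any graph capture begins
src = wp.zeros(1024, dtype=wp.float32, device=cuda0)
dst = wp.zeros(1024, dtype=wp.float32, device=cuda1)

# allow access from the destination device to the source mempool,
# mirroring the access_dst_src option used by the generated tests
if wp.is_mempool_access_supported(cuda1, cuda0):
    wp.set_mempool_access_enabled(cuda1, cuda0, True)

# record the copy into a graph on the destination device's stream,
# then replay the graph and wait for it to finish
wp.capture_begin(device=cuda1)
try:
    wp.copy(dst, src)
finally:
    graph = wp.capture_end(device=cuda1)

wp.capture_launch(graph)
wp.synchronize_device(cuda1)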
+ array_constructors = {
+     "contiguous": as_contiguous_array,
+     "strided": as_strided_array,
+     "indexed": as_indexed_array,
+     "fabric": as_fabric_array,
+     "indexedfabric": as_indexed_fabric_array,
+ }
+
+ array_type_codes = {
+     "contiguous": "c",
+     "strided": "s",
+     "indexed": "i",
+     "fabric": "f",
+     "indexedfabric": "fi",
+ }
+
+ device_pairs = {}
+ cpu = None
+ cuda0 = None
+ cuda1 = None
+ cuda2 = None
+ if wp.is_cpu_available():
+     cpu = wp.get_device("cpu")
+     device_pairs["h2h"] = (cpu, cpu)
+ if wp.is_cuda_available():
+     cuda0 = wp.get_device("cuda:0")
+     device_pairs["d2d"] = (cuda0, cuda0)
+     if wp.is_cpu_available():
+         device_pairs["h2d"] = (cpu, cuda0)
+         device_pairs["d2h"] = (cuda0, cpu)
+     if wp.get_cuda_device_count() > 1:
+         cuda1 = wp.get_device("cuda:1")
+         device_pairs["p2p"] = (cuda0, cuda1)
+     if wp.get_cuda_device_count() > 2:
+         cuda2 = wp.get_device("cuda:2")
+
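On a typical single-GPU machine with a CPU device available, the mapping built above therefore covers four transfer types; a hypothetical rendering of its contents (not part of the diff):

# device_pairs == {
#     "h2h": (cpu, cpu),
#     "d2d": (cuda0, cuda0),
#     "h2d": (cpu, cuda0),
#     "d2h": (cuda0, cpu),
# }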
+ num_copy_elems = 1000000
+ num_copy_tests = 0
+
+
+ def add_copy_test(test_name, src_ctor, dst_ctor, src_device, dst_device, n, params):
+     def test_func(
+         test,
+         device,
+         src_ctor=src_ctor,
+         dst_ctor=dst_ctor,
+         src_device=src_device,
+         dst_device=dst_device,
+         n=n,
+         params=params,
+     ):
+         return copy_template(test, src_ctor, dst_ctor, src_device, dst_device, n, params)
+
+     add_function_test(TestAsync, test_name, test_func, check_output=False)
+
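The keyword defaults on test_func above are what bind the current loop values to each generated test; a plain closure would instead see only the values from the final iteration of the loops below. A short, self-contained illustration of that Python behaviour (hypothetical, not part of the diff):

late = [lambda: i for i in range(3)]
bound = [lambda i=i: i for i in range(3)]
print([f() for f in late])   # [2, 2, 2] - closures share the final value of i
print([f() for f in bound])  # [0, 1, 2] - default arguments capture each value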
+
+ # Procedurally add tests with argument combinations supported by the system.
+ for src_type, src_ctor in array_constructors.items():
+     for dst_type, dst_ctor in array_constructors.items():
+         copy_type = f"{array_type_codes[src_type]}2{array_type_codes[dst_type]}"
+
+         for transfer_type, device_pair in device_pairs.items():
+             # skip p2p tests if IOMMU is enabled on Linux
+             if transfer_type == "p2p" and not check_iommu():
+                 continue
+
+             src_device = device_pair[0]
+             dst_device = device_pair[1]
+
+             # basic copy arguments
+             copy_args = (src_ctor, dst_ctor, src_device, dst_device, num_copy_elems)
+
+             if src_device.is_cuda and src_device.is_mempool_supported:
+                 src_mempool_flags = [False, True]
+             else:
+                 src_mempool_flags = [False]
+
+             if dst_device.is_cuda and dst_device.is_mempool_supported:
+                 dst_mempool_flags = [False, True]
+             else:
+                 dst_mempool_flags = [False]
+
+             # stream options
+             if src_device.is_cuda:
+                 if dst_device.is_cuda:
+                     if src_device == dst_device:
+                         # d2d
+                         assert src_device == cuda0 and dst_device == cuda0
+                         if cuda1 is not None:
+                             stream_devices = [None, cuda0, cuda1]
+                         else:
+                             stream_devices = [None, cuda0]
+                     else:
+                         # p2p
+                         assert src_device == cuda0 and dst_device == cuda1
+                         if cuda2 is not None:
+                             stream_devices = [None, cuda0, cuda1, cuda2]
+                         else:
+                             stream_devices = [None, cuda0, cuda1]
+                 else:
+                     # d2h
+                     assert src_device == cuda0
+                     if cuda1 is not None:
+                         stream_devices = [None, cuda0, cuda1]
+                     else:
+                         stream_devices = [None, cuda0]
+             else:
+                 if dst_device.is_cuda:
+                     # h2d
+                     assert dst_device == cuda0
+                     if cuda1 is not None:
+                         stream_devices = [None, cuda0, cuda1]
+                     else:
+                         stream_devices = [None, cuda0]
+                 else:
+                     # h2h
+                     stream_devices = [None]
+
+             # gradient options (only supported with contiguous and strided arrays)
+             if src_type in ("contiguous", "strided") and dst_type in ("contiguous", "strided"):
+                 grad_flags = [False, True]
+             else:
+                 grad_flags = [False]
+
+             # graph capture options (only supported with CUDA devices)
+             if src_device.is_cuda or dst_device.is_cuda:
+                 graph_flags = [False, True]
+             else:
+                 graph_flags = [False]
+
+             # access from destination device to source mempool
+             if wp.is_mempool_access_supported(dst_device, src_device):
+                 access_dst_src_flags = [False, True]
+             else:
+                 access_dst_src_flags = [False]
+
+             # access from source device to destination mempool
+             if wp.is_mempool_access_supported(src_device, dst_device):
+                 access_src_dst_flags = [False, True]
+             else:
+                 access_src_dst_flags = [False]
+
+             for src_use_mempool in src_mempool_flags:
+                 for dst_use_mempool in dst_mempool_flags:
+                     for stream_device in stream_devices:
+                         for access_dst_src in access_dst_src_flags:
+                             for access_src_dst in access_src_dst_flags:
+                                 for with_grad in grad_flags:
+                                     for use_graph in graph_flags:
+                                         test_name = f"test_copy_{copy_type}_{transfer_type}"
+
+                                         if src_use_mempool:
+                                             test_name += "_SrcPoolOn"
+                                         else:
+                                             test_name += "_SrcPoolOff"
+
+                                         if dst_use_mempool:
+                                             test_name += "_DstPoolOn"
+                                         else:
+                                             test_name += "_DstPoolOff"
+
+                                         if stream_device is None:
+                                             test_name += "_NoStream"
+                                         elif stream_device == cuda0:
+                                             test_name += "_Stream0"
+                                         elif stream_device == cuda1:
+                                             test_name += "_Stream1"
+                                         elif stream_device == cuda2:
+                                             test_name += "_Stream2"
+                                         else:
+                                             raise AssertionError
+
+                                         if with_grad:
+                                             test_name += "_Grad"
+                                         else:
+                                             test_name += "_NoGrad"
+
+                                         if use_graph:
+                                             test_name += "_Graph"
+                                         else:
+                                             test_name += "_NoGraph"
+
+                                         if access_dst_src and access_src_dst:
+                                             test_name += "_AccessBoth"
+                                         elif access_dst_src and not access_src_dst:
+                                             test_name += "_AccessDstSrc"
+                                         elif not access_dst_src and access_src_dst:
+                                             test_name += "_AccessSrcDst"
+                                         else:
+                                             test_name += "_AccessNone"
+
+                                         copy_params = CopyParams(
+                                             src_use_mempool=src_use_mempool,
+                                             dst_use_mempool=dst_use_mempool,
+                                             access_dst_src=access_dst_src,
+                                             access_src_dst=access_src_dst,
+                                             stream_device=stream_device,
+                                             with_grad=with_grad,
+                                             use_graph=use_graph,
+                                             value_offset=num_copy_tests,
+                                         )
+
+                                         add_copy_test(test_name, *copy_args, copy_params)
+
+                                         num_copy_tests += 1
+
+ # Specify individual test(s) for debugging purposes
+ # add_copy_test("test_a", as_contiguous_array, as_strided_array, cuda0, cuda1, num_copy_elems,
+ #               CopyParams(
+ #                   src_use_mempool=True,
+ #                   dst_use_mempool=True,
+ #                   access_dst_src=False,
+ #                   access_src_dst=False,
+ #                   stream_device=cuda0,
+ #                   with_grad=False,
+ #                   use_graph=True,
+ #                   value_offset=0))
+
+ if __name__ == "__main__":
+     wp.build.clear_kernel_cache()
+     unittest.main(verbosity=2)
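Because the combinations above are registered on TestAsync at import time and the module ends with unittest.main(), the generated copy tests can be run directly and narrowed with unittest's -k substring matching. A hypothetical invocation on a machine with at least one CUDA GPU (the module path is assumed, not confirmed by this diff):

python warp/tests/test_async.py -k test_copy_c2c_d2d_SrcPoolOff_DstPoolOff_NoStream_NoGrad_NoGraph_AccessNone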