warp-lang 1.7.0 (warp_lang-1.7.0-py3-none-manylinux_2_28_x86_64.whl)

This diff shows the contents of publicly released package versions as they appear in their public registries. It is provided for informational purposes only and reflects the changes between package versions.

Note: the registry flags this release of warp-lang as potentially problematic; see the registry page for details.
Files changed (429)
  1. warp/__init__.py +139 -0
  2. warp/__init__.pyi +1 -0
  3. warp/autograd.py +1142 -0
  4. warp/bin/warp-clang.so +0 -0
  5. warp/bin/warp.so +0 -0
  6. warp/build.py +557 -0
  7. warp/build_dll.py +405 -0
  8. warp/builtins.py +6855 -0
  9. warp/codegen.py +3969 -0
  10. warp/config.py +158 -0
  11. warp/constants.py +57 -0
  12. warp/context.py +6812 -0
  13. warp/dlpack.py +462 -0
  14. warp/examples/__init__.py +24 -0
  15. warp/examples/assets/bear.usd +0 -0
  16. warp/examples/assets/bunny.usd +0 -0
  17. warp/examples/assets/cartpole.urdf +110 -0
  18. warp/examples/assets/crazyflie.usd +0 -0
  19. warp/examples/assets/cube.usd +0 -0
  20. warp/examples/assets/nonuniform.usd +0 -0
  21. warp/examples/assets/nv_ant.xml +92 -0
  22. warp/examples/assets/nv_humanoid.xml +183 -0
  23. warp/examples/assets/nvidia_logo.png +0 -0
  24. warp/examples/assets/pixel.jpg +0 -0
  25. warp/examples/assets/quadruped.urdf +268 -0
  26. warp/examples/assets/rocks.nvdb +0 -0
  27. warp/examples/assets/rocks.usd +0 -0
  28. warp/examples/assets/sphere.usd +0 -0
  29. warp/examples/assets/square_cloth.usd +0 -0
  30. warp/examples/benchmarks/benchmark_api.py +389 -0
  31. warp/examples/benchmarks/benchmark_cloth.py +296 -0
  32. warp/examples/benchmarks/benchmark_cloth_cupy.py +96 -0
  33. warp/examples/benchmarks/benchmark_cloth_jax.py +105 -0
  34. warp/examples/benchmarks/benchmark_cloth_numba.py +161 -0
  35. warp/examples/benchmarks/benchmark_cloth_numpy.py +85 -0
  36. warp/examples/benchmarks/benchmark_cloth_paddle.py +94 -0
  37. warp/examples/benchmarks/benchmark_cloth_pytorch.py +94 -0
  38. warp/examples/benchmarks/benchmark_cloth_taichi.py +120 -0
  39. warp/examples/benchmarks/benchmark_cloth_warp.py +153 -0
  40. warp/examples/benchmarks/benchmark_gemm.py +164 -0
  41. warp/examples/benchmarks/benchmark_interop_paddle.py +166 -0
  42. warp/examples/benchmarks/benchmark_interop_torch.py +166 -0
  43. warp/examples/benchmarks/benchmark_launches.py +301 -0
  44. warp/examples/benchmarks/benchmark_tile_load_store.py +103 -0
  45. warp/examples/browse.py +37 -0
  46. warp/examples/core/example_cupy.py +86 -0
  47. warp/examples/core/example_dem.py +241 -0
  48. warp/examples/core/example_fluid.py +299 -0
  49. warp/examples/core/example_graph_capture.py +150 -0
  50. warp/examples/core/example_marching_cubes.py +194 -0
  51. warp/examples/core/example_mesh.py +180 -0
  52. warp/examples/core/example_mesh_intersect.py +211 -0
  53. warp/examples/core/example_nvdb.py +182 -0
  54. warp/examples/core/example_raycast.py +111 -0
  55. warp/examples/core/example_raymarch.py +205 -0
  56. warp/examples/core/example_render_opengl.py +193 -0
  57. warp/examples/core/example_sample_mesh.py +300 -0
  58. warp/examples/core/example_sph.py +411 -0
  59. warp/examples/core/example_torch.py +211 -0
  60. warp/examples/core/example_wave.py +269 -0
  61. warp/examples/fem/example_adaptive_grid.py +286 -0
  62. warp/examples/fem/example_apic_fluid.py +423 -0
  63. warp/examples/fem/example_burgers.py +261 -0
  64. warp/examples/fem/example_convection_diffusion.py +178 -0
  65. warp/examples/fem/example_convection_diffusion_dg.py +204 -0
  66. warp/examples/fem/example_deformed_geometry.py +172 -0
  67. warp/examples/fem/example_diffusion.py +196 -0
  68. warp/examples/fem/example_diffusion_3d.py +225 -0
  69. warp/examples/fem/example_diffusion_mgpu.py +220 -0
  70. warp/examples/fem/example_distortion_energy.py +228 -0
  71. warp/examples/fem/example_magnetostatics.py +240 -0
  72. warp/examples/fem/example_mixed_elasticity.py +291 -0
  73. warp/examples/fem/example_navier_stokes.py +261 -0
  74. warp/examples/fem/example_nonconforming_contact.py +298 -0
  75. warp/examples/fem/example_stokes.py +213 -0
  76. warp/examples/fem/example_stokes_transfer.py +262 -0
  77. warp/examples/fem/example_streamlines.py +352 -0
  78. warp/examples/fem/utils.py +1000 -0
  79. warp/examples/interop/example_jax_callable.py +116 -0
  80. warp/examples/interop/example_jax_ffi_callback.py +132 -0
  81. warp/examples/interop/example_jax_kernel.py +205 -0
  82. warp/examples/optim/example_bounce.py +266 -0
  83. warp/examples/optim/example_cloth_throw.py +228 -0
  84. warp/examples/optim/example_diffray.py +561 -0
  85. warp/examples/optim/example_drone.py +870 -0
  86. warp/examples/optim/example_fluid_checkpoint.py +497 -0
  87. warp/examples/optim/example_inverse_kinematics.py +182 -0
  88. warp/examples/optim/example_inverse_kinematics_torch.py +191 -0
  89. warp/examples/optim/example_softbody_properties.py +400 -0
  90. warp/examples/optim/example_spring_cage.py +245 -0
  91. warp/examples/optim/example_trajectory.py +227 -0
  92. warp/examples/sim/example_cartpole.py +143 -0
  93. warp/examples/sim/example_cloth.py +225 -0
  94. warp/examples/sim/example_cloth_self_contact.py +322 -0
  95. warp/examples/sim/example_granular.py +130 -0
  96. warp/examples/sim/example_granular_collision_sdf.py +202 -0
  97. warp/examples/sim/example_jacobian_ik.py +244 -0
  98. warp/examples/sim/example_particle_chain.py +124 -0
  99. warp/examples/sim/example_quadruped.py +203 -0
  100. warp/examples/sim/example_rigid_chain.py +203 -0
  101. warp/examples/sim/example_rigid_contact.py +195 -0
  102. warp/examples/sim/example_rigid_force.py +133 -0
  103. warp/examples/sim/example_rigid_gyroscopic.py +115 -0
  104. warp/examples/sim/example_rigid_soft_contact.py +140 -0
  105. warp/examples/sim/example_soft_body.py +196 -0
  106. warp/examples/tile/example_tile_cholesky.py +87 -0
  107. warp/examples/tile/example_tile_convolution.py +66 -0
  108. warp/examples/tile/example_tile_fft.py +55 -0
  109. warp/examples/tile/example_tile_filtering.py +113 -0
  110. warp/examples/tile/example_tile_matmul.py +85 -0
  111. warp/examples/tile/example_tile_mlp.py +383 -0
  112. warp/examples/tile/example_tile_nbody.py +199 -0
  113. warp/examples/tile/example_tile_walker.py +327 -0
  114. warp/fabric.py +355 -0
  115. warp/fem/__init__.py +106 -0
  116. warp/fem/adaptivity.py +508 -0
  117. warp/fem/cache.py +572 -0
  118. warp/fem/dirichlet.py +202 -0
  119. warp/fem/domain.py +411 -0
  120. warp/fem/field/__init__.py +125 -0
  121. warp/fem/field/field.py +619 -0
  122. warp/fem/field/nodal_field.py +326 -0
  123. warp/fem/field/restriction.py +37 -0
  124. warp/fem/field/virtual.py +848 -0
  125. warp/fem/geometry/__init__.py +32 -0
  126. warp/fem/geometry/adaptive_nanogrid.py +857 -0
  127. warp/fem/geometry/closest_point.py +84 -0
  128. warp/fem/geometry/deformed_geometry.py +221 -0
  129. warp/fem/geometry/element.py +776 -0
  130. warp/fem/geometry/geometry.py +362 -0
  131. warp/fem/geometry/grid_2d.py +392 -0
  132. warp/fem/geometry/grid_3d.py +452 -0
  133. warp/fem/geometry/hexmesh.py +911 -0
  134. warp/fem/geometry/nanogrid.py +571 -0
  135. warp/fem/geometry/partition.py +389 -0
  136. warp/fem/geometry/quadmesh.py +663 -0
  137. warp/fem/geometry/tetmesh.py +855 -0
  138. warp/fem/geometry/trimesh.py +806 -0
  139. warp/fem/integrate.py +2335 -0
  140. warp/fem/linalg.py +419 -0
  141. warp/fem/operator.py +293 -0
  142. warp/fem/polynomial.py +229 -0
  143. warp/fem/quadrature/__init__.py +17 -0
  144. warp/fem/quadrature/pic_quadrature.py +299 -0
  145. warp/fem/quadrature/quadrature.py +591 -0
  146. warp/fem/space/__init__.py +228 -0
  147. warp/fem/space/basis_function_space.py +468 -0
  148. warp/fem/space/basis_space.py +667 -0
  149. warp/fem/space/dof_mapper.py +251 -0
  150. warp/fem/space/function_space.py +309 -0
  151. warp/fem/space/grid_2d_function_space.py +177 -0
  152. warp/fem/space/grid_3d_function_space.py +227 -0
  153. warp/fem/space/hexmesh_function_space.py +257 -0
  154. warp/fem/space/nanogrid_function_space.py +201 -0
  155. warp/fem/space/partition.py +367 -0
  156. warp/fem/space/quadmesh_function_space.py +223 -0
  157. warp/fem/space/restriction.py +179 -0
  158. warp/fem/space/shape/__init__.py +143 -0
  159. warp/fem/space/shape/cube_shape_function.py +1105 -0
  160. warp/fem/space/shape/shape_function.py +133 -0
  161. warp/fem/space/shape/square_shape_function.py +926 -0
  162. warp/fem/space/shape/tet_shape_function.py +834 -0
  163. warp/fem/space/shape/triangle_shape_function.py +672 -0
  164. warp/fem/space/tetmesh_function_space.py +271 -0
  165. warp/fem/space/topology.py +424 -0
  166. warp/fem/space/trimesh_function_space.py +194 -0
  167. warp/fem/types.py +99 -0
  168. warp/fem/utils.py +420 -0
  169. warp/jax.py +187 -0
  170. warp/jax_experimental/__init__.py +16 -0
  171. warp/jax_experimental/custom_call.py +351 -0
  172. warp/jax_experimental/ffi.py +698 -0
  173. warp/jax_experimental/xla_ffi.py +602 -0
  174. warp/math.py +244 -0
  175. warp/native/array.h +1145 -0
  176. warp/native/builtin.h +1800 -0
  177. warp/native/bvh.cpp +492 -0
  178. warp/native/bvh.cu +791 -0
  179. warp/native/bvh.h +554 -0
  180. warp/native/clang/clang.cpp +536 -0
  181. warp/native/coloring.cpp +613 -0
  182. warp/native/crt.cpp +51 -0
  183. warp/native/crt.h +362 -0
  184. warp/native/cuda_crt.h +1058 -0
  185. warp/native/cuda_util.cpp +646 -0
  186. warp/native/cuda_util.h +307 -0
  187. warp/native/error.cpp +77 -0
  188. warp/native/error.h +36 -0
  189. warp/native/exports.h +1878 -0
  190. warp/native/fabric.h +245 -0
  191. warp/native/hashgrid.cpp +311 -0
  192. warp/native/hashgrid.cu +87 -0
  193. warp/native/hashgrid.h +240 -0
  194. warp/native/initializer_array.h +41 -0
  195. warp/native/intersect.h +1230 -0
  196. warp/native/intersect_adj.h +375 -0
  197. warp/native/intersect_tri.h +339 -0
  198. warp/native/marching.cpp +19 -0
  199. warp/native/marching.cu +514 -0
  200. warp/native/marching.h +19 -0
  201. warp/native/mat.h +2220 -0
  202. warp/native/mathdx.cpp +87 -0
  203. warp/native/matnn.h +343 -0
  204. warp/native/mesh.cpp +266 -0
  205. warp/native/mesh.cu +404 -0
  206. warp/native/mesh.h +1980 -0
  207. warp/native/nanovdb/GridHandle.h +366 -0
  208. warp/native/nanovdb/HostBuffer.h +590 -0
  209. warp/native/nanovdb/NanoVDB.h +6624 -0
  210. warp/native/nanovdb/PNanoVDB.h +3390 -0
  211. warp/native/noise.h +859 -0
  212. warp/native/quat.h +1371 -0
  213. warp/native/rand.h +342 -0
  214. warp/native/range.h +139 -0
  215. warp/native/reduce.cpp +174 -0
  216. warp/native/reduce.cu +364 -0
  217. warp/native/runlength_encode.cpp +79 -0
  218. warp/native/runlength_encode.cu +61 -0
  219. warp/native/scan.cpp +47 -0
  220. warp/native/scan.cu +53 -0
  221. warp/native/scan.h +23 -0
  222. warp/native/solid_angle.h +466 -0
  223. warp/native/sort.cpp +251 -0
  224. warp/native/sort.cu +277 -0
  225. warp/native/sort.h +33 -0
  226. warp/native/sparse.cpp +378 -0
  227. warp/native/sparse.cu +524 -0
  228. warp/native/spatial.h +657 -0
  229. warp/native/svd.h +702 -0
  230. warp/native/temp_buffer.h +46 -0
  231. warp/native/tile.h +2584 -0
  232. warp/native/tile_reduce.h +264 -0
  233. warp/native/vec.h +1426 -0
  234. warp/native/volume.cpp +501 -0
  235. warp/native/volume.cu +67 -0
  236. warp/native/volume.h +969 -0
  237. warp/native/volume_builder.cu +477 -0
  238. warp/native/volume_builder.h +52 -0
  239. warp/native/volume_impl.h +70 -0
  240. warp/native/warp.cpp +1082 -0
  241. warp/native/warp.cu +3636 -0
  242. warp/native/warp.h +381 -0
  243. warp/optim/__init__.py +17 -0
  244. warp/optim/adam.py +163 -0
  245. warp/optim/linear.py +1137 -0
  246. warp/optim/sgd.py +112 -0
  247. warp/paddle.py +407 -0
  248. warp/render/__init__.py +18 -0
  249. warp/render/render_opengl.py +3518 -0
  250. warp/render/render_usd.py +784 -0
  251. warp/render/utils.py +160 -0
  252. warp/sim/__init__.py +65 -0
  253. warp/sim/articulation.py +793 -0
  254. warp/sim/collide.py +2395 -0
  255. warp/sim/graph_coloring.py +300 -0
  256. warp/sim/import_mjcf.py +790 -0
  257. warp/sim/import_snu.py +227 -0
  258. warp/sim/import_urdf.py +579 -0
  259. warp/sim/import_usd.py +894 -0
  260. warp/sim/inertia.py +324 -0
  261. warp/sim/integrator.py +242 -0
  262. warp/sim/integrator_euler.py +1997 -0
  263. warp/sim/integrator_featherstone.py +2101 -0
  264. warp/sim/integrator_vbd.py +2048 -0
  265. warp/sim/integrator_xpbd.py +3292 -0
  266. warp/sim/model.py +4791 -0
  267. warp/sim/particles.py +121 -0
  268. warp/sim/render.py +427 -0
  269. warp/sim/utils.py +428 -0
  270. warp/sparse.py +2057 -0
  271. warp/stubs.py +3333 -0
  272. warp/tape.py +1203 -0
  273. warp/tests/__init__.py +1 -0
  274. warp/tests/__main__.py +4 -0
  275. warp/tests/assets/curlnoise_golden.npy +0 -0
  276. warp/tests/assets/mlp_golden.npy +0 -0
  277. warp/tests/assets/pixel.npy +0 -0
  278. warp/tests/assets/pnoise_golden.npy +0 -0
  279. warp/tests/assets/spiky.usd +0 -0
  280. warp/tests/assets/test_grid.nvdb +0 -0
  281. warp/tests/assets/test_index_grid.nvdb +0 -0
  282. warp/tests/assets/test_int32_grid.nvdb +0 -0
  283. warp/tests/assets/test_vec_grid.nvdb +0 -0
  284. warp/tests/assets/torus.nvdb +0 -0
  285. warp/tests/assets/torus.usda +105 -0
  286. warp/tests/aux_test_class_kernel.py +34 -0
  287. warp/tests/aux_test_compile_consts_dummy.py +18 -0
  288. warp/tests/aux_test_conditional_unequal_types_kernels.py +29 -0
  289. warp/tests/aux_test_dependent.py +29 -0
  290. warp/tests/aux_test_grad_customs.py +29 -0
  291. warp/tests/aux_test_instancing_gc.py +26 -0
  292. warp/tests/aux_test_module_unload.py +23 -0
  293. warp/tests/aux_test_name_clash1.py +40 -0
  294. warp/tests/aux_test_name_clash2.py +40 -0
  295. warp/tests/aux_test_reference.py +9 -0
  296. warp/tests/aux_test_reference_reference.py +8 -0
  297. warp/tests/aux_test_square.py +16 -0
  298. warp/tests/aux_test_unresolved_func.py +22 -0
  299. warp/tests/aux_test_unresolved_symbol.py +22 -0
  300. warp/tests/cuda/__init__.py +0 -0
  301. warp/tests/cuda/test_async.py +676 -0
  302. warp/tests/cuda/test_ipc.py +124 -0
  303. warp/tests/cuda/test_mempool.py +233 -0
  304. warp/tests/cuda/test_multigpu.py +169 -0
  305. warp/tests/cuda/test_peer.py +139 -0
  306. warp/tests/cuda/test_pinned.py +84 -0
  307. warp/tests/cuda/test_streams.py +634 -0
  308. warp/tests/geometry/__init__.py +0 -0
  309. warp/tests/geometry/test_bvh.py +200 -0
  310. warp/tests/geometry/test_hash_grid.py +221 -0
  311. warp/tests/geometry/test_marching_cubes.py +74 -0
  312. warp/tests/geometry/test_mesh.py +316 -0
  313. warp/tests/geometry/test_mesh_query_aabb.py +399 -0
  314. warp/tests/geometry/test_mesh_query_point.py +932 -0
  315. warp/tests/geometry/test_mesh_query_ray.py +311 -0
  316. warp/tests/geometry/test_volume.py +1103 -0
  317. warp/tests/geometry/test_volume_write.py +346 -0
  318. warp/tests/interop/__init__.py +0 -0
  319. warp/tests/interop/test_dlpack.py +729 -0
  320. warp/tests/interop/test_jax.py +371 -0
  321. warp/tests/interop/test_paddle.py +800 -0
  322. warp/tests/interop/test_torch.py +1001 -0
  323. warp/tests/run_coverage_serial.py +39 -0
  324. warp/tests/sim/__init__.py +0 -0
  325. warp/tests/sim/disabled_kinematics.py +244 -0
  326. warp/tests/sim/flaky_test_sim_grad.py +290 -0
  327. warp/tests/sim/test_collision.py +604 -0
  328. warp/tests/sim/test_coloring.py +258 -0
  329. warp/tests/sim/test_model.py +224 -0
  330. warp/tests/sim/test_sim_grad_bounce_linear.py +212 -0
  331. warp/tests/sim/test_sim_kinematics.py +98 -0
  332. warp/tests/sim/test_vbd.py +597 -0
  333. warp/tests/test_adam.py +163 -0
  334. warp/tests/test_arithmetic.py +1096 -0
  335. warp/tests/test_array.py +2972 -0
  336. warp/tests/test_array_reduce.py +156 -0
  337. warp/tests/test_assert.py +250 -0
  338. warp/tests/test_atomic.py +153 -0
  339. warp/tests/test_bool.py +220 -0
  340. warp/tests/test_builtins_resolution.py +1298 -0
  341. warp/tests/test_closest_point_edge_edge.py +327 -0
  342. warp/tests/test_codegen.py +810 -0
  343. warp/tests/test_codegen_instancing.py +1495 -0
  344. warp/tests/test_compile_consts.py +215 -0
  345. warp/tests/test_conditional.py +252 -0
  346. warp/tests/test_context.py +42 -0
  347. warp/tests/test_copy.py +238 -0
  348. warp/tests/test_ctypes.py +638 -0
  349. warp/tests/test_dense.py +73 -0
  350. warp/tests/test_devices.py +97 -0
  351. warp/tests/test_examples.py +482 -0
  352. warp/tests/test_fabricarray.py +996 -0
  353. warp/tests/test_fast_math.py +74 -0
  354. warp/tests/test_fem.py +2003 -0
  355. warp/tests/test_fp16.py +136 -0
  356. warp/tests/test_func.py +454 -0
  357. warp/tests/test_future_annotations.py +98 -0
  358. warp/tests/test_generics.py +656 -0
  359. warp/tests/test_grad.py +893 -0
  360. warp/tests/test_grad_customs.py +339 -0
  361. warp/tests/test_grad_debug.py +341 -0
  362. warp/tests/test_implicit_init.py +411 -0
  363. warp/tests/test_import.py +45 -0
  364. warp/tests/test_indexedarray.py +1140 -0
  365. warp/tests/test_intersect.py +73 -0
  366. warp/tests/test_iter.py +76 -0
  367. warp/tests/test_large.py +177 -0
  368. warp/tests/test_launch.py +411 -0
  369. warp/tests/test_lerp.py +151 -0
  370. warp/tests/test_linear_solvers.py +193 -0
  371. warp/tests/test_lvalue.py +427 -0
  372. warp/tests/test_mat.py +2089 -0
  373. warp/tests/test_mat_lite.py +122 -0
  374. warp/tests/test_mat_scalar_ops.py +2913 -0
  375. warp/tests/test_math.py +178 -0
  376. warp/tests/test_mlp.py +282 -0
  377. warp/tests/test_module_hashing.py +258 -0
  378. warp/tests/test_modules_lite.py +44 -0
  379. warp/tests/test_noise.py +252 -0
  380. warp/tests/test_operators.py +299 -0
  381. warp/tests/test_options.py +129 -0
  382. warp/tests/test_overwrite.py +551 -0
  383. warp/tests/test_print.py +339 -0
  384. warp/tests/test_quat.py +2315 -0
  385. warp/tests/test_rand.py +339 -0
  386. warp/tests/test_reload.py +302 -0
  387. warp/tests/test_rounding.py +185 -0
  388. warp/tests/test_runlength_encode.py +196 -0
  389. warp/tests/test_scalar_ops.py +105 -0
  390. warp/tests/test_smoothstep.py +108 -0
  391. warp/tests/test_snippet.py +318 -0
  392. warp/tests/test_sparse.py +582 -0
  393. warp/tests/test_spatial.py +2229 -0
  394. warp/tests/test_special_values.py +361 -0
  395. warp/tests/test_static.py +592 -0
  396. warp/tests/test_struct.py +734 -0
  397. warp/tests/test_tape.py +204 -0
  398. warp/tests/test_transient_module.py +93 -0
  399. warp/tests/test_triangle_closest_point.py +145 -0
  400. warp/tests/test_types.py +562 -0
  401. warp/tests/test_utils.py +588 -0
  402. warp/tests/test_vec.py +1487 -0
  403. warp/tests/test_vec_lite.py +80 -0
  404. warp/tests/test_vec_scalar_ops.py +2327 -0
  405. warp/tests/test_verify_fp.py +100 -0
  406. warp/tests/tile/__init__.py +0 -0
  407. warp/tests/tile/test_tile.py +780 -0
  408. warp/tests/tile/test_tile_load.py +407 -0
  409. warp/tests/tile/test_tile_mathdx.py +208 -0
  410. warp/tests/tile/test_tile_mlp.py +402 -0
  411. warp/tests/tile/test_tile_reduce.py +447 -0
  412. warp/tests/tile/test_tile_shared_memory.py +247 -0
  413. warp/tests/tile/test_tile_view.py +173 -0
  414. warp/tests/unittest_serial.py +47 -0
  415. warp/tests/unittest_suites.py +427 -0
  416. warp/tests/unittest_utils.py +468 -0
  417. warp/tests/walkthrough_debug.py +93 -0
  418. warp/thirdparty/__init__.py +0 -0
  419. warp/thirdparty/appdirs.py +598 -0
  420. warp/thirdparty/dlpack.py +145 -0
  421. warp/thirdparty/unittest_parallel.py +570 -0
  422. warp/torch.py +391 -0
  423. warp/types.py +5230 -0
  424. warp/utils.py +1137 -0
  425. warp_lang-1.7.0.dist-info/METADATA +516 -0
  426. warp_lang-1.7.0.dist-info/RECORD +429 -0
  427. warp_lang-1.7.0.dist-info/WHEEL +5 -0
  428. warp_lang-1.7.0.dist-info/licenses/LICENSE.md +202 -0
  429. warp_lang-1.7.0.dist-info/top_level.txt +1 -0
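
The largest single addition shown below is the new test module warp/tests/cuda/test_async.py (676 lines), which exercises Warp's stream-ordered allocators, CUDA graph capture, and wp.copy() across device combinations. For orientation only (this sketch is not part of the package contents), the graph-capture pattern that the module's Capturable helper wraps looks roughly like this, assuming warp-lang 1.7.0 is installed (pip install warp-lang==1.7.0) and a CUDA-capable GPU is present:

import warp as wp

wp.init()

@wp.kernel
def inc(a: wp.array(dtype=float)):
    tid = wp.tid()
    a[tid] = a[tid] + 1.0

with wp.ScopedDevice("cuda:0"):
    a = wp.zeros(100, dtype=float)

    # preload the module so nothing is compiled or loaded during stream capture
    wp.load_module(device=wp.get_device())

    wp.capture_begin()
    try:
        # launches recorded here only execute when the graph is launched
        for _ in range(10):
            wp.launch(inc, dim=a.size, inputs=[a])
    finally:
        # capture_end() must always run to terminate the stream capture
        graph = wp.capture_end()

    wp.capture_launch(graph)
    print(a.numpy())  # expect every element to read 10.0

The tests preload the module before wp.capture_begin() for the same reason this sketch does: module compilation and loading cannot happen while a CUDA stream capture is active.
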
@@ -0,0 +1,676 @@
+# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import numpy as np
+
+import warp as wp
+from warp.tests.unittest_utils import *
+from warp.utils import check_p2p
+
+
+class Capturable:
+    def __init__(self, use_graph=True, stream=None):
+        self.use_graph = use_graph
+        self.stream = stream
+
+    def __enter__(self):
+        if self.use_graph:
+            # preload module before graph capture
+            wp.load_module(device=wp.get_device())
+            wp.capture_begin(stream=self.stream, force_module_load=False)
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        if self.use_graph:
+            try:
+                # need to call capture_end() to terminate the CUDA stream capture
+                graph = wp.capture_end(stream=self.stream)
+            except Exception:
+                # capture_end() will raise if there was an error during capture, but we squash it here
+                # if we already had an exception so that the original exception percolates to the caller
+                if exc_type is None:
+                    raise
+            else:
+                # capture can succeed despite some errors during capture (e.g. cudaInvalidValue during copy)
+                # but if we had an exception during capture, don't launch the graph
+                if exc_type is None:
+                    wp.capture_launch(graph, stream=self.stream)
+
+
+@wp.kernel
+def inc(a: wp.array(dtype=float)):
+    tid = wp.tid()
+    a[tid] = a[tid] + 1.0
+
+
+def test_async_empty(test, device, use_mempools, use_graph):
+    with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
+        n = 100
+
+        with Capturable(use_graph):
+            a = wp.empty(n, dtype=float)
+
+        test.assertIsInstance(a, wp.array)
+        test.assertIsNotNone(a.ptr)
+        test.assertEqual(a.size, n)
+        test.assertEqual(a.dtype, wp.float32)
+        test.assertEqual(a.device, device)
+
+
+def test_async_zeros(test, device, use_mempools, use_graph):
+    with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
+        n = 100
+
+        with Capturable(use_graph):
+            a = wp.zeros(n, dtype=float)
+
+        assert_np_equal(a.numpy(), np.zeros(n, dtype=np.float32))
+
+
+def test_async_zero_v1(test, device, use_mempools, use_graph):
+    with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
+        n = 100
+
+        with Capturable(use_graph):
+            a = wp.empty(n, dtype=float)
+            a.zero_()
+
+        assert_np_equal(a.numpy(), np.zeros(n, dtype=np.float32))
+
+
+def test_async_zero_v2(test, device, use_mempools, use_graph):
+    with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
+        n = 100
+
+        a = wp.empty(n, dtype=float)
+
+        with Capturable(use_graph):
+            a.zero_()
+
+        assert_np_equal(a.numpy(), np.zeros(n, dtype=np.float32))
+
+
+def test_async_full(test, device, use_mempools, use_graph):
+    with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
+        n = 100
+        value = 42
+
+        with Capturable(use_graph):
+            a = wp.full(n, value, dtype=float)
+
+        assert_np_equal(a.numpy(), np.full(n, value, dtype=np.float32))
+
+
+def test_async_fill_v1(test, device, use_mempools, use_graph):
+    with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
+        n = 100
+        value = 17
+
+        with Capturable(use_graph):
+            a = wp.empty(n, dtype=float)
+            a.fill_(value)
+
+        assert_np_equal(a.numpy(), np.full(n, value, dtype=np.float32))
+
+
+def test_async_fill_v2(test, device, use_mempools, use_graph):
+    with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
+        n = 100
+        value = 17
+
+        a = wp.empty(n, dtype=float)
+
+        with Capturable(use_graph):
+            a.fill_(value)
+
+        assert_np_equal(a.numpy(), np.full(n, value, dtype=np.float32))
+
+
+def test_async_kernels_v1(test, device, use_mempools, use_graph):
+    with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
+        n = 100
+        num_iters = 10
+
+        with Capturable(use_graph):
+            a = wp.zeros(n, dtype=float)
+            for _i in range(num_iters):
+                wp.launch(inc, dim=a.size, inputs=[a])
+
+        assert_np_equal(a.numpy(), np.full(n, num_iters, dtype=np.float32))
+
+
+def test_async_kernels_v2(test, device, use_mempools, use_graph):
+    with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
+        n = 100
+        num_iters = 10
+
+        a = wp.zeros(n, dtype=float)
+
+        with Capturable(use_graph):
+            for _i in range(num_iters):
+                wp.launch(inc, dim=a.size, inputs=[a])
+
+        assert_np_equal(a.numpy(), np.full(n, num_iters, dtype=np.float32))
+
+
+class TestAsync(unittest.TestCase):
+    pass
+
+
+# get all CUDA devices
+cuda_devices = wp.get_cuda_devices()
+
+# get CUDA devices that support mempools
+cuda_devices_with_mempools = []
+for d in cuda_devices:
+    if d.is_mempool_supported:
+        cuda_devices_with_mempools.append(d)
+
+# get a pair of CUDA devices that support mempool access
+cuda_devices_with_mempool_access = []
+for target_device in cuda_devices_with_mempools:
+    for peer_device in cuda_devices_with_mempools:
+        if peer_device != target_device:
+            if wp.is_mempool_access_supported(target_device, peer_device):
+                cuda_devices_with_mempool_access = [target_device, peer_device]
+                break
+    if cuda_devices_with_mempool_access:
+        break
+
+
+def add_test_variants(
+    func,
+    device_count=1,
+    graph_allocs=False,
+    requires_mempool_access_with_graph=False,
+):
+    # test that works with default allocators
+    if not graph_allocs and device_count <= len(cuda_devices):
+        devices = cuda_devices[:device_count]
+
+        def func1(t, d):
+            return func(t, *devices, False, False)
+
+        def func2(t, d):
+            return func(t, *devices, False, True)
+
+        name1 = f"{func.__name__}_DefaultAlloc_NoGraph"
+        name2 = f"{func.__name__}_DefaultAlloc_WithGraph"
+        if device_count == 1:
+            add_function_test(TestAsync, name1, func1, devices=devices)
+            add_function_test(TestAsync, name2, func2, devices=devices)
+        else:
+            add_function_test(TestAsync, name1, func1)
+            add_function_test(TestAsync, name2, func2)
+
+    # test that works with mempool allocators
+    if device_count <= len(cuda_devices_with_mempools):
+        devices = cuda_devices_with_mempools[:device_count]
+
+        def func3(t, d):
+            return func(t, *devices, True, False)
+
+        name3 = f"{func.__name__}_MempoolAlloc_NoGraph"
+        if device_count == 1:
+            add_function_test(TestAsync, name3, func3, devices=devices)
+        else:
+            add_function_test(TestAsync, name3, func3)
+
+    # test that requires devices with mutual mempool access during graph capture (e.g., p2p memcpy limitation)
+    if requires_mempool_access_with_graph:
+        suitable_devices = cuda_devices_with_mempool_access
+    else:
+        suitable_devices = cuda_devices_with_mempools
+
+    if device_count <= len(suitable_devices):
+        devices = suitable_devices[:device_count]
+
+        def func4(t, d):
+            return func(t, *devices, True, True)
+
+        name4 = f"{func.__name__}_MempoolAlloc_WithGraph"
+        if device_count == 1:
+            add_function_test(TestAsync, name4, func4, devices=devices)
+        else:
+            add_function_test(TestAsync, name4, func4)
+
+
+add_test_variants(test_async_empty, graph_allocs=True)
+add_test_variants(test_async_zeros, graph_allocs=True)
+add_test_variants(test_async_zero_v1, graph_allocs=True)
+add_test_variants(test_async_zero_v2, graph_allocs=False)
+add_test_variants(test_async_full, graph_allocs=True)
+add_test_variants(test_async_fill_v1, graph_allocs=True)
+add_test_variants(test_async_fill_v2, graph_allocs=False)
+add_test_variants(test_async_kernels_v1, graph_allocs=True)
+add_test_variants(test_async_kernels_v2, graph_allocs=False)
+
+
+# =================================================================================
+# wp.copy() tests
+# =================================================================================
+
+
+def as_contiguous_array(data, device=None, grad_data=None):
+    a = wp.array(data=data, device=device, copy=True)
+    if grad_data is not None:
+        a.grad = as_contiguous_array(grad_data, device=device)
+    return a
+
+
+def as_strided_array(data, device=None, grad_data=None):
+    a = wp.array(data=data, device=device)
+    # make a copy with non-contiguous strides
+    strides = (*a.strides[:-1], 2 * a.strides[-1])
+    strided_a = wp.zeros(shape=a.shape, strides=strides, dtype=a.dtype, device=device)
+    wp.copy(strided_a, a)
+    if grad_data is not None:
+        strided_a.grad = as_strided_array(grad_data, device=device)
+    return strided_a
+
+
+def as_indexed_array(data, device=None, **kwargs):
+    a = wp.array(data=data, device=device)
+    # allocate double the elements so we can index half of them
+    shape = (*a.shape[:-1], 2 * a.shape[-1])
+    big_a = wp.zeros(shape=shape, dtype=a.dtype, device=device)
+    indices = wp.array(data=np.arange(0, shape[-1], 2, dtype=np.int32), device=device)
+    indexed_a = big_a[indices]
+    wp.copy(indexed_a, a)
+    return indexed_a
+
+
+def as_fabric_array(data, device=None, **kwargs):
+    from warp.tests.test_fabricarray import _create_fabric_array_interface
+
+    a = wp.array(data=data, device=device)
+    iface = _create_fabric_array_interface(a, "foo")
+    fa = wp.fabricarray(data=iface, attrib="foo")
+    fa._iface = iface  # save data reference
+    return fa
+
+
+def as_indexed_fabric_array(data, device=None, **kwargs):
+    from warp.tests.test_fabricarray import _create_fabric_array_interface
+
+    a = wp.array(data=data, device=device)
+    shape = (*a.shape[:-1], 2 * a.shape[-1])
+    # allocate double the elements so we can index half of them
+    big_a = wp.zeros(shape=shape, dtype=a.dtype, device=device)
+    indices = wp.array(data=np.arange(0, shape[-1], 2, dtype=np.int32), device=device)
+    iface = _create_fabric_array_interface(big_a, "foo", copy=True)
+    fa = wp.fabricarray(data=iface, attrib="foo")
+    fa._iface = iface  # save data reference
+    indexed_fa = fa[indices]
+    wp.copy(indexed_fa, a)
+    return indexed_fa
+
+
+class CopyParams:
+    def __init__(
+        self,
+        with_grad=False,  # whether to use arrays with gradients (contiguous and strided only)
+        src_use_mempool=False,  # whether to enable memory pool on source device
+        dst_use_mempool=False,  # whether to enable memory pool on destination device
+        access_dst_src=False,  # whether destination device has access to the source mempool
+        access_src_dst=False,  # whether source device has access to the destination mempool
+        stream_device=None,  # the device for the stream (None for default behaviour)
+        use_graph=False,  # whether to use a graph
+        value_offset=0,  # unique offset for generated data values per test
+    ):
+        self.with_grad = with_grad
+        self.src_use_mempool = src_use_mempool
+        self.dst_use_mempool = dst_use_mempool
+        self.access_dst_src = access_dst_src
+        self.access_src_dst = access_src_dst
+        self.stream_device = stream_device
+        self.use_graph = use_graph
+        self.value_offset = value_offset
+
+
+def copy_template(test, src_ctor, dst_ctor, src_device, dst_device, n, params: CopyParams):
+    # activate the given memory pool configuration
+    with wp.ScopedMempool(src_device, params.src_use_mempool), wp.ScopedMempool(
+        dst_device, params.dst_use_mempool
+    ), wp.ScopedMempoolAccess(dst_device, src_device, params.access_dst_src), wp.ScopedMempoolAccess(
+        src_device, dst_device, params.access_src_dst
+    ):
+        # make sure the data are different between tests by adding a unique offset
+        # this avoids aliasing issues with older memory
+        src_data = np.arange(params.value_offset, params.value_offset + n, dtype=np.float32)
+        dst_data = np.zeros(n, dtype=np.float32)
+
+        if params.with_grad:
+            src_grad_data = -np.arange(params.value_offset, params.value_offset + n, dtype=np.float32)
+            dst_grad_data = np.zeros(n, dtype=np.float32)
+        else:
+            src_grad_data = None
+            dst_grad_data = None
+
+        # create Warp arrays for the copy
+        src = src_ctor(src_data, device=src_device, grad_data=src_grad_data)
+        dst = dst_ctor(dst_data, device=dst_device, grad_data=dst_grad_data)
+
+        # determine the stream argument to pass to wp.copy()
+        if params.stream_device is not None:
+            stream_arg = wp.Stream(params.stream_device)
+        else:
+            stream_arg = None
+
+        # determine the actual stream used for the copy
+        if stream_arg is not None:
+            stream = stream_arg
+        else:
+            if dst_device.is_cuda:
+                stream = dst_device.stream
+            elif src_device.is_cuda:
+                stream = src_device.stream
+            else:
+                stream = None
+
+        # check if an exception is expected given the arguments and system configuration
+        expected_error_type = None
+        expected_error_regex = None
+
+        # restrictions on copying between different devices during graph capture
+        if params.use_graph and src_device != dst_device:
+            # errors with allocating staging buffer on source device
+            if not src.is_contiguous:
+                if src_device.is_cuda and not src_device.is_mempool_enabled:
+                    # can't allocate staging buffer using default CUDA allocator during capture
+                    expected_error_type, expected_error_regex = RuntimeError, r"^Failed to allocate"
+                elif src_device.is_cpu:
+                    # can't allocate CPU staging buffer during capture
+                    expected_error_type, expected_error_regex = RuntimeError, r"^Failed to allocate"
+
+            # errors with allocating staging buffer on destination device
+            if expected_error_type is None:
+                if not dst.is_contiguous:
+                    if dst_device.is_cuda and not dst_device.is_mempool_enabled:
+                        # can't allocate staging buffer using default CUDA allocator during capture
+                        expected_error_type, expected_error_regex = RuntimeError, r"^Failed to allocate"
+                    elif dst_device.is_cpu and src_device.is_cuda:
+                        # can't allocate CPU staging buffer during capture
+                        expected_error_type, expected_error_regex = RuntimeError, r"^Failed to allocate"
+
+            # p2p copies and mempool access
+            if expected_error_type is None and src_device.is_cuda and dst_device.is_cuda:
+                # If the source is a contiguous mempool allocation or a non-contiguous array
+                # AND the destination is a contiguous mempool allocation or a non-contiguous array,
+                # then memory pool access needs to be enabled EITHER from src_device to dst_device
+                # OR from dst_device to src_device.
+                if (
+                    ((src.is_contiguous and params.src_use_mempool) or not src.is_contiguous)
+                    and ((dst.is_contiguous and params.dst_use_mempool) or not dst.is_contiguous)
+                    and not wp.is_mempool_access_enabled(src_device, dst_device)
+                    and not wp.is_mempool_access_enabled(dst_device, src_device)
+                ):
+                    expected_error_type, expected_error_regex = RuntimeError, r"^Warp copy error"
+
+        # synchronize before test
+        wp.synchronize()
+
+        if expected_error_type is not None:
+            # disable error output from Warp if we expect an exception
+            try:
+                saved_error_output_enabled = wp.context.runtime.core.is_error_output_enabled()
+                wp.context.runtime.core.set_error_output_enabled(False)
+                with test.assertRaisesRegex(expected_error_type, expected_error_regex):
+                    with Capturable(use_graph=params.use_graph, stream=stream):
+                        wp.copy(dst, src, stream=stream_arg)
+            finally:
+                wp.context.runtime.core.set_error_output_enabled(saved_error_output_enabled)
+                wp.synchronize()
+
+            # print(f"SUCCESSFUL ERROR PREDICTION: {expected_error_regex}")
+
+        else:
+            with Capturable(use_graph=params.use_graph, stream=stream):
+                wp.copy(dst, src, stream=stream_arg)
+
+            # synchronize the stream where the copy was running (None for h2h copies)
+            if stream is not None:
+                wp.synchronize_stream(stream)
+
+            assert_np_equal(dst.numpy(), src.numpy())
+
+            if params.with_grad:
+                assert_np_equal(dst.grad.numpy(), src.grad.numpy())
+
+            # print("SUCCESSFUL COPY")
+
+
+array_constructors = {
+    "contiguous": as_contiguous_array,
+    "strided": as_strided_array,
+    "indexed": as_indexed_array,
+    "fabric": as_fabric_array,
+    "indexedfabric": as_indexed_fabric_array,
+}
+
+array_type_codes = {
+    "contiguous": "c",
+    "strided": "s",
+    "indexed": "i",
+    "fabric": "f",
+    "indexedfabric": "fi",
+}
+
+device_pairs = {}
+cpu = None
+cuda0 = None
+cuda1 = None
+cuda2 = None
+if wp.is_cpu_available():
+    cpu = wp.get_device("cpu")
+    device_pairs["h2h"] = (cpu, cpu)
+if wp.is_cuda_available():
+    cuda0 = wp.get_device("cuda:0")
+    device_pairs["d2d"] = (cuda0, cuda0)
+    if wp.is_cpu_available():
+        device_pairs["h2d"] = (cpu, cuda0)
+        device_pairs["d2h"] = (cuda0, cpu)
+if wp.get_cuda_device_count() > 1:
+    cuda1 = wp.get_device("cuda:1")
+    device_pairs["p2p"] = (cuda0, cuda1)
+if wp.get_cuda_device_count() > 2:
+    cuda2 = wp.get_device("cuda:2")
+
+num_copy_elems = 1000000
+num_copy_tests = 0
+
+
+def add_copy_test(test_name, src_ctor, dst_ctor, src_device, dst_device, n, params):
+    def test_func(
+        test,
+        device,
+        src_ctor=src_ctor,
+        dst_ctor=dst_ctor,
+        src_device=src_device,
+        dst_device=dst_device,
+        n=n,
+        params=params,
+    ):
+        return copy_template(test, src_ctor, dst_ctor, src_device, dst_device, n, params)
+
+    add_function_test(TestAsync, test_name, test_func, check_output=False)
+
+
+# Procedurally add tests with argument combinations supported by the system.
+for src_type, src_ctor in array_constructors.items():
+    for dst_type, dst_ctor in array_constructors.items():
+        copy_type = f"{array_type_codes[src_type]}2{array_type_codes[dst_type]}"
+
+        for transfer_type, device_pair in device_pairs.items():
+            # skip p2p tests if not supported (e.g., IOMMU is enabled on Linux)
+            if transfer_type == "p2p" and not check_p2p():
+                continue
+
+            src_device = device_pair[0]
+            dst_device = device_pair[1]
+
+            # basic copy arguments
+            copy_args = (src_ctor, dst_ctor, src_device, dst_device, num_copy_elems)
+
+            if src_device.is_cuda and src_device.is_mempool_supported:
+                src_mempool_flags = [False, True]
+            else:
+                src_mempool_flags = [False]
+
+            if dst_device.is_cuda and dst_device.is_mempool_supported:
+                dst_mempool_flags = [False, True]
+            else:
+                dst_mempool_flags = [False]
+
+            # stream options
+            if src_device.is_cuda:
+                if dst_device.is_cuda:
+                    if src_device == dst_device:
+                        # d2d
+                        assert src_device == cuda0 and dst_device == cuda0
+                        if cuda1 is not None:
+                            stream_devices = [None, cuda0, cuda1]
+                        else:
+                            stream_devices = [None, cuda0]
+                    else:
+                        # p2p
+                        assert src_device == cuda0 and dst_device == cuda1
+                        if cuda2 is not None:
+                            stream_devices = [None, cuda0, cuda1, cuda2]
+                        else:
+                            stream_devices = [None, cuda0, cuda1]
+                else:
+                    # d2h
+                    assert src_device == cuda0
+                    if cuda1 is not None:
+                        stream_devices = [None, cuda0, cuda1]
+                    else:
+                        stream_devices = [None, cuda0]
+            else:
+                if dst_device.is_cuda:
+                    # h2d
+                    assert dst_device == cuda0
+                    if cuda1 is not None:
+                        stream_devices = [None, cuda0, cuda1]
+                    else:
+                        stream_devices = [None, cuda0]
+                else:
+                    # h2h
+                    stream_devices = [None]
+
+            # gradient options (only supported with contiguous and strided arrays)
+            if src_type in ("contiguous", "strided") and dst_type in ("contiguous", "strided"):
+                grad_flags = [False, True]
+            else:
+                grad_flags = [False]
+
+            # graph capture options (only supported with CUDA devices)
+            if src_device.is_cuda or dst_device.is_cuda:
+                graph_flags = [False, True]
+            else:
+                graph_flags = [False]
+
+            # access from destination device to source mempool
+            if wp.is_mempool_access_supported(dst_device, src_device):
+                access_dst_src_flags = [False, True]
+            else:
+                access_dst_src_flags = [False]
+
+            # access from source device to destination mempool
+            if wp.is_mempool_access_supported(src_device, dst_device):
+                access_src_dst_flags = [False, True]
+            else:
+                access_src_dst_flags = [False]
+
+            for src_use_mempool in src_mempool_flags:
+                for dst_use_mempool in dst_mempool_flags:
+                    for stream_device in stream_devices:
+                        for access_dst_src in access_dst_src_flags:
+                            for access_src_dst in access_src_dst_flags:
+                                for with_grad in grad_flags:
+                                    for use_graph in graph_flags:
+                                        test_name = f"test_copy_{copy_type}_{transfer_type}"
+
+                                        if src_use_mempool:
+                                            test_name += "_SrcPoolOn"
+                                        else:
+                                            test_name += "_SrcPoolOff"
+
+                                        if dst_use_mempool:
+                                            test_name += "_DstPoolOn"
+                                        else:
+                                            test_name += "_DstPoolOff"
+
+                                        if stream_device is None:
+                                            test_name += "_NoStream"
+                                        elif stream_device == cuda0:
+                                            test_name += "_Stream0"
+                                        elif stream_device == cuda1:
+                                            test_name += "_Stream1"
+                                        elif stream_device == cuda2:
+                                            test_name += "_Stream2"
+                                        else:
+                                            raise AssertionError
+
+                                        if with_grad:
+                                            test_name += "_Grad"
+                                        else:
+                                            test_name += "_NoGrad"
+
+                                        if use_graph:
+                                            test_name += "_Graph"
+                                        else:
+                                            test_name += "_NoGraph"
+
+                                        if access_dst_src and access_src_dst:
+                                            test_name += "_AccessBoth"
+                                        elif access_dst_src and not access_src_dst:
+                                            test_name += "_AccessDstSrc"
+                                        elif not access_dst_src and access_src_dst:
+                                            test_name += "_AccessSrcDst"
+                                        else:
+                                            test_name += "_AccessNone"
+
+                                        copy_params = CopyParams(
+                                            src_use_mempool=src_use_mempool,
+                                            dst_use_mempool=dst_use_mempool,
+                                            access_dst_src=access_dst_src,
+                                            access_src_dst=access_src_dst,
+                                            stream_device=stream_device,
+                                            with_grad=with_grad,
+                                            use_graph=use_graph,
+                                            value_offset=num_copy_tests,
+                                        )
+
+                                        add_copy_test(test_name, *copy_args, copy_params)
+
+                                        num_copy_tests += 1
+
+# Specify individual test(s) for debugging purposes
+# add_copy_test("test_a", as_contiguous_array, as_strided_array, cuda0, cuda1, num_copy_elems,
+#               CopyParams(
+#                   src_use_mempool=True,
+#                   dst_use_mempool=True,
+#                   access_dst_src=False,
+#                   access_src_dst=False,
+#                   stream_device=cuda0,
+#                   with_grad=False,
+#                   use_graph=True,
+#                   value_offset=0))
+
+if __name__ == "__main__":
+    wp.clear_kernel_cache()
+    unittest.main(verbosity=2)
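
The wp.copy() matrix exercised above maps onto user code roughly as follows. This is an illustrative sketch, not package content, and it assumes two CUDA devices with working peer-to-peer transfers (the condition the tests gate on via check_p2p()); device names and array sizes are placeholders:

import numpy as np
import warp as wp

wp.init()

cuda0 = wp.get_device("cuda:0")
cuda1 = wp.get_device("cuda:1")

src = wp.array(np.arange(1_000_000, dtype=np.float32), device=cuda0)
dst = wp.empty(src.size, dtype=wp.float32, device=cuda1)

# issue the device-to-device copy on an explicit stream, then wait on that stream
stream = wp.Stream(cuda1)
wp.copy(dst, src, stream=stream)
wp.synchronize_stream(stream)

assert np.array_equal(dst.numpy(), src.numpy())

The test module itself can be run directly with python -m warp.tests.cuda.test_async; as the final lines of the diff show, it clears the kernel cache and then hands off to unittest.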