warp-lang 1.0.2-py3-none-win_amd64.whl → 1.2.0-py3-none-win_amd64.whl

This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.


This version of warp-lang might be problematic.

Files changed (356)
  1. warp/__init__.py +108 -97
  2. warp/__init__.pyi +1 -1
  3. warp/bin/warp-clang.dll +0 -0
  4. warp/bin/warp.dll +0 -0
  5. warp/build.py +88 -113
  6. warp/build_dll.py +383 -375
  7. warp/builtins.py +3693 -3354
  8. warp/codegen.py +2925 -2792
  9. warp/config.py +40 -36
  10. warp/constants.py +49 -45
  11. warp/context.py +5409 -5102
  12. warp/dlpack.py +442 -442
  13. warp/examples/__init__.py +16 -16
  14. warp/examples/assets/bear.usd +0 -0
  15. warp/examples/assets/bunny.usd +0 -0
  16. warp/examples/assets/cartpole.urdf +110 -110
  17. warp/examples/assets/crazyflie.usd +0 -0
  18. warp/examples/assets/cube.usd +0 -0
  19. warp/examples/assets/nv_ant.xml +92 -92
  20. warp/examples/assets/nv_humanoid.xml +183 -183
  21. warp/examples/assets/quadruped.urdf +267 -267
  22. warp/examples/assets/rocks.nvdb +0 -0
  23. warp/examples/assets/rocks.usd +0 -0
  24. warp/examples/assets/sphere.usd +0 -0
  25. warp/examples/benchmarks/benchmark_api.py +381 -383
  26. warp/examples/benchmarks/benchmark_cloth.py +278 -277
  27. warp/examples/benchmarks/benchmark_cloth_cupy.py +88 -88
  28. warp/examples/benchmarks/benchmark_cloth_jax.py +97 -100
  29. warp/examples/benchmarks/benchmark_cloth_numba.py +146 -142
  30. warp/examples/benchmarks/benchmark_cloth_numpy.py +77 -77
  31. warp/examples/benchmarks/benchmark_cloth_pytorch.py +86 -86
  32. warp/examples/benchmarks/benchmark_cloth_taichi.py +112 -112
  33. warp/examples/benchmarks/benchmark_cloth_warp.py +145 -146
  34. warp/examples/benchmarks/benchmark_launches.py +293 -295
  35. warp/examples/browse.py +29 -29
  36. warp/examples/core/example_dem.py +232 -219
  37. warp/examples/core/example_fluid.py +291 -267
  38. warp/examples/core/example_graph_capture.py +142 -126
  39. warp/examples/core/example_marching_cubes.py +186 -174
  40. warp/examples/core/example_mesh.py +172 -155
  41. warp/examples/core/example_mesh_intersect.py +203 -193
  42. warp/examples/core/example_nvdb.py +174 -170
  43. warp/examples/core/example_raycast.py +103 -90
  44. warp/examples/core/example_raymarch.py +197 -178
  45. warp/examples/core/example_render_opengl.py +183 -141
  46. warp/examples/core/example_sph.py +403 -387
  47. warp/examples/core/example_torch.py +219 -181
  48. warp/examples/core/example_wave.py +261 -248
  49. warp/examples/fem/bsr_utils.py +378 -380
  50. warp/examples/fem/example_apic_fluid.py +432 -389
  51. warp/examples/fem/example_burgers.py +262 -0
  52. warp/examples/fem/example_convection_diffusion.py +180 -168
  53. warp/examples/fem/example_convection_diffusion_dg.py +217 -209
  54. warp/examples/fem/example_deformed_geometry.py +175 -159
  55. warp/examples/fem/example_diffusion.py +199 -173
  56. warp/examples/fem/example_diffusion_3d.py +178 -152
  57. warp/examples/fem/example_diffusion_mgpu.py +219 -214
  58. warp/examples/fem/example_mixed_elasticity.py +242 -222
  59. warp/examples/fem/example_navier_stokes.py +257 -243
  60. warp/examples/fem/example_stokes.py +218 -192
  61. warp/examples/fem/example_stokes_transfer.py +263 -249
  62. warp/examples/fem/mesh_utils.py +133 -109
  63. warp/examples/fem/plot_utils.py +292 -287
  64. warp/examples/optim/example_bounce.py +258 -246
  65. warp/examples/optim/example_cloth_throw.py +220 -209
  66. warp/examples/optim/example_diffray.py +564 -536
  67. warp/examples/optim/example_drone.py +862 -835
  68. warp/examples/optim/example_inverse_kinematics.py +174 -168
  69. warp/examples/optim/example_inverse_kinematics_torch.py +183 -169
  70. warp/examples/optim/example_spring_cage.py +237 -231
  71. warp/examples/optim/example_trajectory.py +221 -199
  72. warp/examples/optim/example_walker.py +304 -293
  73. warp/examples/sim/example_cartpole.py +137 -129
  74. warp/examples/sim/example_cloth.py +194 -186
  75. warp/examples/sim/example_granular.py +122 -111
  76. warp/examples/sim/example_granular_collision_sdf.py +195 -186
  77. warp/examples/sim/example_jacobian_ik.py +234 -214
  78. warp/examples/sim/example_particle_chain.py +116 -105
  79. warp/examples/sim/example_quadruped.py +191 -180
  80. warp/examples/sim/example_rigid_chain.py +195 -187
  81. warp/examples/sim/example_rigid_contact.py +187 -177
  82. warp/examples/sim/example_rigid_force.py +125 -125
  83. warp/examples/sim/example_rigid_gyroscopic.py +107 -95
  84. warp/examples/sim/example_rigid_soft_contact.py +132 -122
  85. warp/examples/sim/example_soft_body.py +188 -177
  86. warp/fabric.py +337 -335
  87. warp/fem/__init__.py +61 -27
  88. warp/fem/cache.py +403 -388
  89. warp/fem/dirichlet.py +178 -179
  90. warp/fem/domain.py +262 -263
  91. warp/fem/field/__init__.py +100 -101
  92. warp/fem/field/field.py +148 -149
  93. warp/fem/field/nodal_field.py +298 -299
  94. warp/fem/field/restriction.py +22 -21
  95. warp/fem/field/test.py +180 -181
  96. warp/fem/field/trial.py +183 -183
  97. warp/fem/geometry/__init__.py +16 -19
  98. warp/fem/geometry/closest_point.py +69 -70
  99. warp/fem/geometry/deformed_geometry.py +270 -271
  100. warp/fem/geometry/element.py +748 -744
  101. warp/fem/geometry/geometry.py +184 -186
  102. warp/fem/geometry/grid_2d.py +380 -373
  103. warp/fem/geometry/grid_3d.py +437 -435
  104. warp/fem/geometry/hexmesh.py +953 -953
  105. warp/fem/geometry/nanogrid.py +455 -0
  106. warp/fem/geometry/partition.py +374 -376
  107. warp/fem/geometry/quadmesh_2d.py +532 -532
  108. warp/fem/geometry/tetmesh.py +840 -840
  109. warp/fem/geometry/trimesh_2d.py +577 -577
  110. warp/fem/integrate.py +1684 -1615
  111. warp/fem/operator.py +190 -191
  112. warp/fem/polynomial.py +214 -213
  113. warp/fem/quadrature/__init__.py +2 -2
  114. warp/fem/quadrature/pic_quadrature.py +243 -245
  115. warp/fem/quadrature/quadrature.py +295 -294
  116. warp/fem/space/__init__.py +179 -292
  117. warp/fem/space/basis_space.py +522 -489
  118. warp/fem/space/collocated_function_space.py +100 -105
  119. warp/fem/space/dof_mapper.py +236 -236
  120. warp/fem/space/function_space.py +148 -145
  121. warp/fem/space/grid_2d_function_space.py +148 -267
  122. warp/fem/space/grid_3d_function_space.py +167 -306
  123. warp/fem/space/hexmesh_function_space.py +253 -352
  124. warp/fem/space/nanogrid_function_space.py +202 -0
  125. warp/fem/space/partition.py +350 -350
  126. warp/fem/space/quadmesh_2d_function_space.py +261 -369
  127. warp/fem/space/restriction.py +161 -160
  128. warp/fem/space/shape/__init__.py +90 -15
  129. warp/fem/space/shape/cube_shape_function.py +728 -738
  130. warp/fem/space/shape/shape_function.py +102 -103
  131. warp/fem/space/shape/square_shape_function.py +611 -611
  132. warp/fem/space/shape/tet_shape_function.py +565 -567
  133. warp/fem/space/shape/triangle_shape_function.py +429 -429
  134. warp/fem/space/tetmesh_function_space.py +224 -292
  135. warp/fem/space/topology.py +297 -295
  136. warp/fem/space/trimesh_2d_function_space.py +153 -221
  137. warp/fem/types.py +77 -77
  138. warp/fem/utils.py +495 -495
  139. warp/jax.py +166 -141
  140. warp/jax_experimental.py +341 -339
  141. warp/native/array.h +1081 -1025
  142. warp/native/builtin.h +1603 -1560
  143. warp/native/bvh.cpp +402 -398
  144. warp/native/bvh.cu +533 -525
  145. warp/native/bvh.h +430 -429
  146. warp/native/clang/clang.cpp +496 -464
  147. warp/native/crt.cpp +42 -32
  148. warp/native/crt.h +352 -335
  149. warp/native/cuda_crt.h +1049 -1049
  150. warp/native/cuda_util.cpp +549 -540
  151. warp/native/cuda_util.h +288 -203
  152. warp/native/cutlass_gemm.cpp +34 -34
  153. warp/native/cutlass_gemm.cu +372 -372
  154. warp/native/error.cpp +66 -66
  155. warp/native/error.h +27 -27
  156. warp/native/exports.h +187 -0
  157. warp/native/fabric.h +228 -228
  158. warp/native/hashgrid.cpp +301 -278
  159. warp/native/hashgrid.cu +78 -77
  160. warp/native/hashgrid.h +227 -227
  161. warp/native/initializer_array.h +32 -32
  162. warp/native/intersect.h +1204 -1204
  163. warp/native/intersect_adj.h +365 -365
  164. warp/native/intersect_tri.h +322 -322
  165. warp/native/marching.cpp +2 -2
  166. warp/native/marching.cu +497 -497
  167. warp/native/marching.h +2 -2
  168. warp/native/mat.h +1545 -1498
  169. warp/native/matnn.h +333 -333
  170. warp/native/mesh.cpp +203 -203
  171. warp/native/mesh.cu +292 -293
  172. warp/native/mesh.h +1887 -1887
  173. warp/native/nanovdb/GridHandle.h +366 -0
  174. warp/native/nanovdb/HostBuffer.h +590 -0
  175. warp/native/nanovdb/NanoVDB.h +6624 -4782
  176. warp/native/nanovdb/PNanoVDB.h +3390 -2553
  177. warp/native/noise.h +850 -850
  178. warp/native/quat.h +1112 -1085
  179. warp/native/rand.h +303 -299
  180. warp/native/range.h +108 -108
  181. warp/native/reduce.cpp +156 -156
  182. warp/native/reduce.cu +348 -348
  183. warp/native/runlength_encode.cpp +61 -61
  184. warp/native/runlength_encode.cu +46 -46
  185. warp/native/scan.cpp +30 -30
  186. warp/native/scan.cu +36 -36
  187. warp/native/scan.h +7 -7
  188. warp/native/solid_angle.h +442 -442
  189. warp/native/sort.cpp +94 -94
  190. warp/native/sort.cu +97 -97
  191. warp/native/sort.h +14 -14
  192. warp/native/sparse.cpp +337 -337
  193. warp/native/sparse.cu +544 -544
  194. warp/native/spatial.h +630 -630
  195. warp/native/svd.h +562 -562
  196. warp/native/temp_buffer.h +30 -30
  197. warp/native/vec.h +1177 -1133
  198. warp/native/volume.cpp +529 -297
  199. warp/native/volume.cu +58 -32
  200. warp/native/volume.h +960 -538
  201. warp/native/volume_builder.cu +446 -425
  202. warp/native/volume_builder.h +34 -19
  203. warp/native/volume_impl.h +61 -0
  204. warp/native/warp.cpp +1057 -1052
  205. warp/native/warp.cu +2949 -2828
  206. warp/native/warp.h +321 -305
  207. warp/optim/__init__.py +9 -9
  208. warp/optim/adam.py +120 -120
  209. warp/optim/linear.py +1104 -939
  210. warp/optim/sgd.py +104 -92
  211. warp/render/__init__.py +10 -10
  212. warp/render/render_opengl.py +3356 -3204
  213. warp/render/render_usd.py +768 -749
  214. warp/render/utils.py +152 -150
  215. warp/sim/__init__.py +52 -59
  216. warp/sim/articulation.py +685 -685
  217. warp/sim/collide.py +1594 -1590
  218. warp/sim/import_mjcf.py +489 -481
  219. warp/sim/import_snu.py +220 -221
  220. warp/sim/import_urdf.py +536 -516
  221. warp/sim/import_usd.py +887 -881
  222. warp/sim/inertia.py +316 -317
  223. warp/sim/integrator.py +234 -233
  224. warp/sim/integrator_euler.py +1956 -1956
  225. warp/sim/integrator_featherstone.py +1917 -1991
  226. warp/sim/integrator_xpbd.py +3288 -3312
  227. warp/sim/model.py +4473 -4314
  228. warp/sim/particles.py +113 -112
  229. warp/sim/render.py +417 -403
  230. warp/sim/utils.py +413 -410
  231. warp/sparse.py +1289 -1227
  232. warp/stubs.py +2192 -2469
  233. warp/tape.py +1162 -225
  234. warp/tests/__init__.py +1 -1
  235. warp/tests/__main__.py +4 -4
  236. warp/tests/assets/test_index_grid.nvdb +0 -0
  237. warp/tests/assets/torus.usda +105 -105
  238. warp/tests/aux_test_class_kernel.py +26 -26
  239. warp/tests/aux_test_compile_consts_dummy.py +10 -10
  240. warp/tests/aux_test_conditional_unequal_types_kernels.py +21 -21
  241. warp/tests/aux_test_dependent.py +20 -22
  242. warp/tests/aux_test_grad_customs.py +21 -23
  243. warp/tests/aux_test_reference.py +9 -11
  244. warp/tests/aux_test_reference_reference.py +8 -10
  245. warp/tests/aux_test_square.py +15 -17
  246. warp/tests/aux_test_unresolved_func.py +14 -14
  247. warp/tests/aux_test_unresolved_symbol.py +14 -14
  248. warp/tests/disabled_kinematics.py +237 -239
  249. warp/tests/run_coverage_serial.py +31 -31
  250. warp/tests/test_adam.py +155 -157
  251. warp/tests/test_arithmetic.py +1088 -1124
  252. warp/tests/test_array.py +2415 -2326
  253. warp/tests/test_array_reduce.py +148 -150
  254. warp/tests/test_async.py +666 -656
  255. warp/tests/test_atomic.py +139 -141
  256. warp/tests/test_bool.py +212 -149
  257. warp/tests/test_builtins_resolution.py +1290 -1292
  258. warp/tests/test_bvh.py +162 -171
  259. warp/tests/test_closest_point_edge_edge.py +227 -228
  260. warp/tests/test_codegen.py +562 -553
  261. warp/tests/test_compile_consts.py +217 -101
  262. warp/tests/test_conditional.py +244 -246
  263. warp/tests/test_copy.py +230 -215
  264. warp/tests/test_ctypes.py +630 -632
  265. warp/tests/test_dense.py +65 -67
  266. warp/tests/test_devices.py +89 -98
  267. warp/tests/test_dlpack.py +528 -529
  268. warp/tests/test_examples.py +403 -378
  269. warp/tests/test_fabricarray.py +952 -955
  270. warp/tests/test_fast_math.py +60 -54
  271. warp/tests/test_fem.py +1298 -1278
  272. warp/tests/test_fp16.py +128 -130
  273. warp/tests/test_func.py +336 -337
  274. warp/tests/test_generics.py +596 -571
  275. warp/tests/test_grad.py +885 -640
  276. warp/tests/test_grad_customs.py +331 -336
  277. warp/tests/test_hash_grid.py +208 -164
  278. warp/tests/test_import.py +37 -39
  279. warp/tests/test_indexedarray.py +1132 -1134
  280. warp/tests/test_intersect.py +65 -67
  281. warp/tests/test_jax.py +305 -307
  282. warp/tests/test_large.py +169 -164
  283. warp/tests/test_launch.py +352 -354
  284. warp/tests/test_lerp.py +217 -261
  285. warp/tests/test_linear_solvers.py +189 -171
  286. warp/tests/test_lvalue.py +419 -493
  287. warp/tests/test_marching_cubes.py +63 -65
  288. warp/tests/test_mat.py +1799 -1827
  289. warp/tests/test_mat_lite.py +113 -115
  290. warp/tests/test_mat_scalar_ops.py +2905 -2889
  291. warp/tests/test_math.py +124 -193
  292. warp/tests/test_matmul.py +498 -499
  293. warp/tests/test_matmul_lite.py +408 -410
  294. warp/tests/test_mempool.py +186 -190
  295. warp/tests/test_mesh.py +281 -324
  296. warp/tests/test_mesh_query_aabb.py +226 -241
  297. warp/tests/test_mesh_query_point.py +690 -702
  298. warp/tests/test_mesh_query_ray.py +290 -303
  299. warp/tests/test_mlp.py +274 -276
  300. warp/tests/test_model.py +108 -110
  301. warp/tests/test_module_hashing.py +111 -0
  302. warp/tests/test_modules_lite.py +36 -39
  303. warp/tests/test_multigpu.py +161 -163
  304. warp/tests/test_noise.py +244 -248
  305. warp/tests/test_operators.py +248 -250
  306. warp/tests/test_options.py +121 -125
  307. warp/tests/test_peer.py +131 -137
  308. warp/tests/test_pinned.py +76 -78
  309. warp/tests/test_print.py +52 -54
  310. warp/tests/test_quat.py +2084 -2086
  311. warp/tests/test_rand.py +324 -288
  312. warp/tests/test_reload.py +207 -217
  313. warp/tests/test_rounding.py +177 -179
  314. warp/tests/test_runlength_encode.py +188 -190
  315. warp/tests/test_sim_grad.py +241 -0
  316. warp/tests/test_sim_kinematics.py +89 -97
  317. warp/tests/test_smoothstep.py +166 -168
  318. warp/tests/test_snippet.py +303 -266
  319. warp/tests/test_sparse.py +466 -460
  320. warp/tests/test_spatial.py +2146 -2148
  321. warp/tests/test_special_values.py +362 -0
  322. warp/tests/test_streams.py +484 -473
  323. warp/tests/test_struct.py +708 -675
  324. warp/tests/test_tape.py +171 -148
  325. warp/tests/test_torch.py +741 -743
  326. warp/tests/test_transient_module.py +85 -87
  327. warp/tests/test_types.py +554 -659
  328. warp/tests/test_utils.py +488 -499
  329. warp/tests/test_vec.py +1262 -1268
  330. warp/tests/test_vec_lite.py +71 -73
  331. warp/tests/test_vec_scalar_ops.py +2097 -2099
  332. warp/tests/test_verify_fp.py +92 -94
  333. warp/tests/test_volume.py +961 -736
  334. warp/tests/test_volume_write.py +338 -265
  335. warp/tests/unittest_serial.py +38 -37
  336. warp/tests/unittest_suites.py +367 -359
  337. warp/tests/unittest_utils.py +434 -578
  338. warp/tests/unused_test_misc.py +69 -71
  339. warp/tests/walkthrough_debug.py +85 -85
  340. warp/thirdparty/appdirs.py +598 -598
  341. warp/thirdparty/dlpack.py +143 -143
  342. warp/thirdparty/unittest_parallel.py +563 -561
  343. warp/torch.py +321 -295
  344. warp/types.py +4941 -4450
  345. warp/utils.py +1008 -821
  346. {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/LICENSE.md +126 -126
  347. {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/METADATA +365 -400
  348. warp_lang-1.2.0.dist-info/RECORD +359 -0
  349. warp/examples/assets/cube.usda +0 -42
  350. warp/examples/assets/sphere.usda +0 -56
  351. warp/examples/assets/torus.usda +0 -105
  352. warp/examples/fem/example_convection_diffusion_dg0.py +0 -194
  353. warp/native/nanovdb/PNanoVDBWrite.h +0 -295
  354. warp_lang-1.0.2.dist-info/RECORD +0 -352
  355. {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/WHEEL +0 -0
  356. {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/top_level.txt +0 -0
warp/tests/test_torch.py CHANGED
@@ -1,743 +1,741 @@
1
- # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
2
- # NVIDIA CORPORATION and its licensors retain all intellectual property
3
- # and proprietary rights in and to this software, related documentation
4
- # and any modifications thereto. Any use, reproduction, disclosure or
5
- # distribution of this software and related documentation without an express
6
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
7
-
8
- import unittest
9
-
10
- import numpy as np
11
-
12
- import warp as wp
13
- from warp.tests.unittest_utils import *
14
-
15
- wp.init()
16
-
17
-
18
- @wp.kernel
19
- def op_kernel(x: wp.array(dtype=float), y: wp.array(dtype=float)):
20
- tid = wp.tid()
21
- y[tid] = 0.5 - x[tid] * 2.0
22
-
23
-
24
- @wp.kernel
25
- def inc(a: wp.array(dtype=float)):
26
- tid = wp.tid()
27
- a[tid] = a[tid] + 1.0
28
-
29
-
30
- @wp.kernel
31
- def arange(start: int, step: int, a: wp.array(dtype=int)):
32
- tid = wp.tid()
33
- a[tid] = start + step * tid
34
-
35
-
36
- # copy elements between non-contiguous 1d arrays of float
37
- @wp.kernel
38
- def copy1d_float_kernel(dst: wp.array(dtype=float), src: wp.array(dtype=float)):
39
- i = wp.tid()
40
- dst[i] = src[i]
41
-
42
-
43
- # copy elements between non-contiguous 2d arrays of float
44
- @wp.kernel
45
- def copy2d_float_kernel(dst: wp.array2d(dtype=float), src: wp.array2d(dtype=float)):
46
- i, j = wp.tid()
47
- dst[i, j] = src[i, j]
48
-
49
-
50
- # copy elements between non-contiguous 3d arrays of float
51
- @wp.kernel
52
- def copy3d_float_kernel(dst: wp.array3d(dtype=float), src: wp.array3d(dtype=float)):
53
- i, j, k = wp.tid()
54
- dst[i, j, k] = src[i, j, k]
55
-
56
-
57
- # copy elements between non-contiguous 2d arrays of vec3
58
- @wp.kernel
59
- def copy2d_vec3_kernel(dst: wp.array2d(dtype=wp.vec3), src: wp.array2d(dtype=wp.vec3)):
60
- i, j = wp.tid()
61
- dst[i, j] = src[i, j]
62
-
63
-
64
- # copy elements between non-contiguous 2d arrays of mat22
65
- @wp.kernel
66
- def copy2d_mat22_kernel(dst: wp.array2d(dtype=wp.mat22), src: wp.array2d(dtype=wp.mat22)):
67
- i, j = wp.tid()
68
- dst[i, j] = src[i, j]
69
-
70
-
71
- def test_dtype_from_torch(test, device):
72
- import torch
73
-
74
- def test_conversions(torch_type, warp_type):
75
- test.assertEqual(wp.dtype_from_torch(torch_type), warp_type)
76
-
77
- test_conversions(torch.float16, wp.float16)
78
- test_conversions(torch.float32, wp.float32)
79
- test_conversions(torch.float64, wp.float64)
80
- test_conversions(torch.int8, wp.int8)
81
- test_conversions(torch.int16, wp.int16)
82
- test_conversions(torch.int32, wp.int32)
83
- test_conversions(torch.int64, wp.int64)
84
- test_conversions(torch.uint8, wp.uint8)
85
- test_conversions(torch.bool, wp.bool)
86
-
87
-
88
- def test_dtype_to_torch(test, device):
89
- import torch
90
-
91
- def test_conversions(warp_type, torch_type):
92
- test.assertEqual(wp.dtype_to_torch(warp_type), torch_type)
93
-
94
- test_conversions(wp.float16, torch.float16)
95
- test_conversions(wp.float32, torch.float32)
96
- test_conversions(wp.float64, torch.float64)
97
- test_conversions(wp.int8, torch.int8)
98
- test_conversions(wp.int16, torch.int16)
99
- test_conversions(wp.int32, torch.int32)
100
- test_conversions(wp.int64, torch.int64)
101
- test_conversions(wp.uint8, torch.uint8)
102
- test_conversions(wp.uint16, torch.int16)
103
- test_conversions(wp.uint32, torch.int32)
104
- test_conversions(wp.uint64, torch.int64)
105
- test_conversions(wp.bool, torch.bool)
106
-
107
-
108
- def test_device_conversion(test, device):
109
- torch_device = wp.device_to_torch(device)
110
- warp_device = wp.device_from_torch(torch_device)
111
- test.assertEqual(warp_device, device)
112
-
113
-
114
- def test_torch_zerocopy(test, device):
115
- import torch
116
-
117
- a = wp.zeros(10, dtype=wp.float32, device=device)
118
- t = wp.to_torch(a)
119
- assert a.ptr == t.data_ptr()
120
-
121
- torch_device = wp.device_to_torch(device)
122
-
123
- t = torch.zeros(10, dtype=torch.float32, device=torch_device)
124
- a = wp.from_torch(t)
125
- assert a.ptr == t.data_ptr()
126
-
127
-
128
- def test_from_torch(test, device):
129
- import torch
130
-
131
- torch_device = wp.device_to_torch(device)
132
-
133
- # automatically determine warp dtype
134
- def wrap_scalar_tensor_implicit(torch_dtype, expected_warp_dtype):
135
- t = torch.zeros(10, dtype=torch_dtype, device=torch_device)
136
- a = wp.from_torch(t)
137
- assert a.dtype == expected_warp_dtype
138
- assert a.shape == tuple(t.shape)
139
-
140
- wrap_scalar_tensor_implicit(torch.float64, wp.float64)
141
- wrap_scalar_tensor_implicit(torch.float32, wp.float32)
142
- wrap_scalar_tensor_implicit(torch.float16, wp.float16)
143
- wrap_scalar_tensor_implicit(torch.int64, wp.int64)
144
- wrap_scalar_tensor_implicit(torch.int32, wp.int32)
145
- wrap_scalar_tensor_implicit(torch.int16, wp.int16)
146
- wrap_scalar_tensor_implicit(torch.int8, wp.int8)
147
- wrap_scalar_tensor_implicit(torch.uint8, wp.uint8)
148
- wrap_scalar_tensor_implicit(torch.bool, wp.bool)
149
-
150
- # explicitly specify warp dtype
151
- def wrap_scalar_tensor_explicit(torch_dtype, expected_warp_dtype):
152
- t = torch.zeros(10, dtype=torch_dtype, device=torch_device)
153
- a = wp.from_torch(t, expected_warp_dtype)
154
- assert a.dtype == expected_warp_dtype
155
- assert a.shape == tuple(t.shape)
156
-
157
- wrap_scalar_tensor_explicit(torch.float64, wp.float64)
158
- wrap_scalar_tensor_explicit(torch.float32, wp.float32)
159
- wrap_scalar_tensor_explicit(torch.float16, wp.float16)
160
- wrap_scalar_tensor_explicit(torch.int64, wp.int64)
161
- wrap_scalar_tensor_explicit(torch.int64, wp.uint64)
162
- wrap_scalar_tensor_explicit(torch.int32, wp.int32)
163
- wrap_scalar_tensor_explicit(torch.int32, wp.uint32)
164
- wrap_scalar_tensor_explicit(torch.int16, wp.int16)
165
- wrap_scalar_tensor_explicit(torch.int16, wp.uint16)
166
- wrap_scalar_tensor_explicit(torch.int8, wp.int8)
167
- wrap_scalar_tensor_explicit(torch.int8, wp.uint8)
168
- wrap_scalar_tensor_explicit(torch.uint8, wp.uint8)
169
- wrap_scalar_tensor_explicit(torch.uint8, wp.int8)
170
- wrap_scalar_tensor_explicit(torch.bool, wp.uint8)
171
- wrap_scalar_tensor_explicit(torch.bool, wp.int8)
172
- wrap_scalar_tensor_explicit(torch.bool, wp.bool)
173
-
174
- def wrap_vec_tensor(n, desired_warp_dtype):
175
- t = torch.zeros((10, n), dtype=torch.float32, device=torch_device)
176
- a = wp.from_torch(t, desired_warp_dtype)
177
- assert a.dtype == desired_warp_dtype
178
- assert a.shape == (10,)
179
-
180
- wrap_vec_tensor(2, wp.vec2)
181
- wrap_vec_tensor(3, wp.vec3)
182
- wrap_vec_tensor(4, wp.vec4)
183
- wrap_vec_tensor(6, wp.spatial_vector)
184
- wrap_vec_tensor(7, wp.transform)
185
-
186
- def wrap_mat_tensor(n, m, desired_warp_dtype):
187
- t = torch.zeros((10, n, m), dtype=torch.float32, device=torch_device)
188
- a = wp.from_torch(t, desired_warp_dtype)
189
- assert a.dtype == desired_warp_dtype
190
- assert a.shape == (10,)
191
-
192
- wrap_mat_tensor(2, 2, wp.mat22)
193
- wrap_mat_tensor(3, 3, wp.mat33)
194
- wrap_mat_tensor(4, 4, wp.mat44)
195
- wrap_mat_tensor(6, 6, wp.spatial_matrix)
196
-
197
- def wrap_vec_tensor_with_grad(n, desired_warp_dtype):
198
- t = torch.zeros((10, n), dtype=torch.float32, device=torch_device)
199
- a = wp.from_torch(t, desired_warp_dtype, requires_grad=True)
200
- assert a.dtype == desired_warp_dtype
201
- assert a.shape == (10,)
202
-
203
- wrap_vec_tensor_with_grad(2, wp.vec2)
204
- wrap_vec_tensor_with_grad(3, wp.vec3)
205
- wrap_vec_tensor_with_grad(4, wp.vec4)
206
- wrap_vec_tensor_with_grad(6, wp.spatial_vector)
207
- wrap_vec_tensor_with_grad(7, wp.transform)
208
-
209
- def wrap_mat_tensor_with_grad(n, m, desired_warp_dtype):
210
- t = torch.zeros((10, n, m), dtype=torch.float32, device=torch_device)
211
- a = wp.from_torch(t, desired_warp_dtype, requires_grad=True)
212
- assert a.dtype == desired_warp_dtype
213
- assert a.shape == (10,)
214
-
215
- wrap_mat_tensor_with_grad(2, 2, wp.mat22)
216
- wrap_mat_tensor_with_grad(3, 3, wp.mat33)
217
- wrap_mat_tensor_with_grad(4, 4, wp.mat44)
218
- wrap_mat_tensor_with_grad(6, 6, wp.spatial_matrix)
219
-
220
-
221
- def test_to_torch(test, device):
222
- import torch
223
-
224
- def wrap_scalar_array(warp_dtype, expected_torch_dtype):
225
- a = wp.zeros(10, dtype=warp_dtype, device=device)
226
- t = wp.to_torch(a)
227
- assert t.dtype == expected_torch_dtype
228
- assert tuple(t.shape) == a.shape
229
-
230
- wrap_scalar_array(wp.float64, torch.float64)
231
- wrap_scalar_array(wp.float32, torch.float32)
232
- wrap_scalar_array(wp.float16, torch.float16)
233
- wrap_scalar_array(wp.int64, torch.int64)
234
- wrap_scalar_array(wp.int32, torch.int32)
235
- wrap_scalar_array(wp.int16, torch.int16)
236
- wrap_scalar_array(wp.int8, torch.int8)
237
- wrap_scalar_array(wp.uint8, torch.uint8)
238
- wrap_scalar_array(wp.bool, torch.bool)
239
-
240
- # not supported by torch
241
- # wrap_scalar_array(wp.uint64, torch.int64)
242
- # wrap_scalar_array(wp.uint32, torch.int32)
243
- # wrap_scalar_array(wp.uint16, torch.int16)
244
-
245
- def wrap_vec_array(n, warp_dtype):
246
- a = wp.zeros(10, dtype=warp_dtype, device=device)
247
- t = wp.to_torch(a)
248
- assert t.dtype == torch.float32
249
- assert tuple(t.shape) == (10, n)
250
-
251
- wrap_vec_array(2, wp.vec2)
252
- wrap_vec_array(3, wp.vec3)
253
- wrap_vec_array(4, wp.vec4)
254
- wrap_vec_array(6, wp.spatial_vector)
255
- wrap_vec_array(7, wp.transform)
256
-
257
- def wrap_mat_array(n, m, warp_dtype):
258
- a = wp.zeros(10, dtype=warp_dtype, device=device)
259
- t = wp.to_torch(a)
260
- assert t.dtype == torch.float32
261
- assert tuple(t.shape) == (10, n, m)
262
-
263
- wrap_mat_array(2, 2, wp.mat22)
264
- wrap_mat_array(3, 3, wp.mat33)
265
- wrap_mat_array(4, 4, wp.mat44)
266
- wrap_mat_array(6, 6, wp.spatial_matrix)
267
-
268
-
269
- def test_from_torch_slices(test, device):
270
- import torch
271
-
272
- torch_device = wp.device_to_torch(device)
273
-
274
- # 1D slice, contiguous
275
- t_base = torch.arange(10, dtype=torch.float32, device=torch_device)
276
- t = t_base[2:9]
277
- a = wp.from_torch(t)
278
- assert a.ptr == t.data_ptr()
279
- assert a.is_contiguous
280
- assert a.shape == tuple(t.shape)
281
- assert_np_equal(a.numpy(), t.cpu().numpy())
282
-
283
- # 1D slice with non-contiguous stride
284
- t_base = torch.arange(10, dtype=torch.float32, device=torch_device)
285
- t = t_base[2:9:2]
286
- a = wp.from_torch(t)
287
- assert a.ptr == t.data_ptr()
288
- assert not a.is_contiguous
289
- assert a.shape == tuple(t.shape)
290
- # copy contents to contiguous array
291
- a_contiguous = wp.empty_like(a)
292
- wp.launch(copy1d_float_kernel, dim=a.shape, inputs=[a_contiguous, a], device=device)
293
- assert_np_equal(a_contiguous.numpy(), t.cpu().numpy())
294
-
295
- # 2D slices (non-contiguous)
296
- t_base = torch.arange(24, dtype=torch.float32, device=torch_device).reshape((4, 6))
297
- t = t_base[1:3, 2:5]
298
- a = wp.from_torch(t)
299
- assert a.ptr == t.data_ptr()
300
- assert not a.is_contiguous
301
- assert a.shape == tuple(t.shape)
302
- # copy contents to contiguous array
303
- a_contiguous = wp.empty_like(a)
304
- wp.launch(copy2d_float_kernel, dim=a.shape, inputs=[a_contiguous, a], device=device)
305
- assert_np_equal(a_contiguous.numpy(), t.cpu().numpy())
306
-
307
- # 3D slices (non-contiguous)
308
- t_base = torch.arange(36, dtype=torch.float32, device=torch_device).reshape((4, 3, 3))
309
- t = t_base[::2, 0:1, 1:2]
310
- a = wp.from_torch(t)
311
- assert a.ptr == t.data_ptr()
312
- assert not a.is_contiguous
313
- assert a.shape == tuple(t.shape)
314
- # copy contents to contiguous array
315
- a_contiguous = wp.empty_like(a)
316
- wp.launch(copy3d_float_kernel, dim=a.shape, inputs=[a_contiguous, a], device=device)
317
- assert_np_equal(a_contiguous.numpy(), t.cpu().numpy())
318
-
319
- # 2D slices of vec3 (inner contiguous, outer non-contiguous)
320
- t_base = torch.arange(150, dtype=torch.float32, device=torch_device).reshape((10, 5, 3))
321
- t = t_base[1:7:2, 2:5]
322
- a = wp.from_torch(t, dtype=wp.vec3)
323
- assert a.ptr == t.data_ptr()
324
- assert not a.is_contiguous
325
- assert a.shape == tuple(t.shape[:-1])
326
- # copy contents to contiguous array
327
- a_contiguous = wp.empty_like(a)
328
- wp.launch(copy2d_vec3_kernel, dim=a.shape, inputs=[a_contiguous, a], device=device)
329
- assert_np_equal(a_contiguous.numpy(), t.cpu().numpy())
330
-
331
- # 2D slices of mat22 (inner contiguous, outer non-contiguous)
332
- t_base = torch.arange(200, dtype=torch.float32, device=torch_device).reshape((10, 5, 2, 2))
333
- t = t_base[1:7:2, 2:5]
334
- a = wp.from_torch(t, dtype=wp.mat22)
335
- assert a.ptr == t.data_ptr()
336
- assert not a.is_contiguous
337
- assert a.shape == tuple(t.shape[:-2])
338
- # copy contents to contiguous array
339
- a_contiguous = wp.empty_like(a)
340
- wp.launch(copy2d_mat22_kernel, dim=a.shape, inputs=[a_contiguous, a], device=device)
341
- assert_np_equal(a_contiguous.numpy(), t.cpu().numpy())
342
-
343
-
344
- def test_from_torch_zero_strides(test, device):
345
- import torch
346
-
347
- torch_device = wp.device_to_torch(device)
348
-
349
- t_base = torch.arange(9, dtype=torch.float32, device=torch_device).reshape((3, 3))
350
-
351
- # expand outermost dimension
352
- t = t_base.unsqueeze(0).expand(3, -1, -1)
353
- a = wp.from_torch(t)
354
- assert a.ptr == t.data_ptr()
355
- assert not a.is_contiguous
356
- assert a.shape == tuple(t.shape)
357
- a_contiguous = wp.empty_like(a)
358
- wp.launch(copy3d_float_kernel, dim=a.shape, inputs=[a_contiguous, a], device=device)
359
- assert_np_equal(a_contiguous.numpy(), t.cpu().numpy())
360
-
361
- # expand middle dimension
362
- t = t_base.unsqueeze(1).expand(-1, 3, -1)
363
- a = wp.from_torch(t)
364
- assert a.ptr == t.data_ptr()
365
- assert not a.is_contiguous
366
- assert a.shape == tuple(t.shape)
367
- a_contiguous = wp.empty_like(a)
368
- wp.launch(copy3d_float_kernel, dim=a.shape, inputs=[a_contiguous, a], device=device)
369
- assert_np_equal(a_contiguous.numpy(), t.cpu().numpy())
370
-
371
- # expand innermost dimension
372
- t = t_base.unsqueeze(2).expand(-1, -1, 3)
373
- a = wp.from_torch(t)
374
- assert a.ptr == t.data_ptr()
375
- assert not a.is_contiguous
376
- assert a.shape == tuple(t.shape)
377
- a_contiguous = wp.empty_like(a)
378
- wp.launch(copy3d_float_kernel, dim=a.shape, inputs=[a_contiguous, a], device=device)
379
- assert_np_equal(a_contiguous.numpy(), t.cpu().numpy())
380
-
381
-
382
- def test_torch_mgpu_from_torch(test, device):
383
- import torch
384
-
385
- n = 32
386
-
387
- t0 = torch.arange(0, n, 1, dtype=torch.int32, device="cuda:0")
388
- t1 = torch.arange(0, n * 2, 2, dtype=torch.int32, device="cuda:1")
389
-
390
- a0 = wp.from_torch(t0, dtype=wp.int32)
391
- a1 = wp.from_torch(t1, dtype=wp.int32)
392
-
393
- assert a0.device == "cuda:0"
394
- assert a1.device == "cuda:1"
395
-
396
- expected0 = np.arange(0, n, 1)
397
- expected1 = np.arange(0, n * 2, 2)
398
-
399
- assert_np_equal(a0.numpy(), expected0)
400
- assert_np_equal(a1.numpy(), expected1)
401
-
402
-
403
- def test_torch_mgpu_to_torch(test, device):
404
- n = 32
405
-
406
- with wp.ScopedDevice("cuda:0"):
407
- a0 = wp.empty(n, dtype=wp.int32)
408
- wp.launch(arange, dim=a0.size, inputs=[0, 1, a0])
409
-
410
- with wp.ScopedDevice("cuda:1"):
411
- a1 = wp.empty(n, dtype=wp.int32)
412
- wp.launch(arange, dim=a1.size, inputs=[0, 2, a1])
413
-
414
- t0 = wp.to_torch(a0)
415
- t1 = wp.to_torch(a1)
416
-
417
- assert str(t0.device) == "cuda:0"
418
- assert str(t1.device) == "cuda:1"
419
-
420
- expected0 = np.arange(0, n, 1, dtype=np.int32)
421
- expected1 = np.arange(0, n * 2, 2, dtype=np.int32)
422
-
423
- assert_np_equal(t0.cpu().numpy(), expected0)
424
- assert_np_equal(t1.cpu().numpy(), expected1)
425
-
426
-
427
- def test_torch_mgpu_interop(test, device):
428
- import torch
429
-
430
- n = 1024 * 1024
431
-
432
- with torch.cuda.device(0):
433
- t0 = torch.arange(n, dtype=torch.float32, device="cuda")
434
- a0 = wp.from_torch(t0)
435
- wp.launch(inc, dim=a0.size, inputs=[a0], stream=wp.stream_from_torch())
436
-
437
- with torch.cuda.device(1):
438
- t1 = torch.arange(n, dtype=torch.float32, device="cuda")
439
- a1 = wp.from_torch(t1)
440
- wp.launch(inc, dim=a1.size, inputs=[a1], stream=wp.stream_from_torch())
441
-
442
- assert a0.device == "cuda:0"
443
- assert a1.device == "cuda:1"
444
-
445
- expected = np.arange(n, dtype=int) + 1
446
-
447
- # ensure the torch tensors were modified by warp
448
- assert_np_equal(t0.cpu().numpy(), expected)
449
- assert_np_equal(t1.cpu().numpy(), expected)
450
-
451
-
452
- def test_torch_autograd(test, device):
453
- """Test torch autograd with a custom Warp op"""
454
-
455
- import torch
456
-
457
- # custom autograd op
458
- class TestFunc(torch.autograd.Function):
459
- @staticmethod
460
- def forward(ctx, x):
461
- # allocate output array
462
- y = torch.empty_like(x)
463
-
464
- ctx.x = x
465
- ctx.y = y
466
-
467
- wp.launch(kernel=op_kernel, dim=len(x), inputs=[wp.from_torch(x)], outputs=[wp.from_torch(y)])
468
-
469
- return y
470
-
471
- @staticmethod
472
- def backward(ctx, adj_y):
473
- # adjoints should be allocated as zero initialized
474
- adj_x = torch.zeros_like(ctx.x).contiguous()
475
- adj_y = adj_y.contiguous()
476
-
477
- wp_x = wp.from_torch(ctx.x, grad=adj_x)
478
- wp_y = wp.from_torch(ctx.y, grad=adj_y)
479
-
480
- wp.launch(
481
- kernel=op_kernel,
482
- dim=len(ctx.x),
483
- # fwd inputs
484
- inputs=[wp_x],
485
- outputs=[wp_y],
486
- # adj inputs (already stored in input/output arrays, passing null pointers)
487
- adj_inputs=[None],
488
- adj_outputs=[None],
489
- adjoint=True,
490
- )
491
-
492
- return adj_x
493
-
494
- # run autograd on given device
495
- with wp.ScopedDevice(device):
496
- torch_device = wp.device_to_torch(device)
497
-
498
- # input data
499
- x = torch.ones(16, dtype=torch.float32, device=torch_device, requires_grad=True)
500
-
501
- # execute op
502
- y = TestFunc.apply(x)
503
-
504
- # compute grads
505
- l = y.sum()
506
- l.backward()
507
-
508
- passed = (x.grad == -2.0).all()
509
- assert passed.item()
510
-
511
-
512
- def test_torch_graph_torch_stream(test, device):
513
- """Capture Torch graph on Torch stream"""
514
-
515
- wp.load_module(device=device)
516
-
517
- import torch
518
-
519
- torch_device = wp.device_to_torch(device)
520
-
521
- n = 1024 * 1024
522
- t = torch.zeros(n, dtype=torch.float32, device=torch_device)
523
- a = wp.from_torch(t)
524
-
525
- g = torch.cuda.CUDAGraph()
526
-
527
- # create a device-specific torch stream to use for capture
528
- # (otherwise torch.cuda.graph reuses its capture stream, which can be problematic if it's from a different device)
529
- torch_stream = torch.cuda.Stream(device=torch_device)
530
-
531
- # make warp use the same stream
532
- warp_stream = wp.stream_from_torch(torch_stream)
533
-
534
- # capture graph
535
- with wp.ScopedStream(warp_stream), torch.cuda.graph(g, stream=torch_stream):
536
- wp.capture_begin(force_module_load=False, external=True)
537
- try:
538
- t += 1.0
539
- wp.launch(inc, dim=n, inputs=[a])
540
- t += 1.0
541
- wp.launch(inc, dim=n, inputs=[a])
542
- finally:
543
- wp.capture_end()
544
-
545
- # replay graph
546
- num_iters = 10
547
- for i in range(num_iters):
548
- g.replay()
549
-
550
- passed = (t == num_iters * 4.0).all()
551
- assert passed.item()
552
-
553
-
554
- def test_torch_graph_warp_stream(test, device):
555
- """Capture Torch graph on Warp stream"""
556
-
557
- import torch
558
-
559
- torch_device = wp.device_to_torch(device)
560
-
561
- n = 1024 * 1024
562
- t = torch.zeros(n, dtype=torch.float32, device=torch_device)
563
- a = wp.from_torch(t)
564
-
565
- g = torch.cuda.CUDAGraph()
566
-
567
- # make torch use the warp stream from the given device
568
- torch_stream = wp.stream_to_torch(device)
569
-
570
- # capture graph
571
- with wp.ScopedDevice(device), torch.cuda.graph(g, stream=torch_stream):
572
- wp.capture_begin(force_module_load=False, external=True)
573
- try:
574
- t += 1.0
575
- wp.launch(inc, dim=n, inputs=[a])
576
- t += 1.0
577
- wp.launch(inc, dim=n, inputs=[a])
578
- finally:
579
- wp.capture_end()
580
-
581
- # replay graph
582
- num_iters = 10
583
- for i in range(num_iters):
584
- g.replay()
585
-
586
- passed = (t == num_iters * 4.0).all()
587
- assert passed.item()
588
-
589
-
590
- def test_warp_graph_warp_stream(test, device):
591
- """Capture Warp graph on Warp stream"""
592
-
593
- import torch
594
-
595
- torch_device = wp.device_to_torch(device)
596
-
597
- n = 1024 * 1024
598
- t = torch.zeros(n, dtype=torch.float32, device=torch_device)
599
- a = wp.from_torch(t)
600
-
601
- # make torch use the warp stream from the given device
602
- torch_stream = wp.stream_to_torch(device)
603
-
604
- # capture graph
605
- with wp.ScopedDevice(device), torch.cuda.stream(torch_stream):
606
- wp.capture_begin(force_module_load=False)
607
- try:
608
- t += 1.0
609
- wp.launch(inc, dim=n, inputs=[a])
610
- t += 1.0
611
- wp.launch(inc, dim=n, inputs=[a])
612
- finally:
613
- g = wp.capture_end()
614
-
615
- # replay graph
616
- num_iters = 10
617
- for i in range(num_iters):
618
- wp.capture_launch(g)
619
-
620
- passed = (t == num_iters * 4.0).all()
621
- assert passed.item()
622
-
623
-
624
- def test_warp_graph_torch_stream(test, device):
625
- """Capture Warp graph on Torch stream"""
626
-
627
- wp.load_module(device=device)
628
-
629
- import torch
630
-
631
- torch_device = wp.device_to_torch(device)
632
-
633
- n = 1024 * 1024
634
- t = torch.zeros(n, dtype=torch.float32, device=torch_device)
635
- a = wp.from_torch(t)
636
-
637
- # create a device-specific torch stream to use for capture
638
- # (the default torch stream is not suitable for graph capture)
639
- torch_stream = torch.cuda.Stream(device=torch_device)
640
-
641
- # make warp use the same stream
642
- warp_stream = wp.stream_from_torch(torch_stream)
643
-
644
- # capture graph
645
- with wp.ScopedStream(warp_stream), torch.cuda.stream(torch_stream):
646
- wp.capture_begin(force_module_load=False)
647
- try:
648
- t += 1.0
649
- wp.launch(inc, dim=n, inputs=[a])
650
- t += 1.0
651
- wp.launch(inc, dim=n, inputs=[a])
652
- finally:
653
- g = wp.capture_end()
654
-
655
- # replay graph
656
- num_iters = 10
657
- for i in range(num_iters):
658
- wp.capture_launch(g)
659
-
660
- passed = (t == num_iters * 4.0).all()
661
- assert passed.item()
662
-
663
-
664
- class TestTorch(unittest.TestCase):
665
- pass
666
-
667
-
668
- test_devices = get_test_devices()
669
-
670
- try:
671
- import torch
672
-
673
- # check which Warp devices work with Torch
674
- # CUDA devices may fail if Torch was not compiled with CUDA support
675
- torch_compatible_devices = []
676
- torch_compatible_cuda_devices = []
677
-
678
- for d in test_devices:
679
- try:
680
- t = torch.arange(10, device=wp.device_to_torch(d))
681
- t += 1
682
- torch_compatible_devices.append(d)
683
- if d.is_cuda:
684
- torch_compatible_cuda_devices.append(d)
685
- except Exception as e:
686
- print(f"Skipping Torch tests on device '{d}' due to exception: {e}")
687
-
688
- add_function_test(TestTorch, "test_dtype_from_torch", test_dtype_from_torch, devices=None)
689
- add_function_test(TestTorch, "test_dtype_to_torch", test_dtype_to_torch, devices=None)
690
-
691
- if torch_compatible_devices:
692
- add_function_test(TestTorch, "test_device_conversion", test_device_conversion, devices=torch_compatible_devices)
693
- add_function_test(TestTorch, "test_from_torch", test_from_torch, devices=torch_compatible_devices)
694
- add_function_test(TestTorch, "test_from_torch_slices", test_from_torch_slices, devices=torch_compatible_devices)
695
- add_function_test(
696
- TestTorch,
697
- "test_from_torch_zero_strides",
698
- test_from_torch_zero_strides,
699
- devices=torch_compatible_devices,
700
- )
701
- add_function_test(TestTorch, "test_to_torch", test_to_torch, devices=torch_compatible_devices)
702
- add_function_test(TestTorch, "test_torch_zerocopy", test_torch_zerocopy, devices=torch_compatible_devices)
703
- add_function_test(TestTorch, "test_torch_autograd", test_torch_autograd, devices=torch_compatible_devices)
704
-
705
- if torch_compatible_cuda_devices:
706
- add_function_test(
707
- TestTorch,
708
- "test_torch_graph_torch_stream",
709
- test_torch_graph_torch_stream,
710
- devices=torch_compatible_cuda_devices,
711
- )
712
- add_function_test(
713
- TestTorch,
714
- "test_torch_graph_warp_stream",
715
- test_torch_graph_warp_stream,
716
- devices=torch_compatible_cuda_devices,
717
- )
718
- add_function_test(
719
- TestTorch,
720
- "test_warp_graph_warp_stream",
721
- test_warp_graph_warp_stream,
722
- devices=torch_compatible_cuda_devices,
723
- )
724
- add_function_test(
725
- TestTorch,
726
- "test_warp_graph_torch_stream",
727
- test_warp_graph_torch_stream,
728
- devices=torch_compatible_cuda_devices,
729
- )
730
-
731
- # multi-GPU tests
732
- if len(torch_compatible_cuda_devices) > 1:
733
- add_function_test(TestTorch, "test_torch_mgpu_from_torch", test_torch_mgpu_from_torch)
734
- add_function_test(TestTorch, "test_torch_mgpu_to_torch", test_torch_mgpu_to_torch)
735
- add_function_test(TestTorch, "test_torch_mgpu_interop", test_torch_mgpu_interop)
736
-
737
- except Exception as e:
738
- print(f"Skipping Torch tests due to exception: {e}")
739
-
740
-
741
- if __name__ == "__main__":
742
- wp.build.clear_kernel_cache()
743
- unittest.main(verbosity=2)
1
+ # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
2
+ # NVIDIA CORPORATION and its licensors retain all intellectual property
3
+ # and proprietary rights in and to this software, related documentation
4
+ # and any modifications thereto. Any use, reproduction, disclosure or
5
+ # distribution of this software and related documentation without an express
6
+ # license agreement from NVIDIA CORPORATION is strictly prohibited.
7
+
8
+ import unittest
9
+
10
+ import numpy as np
11
+
12
+ import warp as wp
13
+ from warp.tests.unittest_utils import *
14
+
15
+
16
+ @wp.kernel
17
+ def op_kernel(x: wp.array(dtype=float), y: wp.array(dtype=float)):
18
+ tid = wp.tid()
19
+ y[tid] = 0.5 - x[tid] * 2.0
20
+
21
+
22
+ @wp.kernel
23
+ def inc(a: wp.array(dtype=float)):
24
+ tid = wp.tid()
25
+ a[tid] = a[tid] + 1.0
26
+
27
+
28
+ @wp.kernel
29
+ def arange(start: int, step: int, a: wp.array(dtype=int)):
30
+ tid = wp.tid()
31
+ a[tid] = start + step * tid
32
+
33
+
34
+ # copy elements between non-contiguous 1d arrays of float
35
+ @wp.kernel
36
+ def copy1d_float_kernel(dst: wp.array(dtype=float), src: wp.array(dtype=float)):
37
+ i = wp.tid()
38
+ dst[i] = src[i]
39
+
40
+
41
+ # copy elements between non-contiguous 2d arrays of float
42
+ @wp.kernel
43
+ def copy2d_float_kernel(dst: wp.array2d(dtype=float), src: wp.array2d(dtype=float)):
44
+ i, j = wp.tid()
45
+ dst[i, j] = src[i, j]
46
+
47
+
48
+ # copy elements between non-contiguous 3d arrays of float
49
+ @wp.kernel
50
+ def copy3d_float_kernel(dst: wp.array3d(dtype=float), src: wp.array3d(dtype=float)):
51
+ i, j, k = wp.tid()
52
+ dst[i, j, k] = src[i, j, k]
53
+
54
+
55
+ # copy elements between non-contiguous 2d arrays of vec3
56
+ @wp.kernel
57
+ def copy2d_vec3_kernel(dst: wp.array2d(dtype=wp.vec3), src: wp.array2d(dtype=wp.vec3)):
58
+ i, j = wp.tid()
59
+ dst[i, j] = src[i, j]
60
+
61
+
62
+ # copy elements between non-contiguous 2d arrays of mat22
63
+ @wp.kernel
64
+ def copy2d_mat22_kernel(dst: wp.array2d(dtype=wp.mat22), src: wp.array2d(dtype=wp.mat22)):
65
+ i, j = wp.tid()
66
+ dst[i, j] = src[i, j]
67
+
68
+
69
+ def test_dtype_from_torch(test, device):
70
+ import torch
71
+
72
+ def test_conversions(torch_type, warp_type):
73
+ test.assertEqual(wp.dtype_from_torch(torch_type), warp_type)
74
+
75
+ test_conversions(torch.float16, wp.float16)
76
+ test_conversions(torch.float32, wp.float32)
77
+ test_conversions(torch.float64, wp.float64)
78
+ test_conversions(torch.int8, wp.int8)
79
+ test_conversions(torch.int16, wp.int16)
80
+ test_conversions(torch.int32, wp.int32)
81
+ test_conversions(torch.int64, wp.int64)
82
+ test_conversions(torch.uint8, wp.uint8)
83
+ test_conversions(torch.bool, wp.bool)
84
+
85
+
86
+ def test_dtype_to_torch(test, device):
87
+ import torch
88
+
89
+ def test_conversions(warp_type, torch_type):
90
+ test.assertEqual(wp.dtype_to_torch(warp_type), torch_type)
91
+
92
+ test_conversions(wp.float16, torch.float16)
93
+ test_conversions(wp.float32, torch.float32)
94
+ test_conversions(wp.float64, torch.float64)
95
+ test_conversions(wp.int8, torch.int8)
96
+ test_conversions(wp.int16, torch.int16)
97
+ test_conversions(wp.int32, torch.int32)
98
+ test_conversions(wp.int64, torch.int64)
99
+ test_conversions(wp.uint8, torch.uint8)
100
+ test_conversions(wp.uint16, torch.int16)
101
+ test_conversions(wp.uint32, torch.int32)
102
+ test_conversions(wp.uint64, torch.int64)
103
+ test_conversions(wp.bool, torch.bool)
104
+
105
+
106
+ def test_device_conversion(test, device):
107
+ torch_device = wp.device_to_torch(device)
108
+ warp_device = wp.device_from_torch(torch_device)
109
+ test.assertEqual(warp_device, device)
110
+
111
+
112
+ def test_torch_zerocopy(test, device):
113
+ import torch
114
+
115
+ a = wp.zeros(10, dtype=wp.float32, device=device)
116
+ t = wp.to_torch(a)
117
+ assert a.ptr == t.data_ptr()
118
+
119
+ torch_device = wp.device_to_torch(device)
120
+
121
+ t = torch.zeros(10, dtype=torch.float32, device=torch_device)
122
+ a = wp.from_torch(t)
123
+ assert a.ptr == t.data_ptr()
124
+
125
+
126
+ def test_from_torch(test, device):
127
+ import torch
128
+
129
+ torch_device = wp.device_to_torch(device)
130
+
131
+ # automatically determine warp dtype
132
+ def wrap_scalar_tensor_implicit(torch_dtype, expected_warp_dtype):
133
+ t = torch.zeros(10, dtype=torch_dtype, device=torch_device)
134
+ a = wp.from_torch(t)
135
+ assert a.dtype == expected_warp_dtype
136
+ assert a.shape == tuple(t.shape)
137
+
138
+ wrap_scalar_tensor_implicit(torch.float64, wp.float64)
139
+ wrap_scalar_tensor_implicit(torch.float32, wp.float32)
140
+ wrap_scalar_tensor_implicit(torch.float16, wp.float16)
141
+ wrap_scalar_tensor_implicit(torch.int64, wp.int64)
142
+ wrap_scalar_tensor_implicit(torch.int32, wp.int32)
143
+ wrap_scalar_tensor_implicit(torch.int16, wp.int16)
144
+ wrap_scalar_tensor_implicit(torch.int8, wp.int8)
145
+ wrap_scalar_tensor_implicit(torch.uint8, wp.uint8)
146
+ wrap_scalar_tensor_implicit(torch.bool, wp.bool)
147
+
148
+ # explicitly specify warp dtype
149
+ def wrap_scalar_tensor_explicit(torch_dtype, expected_warp_dtype):
150
+ t = torch.zeros(10, dtype=torch_dtype, device=torch_device)
151
+ a = wp.from_torch(t, expected_warp_dtype)
152
+ assert a.dtype == expected_warp_dtype
153
+ assert a.shape == tuple(t.shape)
154
+
155
+ wrap_scalar_tensor_explicit(torch.float64, wp.float64)
156
+ wrap_scalar_tensor_explicit(torch.float32, wp.float32)
157
+ wrap_scalar_tensor_explicit(torch.float16, wp.float16)
158
+ wrap_scalar_tensor_explicit(torch.int64, wp.int64)
159
+ wrap_scalar_tensor_explicit(torch.int64, wp.uint64)
160
+ wrap_scalar_tensor_explicit(torch.int32, wp.int32)
161
+ wrap_scalar_tensor_explicit(torch.int32, wp.uint32)
162
+ wrap_scalar_tensor_explicit(torch.int16, wp.int16)
163
+ wrap_scalar_tensor_explicit(torch.int16, wp.uint16)
164
+ wrap_scalar_tensor_explicit(torch.int8, wp.int8)
165
+ wrap_scalar_tensor_explicit(torch.int8, wp.uint8)
166
+ wrap_scalar_tensor_explicit(torch.uint8, wp.uint8)
167
+ wrap_scalar_tensor_explicit(torch.uint8, wp.int8)
168
+ wrap_scalar_tensor_explicit(torch.bool, wp.uint8)
169
+ wrap_scalar_tensor_explicit(torch.bool, wp.int8)
170
+ wrap_scalar_tensor_explicit(torch.bool, wp.bool)
171
+
172
+ def wrap_vec_tensor(n, desired_warp_dtype):
173
+ t = torch.zeros((10, n), dtype=torch.float32, device=torch_device)
174
+ a = wp.from_torch(t, desired_warp_dtype)
175
+ assert a.dtype == desired_warp_dtype
176
+ assert a.shape == (10,)
177
+
178
+ wrap_vec_tensor(2, wp.vec2)
179
+ wrap_vec_tensor(3, wp.vec3)
180
+ wrap_vec_tensor(4, wp.vec4)
181
+ wrap_vec_tensor(6, wp.spatial_vector)
182
+ wrap_vec_tensor(7, wp.transform)
183
+
184
+ def wrap_mat_tensor(n, m, desired_warp_dtype):
185
+ t = torch.zeros((10, n, m), dtype=torch.float32, device=torch_device)
186
+ a = wp.from_torch(t, desired_warp_dtype)
187
+ assert a.dtype == desired_warp_dtype
188
+ assert a.shape == (10,)
189
+
190
+ wrap_mat_tensor(2, 2, wp.mat22)
191
+ wrap_mat_tensor(3, 3, wp.mat33)
192
+ wrap_mat_tensor(4, 4, wp.mat44)
193
+ wrap_mat_tensor(6, 6, wp.spatial_matrix)
194
+
195
+ def wrap_vec_tensor_with_grad(n, desired_warp_dtype):
196
+ t = torch.zeros((10, n), dtype=torch.float32, device=torch_device)
197
+ a = wp.from_torch(t, desired_warp_dtype, requires_grad=True)
198
+ assert a.dtype == desired_warp_dtype
199
+ assert a.shape == (10,)
200
+
201
+ wrap_vec_tensor_with_grad(2, wp.vec2)
202
+ wrap_vec_tensor_with_grad(3, wp.vec3)
203
+ wrap_vec_tensor_with_grad(4, wp.vec4)
204
+ wrap_vec_tensor_with_grad(6, wp.spatial_vector)
205
+ wrap_vec_tensor_with_grad(7, wp.transform)
206
+
207
+ def wrap_mat_tensor_with_grad(n, m, desired_warp_dtype):
208
+ t = torch.zeros((10, n, m), dtype=torch.float32, device=torch_device)
209
+ a = wp.from_torch(t, desired_warp_dtype, requires_grad=True)
210
+ assert a.dtype == desired_warp_dtype
211
+ assert a.shape == (10,)
212
+
213
+ wrap_mat_tensor_with_grad(2, 2, wp.mat22)
214
+ wrap_mat_tensor_with_grad(3, 3, wp.mat33)
215
+ wrap_mat_tensor_with_grad(4, 4, wp.mat44)
216
+ wrap_mat_tensor_with_grad(6, 6, wp.spatial_matrix)
217
+
218
+
219
+ def test_to_torch(test, device):
220
+ import torch
221
+
222
+ def wrap_scalar_array(warp_dtype, expected_torch_dtype):
223
+ a = wp.zeros(10, dtype=warp_dtype, device=device)
224
+ t = wp.to_torch(a)
225
+ assert t.dtype == expected_torch_dtype
226
+ assert tuple(t.shape) == a.shape
227
+
228
+ wrap_scalar_array(wp.float64, torch.float64)
229
+ wrap_scalar_array(wp.float32, torch.float32)
230
+ wrap_scalar_array(wp.float16, torch.float16)
231
+ wrap_scalar_array(wp.int64, torch.int64)
232
+ wrap_scalar_array(wp.int32, torch.int32)
233
+ wrap_scalar_array(wp.int16, torch.int16)
234
+ wrap_scalar_array(wp.int8, torch.int8)
235
+ wrap_scalar_array(wp.uint8, torch.uint8)
236
+ wrap_scalar_array(wp.bool, torch.bool)
237
+
238
+ # not supported by torch
239
+ # wrap_scalar_array(wp.uint64, torch.int64)
240
+ # wrap_scalar_array(wp.uint32, torch.int32)
241
+ # wrap_scalar_array(wp.uint16, torch.int16)
242
+
243
+ def wrap_vec_array(n, warp_dtype):
244
+ a = wp.zeros(10, dtype=warp_dtype, device=device)
245
+ t = wp.to_torch(a)
246
+ assert t.dtype == torch.float32
247
+ assert tuple(t.shape) == (10, n)
248
+
249
+ wrap_vec_array(2, wp.vec2)
250
+ wrap_vec_array(3, wp.vec3)
251
+ wrap_vec_array(4, wp.vec4)
252
+ wrap_vec_array(6, wp.spatial_vector)
253
+ wrap_vec_array(7, wp.transform)
254
+
255
+ def wrap_mat_array(n, m, warp_dtype):
256
+ a = wp.zeros(10, dtype=warp_dtype, device=device)
257
+ t = wp.to_torch(a)
258
+ assert t.dtype == torch.float32
259
+ assert tuple(t.shape) == (10, n, m)
260
+
261
+ wrap_mat_array(2, 2, wp.mat22)
262
+ wrap_mat_array(3, 3, wp.mat33)
263
+ wrap_mat_array(4, 4, wp.mat44)
264
+ wrap_mat_array(6, 6, wp.spatial_matrix)
265
+
266
+
267
+ def test_from_torch_slices(test, device):
268
+ import torch
269
+
270
+ torch_device = wp.device_to_torch(device)
271
+
272
+ # 1D slice, contiguous
273
+ t_base = torch.arange(10, dtype=torch.float32, device=torch_device)
274
+ t = t_base[2:9]
275
+ a = wp.from_torch(t)
276
+ assert a.ptr == t.data_ptr()
277
+ assert a.is_contiguous
278
+ assert a.shape == tuple(t.shape)
279
+ assert_np_equal(a.numpy(), t.cpu().numpy())
280
+
281
+ # 1D slice with non-contiguous stride
282
+ t_base = torch.arange(10, dtype=torch.float32, device=torch_device)
283
+ t = t_base[2:9:2]
284
+ a = wp.from_torch(t)
285
+ assert a.ptr == t.data_ptr()
286
+ assert not a.is_contiguous
287
+ assert a.shape == tuple(t.shape)
288
+ # copy contents to contiguous array
289
+ a_contiguous = wp.empty_like(a)
290
+ wp.launch(copy1d_float_kernel, dim=a.shape, inputs=[a_contiguous, a], device=device)
291
+ assert_np_equal(a_contiguous.numpy(), t.cpu().numpy())
292
+
293
+ # 2D slices (non-contiguous)
294
+ t_base = torch.arange(24, dtype=torch.float32, device=torch_device).reshape((4, 6))
295
+ t = t_base[1:3, 2:5]
296
+ a = wp.from_torch(t)
297
+ assert a.ptr == t.data_ptr()
298
+ assert not a.is_contiguous
299
+ assert a.shape == tuple(t.shape)
300
+ # copy contents to contiguous array
301
+ a_contiguous = wp.empty_like(a)
302
+ wp.launch(copy2d_float_kernel, dim=a.shape, inputs=[a_contiguous, a], device=device)
303
+ assert_np_equal(a_contiguous.numpy(), t.cpu().numpy())
304
+
305
+ # 3D slices (non-contiguous)
306
+ t_base = torch.arange(36, dtype=torch.float32, device=torch_device).reshape((4, 3, 3))
307
+ t = t_base[::2, 0:1, 1:2]
308
+ a = wp.from_torch(t)
309
+ assert a.ptr == t.data_ptr()
310
+ assert not a.is_contiguous
311
+ assert a.shape == tuple(t.shape)
312
+ # copy contents to contiguous array
313
+ a_contiguous = wp.empty_like(a)
314
+ wp.launch(copy3d_float_kernel, dim=a.shape, inputs=[a_contiguous, a], device=device)
315
+ assert_np_equal(a_contiguous.numpy(), t.cpu().numpy())
316
+
317
+ # 2D slices of vec3 (inner contiguous, outer non-contiguous)
318
+ t_base = torch.arange(150, dtype=torch.float32, device=torch_device).reshape((10, 5, 3))
319
+ t = t_base[1:7:2, 2:5]
320
+ a = wp.from_torch(t, dtype=wp.vec3)
321
+ assert a.ptr == t.data_ptr()
322
+ assert not a.is_contiguous
323
+ assert a.shape == tuple(t.shape[:-1])
324
+ # copy contents to contiguous array
325
+ a_contiguous = wp.empty_like(a)
326
+ wp.launch(copy2d_vec3_kernel, dim=a.shape, inputs=[a_contiguous, a], device=device)
327
+ assert_np_equal(a_contiguous.numpy(), t.cpu().numpy())
328
+
329
+ # 2D slices of mat22 (inner contiguous, outer non-contiguous)
330
+ t_base = torch.arange(200, dtype=torch.float32, device=torch_device).reshape((10, 5, 2, 2))
331
+ t = t_base[1:7:2, 2:5]
332
+ a = wp.from_torch(t, dtype=wp.mat22)
333
+ assert a.ptr == t.data_ptr()
334
+ assert not a.is_contiguous
335
+ assert a.shape == tuple(t.shape[:-2])
336
+ # copy contents to contiguous array
337
+ a_contiguous = wp.empty_like(a)
338
+ wp.launch(copy2d_mat22_kernel, dim=a.shape, inputs=[a_contiguous, a], device=device)
339
+ assert_np_equal(a_contiguous.numpy(), t.cpu().numpy())
340
+
341
+
342
+ def test_from_torch_zero_strides(test, device):
+     import torch
+
+     torch_device = wp.device_to_torch(device)
+
+     t_base = torch.arange(9, dtype=torch.float32, device=torch_device).reshape((3, 3))
+
+     # expand outermost dimension
+     t = t_base.unsqueeze(0).expand(3, -1, -1)
+     a = wp.from_torch(t)
+     assert a.ptr == t.data_ptr()
+     assert not a.is_contiguous
+     assert a.shape == tuple(t.shape)
+     a_contiguous = wp.empty_like(a)
+     wp.launch(copy3d_float_kernel, dim=a.shape, inputs=[a_contiguous, a], device=device)
+     assert_np_equal(a_contiguous.numpy(), t.cpu().numpy())
+
+     # expand middle dimension
+     t = t_base.unsqueeze(1).expand(-1, 3, -1)
+     a = wp.from_torch(t)
+     assert a.ptr == t.data_ptr()
+     assert not a.is_contiguous
+     assert a.shape == tuple(t.shape)
+     a_contiguous = wp.empty_like(a)
+     wp.launch(copy3d_float_kernel, dim=a.shape, inputs=[a_contiguous, a], device=device)
+     assert_np_equal(a_contiguous.numpy(), t.cpu().numpy())
+
+     # expand innermost dimension
+     t = t_base.unsqueeze(2).expand(-1, -1, 3)
+     a = wp.from_torch(t)
+     assert a.ptr == t.data_ptr()
+     assert not a.is_contiguous
+     assert a.shape == tuple(t.shape)
+     a_contiguous = wp.empty_like(a)
+     wp.launch(copy3d_float_kernel, dim=a.shape, inputs=[a_contiguous, a], device=device)
+     assert_np_equal(a_contiguous.numpy(), t.cpu().numpy())
+
+
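For reference, expand() produces broadcast views with zero strides rather than copies, which is why Warp reports them as non-contiguous while still aliasing the original storage. A small sketch of what the test above checks (the "cuda:0" device is assumed for illustration):

    import torch
    import warp as wp

    wp.init()

    t = torch.arange(3, dtype=torch.float32, device="cuda:0").expand(4, 3)
    print(t.stride())  # (0, 1): the outer dimension is broadcast, not stored

    a = wp.from_torch(t)
    print(a.ptr == t.data_ptr(), a.is_contiguous, a.shape)  # True False (4, 3)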
+ def test_torch_mgpu_from_torch(test, device):
+     import torch
+
+     n = 32
+
+     t0 = torch.arange(0, n, 1, dtype=torch.int32, device="cuda:0")
+     t1 = torch.arange(0, n * 2, 2, dtype=torch.int32, device="cuda:1")
+
+     a0 = wp.from_torch(t0, dtype=wp.int32)
+     a1 = wp.from_torch(t1, dtype=wp.int32)
+
+     assert a0.device == "cuda:0"
+     assert a1.device == "cuda:1"
+
+     expected0 = np.arange(0, n, 1)
+     expected1 = np.arange(0, n * 2, 2)
+
+     assert_np_equal(a0.numpy(), expected0)
+     assert_np_equal(a1.numpy(), expected1)
+
+
+ def test_torch_mgpu_to_torch(test, device):
+     n = 32
+
+     with wp.ScopedDevice("cuda:0"):
+         a0 = wp.empty(n, dtype=wp.int32)
+         wp.launch(arange, dim=a0.size, inputs=[0, 1, a0])
+
+     with wp.ScopedDevice("cuda:1"):
+         a1 = wp.empty(n, dtype=wp.int32)
+         wp.launch(arange, dim=a1.size, inputs=[0, 2, a1])
+
+     t0 = wp.to_torch(a0)
+     t1 = wp.to_torch(a1)
+
+     assert str(t0.device) == "cuda:0"
+     assert str(t1.device) == "cuda:1"
+
+     expected0 = np.arange(0, n, 1, dtype=np.int32)
+     expected1 = np.arange(0, n * 2, 2, dtype=np.int32)
+
+     assert_np_equal(t0.cpu().numpy(), expected0)
+     assert_np_equal(t1.cpu().numpy(), expected1)
+
+
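wp.to_torch() as used above returns a tensor that aliases the Warp array's memory rather than copying it, so writes on either side are visible to the other. A minimal single-GPU sketch of that round trip (the "cuda:0" device is assumed):

    import warp as wp

    wp.init()

    a = wp.zeros(8, dtype=wp.int32, device="cuda:0")
    t = wp.to_torch(a)  # shares memory with `a`, no copy
    t += 3
    print(a.numpy())  # [3 3 3 3 3 3 3 3]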
+ def test_torch_mgpu_interop(test, device):
+     import torch
+
+     n = 1024 * 1024
+
+     with torch.cuda.device(0):
+         t0 = torch.arange(n, dtype=torch.float32, device="cuda")
+         a0 = wp.from_torch(t0)
+         wp.launch(inc, dim=a0.size, inputs=[a0], stream=wp.stream_from_torch())
+
+     with torch.cuda.device(1):
+         t1 = torch.arange(n, dtype=torch.float32, device="cuda")
+         a1 = wp.from_torch(t1)
+         wp.launch(inc, dim=a1.size, inputs=[a1], stream=wp.stream_from_torch())
+
+     assert a0.device == "cuda:0"
+     assert a1.device == "cuda:1"
+
+     expected = np.arange(n, dtype=int) + 1
+
+     # ensure the torch tensors were modified by warp
+     assert_np_equal(t0.cpu().numpy(), expected)
+     assert_np_equal(t1.cpu().numpy(), expected)
+
+
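The interop test above calls wp.stream_from_torch() with no arguments to wrap the stream Torch is currently using, so Warp kernels are ordered after pending Torch work without extra synchronization. A single-device sketch of the same pattern (the inc kernel shown here is an assumed stand-in for the one defined earlier in the file):

    import torch
    import warp as wp

    wp.init()

    @wp.kernel
    def inc(a: wp.array(dtype=float)):
        i = wp.tid()
        a[i] = a[i] + 1.0

    t = torch.zeros(1024, dtype=torch.float32, device="cuda:0")
    a = wp.from_torch(t)

    # launch on the stream Torch is currently using
    wp.launch(inc, dim=a.size, inputs=[a], stream=wp.stream_from_torch())
    print(t.sum().item())  # 1024.0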
+ def test_torch_autograd(test, device):
+     """Test torch autograd with a custom Warp op"""
+
+     import torch
+
+     # custom autograd op
+     class TestFunc(torch.autograd.Function):
+         @staticmethod
+         def forward(ctx, x):
+             # allocate output array
+             y = torch.empty_like(x)
+
+             ctx.x = x
+             ctx.y = y
+
+             wp.launch(kernel=op_kernel, dim=len(x), inputs=[wp.from_torch(x)], outputs=[wp.from_torch(y)])
+
+             return y
+
+         @staticmethod
+         def backward(ctx, adj_y):
+             # adjoints should be allocated as zero initialized
+             adj_x = torch.zeros_like(ctx.x).contiguous()
+             adj_y = adj_y.contiguous()
+
+             wp_x = wp.from_torch(ctx.x, grad=adj_x)
+             wp_y = wp.from_torch(ctx.y, grad=adj_y)
+
+             wp.launch(
+                 kernel=op_kernel,
+                 dim=len(ctx.x),
+                 # fwd inputs
+                 inputs=[wp_x],
+                 outputs=[wp_y],
+                 # adj inputs (already stored in input/output arrays, passing null pointers)
+                 adj_inputs=[None],
+                 adj_outputs=[None],
+                 adjoint=True,
+             )
+
+             return adj_x
+
+     # run autograd on given device
+     with wp.ScopedDevice(device):
+         torch_device = wp.device_to_torch(device)
+
+         # input data
+         x = torch.ones(16, dtype=torch.float32, device=torch_device, requires_grad=True)
+
+         # execute op
+         y = TestFunc.apply(x)
+
+         # compute grads
+         l = y.sum()
+         l.backward()
+
+         passed = (x.grad == -2.0).all()
+         assert passed.item()
+
+
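op_kernel is defined earlier in the file and does not appear in this hunk; the asserted gradient of -2.0 only tells us that dy/dx = -2. A hypothetical kernel consistent with that assertion (not the actual definition) would be:

    import warp as wp

    @wp.kernel
    def op_kernel(x: wp.array(dtype=float), y: wp.array(dtype=float)):
        tid = wp.tid()
        # any op with dy/dx == -2.0 satisfies the test's gradient check
        y[tid] = 0.5 - x[tid] * 2.0

The backward() path above then re-launches the same kernel with adjoint=True, seeding the output gradient from adj_y via wp.from_torch(ctx.y, grad=adj_y) and accumulating the result into adj_x, which is returned to Torch.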
+ def test_torch_graph_torch_stream(test, device):
+     """Capture Torch graph on Torch stream"""
+
+     wp.load_module(device=device)
+
+     import torch
+
+     torch_device = wp.device_to_torch(device)
+
+     n = 1024 * 1024
+     t = torch.zeros(n, dtype=torch.float32, device=torch_device)
+     a = wp.from_torch(t)
+
+     g = torch.cuda.CUDAGraph()
+
+     # create a device-specific torch stream to use for capture
+     # (otherwise torch.cuda.graph reuses its capture stream, which can be problematic if it's from a different device)
+     torch_stream = torch.cuda.Stream(device=torch_device)
+
+     # make warp use the same stream
+     warp_stream = wp.stream_from_torch(torch_stream)
+
+     # capture graph
+     with wp.ScopedStream(warp_stream), torch.cuda.graph(g, stream=torch_stream):
+         wp.capture_begin(force_module_load=False, external=True)
+         try:
+             t += 1.0
+             wp.launch(inc, dim=n, inputs=[a])
+             t += 1.0
+             wp.launch(inc, dim=n, inputs=[a])
+         finally:
+             wp.capture_end()
+
+     # replay graph
+     num_iters = 10
+     for _i in range(num_iters):
+         g.replay()
+
+     passed = (t == num_iters * 4.0).all()
+     assert passed.item()
+
+
+ def test_torch_graph_warp_stream(test, device):
+     """Capture Torch graph on Warp stream"""
+
+     import torch
+
+     torch_device = wp.device_to_torch(device)
+
+     n = 1024 * 1024
+     t = torch.zeros(n, dtype=torch.float32, device=torch_device)
+     a = wp.from_torch(t)
+
+     g = torch.cuda.CUDAGraph()
+
+     # make torch use the warp stream from the given device
+     torch_stream = wp.stream_to_torch(device)
+
+     # capture graph
+     with wp.ScopedDevice(device), torch.cuda.graph(g, stream=torch_stream):
+         wp.capture_begin(force_module_load=False, external=True)
+         try:
+             t += 1.0
+             wp.launch(inc, dim=n, inputs=[a])
+             t += 1.0
+             wp.launch(inc, dim=n, inputs=[a])
+         finally:
+             wp.capture_end()
+
+     # replay graph
+     num_iters = 10
+     for _i in range(num_iters):
+         g.replay()
+
+     passed = (t == num_iters * 4.0).all()
+     assert passed.item()
+
+
+ def test_warp_graph_warp_stream(test, device):
+     """Capture Warp graph on Warp stream"""
+
+     import torch
+
+     torch_device = wp.device_to_torch(device)
+
+     n = 1024 * 1024
+     t = torch.zeros(n, dtype=torch.float32, device=torch_device)
+     a = wp.from_torch(t)
+
+     # make torch use the warp stream from the given device
+     torch_stream = wp.stream_to_torch(device)
+
+     # capture graph
+     with wp.ScopedDevice(device), torch.cuda.stream(torch_stream):
+         wp.capture_begin(force_module_load=False)
+         try:
+             t += 1.0
+             wp.launch(inc, dim=n, inputs=[a])
+             t += 1.0
+             wp.launch(inc, dim=n, inputs=[a])
+         finally:
+             g = wp.capture_end()
+
+     # replay graph
+     num_iters = 10
+     for _i in range(num_iters):
+         wp.capture_launch(g)
+
+     passed = (t == num_iters * 4.0).all()
+     assert passed.item()
+
+
+ def test_warp_graph_torch_stream(test, device):
+     """Capture Warp graph on Torch stream"""
+
+     wp.load_module(device=device)
+
+     import torch
+
+     torch_device = wp.device_to_torch(device)
+
+     n = 1024 * 1024
+     t = torch.zeros(n, dtype=torch.float32, device=torch_device)
+     a = wp.from_torch(t)
+
+     # create a device-specific torch stream to use for capture
+     # (the default torch stream is not suitable for graph capture)
+     torch_stream = torch.cuda.Stream(device=torch_device)
+
+     # make warp use the same stream
+     warp_stream = wp.stream_from_torch(torch_stream)
+
+     # capture graph
+     with wp.ScopedStream(warp_stream), torch.cuda.stream(torch_stream):
+         wp.capture_begin(force_module_load=False)
+         try:
+             t += 1.0
+             wp.launch(inc, dim=n, inputs=[a])
+             t += 1.0
+             wp.launch(inc, dim=n, inputs=[a])
+         finally:
+             g = wp.capture_end()
+
+     # replay graph
+     num_iters = 10
+     for _i in range(num_iters):
+         wp.capture_launch(g)
+
+     passed = (t == num_iters * 4.0).all()
+     assert passed.item()
+
+
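For comparison with the mixed Torch/Warp captures above, the same wp.capture_begin() / wp.capture_end() / wp.capture_launch() calls also work without any Torch involvement. A minimal Warp-only sketch, with the kernel and the "cuda:0" device assumed for illustration:

    import warp as wp

    wp.init()

    @wp.kernel
    def inc(a: wp.array(dtype=float)):
        i = wp.tid()
        a[i] = a[i] + 1.0

    with wp.ScopedDevice("cuda:0"):
        a = wp.zeros(1024, dtype=float)

        wp.capture_begin()  # by default Warp loads modules up front so no JIT happens during capture
        try:
            wp.launch(inc, dim=a.size, inputs=[a])
        finally:
            g = wp.capture_end()

        for _ in range(10):
            wp.capture_launch(g)

        print(a.numpy()[0])  # 10.0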
+ class TestTorch(unittest.TestCase):
+     pass
+
+
+ test_devices = get_test_devices()
+
+ try:
+     import torch
+
+     # check which Warp devices work with Torch
+     # CUDA devices may fail if Torch was not compiled with CUDA support
+     torch_compatible_devices = []
+     torch_compatible_cuda_devices = []
+
+     for d in test_devices:
+         try:
+             t = torch.arange(10, device=wp.device_to_torch(d))
+             t += 1
+             torch_compatible_devices.append(d)
+             if d.is_cuda:
+                 torch_compatible_cuda_devices.append(d)
+         except Exception as e:
+             print(f"Skipping Torch tests on device '{d}' due to exception: {e}")
+
+     add_function_test(TestTorch, "test_dtype_from_torch", test_dtype_from_torch, devices=None)
+     add_function_test(TestTorch, "test_dtype_to_torch", test_dtype_to_torch, devices=None)
+
+     if torch_compatible_devices:
+         add_function_test(TestTorch, "test_device_conversion", test_device_conversion, devices=torch_compatible_devices)
+         add_function_test(TestTorch, "test_from_torch", test_from_torch, devices=torch_compatible_devices)
+         add_function_test(TestTorch, "test_from_torch_slices", test_from_torch_slices, devices=torch_compatible_devices)
+         add_function_test(
+             TestTorch,
+             "test_from_torch_zero_strides",
+             test_from_torch_zero_strides,
+             devices=torch_compatible_devices,
+         )
+         add_function_test(TestTorch, "test_to_torch", test_to_torch, devices=torch_compatible_devices)
+         add_function_test(TestTorch, "test_torch_zerocopy", test_torch_zerocopy, devices=torch_compatible_devices)
+         add_function_test(TestTorch, "test_torch_autograd", test_torch_autograd, devices=torch_compatible_devices)
+
+     if torch_compatible_cuda_devices:
+         add_function_test(
+             TestTorch,
+             "test_torch_graph_torch_stream",
+             test_torch_graph_torch_stream,
+             devices=torch_compatible_cuda_devices,
+         )
+         add_function_test(
+             TestTorch,
+             "test_torch_graph_warp_stream",
+             test_torch_graph_warp_stream,
+             devices=torch_compatible_cuda_devices,
+         )
+         add_function_test(
+             TestTorch,
+             "test_warp_graph_warp_stream",
+             test_warp_graph_warp_stream,
+             devices=torch_compatible_cuda_devices,
+         )
+         add_function_test(
+             TestTorch,
+             "test_warp_graph_torch_stream",
+             test_warp_graph_torch_stream,
+             devices=torch_compatible_cuda_devices,
+         )
+
+     # multi-GPU tests
+     if len(torch_compatible_cuda_devices) > 1:
+         add_function_test(TestTorch, "test_torch_mgpu_from_torch", test_torch_mgpu_from_torch)
+         add_function_test(TestTorch, "test_torch_mgpu_to_torch", test_torch_mgpu_to_torch)
+         add_function_test(TestTorch, "test_torch_mgpu_interop", test_torch_mgpu_interop)
+
+ except Exception as e:
+     print(f"Skipping Torch tests due to exception: {e}")
+
+
+ if __name__ == "__main__":
+     wp.build.clear_kernel_cache()
+     unittest.main(verbosity=2)