warp-lang 1.0.2-py3-none-win_amd64.whl → 1.2.0-py3-none-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of warp-lang might be problematic.

Files changed (356)
  1. warp/__init__.py +108 -97
  2. warp/__init__.pyi +1 -1
  3. warp/bin/warp-clang.dll +0 -0
  4. warp/bin/warp.dll +0 -0
  5. warp/build.py +88 -113
  6. warp/build_dll.py +383 -375
  7. warp/builtins.py +3693 -3354
  8. warp/codegen.py +2925 -2792
  9. warp/config.py +40 -36
  10. warp/constants.py +49 -45
  11. warp/context.py +5409 -5102
  12. warp/dlpack.py +442 -442
  13. warp/examples/__init__.py +16 -16
  14. warp/examples/assets/bear.usd +0 -0
  15. warp/examples/assets/bunny.usd +0 -0
  16. warp/examples/assets/cartpole.urdf +110 -110
  17. warp/examples/assets/crazyflie.usd +0 -0
  18. warp/examples/assets/cube.usd +0 -0
  19. warp/examples/assets/nv_ant.xml +92 -92
  20. warp/examples/assets/nv_humanoid.xml +183 -183
  21. warp/examples/assets/quadruped.urdf +267 -267
  22. warp/examples/assets/rocks.nvdb +0 -0
  23. warp/examples/assets/rocks.usd +0 -0
  24. warp/examples/assets/sphere.usd +0 -0
  25. warp/examples/benchmarks/benchmark_api.py +381 -383
  26. warp/examples/benchmarks/benchmark_cloth.py +278 -277
  27. warp/examples/benchmarks/benchmark_cloth_cupy.py +88 -88
  28. warp/examples/benchmarks/benchmark_cloth_jax.py +97 -100
  29. warp/examples/benchmarks/benchmark_cloth_numba.py +146 -142
  30. warp/examples/benchmarks/benchmark_cloth_numpy.py +77 -77
  31. warp/examples/benchmarks/benchmark_cloth_pytorch.py +86 -86
  32. warp/examples/benchmarks/benchmark_cloth_taichi.py +112 -112
  33. warp/examples/benchmarks/benchmark_cloth_warp.py +145 -146
  34. warp/examples/benchmarks/benchmark_launches.py +293 -295
  35. warp/examples/browse.py +29 -29
  36. warp/examples/core/example_dem.py +232 -219
  37. warp/examples/core/example_fluid.py +291 -267
  38. warp/examples/core/example_graph_capture.py +142 -126
  39. warp/examples/core/example_marching_cubes.py +186 -174
  40. warp/examples/core/example_mesh.py +172 -155
  41. warp/examples/core/example_mesh_intersect.py +203 -193
  42. warp/examples/core/example_nvdb.py +174 -170
  43. warp/examples/core/example_raycast.py +103 -90
  44. warp/examples/core/example_raymarch.py +197 -178
  45. warp/examples/core/example_render_opengl.py +183 -141
  46. warp/examples/core/example_sph.py +403 -387
  47. warp/examples/core/example_torch.py +219 -181
  48. warp/examples/core/example_wave.py +261 -248
  49. warp/examples/fem/bsr_utils.py +378 -380
  50. warp/examples/fem/example_apic_fluid.py +432 -389
  51. warp/examples/fem/example_burgers.py +262 -0
  52. warp/examples/fem/example_convection_diffusion.py +180 -168
  53. warp/examples/fem/example_convection_diffusion_dg.py +217 -209
  54. warp/examples/fem/example_deformed_geometry.py +175 -159
  55. warp/examples/fem/example_diffusion.py +199 -173
  56. warp/examples/fem/example_diffusion_3d.py +178 -152
  57. warp/examples/fem/example_diffusion_mgpu.py +219 -214
  58. warp/examples/fem/example_mixed_elasticity.py +242 -222
  59. warp/examples/fem/example_navier_stokes.py +257 -243
  60. warp/examples/fem/example_stokes.py +218 -192
  61. warp/examples/fem/example_stokes_transfer.py +263 -249
  62. warp/examples/fem/mesh_utils.py +133 -109
  63. warp/examples/fem/plot_utils.py +292 -287
  64. warp/examples/optim/example_bounce.py +258 -246
  65. warp/examples/optim/example_cloth_throw.py +220 -209
  66. warp/examples/optim/example_diffray.py +564 -536
  67. warp/examples/optim/example_drone.py +862 -835
  68. warp/examples/optim/example_inverse_kinematics.py +174 -168
  69. warp/examples/optim/example_inverse_kinematics_torch.py +183 -169
  70. warp/examples/optim/example_spring_cage.py +237 -231
  71. warp/examples/optim/example_trajectory.py +221 -199
  72. warp/examples/optim/example_walker.py +304 -293
  73. warp/examples/sim/example_cartpole.py +137 -129
  74. warp/examples/sim/example_cloth.py +194 -186
  75. warp/examples/sim/example_granular.py +122 -111
  76. warp/examples/sim/example_granular_collision_sdf.py +195 -186
  77. warp/examples/sim/example_jacobian_ik.py +234 -214
  78. warp/examples/sim/example_particle_chain.py +116 -105
  79. warp/examples/sim/example_quadruped.py +191 -180
  80. warp/examples/sim/example_rigid_chain.py +195 -187
  81. warp/examples/sim/example_rigid_contact.py +187 -177
  82. warp/examples/sim/example_rigid_force.py +125 -125
  83. warp/examples/sim/example_rigid_gyroscopic.py +107 -95
  84. warp/examples/sim/example_rigid_soft_contact.py +132 -122
  85. warp/examples/sim/example_soft_body.py +188 -177
  86. warp/fabric.py +337 -335
  87. warp/fem/__init__.py +61 -27
  88. warp/fem/cache.py +403 -388
  89. warp/fem/dirichlet.py +178 -179
  90. warp/fem/domain.py +262 -263
  91. warp/fem/field/__init__.py +100 -101
  92. warp/fem/field/field.py +148 -149
  93. warp/fem/field/nodal_field.py +298 -299
  94. warp/fem/field/restriction.py +22 -21
  95. warp/fem/field/test.py +180 -181
  96. warp/fem/field/trial.py +183 -183
  97. warp/fem/geometry/__init__.py +16 -19
  98. warp/fem/geometry/closest_point.py +69 -70
  99. warp/fem/geometry/deformed_geometry.py +270 -271
  100. warp/fem/geometry/element.py +748 -744
  101. warp/fem/geometry/geometry.py +184 -186
  102. warp/fem/geometry/grid_2d.py +380 -373
  103. warp/fem/geometry/grid_3d.py +437 -435
  104. warp/fem/geometry/hexmesh.py +953 -953
  105. warp/fem/geometry/nanogrid.py +455 -0
  106. warp/fem/geometry/partition.py +374 -376
  107. warp/fem/geometry/quadmesh_2d.py +532 -532
  108. warp/fem/geometry/tetmesh.py +840 -840
  109. warp/fem/geometry/trimesh_2d.py +577 -577
  110. warp/fem/integrate.py +1684 -1615
  111. warp/fem/operator.py +190 -191
  112. warp/fem/polynomial.py +214 -213
  113. warp/fem/quadrature/__init__.py +2 -2
  114. warp/fem/quadrature/pic_quadrature.py +243 -245
  115. warp/fem/quadrature/quadrature.py +295 -294
  116. warp/fem/space/__init__.py +179 -292
  117. warp/fem/space/basis_space.py +522 -489
  118. warp/fem/space/collocated_function_space.py +100 -105
  119. warp/fem/space/dof_mapper.py +236 -236
  120. warp/fem/space/function_space.py +148 -145
  121. warp/fem/space/grid_2d_function_space.py +148 -267
  122. warp/fem/space/grid_3d_function_space.py +167 -306
  123. warp/fem/space/hexmesh_function_space.py +253 -352
  124. warp/fem/space/nanogrid_function_space.py +202 -0
  125. warp/fem/space/partition.py +350 -350
  126. warp/fem/space/quadmesh_2d_function_space.py +261 -369
  127. warp/fem/space/restriction.py +161 -160
  128. warp/fem/space/shape/__init__.py +90 -15
  129. warp/fem/space/shape/cube_shape_function.py +728 -738
  130. warp/fem/space/shape/shape_function.py +102 -103
  131. warp/fem/space/shape/square_shape_function.py +611 -611
  132. warp/fem/space/shape/tet_shape_function.py +565 -567
  133. warp/fem/space/shape/triangle_shape_function.py +429 -429
  134. warp/fem/space/tetmesh_function_space.py +224 -292
  135. warp/fem/space/topology.py +297 -295
  136. warp/fem/space/trimesh_2d_function_space.py +153 -221
  137. warp/fem/types.py +77 -77
  138. warp/fem/utils.py +495 -495
  139. warp/jax.py +166 -141
  140. warp/jax_experimental.py +341 -339
  141. warp/native/array.h +1081 -1025
  142. warp/native/builtin.h +1603 -1560
  143. warp/native/bvh.cpp +402 -398
  144. warp/native/bvh.cu +533 -525
  145. warp/native/bvh.h +430 -429
  146. warp/native/clang/clang.cpp +496 -464
  147. warp/native/crt.cpp +42 -32
  148. warp/native/crt.h +352 -335
  149. warp/native/cuda_crt.h +1049 -1049
  150. warp/native/cuda_util.cpp +549 -540
  151. warp/native/cuda_util.h +288 -203
  152. warp/native/cutlass_gemm.cpp +34 -34
  153. warp/native/cutlass_gemm.cu +372 -372
  154. warp/native/error.cpp +66 -66
  155. warp/native/error.h +27 -27
  156. warp/native/exports.h +187 -0
  157. warp/native/fabric.h +228 -228
  158. warp/native/hashgrid.cpp +301 -278
  159. warp/native/hashgrid.cu +78 -77
  160. warp/native/hashgrid.h +227 -227
  161. warp/native/initializer_array.h +32 -32
  162. warp/native/intersect.h +1204 -1204
  163. warp/native/intersect_adj.h +365 -365
  164. warp/native/intersect_tri.h +322 -322
  165. warp/native/marching.cpp +2 -2
  166. warp/native/marching.cu +497 -497
  167. warp/native/marching.h +2 -2
  168. warp/native/mat.h +1545 -1498
  169. warp/native/matnn.h +333 -333
  170. warp/native/mesh.cpp +203 -203
  171. warp/native/mesh.cu +292 -293
  172. warp/native/mesh.h +1887 -1887
  173. warp/native/nanovdb/GridHandle.h +366 -0
  174. warp/native/nanovdb/HostBuffer.h +590 -0
  175. warp/native/nanovdb/NanoVDB.h +6624 -4782
  176. warp/native/nanovdb/PNanoVDB.h +3390 -2553
  177. warp/native/noise.h +850 -850
  178. warp/native/quat.h +1112 -1085
  179. warp/native/rand.h +303 -299
  180. warp/native/range.h +108 -108
  181. warp/native/reduce.cpp +156 -156
  182. warp/native/reduce.cu +348 -348
  183. warp/native/runlength_encode.cpp +61 -61
  184. warp/native/runlength_encode.cu +46 -46
  185. warp/native/scan.cpp +30 -30
  186. warp/native/scan.cu +36 -36
  187. warp/native/scan.h +7 -7
  188. warp/native/solid_angle.h +442 -442
  189. warp/native/sort.cpp +94 -94
  190. warp/native/sort.cu +97 -97
  191. warp/native/sort.h +14 -14
  192. warp/native/sparse.cpp +337 -337
  193. warp/native/sparse.cu +544 -544
  194. warp/native/spatial.h +630 -630
  195. warp/native/svd.h +562 -562
  196. warp/native/temp_buffer.h +30 -30
  197. warp/native/vec.h +1177 -1133
  198. warp/native/volume.cpp +529 -297
  199. warp/native/volume.cu +58 -32
  200. warp/native/volume.h +960 -538
  201. warp/native/volume_builder.cu +446 -425
  202. warp/native/volume_builder.h +34 -19
  203. warp/native/volume_impl.h +61 -0
  204. warp/native/warp.cpp +1057 -1052
  205. warp/native/warp.cu +2949 -2828
  206. warp/native/warp.h +321 -305
  207. warp/optim/__init__.py +9 -9
  208. warp/optim/adam.py +120 -120
  209. warp/optim/linear.py +1104 -939
  210. warp/optim/sgd.py +104 -92
  211. warp/render/__init__.py +10 -10
  212. warp/render/render_opengl.py +3356 -3204
  213. warp/render/render_usd.py +768 -749
  214. warp/render/utils.py +152 -150
  215. warp/sim/__init__.py +52 -59
  216. warp/sim/articulation.py +685 -685
  217. warp/sim/collide.py +1594 -1590
  218. warp/sim/import_mjcf.py +489 -481
  219. warp/sim/import_snu.py +220 -221
  220. warp/sim/import_urdf.py +536 -516
  221. warp/sim/import_usd.py +887 -881
  222. warp/sim/inertia.py +316 -317
  223. warp/sim/integrator.py +234 -233
  224. warp/sim/integrator_euler.py +1956 -1956
  225. warp/sim/integrator_featherstone.py +1917 -1991
  226. warp/sim/integrator_xpbd.py +3288 -3312
  227. warp/sim/model.py +4473 -4314
  228. warp/sim/particles.py +113 -112
  229. warp/sim/render.py +417 -403
  230. warp/sim/utils.py +413 -410
  231. warp/sparse.py +1289 -1227
  232. warp/stubs.py +2192 -2469
  233. warp/tape.py +1162 -225
  234. warp/tests/__init__.py +1 -1
  235. warp/tests/__main__.py +4 -4
  236. warp/tests/assets/test_index_grid.nvdb +0 -0
  237. warp/tests/assets/torus.usda +105 -105
  238. warp/tests/aux_test_class_kernel.py +26 -26
  239. warp/tests/aux_test_compile_consts_dummy.py +10 -10
  240. warp/tests/aux_test_conditional_unequal_types_kernels.py +21 -21
  241. warp/tests/aux_test_dependent.py +20 -22
  242. warp/tests/aux_test_grad_customs.py +21 -23
  243. warp/tests/aux_test_reference.py +9 -11
  244. warp/tests/aux_test_reference_reference.py +8 -10
  245. warp/tests/aux_test_square.py +15 -17
  246. warp/tests/aux_test_unresolved_func.py +14 -14
  247. warp/tests/aux_test_unresolved_symbol.py +14 -14
  248. warp/tests/disabled_kinematics.py +237 -239
  249. warp/tests/run_coverage_serial.py +31 -31
  250. warp/tests/test_adam.py +155 -157
  251. warp/tests/test_arithmetic.py +1088 -1124
  252. warp/tests/test_array.py +2415 -2326
  253. warp/tests/test_array_reduce.py +148 -150
  254. warp/tests/test_async.py +666 -656
  255. warp/tests/test_atomic.py +139 -141
  256. warp/tests/test_bool.py +212 -149
  257. warp/tests/test_builtins_resolution.py +1290 -1292
  258. warp/tests/test_bvh.py +162 -171
  259. warp/tests/test_closest_point_edge_edge.py +227 -228
  260. warp/tests/test_codegen.py +562 -553
  261. warp/tests/test_compile_consts.py +217 -101
  262. warp/tests/test_conditional.py +244 -246
  263. warp/tests/test_copy.py +230 -215
  264. warp/tests/test_ctypes.py +630 -632
  265. warp/tests/test_dense.py +65 -67
  266. warp/tests/test_devices.py +89 -98
  267. warp/tests/test_dlpack.py +528 -529
  268. warp/tests/test_examples.py +403 -378
  269. warp/tests/test_fabricarray.py +952 -955
  270. warp/tests/test_fast_math.py +60 -54
  271. warp/tests/test_fem.py +1298 -1278
  272. warp/tests/test_fp16.py +128 -130
  273. warp/tests/test_func.py +336 -337
  274. warp/tests/test_generics.py +596 -571
  275. warp/tests/test_grad.py +885 -640
  276. warp/tests/test_grad_customs.py +331 -336
  277. warp/tests/test_hash_grid.py +208 -164
  278. warp/tests/test_import.py +37 -39
  279. warp/tests/test_indexedarray.py +1132 -1134
  280. warp/tests/test_intersect.py +65 -67
  281. warp/tests/test_jax.py +305 -307
  282. warp/tests/test_large.py +169 -164
  283. warp/tests/test_launch.py +352 -354
  284. warp/tests/test_lerp.py +217 -261
  285. warp/tests/test_linear_solvers.py +189 -171
  286. warp/tests/test_lvalue.py +419 -493
  287. warp/tests/test_marching_cubes.py +63 -65
  288. warp/tests/test_mat.py +1799 -1827
  289. warp/tests/test_mat_lite.py +113 -115
  290. warp/tests/test_mat_scalar_ops.py +2905 -2889
  291. warp/tests/test_math.py +124 -193
  292. warp/tests/test_matmul.py +498 -499
  293. warp/tests/test_matmul_lite.py +408 -410
  294. warp/tests/test_mempool.py +186 -190
  295. warp/tests/test_mesh.py +281 -324
  296. warp/tests/test_mesh_query_aabb.py +226 -241
  297. warp/tests/test_mesh_query_point.py +690 -702
  298. warp/tests/test_mesh_query_ray.py +290 -303
  299. warp/tests/test_mlp.py +274 -276
  300. warp/tests/test_model.py +108 -110
  301. warp/tests/test_module_hashing.py +111 -0
  302. warp/tests/test_modules_lite.py +36 -39
  303. warp/tests/test_multigpu.py +161 -163
  304. warp/tests/test_noise.py +244 -248
  305. warp/tests/test_operators.py +248 -250
  306. warp/tests/test_options.py +121 -125
  307. warp/tests/test_peer.py +131 -137
  308. warp/tests/test_pinned.py +76 -78
  309. warp/tests/test_print.py +52 -54
  310. warp/tests/test_quat.py +2084 -2086
  311. warp/tests/test_rand.py +324 -288
  312. warp/tests/test_reload.py +207 -217
  313. warp/tests/test_rounding.py +177 -179
  314. warp/tests/test_runlength_encode.py +188 -190
  315. warp/tests/test_sim_grad.py +241 -0
  316. warp/tests/test_sim_kinematics.py +89 -97
  317. warp/tests/test_smoothstep.py +166 -168
  318. warp/tests/test_snippet.py +303 -266
  319. warp/tests/test_sparse.py +466 -460
  320. warp/tests/test_spatial.py +2146 -2148
  321. warp/tests/test_special_values.py +362 -0
  322. warp/tests/test_streams.py +484 -473
  323. warp/tests/test_struct.py +708 -675
  324. warp/tests/test_tape.py +171 -148
  325. warp/tests/test_torch.py +741 -743
  326. warp/tests/test_transient_module.py +85 -87
  327. warp/tests/test_types.py +554 -659
  328. warp/tests/test_utils.py +488 -499
  329. warp/tests/test_vec.py +1262 -1268
  330. warp/tests/test_vec_lite.py +71 -73
  331. warp/tests/test_vec_scalar_ops.py +2097 -2099
  332. warp/tests/test_verify_fp.py +92 -94
  333. warp/tests/test_volume.py +961 -736
  334. warp/tests/test_volume_write.py +338 -265
  335. warp/tests/unittest_serial.py +38 -37
  336. warp/tests/unittest_suites.py +367 -359
  337. warp/tests/unittest_utils.py +434 -578
  338. warp/tests/unused_test_misc.py +69 -71
  339. warp/tests/walkthrough_debug.py +85 -85
  340. warp/thirdparty/appdirs.py +598 -598
  341. warp/thirdparty/dlpack.py +143 -143
  342. warp/thirdparty/unittest_parallel.py +563 -561
  343. warp/torch.py +321 -295
  344. warp/types.py +4941 -4450
  345. warp/utils.py +1008 -821
  346. {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/LICENSE.md +126 -126
  347. {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/METADATA +365 -400
  348. warp_lang-1.2.0.dist-info/RECORD +359 -0
  349. warp/examples/assets/cube.usda +0 -42
  350. warp/examples/assets/sphere.usda +0 -56
  351. warp/examples/assets/torus.usda +0 -105
  352. warp/examples/fem/example_convection_diffusion_dg0.py +0 -194
  353. warp/native/nanovdb/PNanoVDBWrite.h +0 -295
  354. warp_lang-1.0.2.dist-info/RECORD +0 -352
  355. {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/WHEEL +0 -0
  356. {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/top_level.txt +0 -0
warp/torch.py CHANGED
@@ -1,295 +1,321 @@
-# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-import ctypes
-import numpy
-import warp
-
-
-# return the warp device corresponding to a torch device
-def device_from_torch(torch_device):
-    """Return the Warp device corresponding to a Torch device."""
-    return warp.get_device(str(torch_device))
-
-
-def device_to_torch(warp_device):
-    """Return the Torch device corresponding to a Warp device."""
-    device = warp.get_device(warp_device)
-    if device.is_cpu or device.is_primary:
-        return str(device)
-    elif device.is_cuda and device.is_uva:
-        # it's not a primary context, but torch can access the data ptr directly thanks to UVA
-        return f"cuda:{device.ordinal}"
-    raise RuntimeError(f"Warp device {device} is not compatible with torch")
-
-
-def dtype_to_torch(warp_dtype):
-    """Return the Torch dtype corresponding to a Warp dtype."""
-    # initialize lookup table on first call to defer torch import
-    if dtype_to_torch.type_map is None:
-        import torch
-
-        dtype_to_torch.type_map = {
-            warp.float16: torch.float16,
-            warp.float32: torch.float32,
-            warp.float64: torch.float64,
-            warp.int8: torch.int8,
-            warp.int16: torch.int16,
-            warp.int32: torch.int32,
-            warp.int64: torch.int64,
-            warp.uint8: torch.uint8,
-            # torch doesn't support unsigned ints bigger than 8 bits
-            warp.uint16: torch.int16,
-            warp.uint32: torch.int32,
-            warp.uint64: torch.int64,
-            warp.bool: torch.bool,
-        }
-
-    torch_dtype = dtype_to_torch.type_map.get(warp_dtype)
-    if torch_dtype is not None:
-        return torch_dtype
-    else:
-        raise TypeError(f"Cannot convert {warp_dtype} to a Torch type")
-
-
-def dtype_from_torch(torch_dtype):
-    """Return the Warp dtype corresponding to a Torch dtype."""
-    # initialize lookup table on first call to defer torch import
-    if dtype_from_torch.type_map is None:
-        import torch
-
-        dtype_from_torch.type_map = {
-            torch.float16: warp.float16,
-            torch.float32: warp.float32,
-            torch.float64: warp.float64,
-            torch.int8: warp.int8,
-            torch.int16: warp.int16,
-            torch.int32: warp.int32,
-            torch.int64: warp.int64,
-            torch.uint8: warp.uint8,
-            torch.bool: warp.bool,
-            # currently unsupported by Warp
-            # torch.bfloat16:
-            # torch.complex64:
-            # torch.complex128:
-        }
-
-    warp_dtype = dtype_from_torch.type_map.get(torch_dtype)
-
-    if warp_dtype is not None:
-        return warp_dtype
-    else:
-        raise TypeError(f"Cannot convert {torch_dtype} to a Warp type")
-
-
-def dtype_is_compatible(torch_dtype, warp_dtype):
-    """Evaluates whether the given torch dtype is compatible with the given warp dtype."""
-    # initialize lookup table on first call to defer torch import
-    if dtype_is_compatible.compatible_sets is None:
-        import torch
-
-        dtype_is_compatible.compatible_sets = {
-            torch.float64: {warp.float64},
-            torch.float32: {warp.float32},
-            torch.float16: {warp.float16},
-            # allow aliasing integer tensors as signed or unsigned integer arrays
-            torch.int64: {warp.int64, warp.uint64},
-            torch.int32: {warp.int32, warp.uint32},
-            torch.int16: {warp.int16, warp.uint16},
-            torch.int8: {warp.int8, warp.uint8},
-            torch.uint8: {warp.uint8, warp.int8},
-            torch.bool: {warp.bool, warp.uint8, warp.int8},
-            # currently unsupported by Warp
-            # torch.bfloat16:
-            # torch.complex64:
-            # torch.complex128:
-        }
-
-    compatible_set = dtype_is_compatible.compatible_sets.get(torch_dtype)
-
-    if compatible_set is not None:
-        if warp_dtype in compatible_set:
-            return True
-        # check if it's a vector or matrix type
-        if hasattr(warp_dtype, "_wp_scalar_type_"):
-            return warp_dtype._wp_scalar_type_ in compatible_set
-
-    return False
-
-
-# lookup tables initialized when needed
-dtype_from_torch.type_map = None
-dtype_to_torch.type_map = None
-dtype_is_compatible.compatible_sets = None
-
-
-# wrap a torch tensor to a wp array, data is not copied
-def from_torch(t, dtype=None, requires_grad=None, grad=None):
-    """Convert a Torch tensor to a Warp array without copying the data.
-
-    Args:
-        t (torch.Tensor): The torch tensor to wrap.
-        dtype (warp.dtype, optional): The target data type of the resulting Warp array. Defaults to the tensor value type mapped to a Warp array value type.
-        requires_grad (bool, optional): Whether the resulting array should wrap the tensor's gradient, if it exists (the grad tensor will be allocated otherwise). Defaults to the tensor's `requires_grad` value.
-
-    Returns:
-        warp.array: The wrapped array.
-    """
-    if dtype is None:
-        dtype = dtype_from_torch(t.dtype)
-    elif not dtype_is_compatible(t.dtype, dtype):
-        raise RuntimeError(f"Cannot convert Torch type {t.dtype} to Warp type {dtype}")
-
-    # get size of underlying data type to compute strides
-    ctype_size = ctypes.sizeof(dtype._type_)
-
-    shape = tuple(t.shape)
-    strides = tuple(s * ctype_size for s in t.stride())
-    device = device_from_torch(t.device)
-
-    # if target is a vector or matrix type
-    # then check if trailing dimensions match
-    # the target type and update the shape
-    if hasattr(dtype, "_shape_"):
-        dtype_shape = dtype._shape_
-        dtype_dims = len(dtype._shape_)
-        if dtype_dims > len(shape) or dtype_shape != shape[-dtype_dims:]:
-            raise RuntimeError(
-                f"Could not convert Torch tensor with shape {shape} to Warp array with dtype={dtype}, ensure that source inner shape is {dtype_shape}"
-            )
-
-        # ensure the inner strides are contiguous
-        stride = ctype_size
-        for i in range(dtype_dims):
-            if strides[-i - 1] != stride:
-                raise RuntimeError(
-                    f"Could not convert Torch tensor with shape {shape} to Warp array with dtype={dtype}, because the source inner strides are not contiguous"
-                )
-            stride *= dtype_shape[-i - 1]
-
-        shape = tuple(shape[:-dtype_dims]) or (1,)
-        strides = tuple(strides[:-dtype_dims]) or (ctype_size,)
-
-    requires_grad = t.requires_grad if requires_grad is None else requires_grad
-    if grad is not None:
-        if not isinstance(grad, warp.array):
-            import torch
-
-            if isinstance(grad, torch.Tensor):
-                grad = from_torch(grad, dtype=dtype)
-            else:
-                raise ValueError(f"Invalid gradient type: {type(grad)}")
-    elif requires_grad:
-        # wrap the tensor gradient, allocate if necessary
-        if t.grad is None:
-            # allocate a zero-filled gradient if it doesn't exist
-            # Note: we use Warp to allocate the shared gradient with compatible strides
-            grad = warp.zeros(dtype=dtype, shape=shape, strides=strides, device=device)
-            t.grad = to_torch(grad, requires_grad=False)
-        else:
-            # TODO: this will fail if the strides are incompatible
-            grad = from_torch(t.grad, dtype=dtype)
-
-    a = warp.array(
-        ptr=t.data_ptr(),
-        dtype=dtype,
-        shape=shape,
-        strides=strides,
-        device=device,
-        copy=False,
-        grad=grad,
-        requires_grad=requires_grad,
-    )
-
-    # save a reference to the source tensor, otherwise it will be deallocated
-    a._tensor = t
-    return a
-
-
-def to_torch(a, requires_grad=None):
-    """
-    Convert a Warp array to a Torch tensor without copying the data.
-
-    Args:
-        a (warp.array): The Warp array to convert.
-        requires_grad (bool, optional): Whether the resulting tensor should convert the array's gradient, if it exists, to a grad tensor. Defaults to the array's `requires_grad` value.
-
-    Returns:
-        torch.Tensor: The converted tensor.
-    """
-    import torch
-
-    if requires_grad is None:
-        requires_grad = a.requires_grad
-
-    # Torch does not support structured arrays
-    if isinstance(a.dtype, warp.codegen.Struct):
-        raise RuntimeError("Cannot convert structured Warp arrays to Torch.")
-
-    if a.device.is_cpu:
-        # Torch has an issue wrapping CPU objects
-        # that support the __array_interface__ protocol
-        # in this case we need to workaround by going
-        # to an ndarray first, see https://pearu.github.io/array_interface_pytorch.html
-        t = torch.as_tensor(numpy.asarray(a))
-        t.requires_grad = requires_grad
-        if requires_grad and a.requires_grad:
-            t.grad = torch.as_tensor(numpy.asarray(a.grad))
-        return t
-
-    elif a.device.is_cuda:
-        # Torch does support the __cuda_array_interface__
-        # correctly, but we must be sure to maintain a reference
-        # to the owning object to prevent memory allocs going out of scope
-        t = torch.as_tensor(a, device=device_to_torch(a.device))
-        t.requires_grad = requires_grad
-        if requires_grad and a.requires_grad:
-            t.grad = torch.as_tensor(a.grad, device=device_to_torch(a.device))
-        return t
-
-    else:
-        raise RuntimeError("Unsupported device")
-
-
-def stream_from_torch(stream_or_device=None):
-    """Convert from a Torch CUDA stream to a Warp CUDA stream."""
-    import torch
-
-    if isinstance(stream_or_device, torch.cuda.Stream):
-        stream = stream_or_device
-    else:
-        # assume arg is a torch device
-        stream = torch.cuda.current_stream(stream_or_device)
-
-    device = device_from_torch(stream.device)
-
-    warp_stream = warp.Stream(device, cuda_stream=stream.cuda_stream)
-
-    # save a reference to the source stream, otherwise it may be destroyed
-    warp_stream._torch_stream = stream
-
-    return warp_stream
-
-
-def stream_to_torch(stream_or_device=None):
-    """Convert from a Warp CUDA stream to a Torch CUDA stream."""
-    import torch
-
-    if isinstance(stream_or_device, warp.Stream):
-        stream = stream_or_device
-    else:
-        # assume arg is a warp device
-        stream = warp.get_device(stream_or_device).stream
-
-    device = device_to_torch(stream.device)
-
-    torch_stream = torch.cuda.ExternalStream(stream.cuda_stream, device=device)
-
-    # save a reference to the source stream, otherwise it may be destroyed
-    torch_stream._warp_stream = stream
-
-    return torch_stream
+# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
+# NVIDIA CORPORATION and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION is strictly prohibited.
+
+import ctypes
+
+import numpy
+
+import warp
+
+
+# return the warp device corresponding to a torch device
+def device_from_torch(torch_device) -> warp.context.Device:
+    """Return the Warp device corresponding to a Torch device."""
+    return warp.get_device(str(torch_device))
+
+
+def device_to_torch(warp_device: warp.context.Devicelike) -> str:
+    """Return the Torch device string corresponding to a Warp device.
+
+    Args:
+        warp_device: An identifier that can be resolved to a :class:`warp.context.Device`.
+
+    Raises:
+        RuntimeError: The Warp device is not compatible with PyTorch.
+    """
+    device = warp.get_device(warp_device)
+    if device.is_cpu or device.is_primary:
+        return str(device)
+    elif device.is_cuda and device.is_uva:
+        # it's not a primary context, but torch can access the data ptr directly thanks to UVA
+        return f"cuda:{device.ordinal}"
+    raise RuntimeError(f"Warp device {device} is not compatible with torch")
+
+
+def dtype_to_torch(warp_dtype):
+    """Return the Torch dtype corresponding to a Warp dtype.
+
+    Args:
+        warp_dtype: A Warp data type that has a corresponding ``torch.dtype``.
+            ``warp.uint16``, ``warp.uint32``, and ``warp.uint64`` are mapped
+            to the signed integer ``torch.dtype`` of the same width.
+    Raises:
+        TypeError: Unable to find a corresponding PyTorch data type.
+    """
+    # initialize lookup table on first call to defer torch import
+    if dtype_to_torch.type_map is None:
+        import torch
+
+        dtype_to_torch.type_map = {
+            warp.float16: torch.float16,
+            warp.float32: torch.float32,
+            warp.float64: torch.float64,
+            warp.int8: torch.int8,
+            warp.int16: torch.int16,
+            warp.int32: torch.int32,
+            warp.int64: torch.int64,
+            warp.uint8: torch.uint8,
+            # torch doesn't support unsigned ints bigger than 8 bits
+            warp.uint16: torch.int16,
+            warp.uint32: torch.int32,
+            warp.uint64: torch.int64,
+            warp.bool: torch.bool,
+        }
+
+    torch_dtype = dtype_to_torch.type_map.get(warp_dtype)
+    if torch_dtype is not None:
+        return torch_dtype
+    else:
+        raise TypeError(f"Cannot convert {warp_dtype} to a Torch type")
+
+
+def dtype_from_torch(torch_dtype):
+    """Return the Warp dtype corresponding to a Torch dtype.
+
+    Args:
+        torch_dtype: A ``torch.dtype`` that has a corresponding Warp data type.
+            Currently ``torch.bfloat16``, ``torch.complex64``, and
+            ``torch.complex128`` are not supported.
+
+    Raises:
+        TypeError: Unable to find a corresponding Warp data type.
+    """
+    # initialize lookup table on first call to defer torch import
+    if dtype_from_torch.type_map is None:
+        import torch
+
+        dtype_from_torch.type_map = {
+            torch.float16: warp.float16,
+            torch.float32: warp.float32,
+            torch.float64: warp.float64,
+            torch.int8: warp.int8,
+            torch.int16: warp.int16,
+            torch.int32: warp.int32,
+            torch.int64: warp.int64,
+            torch.uint8: warp.uint8,
+            torch.bool: warp.bool,
+            # currently unsupported by Warp
+            # torch.bfloat16:
+            # torch.complex64:
+            # torch.complex128:
+        }
+
+    warp_dtype = dtype_from_torch.type_map.get(torch_dtype)
+
+    if warp_dtype is not None:
+        return warp_dtype
+    else:
+        raise TypeError(f"Cannot convert {torch_dtype} to a Warp type")
+
+
+def dtype_is_compatible(torch_dtype, warp_dtype) -> bool:
+    """Evaluates whether the given torch dtype is compatible with the given Warp dtype."""
+    # initialize lookup table on first call to defer torch import
+    if dtype_is_compatible.compatible_sets is None:
+        import torch
+
+        dtype_is_compatible.compatible_sets = {
+            torch.float64: {warp.float64},
+            torch.float32: {warp.float32},
+            torch.float16: {warp.float16},
+            # allow aliasing integer tensors as signed or unsigned integer arrays
+            torch.int64: {warp.int64, warp.uint64},
+            torch.int32: {warp.int32, warp.uint32},
+            torch.int16: {warp.int16, warp.uint16},
+            torch.int8: {warp.int8, warp.uint8},
+            torch.uint8: {warp.uint8, warp.int8},
+            torch.bool: {warp.bool, warp.uint8, warp.int8},
+            # currently unsupported by Warp
+            # torch.bfloat16:
+            # torch.complex64:
+            # torch.complex128:
+        }
+
+    compatible_set = dtype_is_compatible.compatible_sets.get(torch_dtype)
+
+    if compatible_set is not None:
+        if warp_dtype in compatible_set:
+            return True
+        # check if it's a vector or matrix type
+        if hasattr(warp_dtype, "_wp_scalar_type_"):
+            return warp_dtype._wp_scalar_type_ in compatible_set
+
+    return False
+
+
+# lookup tables initialized when needed
+dtype_from_torch.type_map = None
+dtype_to_torch.type_map = None
+dtype_is_compatible.compatible_sets = None
+
+
+# wrap a torch tensor to a wp array, data is not copied
+def from_torch(t, dtype=None, requires_grad=None, grad=None):
+    """Convert a Torch tensor to a Warp array without copying the data.
+
+    Args:
+        t (torch.Tensor): The torch tensor to wrap.
+        dtype (warp.dtype, optional): The target data type of the resulting Warp array. Defaults to the tensor value type mapped to a Warp array value type.
+        requires_grad (bool, optional): Whether the resulting array should wrap the tensor's gradient, if it exists (the grad tensor will be allocated otherwise). Defaults to the tensor's `requires_grad` value.
+
+    Returns:
+        warp.array: The wrapped array.
+    """
+    if dtype is None:
+        dtype = dtype_from_torch(t.dtype)
+    elif not dtype_is_compatible(t.dtype, dtype):
+        raise RuntimeError(f"Cannot convert Torch type {t.dtype} to Warp type {dtype}")
+
+    # get size of underlying data type to compute strides
+    ctype_size = ctypes.sizeof(dtype._type_)
+
+    shape = tuple(t.shape)
+    strides = tuple(s * ctype_size for s in t.stride())
+    device = device_from_torch(t.device)
+
+    # if target is a vector or matrix type
+    # then check if trailing dimensions match
+    # the target type and update the shape
+    if hasattr(dtype, "_shape_"):
+        dtype_shape = dtype._shape_
+        dtype_dims = len(dtype._shape_)
+        if dtype_dims > len(shape) or dtype_shape != shape[-dtype_dims:]:
+            raise RuntimeError(
+                f"Could not convert Torch tensor with shape {shape} to Warp array with dtype={dtype}, ensure that source inner shape is {dtype_shape}"
+            )
+
+        # ensure the inner strides are contiguous
+        stride = ctype_size
+        for i in range(dtype_dims):
+            if strides[-i - 1] != stride:
+                raise RuntimeError(
+                    f"Could not convert Torch tensor with shape {shape} to Warp array with dtype={dtype}, because the source inner strides are not contiguous"
+                )
+            stride *= dtype_shape[-i - 1]
+
+        shape = tuple(shape[:-dtype_dims]) or (1,)
+        strides = tuple(strides[:-dtype_dims]) or (ctype_size,)
+
+    requires_grad = t.requires_grad if requires_grad is None else requires_grad
+    if grad is not None:
+        if not isinstance(grad, warp.array):
+            import torch
+
+            if isinstance(grad, torch.Tensor):
+                grad = from_torch(grad, dtype=dtype)
+            else:
+                raise ValueError(f"Invalid gradient type: {type(grad)}")
+    elif requires_grad:
+        # wrap the tensor gradient, allocate if necessary
+        if t.grad is None:
+            # allocate a zero-filled gradient if it doesn't exist
+            # Note: we use Warp to allocate the shared gradient with compatible strides
+            grad = warp.zeros(dtype=dtype, shape=shape, strides=strides, device=device)
+            t.grad = to_torch(grad, requires_grad=False)
+        else:
+            # TODO: this will fail if the strides are incompatible
+            grad = from_torch(t.grad, dtype=dtype)
+
+    a = warp.array(
+        ptr=t.data_ptr(),
+        dtype=dtype,
+        shape=shape,
+        strides=strides,
+        device=device,
+        copy=False,
+        grad=grad,
+        requires_grad=requires_grad,
+    )
+
+    # save a reference to the source tensor, otherwise it will be deallocated
+    a._tensor = t
+    return a
+
+
+def to_torch(a, requires_grad=None):
+    """
+    Convert a Warp array to a Torch tensor without copying the data.
+
+    Args:
+        a (warp.array): The Warp array to convert.
+        requires_grad (bool, optional): Whether the resulting tensor should convert the array's gradient, if it exists, to a grad tensor. Defaults to the array's `requires_grad` value.
+
+    Returns:
+        torch.Tensor: The converted tensor.
+    """
+    import torch
+
+    if requires_grad is None:
+        requires_grad = a.requires_grad
+
+    # Torch does not support structured arrays
+    if isinstance(a.dtype, warp.codegen.Struct):
+        raise RuntimeError("Cannot convert structured Warp arrays to Torch.")
+
+    if a.device.is_cpu:
+        # Torch has an issue wrapping CPU objects
+        # that support the __array_interface__ protocol
+        # in this case we need to workaround by going
+        # to an ndarray first, see https://pearu.github.io/array_interface_pytorch.html
+        t = torch.as_tensor(numpy.asarray(a))
+        t.requires_grad = requires_grad
+        if requires_grad and a.requires_grad:
+            t.grad = torch.as_tensor(numpy.asarray(a.grad))
+        return t
+
+    elif a.device.is_cuda:
+        # Torch does support the __cuda_array_interface__
+        # correctly, but we must be sure to maintain a reference
+        # to the owning object to prevent memory allocs going out of scope
+        t = torch.as_tensor(a, device=device_to_torch(a.device))
+        t.requires_grad = requires_grad
+        if requires_grad and a.requires_grad:
+            t.grad = torch.as_tensor(a.grad, device=device_to_torch(a.device))
+        return t
+
+    else:
+        raise RuntimeError("Unsupported device")
+
+
+def stream_from_torch(stream_or_device=None):
+    """Convert from a Torch CUDA stream to a Warp CUDA stream."""
+    import torch
+
+    if isinstance(stream_or_device, torch.cuda.Stream):
+        stream = stream_or_device
+    else:
+        # assume arg is a torch device
+        stream = torch.cuda.current_stream(stream_or_device)
+
+    device = device_from_torch(stream.device)
+
+    warp_stream = warp.Stream(device, cuda_stream=stream.cuda_stream)
+
+    # save a reference to the source stream, otherwise it may be destroyed
+    warp_stream._torch_stream = stream
+
+    return warp_stream
+
+
+def stream_to_torch(stream_or_device=None):
+    """Convert from a Warp CUDA stream to a Torch CUDA stream."""
+    import torch
+
+    if isinstance(stream_or_device, warp.Stream):
+        stream = stream_or_device
+    else:
+        # assume arg is a warp device
+        stream = warp.get_device(stream_or_device).stream
+
+    device = device_to_torch(stream.device)
+
+    torch_stream = torch.cuda.ExternalStream(stream.cuda_stream, device=device)
+
+    # save a reference to the source stream, otherwise it may be destroyed
+    torch_stream._warp_stream = stream
+
+    return torch_stream
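
For context on the warp/torch.py changes above: the from_torch and to_torch helpers shown in this diff wrap memory in place rather than copying it. The following is a minimal usage sketch, not part of the package diff; the kernel name, tensor size, and the "cuda:0" device are illustrative and assume a CUDA-capable machine.

import torch
import warp as wp

wp.init()

@wp.kernel
def scale(x: wp.array(dtype=wp.float32), s: float):
    # hypothetical kernel, used only to show the round trip
    i = wp.tid()
    x[i] = x[i] * s

t = torch.arange(8, dtype=torch.float32, device="cuda:0")
a = wp.from_torch(t)  # zero-copy: the Warp array aliases the tensor's memory
wp.launch(scale, dim=a.shape[0], inputs=[a, 2.0], device=a.device)
wp.synchronize()

print(t)               # the scaled values are visible through the original tensor
back = wp.to_torch(a)  # zero-copy view of the Warp array as a torch.Tensor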