warp-lang 1.0.2-py3-none-manylinux2014_x86_64.whl → 1.2.0-py3-none-manylinux2014_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of warp-lang might be problematic.

Files changed (356)
  1. warp/__init__.py +108 -97
  2. warp/__init__.pyi +1 -1
  3. warp/bin/warp-clang.so +0 -0
  4. warp/bin/warp.so +0 -0
  5. warp/build.py +88 -113
  6. warp/build_dll.py +383 -375
  7. warp/builtins.py +3693 -3354
  8. warp/codegen.py +2925 -2792
  9. warp/config.py +40 -36
  10. warp/constants.py +49 -45
  11. warp/context.py +5409 -5102
  12. warp/dlpack.py +442 -442
  13. warp/examples/__init__.py +16 -16
  14. warp/examples/assets/bear.usd +0 -0
  15. warp/examples/assets/bunny.usd +0 -0
  16. warp/examples/assets/cartpole.urdf +110 -110
  17. warp/examples/assets/crazyflie.usd +0 -0
  18. warp/examples/assets/cube.usd +0 -0
  19. warp/examples/assets/nv_ant.xml +92 -92
  20. warp/examples/assets/nv_humanoid.xml +183 -183
  21. warp/examples/assets/quadruped.urdf +267 -267
  22. warp/examples/assets/rocks.nvdb +0 -0
  23. warp/examples/assets/rocks.usd +0 -0
  24. warp/examples/assets/sphere.usd +0 -0
  25. warp/examples/benchmarks/benchmark_api.py +381 -383
  26. warp/examples/benchmarks/benchmark_cloth.py +278 -277
  27. warp/examples/benchmarks/benchmark_cloth_cupy.py +88 -88
  28. warp/examples/benchmarks/benchmark_cloth_jax.py +97 -100
  29. warp/examples/benchmarks/benchmark_cloth_numba.py +146 -142
  30. warp/examples/benchmarks/benchmark_cloth_numpy.py +77 -77
  31. warp/examples/benchmarks/benchmark_cloth_pytorch.py +86 -86
  32. warp/examples/benchmarks/benchmark_cloth_taichi.py +112 -112
  33. warp/examples/benchmarks/benchmark_cloth_warp.py +145 -146
  34. warp/examples/benchmarks/benchmark_launches.py +293 -295
  35. warp/examples/browse.py +29 -29
  36. warp/examples/core/example_dem.py +232 -219
  37. warp/examples/core/example_fluid.py +291 -267
  38. warp/examples/core/example_graph_capture.py +142 -126
  39. warp/examples/core/example_marching_cubes.py +186 -174
  40. warp/examples/core/example_mesh.py +172 -155
  41. warp/examples/core/example_mesh_intersect.py +203 -193
  42. warp/examples/core/example_nvdb.py +174 -170
  43. warp/examples/core/example_raycast.py +103 -90
  44. warp/examples/core/example_raymarch.py +197 -178
  45. warp/examples/core/example_render_opengl.py +183 -141
  46. warp/examples/core/example_sph.py +403 -387
  47. warp/examples/core/example_torch.py +219 -181
  48. warp/examples/core/example_wave.py +261 -248
  49. warp/examples/fem/bsr_utils.py +378 -380
  50. warp/examples/fem/example_apic_fluid.py +432 -389
  51. warp/examples/fem/example_burgers.py +262 -0
  52. warp/examples/fem/example_convection_diffusion.py +180 -168
  53. warp/examples/fem/example_convection_diffusion_dg.py +217 -209
  54. warp/examples/fem/example_deformed_geometry.py +175 -159
  55. warp/examples/fem/example_diffusion.py +199 -173
  56. warp/examples/fem/example_diffusion_3d.py +178 -152
  57. warp/examples/fem/example_diffusion_mgpu.py +219 -214
  58. warp/examples/fem/example_mixed_elasticity.py +242 -222
  59. warp/examples/fem/example_navier_stokes.py +257 -243
  60. warp/examples/fem/example_stokes.py +218 -192
  61. warp/examples/fem/example_stokes_transfer.py +263 -249
  62. warp/examples/fem/mesh_utils.py +133 -109
  63. warp/examples/fem/plot_utils.py +292 -287
  64. warp/examples/optim/example_bounce.py +258 -246
  65. warp/examples/optim/example_cloth_throw.py +220 -209
  66. warp/examples/optim/example_diffray.py +564 -536
  67. warp/examples/optim/example_drone.py +862 -835
  68. warp/examples/optim/example_inverse_kinematics.py +174 -168
  69. warp/examples/optim/example_inverse_kinematics_torch.py +183 -169
  70. warp/examples/optim/example_spring_cage.py +237 -231
  71. warp/examples/optim/example_trajectory.py +221 -199
  72. warp/examples/optim/example_walker.py +304 -293
  73. warp/examples/sim/example_cartpole.py +137 -129
  74. warp/examples/sim/example_cloth.py +194 -186
  75. warp/examples/sim/example_granular.py +122 -111
  76. warp/examples/sim/example_granular_collision_sdf.py +195 -186
  77. warp/examples/sim/example_jacobian_ik.py +234 -214
  78. warp/examples/sim/example_particle_chain.py +116 -105
  79. warp/examples/sim/example_quadruped.py +191 -180
  80. warp/examples/sim/example_rigid_chain.py +195 -187
  81. warp/examples/sim/example_rigid_contact.py +187 -177
  82. warp/examples/sim/example_rigid_force.py +125 -125
  83. warp/examples/sim/example_rigid_gyroscopic.py +107 -95
  84. warp/examples/sim/example_rigid_soft_contact.py +132 -122
  85. warp/examples/sim/example_soft_body.py +188 -177
  86. warp/fabric.py +337 -335
  87. warp/fem/__init__.py +61 -27
  88. warp/fem/cache.py +403 -388
  89. warp/fem/dirichlet.py +178 -179
  90. warp/fem/domain.py +262 -263
  91. warp/fem/field/__init__.py +100 -101
  92. warp/fem/field/field.py +148 -149
  93. warp/fem/field/nodal_field.py +298 -299
  94. warp/fem/field/restriction.py +22 -21
  95. warp/fem/field/test.py +180 -181
  96. warp/fem/field/trial.py +183 -183
  97. warp/fem/geometry/__init__.py +16 -19
  98. warp/fem/geometry/closest_point.py +69 -70
  99. warp/fem/geometry/deformed_geometry.py +270 -271
  100. warp/fem/geometry/element.py +748 -744
  101. warp/fem/geometry/geometry.py +184 -186
  102. warp/fem/geometry/grid_2d.py +380 -373
  103. warp/fem/geometry/grid_3d.py +437 -435
  104. warp/fem/geometry/hexmesh.py +953 -953
  105. warp/fem/geometry/nanogrid.py +455 -0
  106. warp/fem/geometry/partition.py +374 -376
  107. warp/fem/geometry/quadmesh_2d.py +532 -532
  108. warp/fem/geometry/tetmesh.py +840 -840
  109. warp/fem/geometry/trimesh_2d.py +577 -577
  110. warp/fem/integrate.py +1684 -1615
  111. warp/fem/operator.py +190 -191
  112. warp/fem/polynomial.py +214 -213
  113. warp/fem/quadrature/__init__.py +2 -2
  114. warp/fem/quadrature/pic_quadrature.py +243 -245
  115. warp/fem/quadrature/quadrature.py +295 -294
  116. warp/fem/space/__init__.py +179 -292
  117. warp/fem/space/basis_space.py +522 -489
  118. warp/fem/space/collocated_function_space.py +100 -105
  119. warp/fem/space/dof_mapper.py +236 -236
  120. warp/fem/space/function_space.py +148 -145
  121. warp/fem/space/grid_2d_function_space.py +148 -267
  122. warp/fem/space/grid_3d_function_space.py +167 -306
  123. warp/fem/space/hexmesh_function_space.py +253 -352
  124. warp/fem/space/nanogrid_function_space.py +202 -0
  125. warp/fem/space/partition.py +350 -350
  126. warp/fem/space/quadmesh_2d_function_space.py +261 -369
  127. warp/fem/space/restriction.py +161 -160
  128. warp/fem/space/shape/__init__.py +90 -15
  129. warp/fem/space/shape/cube_shape_function.py +728 -738
  130. warp/fem/space/shape/shape_function.py +102 -103
  131. warp/fem/space/shape/square_shape_function.py +611 -611
  132. warp/fem/space/shape/tet_shape_function.py +565 -567
  133. warp/fem/space/shape/triangle_shape_function.py +429 -429
  134. warp/fem/space/tetmesh_function_space.py +224 -292
  135. warp/fem/space/topology.py +297 -295
  136. warp/fem/space/trimesh_2d_function_space.py +153 -221
  137. warp/fem/types.py +77 -77
  138. warp/fem/utils.py +495 -495
  139. warp/jax.py +166 -141
  140. warp/jax_experimental.py +341 -339
  141. warp/native/array.h +1081 -1025
  142. warp/native/builtin.h +1603 -1560
  143. warp/native/bvh.cpp +402 -398
  144. warp/native/bvh.cu +533 -525
  145. warp/native/bvh.h +430 -429
  146. warp/native/clang/clang.cpp +496 -464
  147. warp/native/crt.cpp +42 -32
  148. warp/native/crt.h +352 -335
  149. warp/native/cuda_crt.h +1049 -1049
  150. warp/native/cuda_util.cpp +549 -540
  151. warp/native/cuda_util.h +288 -203
  152. warp/native/cutlass_gemm.cpp +34 -34
  153. warp/native/cutlass_gemm.cu +372 -372
  154. warp/native/error.cpp +66 -66
  155. warp/native/error.h +27 -27
  156. warp/native/exports.h +187 -0
  157. warp/native/fabric.h +228 -228
  158. warp/native/hashgrid.cpp +301 -278
  159. warp/native/hashgrid.cu +78 -77
  160. warp/native/hashgrid.h +227 -227
  161. warp/native/initializer_array.h +32 -32
  162. warp/native/intersect.h +1204 -1204
  163. warp/native/intersect_adj.h +365 -365
  164. warp/native/intersect_tri.h +322 -322
  165. warp/native/marching.cpp +2 -2
  166. warp/native/marching.cu +497 -497
  167. warp/native/marching.h +2 -2
  168. warp/native/mat.h +1545 -1498
  169. warp/native/matnn.h +333 -333
  170. warp/native/mesh.cpp +203 -203
  171. warp/native/mesh.cu +292 -293
  172. warp/native/mesh.h +1887 -1887
  173. warp/native/nanovdb/GridHandle.h +366 -0
  174. warp/native/nanovdb/HostBuffer.h +590 -0
  175. warp/native/nanovdb/NanoVDB.h +6624 -4782
  176. warp/native/nanovdb/PNanoVDB.h +3390 -2553
  177. warp/native/noise.h +850 -850
  178. warp/native/quat.h +1112 -1085
  179. warp/native/rand.h +303 -299
  180. warp/native/range.h +108 -108
  181. warp/native/reduce.cpp +156 -156
  182. warp/native/reduce.cu +348 -348
  183. warp/native/runlength_encode.cpp +61 -61
  184. warp/native/runlength_encode.cu +46 -46
  185. warp/native/scan.cpp +30 -30
  186. warp/native/scan.cu +36 -36
  187. warp/native/scan.h +7 -7
  188. warp/native/solid_angle.h +442 -442
  189. warp/native/sort.cpp +94 -94
  190. warp/native/sort.cu +97 -97
  191. warp/native/sort.h +14 -14
  192. warp/native/sparse.cpp +337 -337
  193. warp/native/sparse.cu +544 -544
  194. warp/native/spatial.h +630 -630
  195. warp/native/svd.h +562 -562
  196. warp/native/temp_buffer.h +30 -30
  197. warp/native/vec.h +1177 -1133
  198. warp/native/volume.cpp +529 -297
  199. warp/native/volume.cu +58 -32
  200. warp/native/volume.h +960 -538
  201. warp/native/volume_builder.cu +446 -425
  202. warp/native/volume_builder.h +34 -19
  203. warp/native/volume_impl.h +61 -0
  204. warp/native/warp.cpp +1057 -1052
  205. warp/native/warp.cu +2949 -2828
  206. warp/native/warp.h +321 -305
  207. warp/optim/__init__.py +9 -9
  208. warp/optim/adam.py +120 -120
  209. warp/optim/linear.py +1104 -939
  210. warp/optim/sgd.py +104 -92
  211. warp/render/__init__.py +10 -10
  212. warp/render/render_opengl.py +3356 -3204
  213. warp/render/render_usd.py +768 -749
  214. warp/render/utils.py +152 -150
  215. warp/sim/__init__.py +52 -59
  216. warp/sim/articulation.py +685 -685
  217. warp/sim/collide.py +1594 -1590
  218. warp/sim/import_mjcf.py +489 -481
  219. warp/sim/import_snu.py +220 -221
  220. warp/sim/import_urdf.py +536 -516
  221. warp/sim/import_usd.py +887 -881
  222. warp/sim/inertia.py +316 -317
  223. warp/sim/integrator.py +234 -233
  224. warp/sim/integrator_euler.py +1956 -1956
  225. warp/sim/integrator_featherstone.py +1917 -1991
  226. warp/sim/integrator_xpbd.py +3288 -3312
  227. warp/sim/model.py +4473 -4314
  228. warp/sim/particles.py +113 -112
  229. warp/sim/render.py +417 -403
  230. warp/sim/utils.py +413 -410
  231. warp/sparse.py +1289 -1227
  232. warp/stubs.py +2192 -2469
  233. warp/tape.py +1162 -225
  234. warp/tests/__init__.py +1 -1
  235. warp/tests/__main__.py +4 -4
  236. warp/tests/assets/test_index_grid.nvdb +0 -0
  237. warp/tests/assets/torus.usda +105 -105
  238. warp/tests/aux_test_class_kernel.py +26 -26
  239. warp/tests/aux_test_compile_consts_dummy.py +10 -10
  240. warp/tests/aux_test_conditional_unequal_types_kernels.py +21 -21
  241. warp/tests/aux_test_dependent.py +20 -22
  242. warp/tests/aux_test_grad_customs.py +21 -23
  243. warp/tests/aux_test_reference.py +9 -11
  244. warp/tests/aux_test_reference_reference.py +8 -10
  245. warp/tests/aux_test_square.py +15 -17
  246. warp/tests/aux_test_unresolved_func.py +14 -14
  247. warp/tests/aux_test_unresolved_symbol.py +14 -14
  248. warp/tests/disabled_kinematics.py +237 -239
  249. warp/tests/run_coverage_serial.py +31 -31
  250. warp/tests/test_adam.py +155 -157
  251. warp/tests/test_arithmetic.py +1088 -1124
  252. warp/tests/test_array.py +2415 -2326
  253. warp/tests/test_array_reduce.py +148 -150
  254. warp/tests/test_async.py +666 -656
  255. warp/tests/test_atomic.py +139 -141
  256. warp/tests/test_bool.py +212 -149
  257. warp/tests/test_builtins_resolution.py +1290 -1292
  258. warp/tests/test_bvh.py +162 -171
  259. warp/tests/test_closest_point_edge_edge.py +227 -228
  260. warp/tests/test_codegen.py +562 -553
  261. warp/tests/test_compile_consts.py +217 -101
  262. warp/tests/test_conditional.py +244 -246
  263. warp/tests/test_copy.py +230 -215
  264. warp/tests/test_ctypes.py +630 -632
  265. warp/tests/test_dense.py +65 -67
  266. warp/tests/test_devices.py +89 -98
  267. warp/tests/test_dlpack.py +528 -529
  268. warp/tests/test_examples.py +403 -378
  269. warp/tests/test_fabricarray.py +952 -955
  270. warp/tests/test_fast_math.py +60 -54
  271. warp/tests/test_fem.py +1298 -1278
  272. warp/tests/test_fp16.py +128 -130
  273. warp/tests/test_func.py +336 -337
  274. warp/tests/test_generics.py +596 -571
  275. warp/tests/test_grad.py +885 -640
  276. warp/tests/test_grad_customs.py +331 -336
  277. warp/tests/test_hash_grid.py +208 -164
  278. warp/tests/test_import.py +37 -39
  279. warp/tests/test_indexedarray.py +1132 -1134
  280. warp/tests/test_intersect.py +65 -67
  281. warp/tests/test_jax.py +305 -307
  282. warp/tests/test_large.py +169 -164
  283. warp/tests/test_launch.py +352 -354
  284. warp/tests/test_lerp.py +217 -261
  285. warp/tests/test_linear_solvers.py +189 -171
  286. warp/tests/test_lvalue.py +419 -493
  287. warp/tests/test_marching_cubes.py +63 -65
  288. warp/tests/test_mat.py +1799 -1827
  289. warp/tests/test_mat_lite.py +113 -115
  290. warp/tests/test_mat_scalar_ops.py +2905 -2889
  291. warp/tests/test_math.py +124 -193
  292. warp/tests/test_matmul.py +498 -499
  293. warp/tests/test_matmul_lite.py +408 -410
  294. warp/tests/test_mempool.py +186 -190
  295. warp/tests/test_mesh.py +281 -324
  296. warp/tests/test_mesh_query_aabb.py +226 -241
  297. warp/tests/test_mesh_query_point.py +690 -702
  298. warp/tests/test_mesh_query_ray.py +290 -303
  299. warp/tests/test_mlp.py +274 -276
  300. warp/tests/test_model.py +108 -110
  301. warp/tests/test_module_hashing.py +111 -0
  302. warp/tests/test_modules_lite.py +36 -39
  303. warp/tests/test_multigpu.py +161 -163
  304. warp/tests/test_noise.py +244 -248
  305. warp/tests/test_operators.py +248 -250
  306. warp/tests/test_options.py +121 -125
  307. warp/tests/test_peer.py +131 -137
  308. warp/tests/test_pinned.py +76 -78
  309. warp/tests/test_print.py +52 -54
  310. warp/tests/test_quat.py +2084 -2086
  311. warp/tests/test_rand.py +324 -288
  312. warp/tests/test_reload.py +207 -217
  313. warp/tests/test_rounding.py +177 -179
  314. warp/tests/test_runlength_encode.py +188 -190
  315. warp/tests/test_sim_grad.py +241 -0
  316. warp/tests/test_sim_kinematics.py +89 -97
  317. warp/tests/test_smoothstep.py +166 -168
  318. warp/tests/test_snippet.py +303 -266
  319. warp/tests/test_sparse.py +466 -460
  320. warp/tests/test_spatial.py +2146 -2148
  321. warp/tests/test_special_values.py +362 -0
  322. warp/tests/test_streams.py +484 -473
  323. warp/tests/test_struct.py +708 -675
  324. warp/tests/test_tape.py +171 -148
  325. warp/tests/test_torch.py +741 -743
  326. warp/tests/test_transient_module.py +85 -87
  327. warp/tests/test_types.py +554 -659
  328. warp/tests/test_utils.py +488 -499
  329. warp/tests/test_vec.py +1262 -1268
  330. warp/tests/test_vec_lite.py +71 -73
  331. warp/tests/test_vec_scalar_ops.py +2097 -2099
  332. warp/tests/test_verify_fp.py +92 -94
  333. warp/tests/test_volume.py +961 -736
  334. warp/tests/test_volume_write.py +338 -265
  335. warp/tests/unittest_serial.py +38 -37
  336. warp/tests/unittest_suites.py +367 -359
  337. warp/tests/unittest_utils.py +434 -578
  338. warp/tests/unused_test_misc.py +69 -71
  339. warp/tests/walkthrough_debug.py +85 -85
  340. warp/thirdparty/appdirs.py +598 -598
  341. warp/thirdparty/dlpack.py +143 -143
  342. warp/thirdparty/unittest_parallel.py +563 -561
  343. warp/torch.py +321 -295
  344. warp/types.py +4941 -4450
  345. warp/utils.py +1008 -821
  346. {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/LICENSE.md +126 -126
  347. {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/METADATA +365 -400
  348. warp_lang-1.2.0.dist-info/RECORD +359 -0
  349. warp/examples/assets/cube.usda +0 -42
  350. warp/examples/assets/sphere.usda +0 -56
  351. warp/examples/assets/torus.usda +0 -105
  352. warp/examples/fem/example_convection_diffusion_dg0.py +0 -194
  353. warp/native/nanovdb/PNanoVDBWrite.h +0 -295
  354. warp_lang-1.0.2.dist-info/RECORD +0 -352
  355. {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/WHEEL +0 -0
  356. {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/top_level.txt +0 -0
warp/utils.py CHANGED
@@ -1,821 +1,1008 @@
1
- # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
2
- # NVIDIA CORPORATION and its licensors retain all intellectual property
3
- # and proprietary rights in and to this software, related documentation
4
- # and any modifications thereto. Any use, reproduction, disclosure or
5
- # distribution of this software and related documentation without an express
6
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
7
-
8
- import cProfile
9
- import os
10
- import sys
11
- import time
12
- import warnings
13
- from typing import Any
14
-
15
- import numpy as np
16
-
17
- import warp as wp
18
- import warp.types
19
-
20
-
21
- warnings_seen = set()
22
-
23
-
24
- def warp_showwarning(message, category, filename, lineno, file=None, line=None):
25
- """Version of warnings.showwarning that always prints to sys.stdout."""
26
-
27
- if warp.config.verbose_warnings:
28
- s = f"Warp {category.__name__}: {message} ({filename}:{lineno})\n"
29
-
30
- if line is None:
31
- try:
32
- import linecache
33
- line = linecache.getline(filename, lineno)
34
- except Exception:
35
- # When a warning is logged during Python shutdown, linecache
36
- # and the import machinery don't work anymore
37
- line = None
38
- linecache = None
39
- else:
40
- line = line
41
- if line:
42
- line = line.strip()
43
- s += " %s\n" % line
44
- else:
45
- # simple warning
46
- s = f"Warp {category.__name__}: {message}\n"
47
-
48
- sys.stdout.write(s)
49
-
50
-
51
- def warn(message, category=None, stacklevel=1):
52
- if (category, message) in warnings_seen:
53
- return
54
-
55
- with warnings.catch_warnings():
56
- warnings.simplefilter("default") # Change the filter in this process
57
- warnings.showwarning = warp_showwarning
58
- warnings.warn(message, category, stacklevel + 1) # Increment stacklevel by 1 since we are in a wrapper
59
-
60
- if category is DeprecationWarning:
61
- warnings_seen.add((category, message))
62
-
63
-
64
- # expand a 7-vec to a tuple of arrays
65
- def transform_expand(t):
66
- return wp.transform(np.array(t[0:3]), np.array(t[3:7]))
67
-
68
-
69
- @wp.func
70
- def quat_between_vectors(a: wp.vec3, b: wp.vec3) -> wp.quat:
71
- """
72
- Compute the quaternion that rotates vector a to vector b
73
- """
74
- a = wp.normalize(a)
75
- b = wp.normalize(b)
76
- c = wp.cross(a, b)
77
- d = wp.dot(a, b)
78
- q = wp.quat(c[0], c[1], c[2], 1.0 + d)
79
- return wp.normalize(q)
80
-
81
-
82
- def array_scan(in_array, out_array, inclusive=True):
83
- if in_array.device != out_array.device:
84
- raise RuntimeError("Array storage devices do not match")
85
-
86
- if in_array.size != out_array.size:
87
- raise RuntimeError("Array storage sizes do not match")
88
-
89
- if in_array.dtype != out_array.dtype:
90
- raise RuntimeError("Array data types do not match")
91
-
92
- if in_array.size == 0:
93
- return
94
-
95
- from warp.context import runtime
96
-
97
- if in_array.device.is_cpu:
98
- if in_array.dtype == wp.int32:
99
- runtime.core.array_scan_int_host(in_array.ptr, out_array.ptr, in_array.size, inclusive)
100
- elif in_array.dtype == wp.float32:
101
- runtime.core.array_scan_float_host(in_array.ptr, out_array.ptr, in_array.size, inclusive)
102
- else:
103
- raise RuntimeError("Unsupported data type")
104
- elif in_array.device.is_cuda:
105
- if in_array.dtype == wp.int32:
106
- runtime.core.array_scan_int_device(in_array.ptr, out_array.ptr, in_array.size, inclusive)
107
- elif in_array.dtype == wp.float32:
108
- runtime.core.array_scan_float_device(in_array.ptr, out_array.ptr, in_array.size, inclusive)
109
- else:
110
- raise RuntimeError("Unsupported data type")
111
-
112
-
113
- def radix_sort_pairs(keys, values, count: int):
114
- if keys.device != values.device:
115
- raise RuntimeError("Array storage devices do not match")
116
-
117
- if count == 0:
118
- return
119
-
120
- if keys.size < 2 * count or values.size < 2 * count:
121
- raise RuntimeError("Array storage must be large enough to contain 2*count elements")
122
-
123
- from warp.context import runtime
124
-
125
- if keys.device.is_cpu:
126
- if keys.dtype == wp.int32 and values.dtype == wp.int32:
127
- runtime.core.radix_sort_pairs_int_host(keys.ptr, values.ptr, count)
128
- else:
129
- raise RuntimeError("Unsupported data type")
130
- elif keys.device.is_cuda:
131
- if keys.dtype == wp.int32 and values.dtype == wp.int32:
132
- runtime.core.radix_sort_pairs_int_device(keys.ptr, values.ptr, count)
133
- else:
134
- raise RuntimeError("Unsupported data type")
135
-
136
-
137
- def runlength_encode(values, run_values, run_lengths, run_count=None, value_count=None):
138
- if run_values.device != values.device or run_lengths.device != values.device:
139
- raise RuntimeError("Array storage devices do not match")
140
-
141
- if value_count is None:
142
- value_count = values.size
143
-
144
- if run_values.size < value_count or run_lengths.size < value_count:
145
- raise RuntimeError("Output array storage sizes must be at least equal to value_count")
146
-
147
- if values.dtype != run_values.dtype:
148
- raise RuntimeError("values and run_values data types do not match")
149
-
150
- if run_lengths.dtype != wp.int32:
151
- raise RuntimeError("run_lengths array must be of type int32")
152
-
153
- # User can provide a device output array for storing the number of runs
154
- # For convenience, if no such array is provided, number of runs is returned on host
155
- if run_count is None:
156
- if value_count == 0:
157
- return 0
158
- run_count = wp.empty(shape=(1,), dtype=int, device=values.device)
159
- host_return = True
160
- else:
161
- if run_count.device != values.device:
162
- raise RuntimeError("run_count storage device does not match other arrays")
163
- if run_count.dtype != wp.int32:
164
- raise RuntimeError("run_count array must be of type int32")
165
- if value_count == 0:
166
- run_count.zero_()
167
- return 0
168
- host_return = False
169
-
170
- from warp.context import runtime
171
-
172
- if values.device.is_cpu:
173
- if values.dtype == wp.int32:
174
- runtime.core.runlength_encode_int_host(
175
- values.ptr, run_values.ptr, run_lengths.ptr, run_count.ptr, value_count
176
- )
177
- else:
178
- raise RuntimeError("Unsupported data type")
179
- elif values.device.is_cuda:
180
- if values.dtype == wp.int32:
181
- runtime.core.runlength_encode_int_device(
182
- values.ptr, run_values.ptr, run_lengths.ptr, run_count.ptr, value_count
183
- )
184
- else:
185
- raise RuntimeError("Unsupported data type")
186
-
187
- if host_return:
188
- return int(run_count.numpy()[0])
189
-
190
-
191
- def array_sum(values, out=None, value_count=None, axis=None):
192
- if value_count is None:
193
- if axis is None:
194
- value_count = values.size
195
- else:
196
- value_count = values.shape[axis]
197
-
198
- if axis is None:
199
- output_shape = (1,)
200
- else:
201
-
202
- def output_dim(ax, dim):
203
- return 1 if ax == axis else dim
204
-
205
- output_shape = tuple(output_dim(ax, dim) for ax, dim in enumerate(values.shape))
206
-
207
- type_length = wp.types.type_length(values.dtype)
208
- scalar_type = wp.types.type_scalar_type(values.dtype)
209
-
210
- # User can provide a device output array for storing the number of runs
211
- # For convenience, if no such array is provided, number of runs is returned on host
212
- if out is None:
213
- host_return = True
214
- out = wp.empty(shape=output_shape, dtype=values.dtype, device=values.device)
215
- else:
216
- host_return = False
217
- if out.device != values.device:
218
- raise RuntimeError("out storage device should match values array")
219
- if out.dtype != values.dtype:
220
- raise RuntimeError(f"out array should have type {values.dtype.__name__}")
221
- if out.shape != output_shape:
222
- raise RuntimeError(f"out array should have shape {output_shape}")
223
-
224
- if value_count == 0:
225
- out.zero_()
226
- if axis is None and host_return:
227
- return out.numpy()[0]
228
- return out
229
-
230
- from warp.context import runtime
231
-
232
- if values.device.is_cpu:
233
- if scalar_type == wp.float32:
234
- native_func = runtime.core.array_sum_float_host
235
- elif scalar_type == wp.float64:
236
- native_func = runtime.core.array_sum_double_host
237
- else:
238
- raise RuntimeError("Unsupported data type")
239
- elif values.device.is_cuda:
240
- if scalar_type == wp.float32:
241
- native_func = runtime.core.array_sum_float_device
242
- elif scalar_type == wp.float64:
243
- native_func = runtime.core.array_sum_double_device
244
- else:
245
- raise RuntimeError("Unsupported data type")
246
-
247
- if axis is None:
248
- stride = wp.types.type_size_in_bytes(values.dtype)
249
- native_func(values.ptr, out.ptr, value_count, stride, type_length)
250
-
251
- if host_return:
252
- return out.numpy()[0]
253
- else:
254
- stride = values.strides[axis]
255
- for idx in np.ndindex(output_shape):
256
- out_offset = sum(i * s for i, s in zip(idx, out.strides))
257
- val_offset = sum(i * s for i, s in zip(idx, values.strides))
258
-
259
- native_func(
260
- values.ptr + val_offset,
261
- out.ptr + out_offset,
262
- value_count,
263
- stride,
264
- type_length,
265
- )
266
-
267
- if host_return:
268
- return out
269
-
270
-
271
- def array_inner(a, b, out=None, count=None, axis=None):
272
- if a.size != b.size:
273
- raise RuntimeError("Array storage sizes do not match")
274
-
275
- if a.device != b.device:
276
- raise RuntimeError("Array storage devices do not match")
277
-
278
- if a.dtype != b.dtype:
279
- raise RuntimeError("Array data types do not match")
280
-
281
- if count is None:
282
- if axis is None:
283
- count = a.size
284
- else:
285
- count = a.shape[axis]
286
-
287
- if axis is None:
288
- output_shape = (1,)
289
- else:
290
-
291
- def output_dim(ax, dim):
292
- return 1 if ax == axis else dim
293
-
294
- output_shape = tuple(output_dim(ax, dim) for ax, dim in enumerate(a.shape))
295
-
296
- type_length = wp.types.type_length(a.dtype)
297
- scalar_type = wp.types.type_scalar_type(a.dtype)
298
-
299
- # User can provide a device output array for storing the number of runs
300
- # For convenience, if no such array is provided, number of runs is returned on host
301
- if out is None:
302
- host_return = True
303
- out = wp.empty(shape=output_shape, dtype=scalar_type, device=a.device)
304
- else:
305
- host_return = False
306
- if out.device != a.device:
307
- raise RuntimeError("out storage device should match values array")
308
- if out.dtype != scalar_type:
309
- raise RuntimeError(f"out array should have type {scalar_type.__name__}")
310
- if out.shape != output_shape:
311
- raise RuntimeError(f"out array should have shape {output_shape}")
312
-
313
- if count == 0:
314
- if axis is None and host_return:
315
- return 0.0
316
- out.zero_()
317
- return out
318
-
319
- from warp.context import runtime
320
-
321
- if a.device.is_cpu:
322
- if scalar_type == wp.float32:
323
- native_func = runtime.core.array_inner_float_host
324
- elif scalar_type == wp.float64:
325
- native_func = runtime.core.array_inner_double_host
326
- else:
327
- raise RuntimeError("Unsupported data type")
328
- elif a.device.is_cuda:
329
- if scalar_type == wp.float32:
330
- native_func = runtime.core.array_inner_float_device
331
- elif scalar_type == wp.float64:
332
- native_func = runtime.core.array_inner_double_device
333
- else:
334
- raise RuntimeError("Unsupported data type")
335
-
336
- if axis is None:
337
- stride_a = wp.types.type_size_in_bytes(a.dtype)
338
- stride_b = wp.types.type_size_in_bytes(b.dtype)
339
- native_func(a.ptr, b.ptr, out.ptr, count, stride_a, stride_b, type_length)
340
-
341
- if host_return:
342
- return out.numpy()[0]
343
- else:
344
- stride_a = a.strides[axis]
345
- stride_b = b.strides[axis]
346
-
347
- for idx in np.ndindex(output_shape):
348
- out_offset = sum(i * s for i, s in zip(idx, out.strides))
349
- a_offset = sum(i * s for i, s in zip(idx, a.strides))
350
- b_offset = sum(i * s for i, s in zip(idx, b.strides))
351
-
352
- native_func(
353
- a.ptr + a_offset,
354
- b.ptr + b_offset,
355
- out.ptr + out_offset,
356
- count,
357
- stride_a,
358
- stride_b,
359
- type_length,
360
- )
361
-
362
- if host_return:
363
- return out
364
-
365
-
366
- @wp.kernel
367
- def _array_cast_kernel(
368
- dest: Any,
369
- src: Any,
370
- ):
371
- i = wp.tid()
372
- dest[i] = dest.dtype(src[i])
373
-
374
-
375
- def array_cast(in_array, out_array, count=None):
376
- if in_array.device != out_array.device:
377
- raise RuntimeError("Array storage devices do not match")
378
-
379
- in_array_data_shape = getattr(in_array.dtype, "_shape_", ())
380
- out_array_data_shape = getattr(out_array.dtype, "_shape_", ())
381
-
382
- if in_array.ndim != out_array.ndim or in_array_data_shape != out_array_data_shape:
383
- # Number of dimensions or data type shape do not match.
384
- # Flatten arrays and do cast at the scalar level
385
- in_array = in_array.flatten()
386
- out_array = out_array.flatten()
387
-
388
- in_array_data_length = warp.types.type_length(in_array.dtype)
389
- out_array_data_length = warp.types.type_length(out_array.dtype)
390
- in_array_scalar_type = wp.types.type_scalar_type(in_array.dtype)
391
- out_array_scalar_type = wp.types.type_scalar_type(out_array.dtype)
392
-
393
- in_array = wp.array(
394
- data=None,
395
- ptr=in_array.ptr,
396
- capacity=in_array.capacity,
397
- device=in_array.device,
398
- dtype=in_array_scalar_type,
399
- shape=in_array.shape[0] * in_array_data_length,
400
- )
401
-
402
- out_array = wp.array(
403
- data=None,
404
- ptr=out_array.ptr,
405
- capacity=out_array.capacity,
406
- device=out_array.device,
407
- dtype=out_array_scalar_type,
408
- shape=out_array.shape[0] * out_array_data_length,
409
- )
410
-
411
- if count is not None:
412
- count *= in_array_data_length
413
-
414
- if count is None:
415
- count = in_array.size
416
-
417
- if in_array.ndim == 1:
418
- dim = count
419
- elif count < in_array.size:
420
- raise RuntimeError("Partial cast is not supported for arrays with more than one dimension")
421
- else:
422
- dim = in_array.shape
423
-
424
- if in_array.dtype == out_array.dtype:
425
- # Same data type, can simply copy
426
- wp.copy(dest=out_array, src=in_array, count=count)
427
- else:
428
- wp.launch(kernel=_array_cast_kernel, dim=dim, inputs=[out_array, in_array], device=out_array.device)
429
-
430
-
431
- # code snippet for invoking cProfile
432
- # cp = cProfile.Profile()
433
- # cp.enable()
434
- # for i in range(1000):
435
- # self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
436
-
437
- # cp.disable()
438
- # cp.print_stats(sort='tottime')
439
- # exit(0)
440
-
441
-
442
- # helper kernels for initializing NVDB volumes from a dense array
443
- @wp.kernel
444
- def copy_dense_volume_to_nano_vdb_v(volume: wp.uint64, values: wp.array(dtype=wp.vec3, ndim=3)):
445
- i, j, k = wp.tid()
446
- wp.volume_store_v(volume, i, j, k, values[i, j, k])
447
-
448
-
449
- @wp.kernel
450
- def copy_dense_volume_to_nano_vdb_f(volume: wp.uint64, values: wp.array(dtype=wp.float32, ndim=3)):
451
- i, j, k = wp.tid()
452
- wp.volume_store_f(volume, i, j, k, values[i, j, k])
453
-
454
-
455
- @wp.kernel
456
- def copy_dense_volume_to_nano_vdb_i(volume: wp.uint64, values: wp.array(dtype=wp.int32, ndim=3)):
457
- i, j, k = wp.tid()
458
- wp.volume_store_i(volume, i, j, k, values[i, j, k])
459
-
460
-
461
- # represent an edge between v0, v1 with connected faces f0, f1, and opposite vertex o0, and o1
462
- # winding is such that first tri can be reconstructed as {v0, v1, o0}, and second tri as { v1, v0, o1 }
463
- class MeshEdge:
464
- def __init__(self, v0, v1, o0, o1, f0, f1):
465
- self.v0 = v0 # vertex 0
466
- self.v1 = v1 # vertex 1
467
- self.o0 = o0 # opposite vertex 1
468
- self.o1 = o1 # opposite vertex 2
469
- self.f0 = f0 # index of tri1
470
- self.f1 = f1 # index of tri2
471
-
472
-
473
- class MeshAdjacency:
474
- def __init__(self, indices, num_tris):
475
- # map edges (v0, v1) to faces (f0, f1)
476
- self.edges = {}
477
- self.indices = indices
478
-
479
- for index, tri in enumerate(indices):
480
- self.add_edge(tri[0], tri[1], tri[2], index)
481
- self.add_edge(tri[1], tri[2], tri[0], index)
482
- self.add_edge(tri[2], tri[0], tri[1], index)
483
-
484
- def add_edge(self, i0, i1, o, f): # index1, index2, index3, index of triangle
485
- key = (min(i0, i1), max(i0, i1))
486
- edge = None
487
-
488
- if key in self.edges:
489
- edge = self.edges[key]
490
-
491
- if edge.f1 != -1:
492
- print("Detected non-manifold edge")
493
- return
494
- else:
495
- # update other side of the edge
496
- edge.o1 = o
497
- edge.f1 = f
498
- else:
499
- # create new edge with opposite yet to be filled
500
- edge = MeshEdge(i0, i1, o, -1, f, -1)
501
-
502
- self.edges[key] = edge
503
-
504
-
505
- def mem_report(): #pragma: no cover
506
- def _mem_report(tensors, mem_type):
507
- """Print the selected tensors of type
508
- There are two major storage types in our major concern:
509
- - GPU: tensors transferred to CUDA devices
510
- - CPU: tensors remaining on the system memory (usually unimportant)
511
- Args:
512
- - tensors: the tensors of specified type
513
- - mem_type: 'CPU' or 'GPU' in current implementation"""
514
- total_numel = 0
515
- total_mem = 0
516
- visited_data = []
517
- for tensor in tensors:
518
- if tensor.is_sparse:
519
- continue
520
- # a data_ptr indicates a memory block allocated
521
- data_ptr = tensor.storage().data_ptr()
522
- if data_ptr in visited_data:
523
- continue
524
- visited_data.append(data_ptr)
525
-
526
- numel = tensor.storage().size()
527
- total_numel += numel
528
- element_size = tensor.storage().element_size()
529
- mem = numel * element_size / 1024 / 1024 # 32bit=4Byte, MByte
530
- total_mem += mem
531
- element_type = type(tensor).__name__
532
- size = tuple(tensor.size())
533
-
534
- # print('%s\t\t%s\t\t%.2f' % (
535
- # element_type,
536
- # size,
537
- # mem) )
538
- print("Type: %s Total Tensors: %d \tUsed Memory Space: %.2f MBytes" % (mem_type, total_numel, total_mem))
539
-
540
- import gc
541
-
542
- import torch
543
-
544
- gc.collect()
545
-
546
- LEN = 65
547
- objects = gc.get_objects()
548
- # print('%s\t%s\t\t\t%s' %('Element type', 'Size', 'Used MEM(MBytes)') )
549
- tensors = [obj for obj in objects if torch.is_tensor(obj)]
550
- cuda_tensors = [t for t in tensors if t.is_cuda]
551
- host_tensors = [t for t in tensors if not t.is_cuda]
552
- _mem_report(cuda_tensors, "GPU")
553
- _mem_report(host_tensors, "CPU")
554
- print("=" * LEN)
555
-
556
-
557
-
558
- class ScopedDevice:
559
- def __init__(self, device):
560
- self.device = wp.get_device(device)
561
-
562
- def __enter__(self):
563
- # save the previous default device
564
- self.saved_device = self.device.runtime.default_device
565
-
566
- # make this the default device
567
- self.device.runtime.default_device = self.device
568
-
569
- # make it the current CUDA device so that device alias "cuda" will evaluate to this device
570
- self.device.context_guard.__enter__()
571
-
572
- return self.device
573
-
574
- def __exit__(self, exc_type, exc_value, traceback):
575
- # restore original CUDA context
576
- self.device.context_guard.__exit__(exc_type, exc_value, traceback)
577
-
578
- # restore original target device
579
- self.device.runtime.default_device = self.saved_device
580
-
581
-
582
- class ScopedStream:
583
- def __init__(self, stream, sync_enter=True, sync_exit=False):
584
- self.stream = stream
585
- self.sync_enter = sync_enter
586
- self.sync_exit = sync_exit
587
- if stream is not None:
588
- self.device = stream.device
589
- self.device_scope = ScopedDevice(self.device)
590
-
591
- def __enter__(self):
592
- if self.stream is not None:
593
- self.device_scope.__enter__()
594
- self.saved_stream = self.device.stream
595
- self.device.set_stream(self.stream, self.sync_enter)
596
-
597
- return self.stream
598
-
599
- def __exit__(self, exc_type, exc_value, traceback):
600
- if self.stream is not None:
601
- self.device.set_stream(self.saved_stream, self.sync_exit)
602
- self.device_scope.__exit__(exc_type, exc_value, traceback)
603
-
604
-
605
- # timer utils
606
- class ScopedTimer:
607
- indent = -1
608
-
609
- enabled = True
610
-
611
- def __init__(
612
- self,
613
- name,
614
- active=True,
615
- print=True,
616
- detailed=False,
617
- dict=None,
618
- use_nvtx=False,
619
- color="rapids",
620
- synchronize=False,
621
- ):
622
- """Context manager object for a timer
623
-
624
- Parameters:
625
- name (str): Name of timer
626
- active (bool): Enables this timer
627
- print (bool): At context manager exit, print elapsed time to sys.stdout
628
- detailed (bool): Collects additional profiling data using cProfile and calls ``print_stats()`` at context exit
629
- dict (dict): A dictionary of lists to which the elapsed time will be appended using ``name`` as a key
630
- use_nvtx (bool): If true, timing functionality is replaced by an NVTX range
631
- color (int or str): ARGB value (e.g. 0x00FFFF) or color name (e.g. 'cyan') associated with the NVTX range
632
- synchronize (bool): Synchronize the CPU thread with any outstanding CUDA work to return accurate GPU timings
633
-
634
- Attributes:
635
- elapsed (float): The duration of the ``with`` block used with this object
636
- """
637
- self.name = name
638
- self.active = active and self.enabled
639
- self.print = print
640
- self.detailed = detailed
641
- self.dict = dict
642
- self.use_nvtx = use_nvtx
643
- self.color = color
644
- self.synchronize = synchronize
645
- self.elapsed = 0.0
646
-
647
- if self.dict is not None:
648
- if name not in self.dict:
649
- self.dict[name] = []
650
-
651
- def __enter__(self):
652
- if self.active:
653
- if self.synchronize:
654
- wp.synchronize()
655
-
656
- if self.use_nvtx:
657
- import nvtx
658
-
659
- self.nvtx_range_id = nvtx.start_range(self.name, color=self.color)
660
- return self
661
-
662
- if self.detailed:
663
- self.cp = cProfile.Profile()
664
- self.cp.clear()
665
- self.cp.enable()
666
-
667
- if self.print:
668
- ScopedTimer.indent += 1
669
-
670
- self.start = time.perf_counter_ns()
671
-
672
- return self
673
-
674
- def __exit__(self, exc_type, exc_value, traceback):
675
- if self.active:
676
- if self.synchronize:
677
- wp.synchronize()
678
-
679
- if self.use_nvtx:
680
- import nvtx
681
-
682
- nvtx.end_range(self.nvtx_range_id)
683
- return
684
-
685
- self.elapsed = (time.perf_counter_ns() - self.start) / 1000000.0
686
-
687
- if self.detailed:
688
- self.cp.disable()
689
- self.cp.print_stats(sort="tottime")
690
-
691
- if self.dict is not None:
692
- self.dict[self.name].append(self.elapsed)
693
-
694
- if self.print:
695
- indent = ""
696
- for i in range(ScopedTimer.indent):
697
- indent += "\t"
698
-
699
- print("{}{} took {:.2f} ms".format(indent, self.name, self.elapsed))
700
-
701
- ScopedTimer.indent -= 1
702
-
703
-
704
- # Allow temporarily enabling/disabling mempool allocators
705
- class ScopedMempool:
706
- def __init__(self, device, enable: bool):
707
- self.device = wp.get_device(device)
708
- self.enable = enable
709
-
710
- def __enter__(self):
711
- self.saved_setting = wp.is_mempool_enabled(self.device)
712
- wp.set_mempool_enabled(self.device, self.enable)
713
-
714
- def __exit__(self, exc_type, exc_value, traceback):
715
- wp.set_mempool_enabled(self.device, self.saved_setting)
716
-
717
-
718
- # Allow temporarily enabling/disabling mempool access
719
- class ScopedMempoolAccess:
720
- def __init__(self, target_device, peer_device, enable: bool):
721
- self.target_device = target_device
722
- self.peer_device = peer_device
723
- self.enable = enable
724
-
725
- def __enter__(self):
726
- self.saved_setting = wp.is_mempool_access_enabled(self.target_device, self.peer_device)
727
- wp.set_mempool_access_enabled(self.target_device, self.peer_device, self.enable)
728
-
729
- def __exit__(self, exc_type, exc_value, traceback):
730
- wp.set_mempool_access_enabled(self.target_device, self.peer_device, self.saved_setting)
731
-
732
-
733
- # Allow temporarily enabling/disabling peer access
734
- class ScopedPeerAccess:
735
- def __init__(self, target_device, peer_device, enable: bool):
736
- self.target_device = target_device
737
- self.peer_device = peer_device
738
- self.enable = enable
739
-
740
- def __enter__(self):
741
- self.saved_setting = wp.is_peer_access_enabled(self.target_device, self.peer_device)
742
- wp.set_peer_access_enabled(self.target_device, self.peer_device, self.enable)
743
-
744
- def __exit__(self, exc_type, exc_value, traceback):
745
- wp.set_peer_access_enabled(self.target_device, self.peer_device, self.saved_setting)
746
-
747
-
748
- class ScopedCapture:
749
- def __init__(self, device=None, stream=None, force_module_load=None, external=False):
750
- self.device = device
751
- self.stream = stream
752
- self.force_module_load = force_module_load
753
- self.external = external
754
- self.active = False
755
- self.graph = None
756
-
757
- def __enter__(self):
758
- try:
759
- wp.capture_begin(
760
- device=self.device, stream=self.stream, force_module_load=self.force_module_load, external=self.external
761
- )
762
- self.active = True
763
- return self
764
- except:
765
- raise
766
-
767
- def __exit__(self, exc_type, exc_value, traceback):
768
- if self.active:
769
- try:
770
- self.graph = wp.capture_end(device=self.device, stream=self.stream)
771
- finally:
772
- self.active = False
773
-
774
-
775
- # helper kernels for adj_matmul
776
- @wp.kernel
777
- def add_kernel_2d(x: wp.array2d(dtype=Any), acc: wp.array2d(dtype=Any), beta: Any):
778
- i, j = wp.tid()
779
-
780
- x[i,j] = x[i,j] + beta * acc[i,j]
781
-
782
-
783
- @wp.kernel
784
- def add_kernel_3d(x: wp.array3d(dtype=Any), acc: wp.array3d(dtype=Any), beta: Any):
785
- i, j, k = wp.tid()
786
-
787
- x[i,j,k] = x[i,j,k] + beta * acc[i,j,k]
788
-
789
-
790
- # explicit instantiations of generic kernels for adj_matmul
791
- for T in [wp.float16, wp.float32, wp.float64]:
792
- wp.overload(add_kernel_2d, [wp.array2d(dtype=T), wp.array2d(dtype=T), T])
793
- wp.overload(add_kernel_3d, [wp.array3d(dtype=T), wp.array3d(dtype=T), T])
794
-
795
-
796
- def check_iommu():
797
- """Check if IOMMU is enabled on Linux, which can affect peer-to-peer transfers.
798
-
799
- Returns:
800
- A Boolean indicating whether IOMMU is configured properly for peer-to-peer transfers.
801
- On Linux, this function attempts to determine if IOMMU is enabled and will return `False` if IOMMU is detected.
802
- On other operating systems, it always return `True`.
803
- """
804
-
805
- if sys.platform == "linux":
806
- # On modern Linux, there should be IOMMU-related entries in the /sys file system.
807
- # This should be more reliable than checking kernel logs like dmesg.
808
- if os.path.isdir("/sys/class/iommu") and os.listdir("/sys/class/iommu"):
809
- return False
810
- if os.path.isdir("/sys/kernel/iommu_groups") and os.listdir("/sys/kernel/iommu_groups"):
811
- return False
812
-
813
- # HACK: disable P2P tests on misbehaving agents
814
- disable_p2p_tests = os.getenv("WARP_DISABLE_P2P_TESTS", default="0")
815
- if int(disable_p2p_tests):
816
- return False
817
-
818
- return True
819
- else:
820
- # doesn't matter
821
- return True
1
+ # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
2
+ # NVIDIA CORPORATION and its licensors retain all intellectual property
3
+ # and proprietary rights in and to this software, related documentation
4
+ # and any modifications thereto. Any use, reproduction, disclosure or
5
+ # distribution of this software and related documentation without an express
6
+ # license agreement from NVIDIA CORPORATION is strictly prohibited.
7
+
8
+ import cProfile
9
+ import ctypes
10
+ import os
11
+ import sys
12
+ import time
13
+ import warnings
14
+ from typing import Any
15
+
16
+ import numpy as np
17
+
18
+ import warp as wp
19
+ import warp.context
20
+ import warp.types
21
+
22
+ warnings_seen = set()
23
+
24
+
25
+ def warp_showwarning(message, category, filename, lineno, file=None, line=None):
26
+ """Version of warnings.showwarning that always prints to sys.stdout."""
27
+
28
+ if warp.config.verbose_warnings:
29
+ s = f"Warp {category.__name__}: {message} ({filename}:{lineno})\n"
30
+
31
+ if line is None:
32
+ try:
33
+ import linecache
34
+
35
+ line = linecache.getline(filename, lineno)
36
+ except Exception:
37
+ # When a warning is logged during Python shutdown, linecache
38
+ # and the import machinery don't work anymore
39
+ line = None
40
+ linecache = None
41
+ else:
42
+ line = line
43
+ if line:
44
+ line = line.strip()
45
+ s += " %s\n" % line
46
+ else:
47
+ # simple warning
48
+ s = f"Warp {category.__name__}: {message}\n"
49
+
50
+ sys.stdout.write(s)
51
+
52
+
53
+ def warn(message, category=None, stacklevel=1):
54
+ if (category, message) in warnings_seen:
55
+ return
56
+
57
+ with warnings.catch_warnings():
58
+ warnings.simplefilter("default") # Change the filter in this process
59
+ warnings.showwarning = warp_showwarning
60
+ warnings.warn(
61
+ message,
62
+ category,
63
+ stacklevel=stacklevel + 1, # Increment stacklevel by 1 since we are in a wrapper
64
+ )
65
+
66
+ if category is DeprecationWarning:
67
+ warnings_seen.add((category, message))
68
+
69
+
70
+ # expand a 7-vec to a tuple of arrays
71
+ def transform_expand(t):
72
+ return wp.transform(np.array(t[0:3]), np.array(t[3:7]))
73
+
74
+
75
+ @wp.func
76
+ def quat_between_vectors(a: wp.vec3, b: wp.vec3) -> wp.quat:
77
+ """
78
+ Compute the quaternion that rotates vector a to vector b
79
+ """
80
+ a = wp.normalize(a)
81
+ b = wp.normalize(b)
82
+ c = wp.cross(a, b)
83
+ d = wp.dot(a, b)
84
+ q = wp.quat(c[0], c[1], c[2], 1.0 + d)
85
+ return wp.normalize(q)
86
+
87
+
88
+ def array_scan(in_array, out_array, inclusive=True):
89
+ if in_array.device != out_array.device:
90
+ raise RuntimeError("Array storage devices do not match")
91
+
92
+ if in_array.size != out_array.size:
93
+ raise RuntimeError("Array storage sizes do not match")
94
+
95
+ if in_array.dtype != out_array.dtype:
96
+ raise RuntimeError("Array data types do not match")
97
+
98
+ if in_array.size == 0:
99
+ return
100
+
101
+ from warp.context import runtime
102
+
103
+ if in_array.device.is_cpu:
104
+ if in_array.dtype == wp.int32:
105
+ runtime.core.array_scan_int_host(in_array.ptr, out_array.ptr, in_array.size, inclusive)
106
+ elif in_array.dtype == wp.float32:
107
+ runtime.core.array_scan_float_host(in_array.ptr, out_array.ptr, in_array.size, inclusive)
108
+ else:
109
+ raise RuntimeError("Unsupported data type")
110
+ elif in_array.device.is_cuda:
111
+ if in_array.dtype == wp.int32:
112
+ runtime.core.array_scan_int_device(in_array.ptr, out_array.ptr, in_array.size, inclusive)
113
+ elif in_array.dtype == wp.float32:
114
+ runtime.core.array_scan_float_device(in_array.ptr, out_array.ptr, in_array.size, inclusive)
115
+ else:
116
+ raise RuntimeError("Unsupported data type")
117
+
118
+
119
+ def radix_sort_pairs(keys, values, count: int):
120
+ if keys.device != values.device:
121
+ raise RuntimeError("Array storage devices do not match")
122
+
123
+ if count == 0:
124
+ return
125
+
126
+ if keys.size < 2 * count or values.size < 2 * count:
127
+ raise RuntimeError("Array storage must be large enough to contain 2*count elements")
128
+
129
+ from warp.context import runtime
130
+
131
+ if keys.device.is_cpu:
132
+ if keys.dtype == wp.int32 and values.dtype == wp.int32:
133
+ runtime.core.radix_sort_pairs_int_host(keys.ptr, values.ptr, count)
134
+ else:
135
+ raise RuntimeError("Unsupported data type")
136
+ elif keys.device.is_cuda:
137
+ if keys.dtype == wp.int32 and values.dtype == wp.int32:
138
+ runtime.core.radix_sort_pairs_int_device(keys.ptr, values.ptr, count)
139
+ else:
140
+ raise RuntimeError("Unsupported data type")
141
+
142
+
143
+ def runlength_encode(values, run_values, run_lengths, run_count=None, value_count=None):
144
+ if run_values.device != values.device or run_lengths.device != values.device:
145
+ raise RuntimeError("Array storage devices do not match")
146
+
147
+ if value_count is None:
148
+ value_count = values.size
149
+
150
+ if run_values.size < value_count or run_lengths.size < value_count:
151
+ raise RuntimeError("Output array storage sizes must be at least equal to value_count")
152
+
153
+ if values.dtype != run_values.dtype:
154
+ raise RuntimeError("values and run_values data types do not match")
155
+
156
+ if run_lengths.dtype != wp.int32:
157
+ raise RuntimeError("run_lengths array must be of type int32")
158
+
159
+ # User can provide a device output array for storing the number of runs
160
+ # For convenience, if no such array is provided, number of runs is returned on host
161
+ if run_count is None:
162
+ if value_count == 0:
163
+ return 0
164
+ run_count = wp.empty(shape=(1,), dtype=int, device=values.device)
165
+ host_return = True
166
+ else:
167
+ if run_count.device != values.device:
168
+ raise RuntimeError("run_count storage device does not match other arrays")
169
+ if run_count.dtype != wp.int32:
170
+ raise RuntimeError("run_count array must be of type int32")
171
+ if value_count == 0:
172
+ run_count.zero_()
173
+ return 0
174
+ host_return = False
175
+
176
+ from warp.context import runtime
177
+
178
+ if values.device.is_cpu:
179
+ if values.dtype == wp.int32:
180
+ runtime.core.runlength_encode_int_host(
181
+ values.ptr, run_values.ptr, run_lengths.ptr, run_count.ptr, value_count
182
+ )
183
+ else:
184
+ raise RuntimeError("Unsupported data type")
185
+ elif values.device.is_cuda:
186
+ if values.dtype == wp.int32:
187
+ runtime.core.runlength_encode_int_device(
188
+ values.ptr, run_values.ptr, run_lengths.ptr, run_count.ptr, value_count
189
+ )
190
+ else:
191
+ raise RuntimeError("Unsupported data type")
192
+
193
+ if host_return:
194
+ return int(run_count.numpy()[0])
195
+
196
+
197
+ def array_sum(values, out=None, value_count=None, axis=None):
198
+ if value_count is None:
199
+ if axis is None:
200
+ value_count = values.size
201
+ else:
202
+ value_count = values.shape[axis]
203
+
204
+ if axis is None:
205
+ output_shape = (1,)
206
+ else:
207
+
208
+ def output_dim(ax, dim):
209
+ return 1 if ax == axis else dim
210
+
211
+ output_shape = tuple(output_dim(ax, dim) for ax, dim in enumerate(values.shape))
212
+
213
+ type_length = wp.types.type_length(values.dtype)
214
+ scalar_type = wp.types.type_scalar_type(values.dtype)
215
+
216
+ # User can provide a device output array for storing the number of runs
217
+ # For convenience, if no such array is provided, number of runs is returned on host
218
+ if out is None:
219
+ host_return = True
220
+ out = wp.empty(shape=output_shape, dtype=values.dtype, device=values.device)
221
+ else:
222
+ host_return = False
223
+ if out.device != values.device:
224
+ raise RuntimeError("out storage device should match values array")
225
+ if out.dtype != values.dtype:
226
+ raise RuntimeError(f"out array should have type {values.dtype.__name__}")
227
+ if out.shape != output_shape:
228
+ raise RuntimeError(f"out array should have shape {output_shape}")
229
+
230
+ if value_count == 0:
231
+ out.zero_()
232
+ if axis is None and host_return:
233
+ return out.numpy()[0]
234
+ return out
235
+
236
+ from warp.context import runtime
237
+
238
+ if values.device.is_cpu:
239
+ if scalar_type == wp.float32:
240
+ native_func = runtime.core.array_sum_float_host
241
+ elif scalar_type == wp.float64:
242
+ native_func = runtime.core.array_sum_double_host
243
+ else:
244
+ raise RuntimeError("Unsupported data type")
245
+ elif values.device.is_cuda:
246
+ if scalar_type == wp.float32:
247
+ native_func = runtime.core.array_sum_float_device
248
+ elif scalar_type == wp.float64:
249
+ native_func = runtime.core.array_sum_double_device
250
+ else:
251
+ raise RuntimeError("Unsupported data type")
252
+
253
+ if axis is None:
254
+ stride = wp.types.type_size_in_bytes(values.dtype)
255
+ native_func(values.ptr, out.ptr, value_count, stride, type_length)
256
+
257
+ if host_return:
258
+ return out.numpy()[0]
259
+ else:
260
+ stride = values.strides[axis]
261
+ for idx in np.ndindex(output_shape):
262
+ out_offset = sum(i * s for i, s in zip(idx, out.strides))
263
+ val_offset = sum(i * s for i, s in zip(idx, values.strides))
264
+
265
+ native_func(
266
+ values.ptr + val_offset,
267
+ out.ptr + out_offset,
268
+ value_count,
269
+ stride,
270
+ type_length,
271
+ )
272
+
273
+ if host_return:
274
+ return out
275
+
276
+
277
+ def array_inner(a, b, out=None, count=None, axis=None):
278
+ if a.size != b.size:
279
+ raise RuntimeError("Array storage sizes do not match")
280
+
281
+ if a.device != b.device:
282
+ raise RuntimeError("Array storage devices do not match")
283
+
284
+ if a.dtype != b.dtype:
285
+ raise RuntimeError("Array data types do not match")
286
+
287
+ if count is None:
288
+ if axis is None:
289
+ count = a.size
290
+ else:
291
+ count = a.shape[axis]
292
+
293
+ if axis is None:
294
+ output_shape = (1,)
295
+ else:
296
+
297
+ def output_dim(ax, dim):
298
+ return 1 if ax == axis else dim
299
+
300
+ output_shape = tuple(output_dim(ax, dim) for ax, dim in enumerate(a.shape))
301
+
302
+ type_length = wp.types.type_length(a.dtype)
303
+ scalar_type = wp.types.type_scalar_type(a.dtype)
304
+
305
+ # User can provide a device output array for storing the number of runs
306
+ # For convenience, if no such array is provided, number of runs is returned on host
307
+ if out is None:
308
+ host_return = True
309
+ out = wp.empty(shape=output_shape, dtype=scalar_type, device=a.device)
310
+ else:
311
+ host_return = False
312
+ if out.device != a.device:
313
+ raise RuntimeError("out storage device should match values array")
314
+ if out.dtype != scalar_type:
315
+ raise RuntimeError(f"out array should have type {scalar_type.__name__}")
316
+ if out.shape != output_shape:
317
+ raise RuntimeError(f"out array should have shape {output_shape}")
318
+
319
+ if count == 0:
320
+ if axis is None and host_return:
321
+ return 0.0
322
+ out.zero_()
323
+ return out
324
+
325
+ from warp.context import runtime
326
+
327
+ if a.device.is_cpu:
328
+ if scalar_type == wp.float32:
329
+ native_func = runtime.core.array_inner_float_host
330
+ elif scalar_type == wp.float64:
331
+ native_func = runtime.core.array_inner_double_host
332
+ else:
333
+ raise RuntimeError("Unsupported data type")
334
+ elif a.device.is_cuda:
335
+ if scalar_type == wp.float32:
336
+ native_func = runtime.core.array_inner_float_device
337
+ elif scalar_type == wp.float64:
338
+ native_func = runtime.core.array_inner_double_device
339
+ else:
340
+ raise RuntimeError("Unsupported data type")
341
+
342
+ if axis is None:
343
+ stride_a = wp.types.type_size_in_bytes(a.dtype)
344
+ stride_b = wp.types.type_size_in_bytes(b.dtype)
345
+ native_func(a.ptr, b.ptr, out.ptr, count, stride_a, stride_b, type_length)
346
+
347
+ if host_return:
348
+ return out.numpy()[0]
349
+ else:
350
+ stride_a = a.strides[axis]
351
+ stride_b = b.strides[axis]
352
+
353
+ for idx in np.ndindex(output_shape):
354
+ out_offset = sum(i * s for i, s in zip(idx, out.strides))
355
+ a_offset = sum(i * s for i, s in zip(idx, a.strides))
356
+ b_offset = sum(i * s for i, s in zip(idx, b.strides))
357
+
358
+ native_func(
359
+ a.ptr + a_offset,
360
+ b.ptr + b_offset,
361
+ out.ptr + out_offset,
362
+ count,
363
+ stride_a,
364
+ stride_b,
365
+ type_length,
366
+ )
367
+
368
+ if host_return:
369
+ return out
370
+
371
+
372
+@wp.kernel
+def _array_cast_kernel(
+    dest: Any,
+    src: Any,
+):
+    i = wp.tid()
+    dest[i] = dest.dtype(src[i])
+
+
+def array_cast(in_array, out_array, count=None):
+    if in_array.device != out_array.device:
+        raise RuntimeError("Array storage devices do not match")
+
+    in_array_data_shape = getattr(in_array.dtype, "_shape_", ())
+    out_array_data_shape = getattr(out_array.dtype, "_shape_", ())
+
+    if in_array.ndim != out_array.ndim or in_array_data_shape != out_array_data_shape:
+        # Number of dimensions or data type shape do not match.
+        # Flatten arrays and do cast at the scalar level
+        in_array = in_array.flatten()
+        out_array = out_array.flatten()
+
+        in_array_data_length = warp.types.type_length(in_array.dtype)
+        out_array_data_length = warp.types.type_length(out_array.dtype)
+        in_array_scalar_type = wp.types.type_scalar_type(in_array.dtype)
+        out_array_scalar_type = wp.types.type_scalar_type(out_array.dtype)
+
+        in_array = wp.array(
+            data=None,
+            ptr=in_array.ptr,
+            capacity=in_array.capacity,
+            device=in_array.device,
+            dtype=in_array_scalar_type,
+            shape=in_array.shape[0] * in_array_data_length,
+        )
+
+        out_array = wp.array(
+            data=None,
+            ptr=out_array.ptr,
+            capacity=out_array.capacity,
+            device=out_array.device,
+            dtype=out_array_scalar_type,
+            shape=out_array.shape[0] * out_array_data_length,
+        )
+
+        if count is not None:
+            count *= in_array_data_length
+
+    if count is None:
+        count = in_array.size
+
+    if in_array.ndim == 1:
+        dim = count
+    elif count < in_array.size:
+        raise RuntimeError("Partial cast is not supported for arrays with more than one dimension")
+    else:
+        dim = in_array.shape
+
+    if in_array.dtype == out_array.dtype:
+        # Same data type, can simply copy
+        wp.copy(dest=out_array, src=in_array, count=count)
+    else:
+        wp.launch(kernel=_array_cast_kernel, dim=dim, inputs=[out_array, in_array], device=out_array.device)
+
+
+# code snippet for invoking cProfile
+# cp = cProfile.Profile()
+# cp.enable()
+# for i in range(1000):
+#     self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
+
+# cp.disable()
+# cp.print_stats(sort='tottime')
+# exit(0)
+
+
+# helper kernels for initializing NVDB volumes from a dense array
+@wp.kernel
+def copy_dense_volume_to_nano_vdb_v(volume: wp.uint64, values: wp.array(dtype=wp.vec3, ndim=3)):
+    i, j, k = wp.tid()
+    wp.volume_store_v(volume, i, j, k, values[i, j, k])
+
+
+@wp.kernel
+def copy_dense_volume_to_nano_vdb_f(volume: wp.uint64, values: wp.array(dtype=wp.float32, ndim=3)):
+    i, j, k = wp.tid()
+    wp.volume_store_f(volume, i, j, k, values[i, j, k])
+
+
+@wp.kernel
+def copy_dense_volume_to_nano_vdb_i(volume: wp.uint64, values: wp.array(dtype=wp.int32, ndim=3)):
+    i, j, k = wp.tid()
+    wp.volume_store_i(volume, i, j, k, values[i, j, k])
+
+
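# --- Editor's illustrative sketch, not part of the package diff ---
# Copies a dense float32 grid into a NanoVDB volume; assumes `volume` is a
# wp.Volume that was allocated beforehand (e.g. via wp.Volume.allocate) so that
# it covers the index range of `values`, and that `values` is a 3D float32
# array on a CUDA device.
def _example_copy_dense_to_volume(volume, values):
    import warp as wp

    # one thread per voxel of the dense grid
    wp.launch(
        copy_dense_volume_to_nano_vdb_f,
        dim=values.shape,
        inputs=[volume.id, values],
        device=values.device,
    )
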
+# represent an edge between v0, v1 with connected faces f0, f1, and opposite vertices o0 and o1
+# winding is such that the first tri can be reconstructed as {v0, v1, o0}, and the second tri as {v1, v0, o1}
+class MeshEdge:
+    def __init__(self, v0, v1, o0, o1, f0, f1):
+        self.v0 = v0  # vertex 0
+        self.v1 = v1  # vertex 1
+        self.o0 = o0  # opposite vertex 0
+        self.o1 = o1  # opposite vertex 1
+        self.f0 = f0  # index of first tri
+        self.f1 = f1  # index of second tri
+
+
+class MeshAdjacency:
+    def __init__(self, indices, num_tris):
+        # map edges (v0, v1) to faces (f0, f1)
+        self.edges = {}
+        self.indices = indices
+
+        for index, tri in enumerate(indices):
+            self.add_edge(tri[0], tri[1], tri[2], index)
+            self.add_edge(tri[1], tri[2], tri[0], index)
+            self.add_edge(tri[2], tri[0], tri[1], index)
+
+    def add_edge(self, i0, i1, o, f):  # edge vertex indices (i0, i1), opposite vertex index o, triangle index f
+        key = (min(i0, i1), max(i0, i1))
+        edge = None
+
+        if key in self.edges:
+            edge = self.edges[key]
+
+            if edge.f1 != -1:
+                print("Detected non-manifold edge")
+                return
+            else:
+                # update other side of the edge
+                edge.o1 = o
+                edge.f1 = f
+        else:
+            # create new edge with opposite yet to be filled
+            edge = MeshEdge(i0, i1, o, -1, f, -1)
+
+        self.edges[key] = edge
+
+
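# --- Editor's illustrative sketch, not part of the package diff ---
# Builds edge adjacency for two triangles sharing the edge (1, 2).
def _example_mesh_adjacency():
    tris = [(0, 1, 2), (2, 1, 3)]
    adj = MeshAdjacency(tris, len(tris))

    # the shared edge connects faces 0 and 1, with opposite vertices 0 and 3
    edge = adj.edges[(1, 2)]
    print(edge.f0, edge.f1, edge.o0, edge.o1)  # 0 1 0 3
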
+def mem_report():  # pragma: no cover
+    def _mem_report(tensors, mem_type):
+        """Print a summary of the given tensors.
+        Two major storage types are of concern:
+            - GPU: tensors transferred to CUDA devices
+            - CPU: tensors remaining in system memory (usually unimportant)
+        Args:
+            - tensors: the tensors of the specified type
+            - mem_type: 'CPU' or 'GPU' in the current implementation"""
+        total_numel = 0
+        total_mem = 0
+        visited_data = []
+        for tensor in tensors:
+            if tensor.is_sparse:
+                continue
+            # a data_ptr indicates a memory block allocated
+            data_ptr = tensor.storage().data_ptr()
+            if data_ptr in visited_data:
+                continue
+            visited_data.append(data_ptr)
+
+            numel = tensor.storage().size()
+            total_numel += numel
+            element_size = tensor.storage().element_size()
+            mem = numel * element_size / 1024 / 1024  # convert bytes to MBytes
+            total_mem += mem
+        print("Type: %s Total Tensors: %d \tUsed Memory Space: %.2f MBytes" % (mem_type, total_numel, total_mem))
+
+    import gc
+
+    import torch
+
+    gc.collect()
+
+    LEN = 65
+    objects = gc.get_objects()
+    # print('%s\t%s\t\t\t%s' %('Element type', 'Size', 'Used MEM(MBytes)') )
+    tensors = [obj for obj in objects if torch.is_tensor(obj)]
+    cuda_tensors = [t for t in tensors if t.is_cuda]
+    host_tensors = [t for t in tensors if not t.is_cuda]
+    _mem_report(cuda_tensors, "GPU")
+    _mem_report(host_tensors, "CPU")
+    print("=" * LEN)
+
+
+class ScopedDevice:
+    def __init__(self, device):
+        self.device = wp.get_device(device)
+
+    def __enter__(self):
+        # save the previous default device
+        self.saved_device = self.device.runtime.default_device
+
+        # make this the default device
+        self.device.runtime.default_device = self.device
+
+        # make it the current CUDA device so that device alias "cuda" will evaluate to this device
+        self.device.context_guard.__enter__()
+
+        return self.device
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        # restore original CUDA context
+        self.device.context_guard.__exit__(exc_type, exc_value, traceback)
+
+        # restore original target device
+        self.device.runtime.default_device = self.saved_device
+
+
+class ScopedStream:
+    def __init__(self, stream, sync_enter=True, sync_exit=False):
+        self.stream = stream
+        self.sync_enter = sync_enter
+        self.sync_exit = sync_exit
+        if stream is not None:
+            self.device = stream.device
+            self.device_scope = ScopedDevice(self.device)
+
+    def __enter__(self):
+        if self.stream is not None:
+            self.device_scope.__enter__()
+            self.saved_stream = self.device.stream
+            self.device.set_stream(self.stream, self.sync_enter)
+
+        return self.stream
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        if self.stream is not None:
+            self.device.set_stream(self.saved_stream, self.sync_exit)
+            self.device_scope.__exit__(exc_type, exc_value, traceback)
+
+
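# --- Editor's illustrative sketch, not part of the package diff ---
# Uses ScopedDevice to redirect default allocations to a specific device, and
# ScopedStream to issue subsequent work on a dedicated stream; assumes a
# CUDA-capable device "cuda:0" is present.
def _example_scoped_device_and_stream():
    import warp as wp

    with wp.ScopedDevice("cuda:0"):
        # allocated on cuda:0 without passing device= explicitly
        a = wp.zeros(1024, dtype=wp.float32)

    stream = wp.Stream("cuda:0")
    with wp.ScopedStream(stream):
        # copies and kernel launches issued here run on `stream`
        b = wp.ones(1024, dtype=wp.float32, device="cuda:0")
        wp.copy(a, b)
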
+TIMING_KERNEL = 1
+TIMING_KERNEL_BUILTIN = 2
+TIMING_MEMCPY = 4
+TIMING_MEMSET = 8
+TIMING_GRAPH = 16
+TIMING_ALL = 0xFFFFFFFF
+
+
+# timer utils
+class ScopedTimer:
+    indent = -1
+
+    enabled = True
+
+    def __init__(
+        self,
+        name,
+        active=True,
+        print=True,
+        detailed=False,
+        dict=None,
+        use_nvtx=False,
+        color="rapids",
+        synchronize=False,
+        cuda_filter=0,
+        report_func=None,
+        skip_tape=False,
+    ):
+        """Context manager object for a timer
+
+        Parameters:
+            name (str): Name of timer
+            active (bool): Enables this timer
+            print (bool): At context manager exit, print elapsed time to sys.stdout
+            detailed (bool): Collects additional profiling data using cProfile and calls ``print_stats()`` at context exit
+            dict (dict): A dictionary of lists to which the elapsed time will be appended using ``name`` as a key
+            use_nvtx (bool): If true, timing functionality is replaced by an NVTX range
+            color (int or str): ARGB value (e.g. 0x00FFFF) or color name (e.g. 'cyan') associated with the NVTX range
+            synchronize (bool): Synchronize the CPU thread with any outstanding CUDA work to return accurate GPU timings
+            cuda_filter (int): Filter flags for CUDA activity timing, e.g. ``warp.TIMING_KERNEL`` or ``warp.TIMING_ALL``
+            report_func (Callable): A callback function to print the activity report (``wp.timing_print()`` is used by default)
+            skip_tape (bool): If true, the timer will not be recorded in the tape
+
+        Attributes:
+            elapsed (float): The duration of the ``with`` block used with this object
+            timing_results (list[TimingResult]): The list of activity timing results, if collection was requested using ``cuda_filter``
+        """
+        self.name = name
+        self.active = active and self.enabled
+        self.print = print
+        self.detailed = detailed
+        self.dict = dict
+        self.use_nvtx = use_nvtx
+        self.color = color
+        self.synchronize = synchronize
+        self.skip_tape = skip_tape
+        self.elapsed = 0.0
+        self.cuda_filter = cuda_filter
+        self.report_func = report_func or wp.timing_print
+
+        if self.dict is not None:
+            if name not in self.dict:
+                self.dict[name] = []
+
+    def __enter__(self):
+        if not self.skip_tape and warp.context.runtime is not None and warp.context.runtime.tape is not None:
+            warp.context.runtime.tape.record_scope_begin(self.name)
+        if self.active:
+            if self.synchronize:
+                wp.synchronize()
+
+            if self.cuda_filter:
+                # begin CUDA activity collection, synchronizing if needed
+                timing_begin(self.cuda_filter, synchronize=not self.synchronize)
+
+            if self.detailed:
+                self.cp = cProfile.Profile()
+                self.cp.clear()
+                self.cp.enable()
+
+            if self.use_nvtx:
+                import nvtx
+
+                self.nvtx_range_id = nvtx.start_range(self.name, color=self.color)
+
+            if self.print:
+                ScopedTimer.indent += 1
+
+            self.start = time.perf_counter_ns()
+
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        if not self.skip_tape and warp.context.runtime is not None and warp.context.runtime.tape is not None:
+            warp.context.runtime.tape.record_scope_end()
+        if self.active:
+            if self.synchronize:
+                wp.synchronize()
+
+            self.elapsed = (time.perf_counter_ns() - self.start) / 1000000.0
+
+            if self.use_nvtx:
+                import nvtx
+
+                nvtx.end_range(self.nvtx_range_id)
+
+            if self.detailed:
+                self.cp.disable()
+                self.cp.print_stats(sort="tottime")
+
+            if self.cuda_filter:
+                # end CUDA activity collection, synchronizing if needed
+                self.timing_results = timing_end(synchronize=not self.synchronize)
+            else:
+                self.timing_results = []
+
+            if self.dict is not None:
+                self.dict[self.name].append(self.elapsed)
+
+            if self.print:
+                indent = "\t" * ScopedTimer.indent
+
+                if self.timing_results:
+                    self.report_func(self.timing_results, indent=indent)
+                    print()
+
+                print(f"{indent}{self.name} took {self.elapsed :.2f} ms")
+
+                ScopedTimer.indent -= 1
+
+
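# --- Editor's illustrative sketch, not part of the package diff ---
# Times a block of work and, on CUDA devices, also collects per-kernel activity
# via cuda_filter; assumes `step` is some user-defined callable that launches work.
def _example_scoped_timer(step):
    import warp as wp

    results = {}
    with wp.ScopedTimer("step", dict=results, synchronize=True, cuda_filter=wp.TIMING_KERNEL):
        step()

    # elapsed times (in ms) are appended under the timer's name
    print(results["step"])
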
+# Allow temporarily enabling/disabling mempool allocators
+class ScopedMempool:
+    def __init__(self, device, enable: bool):
+        self.device = wp.get_device(device)
+        self.enable = enable
+
+    def __enter__(self):
+        self.saved_setting = wp.is_mempool_enabled(self.device)
+        wp.set_mempool_enabled(self.device, self.enable)
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        wp.set_mempool_enabled(self.device, self.saved_setting)
+
+
+# Allow temporarily enabling/disabling mempool access
+class ScopedMempoolAccess:
+    def __init__(self, target_device, peer_device, enable: bool):
+        self.target_device = target_device
+        self.peer_device = peer_device
+        self.enable = enable
+
+    def __enter__(self):
+        self.saved_setting = wp.is_mempool_access_enabled(self.target_device, self.peer_device)
+        wp.set_mempool_access_enabled(self.target_device, self.peer_device, self.enable)
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        wp.set_mempool_access_enabled(self.target_device, self.peer_device, self.saved_setting)
+
+
+# Allow temporarily enabling/disabling peer access
+class ScopedPeerAccess:
+    def __init__(self, target_device, peer_device, enable: bool):
+        self.target_device = target_device
+        self.peer_device = peer_device
+        self.enable = enable
+
+    def __enter__(self):
+        self.saved_setting = wp.is_peer_access_enabled(self.target_device, self.peer_device)
+        wp.set_peer_access_enabled(self.target_device, self.peer_device, self.enable)
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        wp.set_peer_access_enabled(self.target_device, self.peer_device, self.saved_setting)
+
+
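# --- Editor's illustrative sketch, not part of the package diff ---
# Temporarily disables the CUDA memory pool allocator on cuda:0 for the
# allocation below; assumes a CUDA device is present.
def _example_scoped_mempool():
    import warp as wp

    with wp.ScopedMempool("cuda:0", False):
        # this allocation uses the default (non-pooled) allocator
        a = wp.zeros(1024, dtype=wp.float32, device="cuda:0")
    return a
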
+class ScopedCapture:
+    def __init__(self, device=None, stream=None, force_module_load=None, external=False):
+        self.device = device
+        self.stream = stream
+        self.force_module_load = force_module_load
+        self.external = external
+        self.active = False
+        self.graph = None
+
+    def __enter__(self):
+        try:
+            wp.capture_begin(
+                device=self.device, stream=self.stream, force_module_load=self.force_module_load, external=self.external
+            )
+            self.active = True
+            return self
+        except:
+            raise
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        if self.active:
+            try:
+                self.graph = wp.capture_end(device=self.device, stream=self.stream)
+            finally:
+                self.active = False
+
+
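# --- Editor's illustrative sketch, not part of the package diff ---
# Records a CUDA graph with ScopedCapture and replays it; assumes `my_kernel`
# is a previously defined @wp.kernel, `a` is a Warp array on cuda:0, and the
# device supports graph capture.
def _example_scoped_capture(my_kernel, a):
    import warp as wp

    with wp.ScopedCapture(device="cuda:0") as capture:
        wp.launch(my_kernel, dim=a.shape, inputs=[a], device="cuda:0")

    # replay the captured graph any number of times
    wp.capture_launch(capture.graph)
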
+# helper kernels for adj_matmul
+@wp.kernel
+def add_kernel_2d(x: wp.array2d(dtype=Any), acc: wp.array2d(dtype=Any), beta: Any):
+    i, j = wp.tid()
+
+    x[i, j] = x[i, j] + beta * acc[i, j]
+
+
+@wp.kernel
+def add_kernel_3d(x: wp.array3d(dtype=Any), acc: wp.array3d(dtype=Any), beta: Any):
+    i, j, k = wp.tid()
+
+    x[i, j, k] = x[i, j, k] + beta * acc[i, j, k]
+
+
+# explicit instantiations of generic kernels for adj_matmul
+for T in [wp.float16, wp.float32, wp.float64]:
+    wp.overload(add_kernel_2d, [wp.array2d(dtype=T), wp.array2d(dtype=T), T])
+    wp.overload(add_kernel_3d, [wp.array3d(dtype=T), wp.array3d(dtype=T), T])
+
+
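# --- Editor's illustrative sketch, not part of the package diff ---
# Launches one of the explicit float32 overloads above to accumulate
# x += beta * acc for 2D arrays; passing beta as wp.float32 lets the launch
# resolve the matching overload from the argument types.
def _example_add_kernel_2d():
    import warp as wp

    x = wp.zeros((4, 4), dtype=wp.float32)
    acc = wp.ones((4, 4), dtype=wp.float32)

    wp.launch(add_kernel_2d, dim=x.shape, inputs=[x, acc, wp.float32(0.5)], device=x.device)
    print(x.numpy())  # all entries 0.5
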
+def check_iommu():
+    """Check if IOMMU is enabled on Linux, which can affect peer-to-peer transfers.
+
+    Returns:
+        A Boolean indicating whether IOMMU is configured properly for peer-to-peer transfers.
+        On Linux, this function attempts to determine if IOMMU is enabled and will return `False` if IOMMU is detected.
+        On other operating systems, it always returns `True`.
+    """
+
+    if sys.platform == "linux":
+        # On modern Linux, there should be IOMMU-related entries in the /sys file system.
+        # This should be more reliable than checking kernel logs like dmesg.
+        if os.path.isdir("/sys/class/iommu") and os.listdir("/sys/class/iommu"):
+            return False
+        if os.path.isdir("/sys/kernel/iommu_groups") and os.listdir("/sys/kernel/iommu_groups"):
+            return False
+
+        # HACK: disable P2P tests on misbehaving agents
+        disable_p2p_tests = os.getenv("WARP_DISABLE_P2P_TESTS", default="0")
+        if int(disable_p2p_tests):
+            return False
+
+        return True
+    else:
+        # IOMMU is not a concern on other operating systems
+        return True
+
+
+class timing_result_t(ctypes.Structure):
+    """CUDA timing struct for fetching values from C++"""
+
+    _fields_ = [
+        ("context", ctypes.c_void_p),
+        ("name", ctypes.c_char_p),
+        ("filter", ctypes.c_int),
+        ("elapsed", ctypes.c_float),
+    ]
+
+
+class TimingResult:
+    """Timing result for a single activity.
+
+    Attributes:
+        device (warp.Device): The device where the activity was recorded.
+        name (str): The activity name.
+        filter (int): The type of activity (e.g., ``warp.TIMING_KERNEL``).
+        elapsed (float): The elapsed time in milliseconds.
+    """
+
+    def __init__(self, device, name, filter, elapsed):
+        self.device = device
+        self.name = name
+        self.filter = filter
+        self.elapsed = elapsed
+
+
+def timing_begin(cuda_filter=TIMING_ALL, synchronize=True):
+    """Begin detailed activity timing.
+
+    Parameters:
+        cuda_filter (int): Filter flags for CUDA activity timing, e.g. ``warp.TIMING_KERNEL`` or ``warp.TIMING_ALL``
+        synchronize (bool): Whether to synchronize all CUDA devices before timing starts
+    """
+
+    if synchronize:
+        warp.synchronize()
+
+    warp.context.runtime.core.cuda_timing_begin(cuda_filter)
+
+
+def timing_end(synchronize=True):
+    """End detailed activity timing.
+
+    Parameters:
+        synchronize (bool): Whether to synchronize all CUDA devices before timing ends
+
+    Returns:
+        list[TimingResult]: A list of ``TimingResult`` objects for all recorded activities.
+    """
+
+    if synchronize:
+        warp.synchronize()
+
+    # get result count
+    count = warp.context.runtime.core.cuda_timing_get_result_count()
+
+    # get result array from C++
+    result_buffer = (timing_result_t * count)()
+    warp.context.runtime.core.cuda_timing_end(ctypes.byref(result_buffer), count)
+
+    # prepare Python result list
+    results = []
+    for r in result_buffer:
+        device = warp.context.runtime.context_map.get(r.context)
+        filter = r.filter
+        elapsed = r.elapsed
+
+        name = r.name.decode()
+        if filter == TIMING_KERNEL:
+            if name.endswith("forward"):
+                # strip trailing "_cuda_kernel_forward"
+                name = f"forward kernel {name[:-20]}"
+            else:
+                # strip trailing "_cuda_kernel_backward"
+                name = f"backward kernel {name[:-21]}"
+        elif filter == TIMING_KERNEL_BUILTIN:
+            if name.startswith("wp::"):
+                name = f"builtin kernel {name[4:]}"
+            else:
+                name = f"builtin kernel {name}"
+
+        results.append(TimingResult(device, name, filter, elapsed))
+
+    return results
+
+
+def timing_print(results, indent=""):
+    """Print timing results.
+
+    Parameters:
+        results (list[TimingResult]): List of ``TimingResult`` objects.
+        indent (str): Optional indentation for the output.
+    """
+
+    if not results:
+        print("No activity")
+        return
+
+    class Aggregate:
+        def __init__(self, count=0, elapsed=0):
+            self.count = count
+            self.elapsed = elapsed
+
+    device_totals = {}
+    activity_totals = {}
+
+    max_name_len = len("Activity")
+    for r in results:
+        name_len = len(r.name)
+        max_name_len = max(max_name_len, name_len)
+
+    activity_width = max_name_len + 1
+    activity_dashes = "-" * activity_width
+
+    print(f"{indent}CUDA timeline:")
+    print(f"{indent}----------------+---------+{activity_dashes}")
+    print(f"{indent}Time            | Device  | Activity")
+    print(f"{indent}----------------+---------+{activity_dashes}")
+    for r in results:
+        device_agg = device_totals.get(r.device.alias)
+        if device_agg is None:
+            device_totals[r.device.alias] = Aggregate(count=1, elapsed=r.elapsed)
+        else:
+            device_agg.count += 1
+            device_agg.elapsed += r.elapsed
+
+        activity_agg = activity_totals.get(r.name)
+        if activity_agg is None:
+            activity_totals[r.name] = Aggregate(count=1, elapsed=r.elapsed)
+        else:
+            activity_agg.count += 1
+            activity_agg.elapsed += r.elapsed
+
+        print(f"{indent}{r.elapsed :12.6f} ms | {r.device.alias :7s} | {r.name}")
+
+    print()
+    print(f"{indent}CUDA activity summary:")
+    print(f"{indent}----------------+---------+{activity_dashes}")
+    print(f"{indent}Total time      | Count   | Activity")
+    print(f"{indent}----------------+---------+{activity_dashes}")
+    for name, agg in activity_totals.items():
+        print(f"{indent}{agg.elapsed :12.6f} ms | {agg.count :7d} | {name}")
+
+    print()
+    print(f"{indent}CUDA device summary:")
+    print(f"{indent}----------------+---------+{activity_dashes}")
+    print(f"{indent}Total time      | Count   | Device")
+    print(f"{indent}----------------+---------+{activity_dashes}")
+    for device, agg in device_totals.items():
+        print(f"{indent}{agg.elapsed :12.6f} ms | {agg.count :7d} | {device}")