warp-lang 1.0.2__py3-none-win_amd64.whl → 1.2.0__py3-none-win_amd64.whl
This diff shows the changes between two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
The registry flags this as a potentially problematic release.
- warp/__init__.py +108 -97
- warp/__init__.pyi +1 -1
- warp/bin/warp-clang.dll +0 -0
- warp/bin/warp.dll +0 -0
- warp/build.py +88 -113
- warp/build_dll.py +383 -375
- warp/builtins.py +3693 -3354
- warp/codegen.py +2925 -2792
- warp/config.py +40 -36
- warp/constants.py +49 -45
- warp/context.py +5409 -5102
- warp/dlpack.py +442 -442
- warp/examples/__init__.py +16 -16
- warp/examples/assets/bear.usd +0 -0
- warp/examples/assets/bunny.usd +0 -0
- warp/examples/assets/cartpole.urdf +110 -110
- warp/examples/assets/crazyflie.usd +0 -0
- warp/examples/assets/cube.usd +0 -0
- warp/examples/assets/nv_ant.xml +92 -92
- warp/examples/assets/nv_humanoid.xml +183 -183
- warp/examples/assets/quadruped.urdf +267 -267
- warp/examples/assets/rocks.nvdb +0 -0
- warp/examples/assets/rocks.usd +0 -0
- warp/examples/assets/sphere.usd +0 -0
- warp/examples/benchmarks/benchmark_api.py +381 -383
- warp/examples/benchmarks/benchmark_cloth.py +278 -277
- warp/examples/benchmarks/benchmark_cloth_cupy.py +88 -88
- warp/examples/benchmarks/benchmark_cloth_jax.py +97 -100
- warp/examples/benchmarks/benchmark_cloth_numba.py +146 -142
- warp/examples/benchmarks/benchmark_cloth_numpy.py +77 -77
- warp/examples/benchmarks/benchmark_cloth_pytorch.py +86 -86
- warp/examples/benchmarks/benchmark_cloth_taichi.py +112 -112
- warp/examples/benchmarks/benchmark_cloth_warp.py +145 -146
- warp/examples/benchmarks/benchmark_launches.py +293 -295
- warp/examples/browse.py +29 -29
- warp/examples/core/example_dem.py +232 -219
- warp/examples/core/example_fluid.py +291 -267
- warp/examples/core/example_graph_capture.py +142 -126
- warp/examples/core/example_marching_cubes.py +186 -174
- warp/examples/core/example_mesh.py +172 -155
- warp/examples/core/example_mesh_intersect.py +203 -193
- warp/examples/core/example_nvdb.py +174 -170
- warp/examples/core/example_raycast.py +103 -90
- warp/examples/core/example_raymarch.py +197 -178
- warp/examples/core/example_render_opengl.py +183 -141
- warp/examples/core/example_sph.py +403 -387
- warp/examples/core/example_torch.py +219 -181
- warp/examples/core/example_wave.py +261 -248
- warp/examples/fem/bsr_utils.py +378 -380
- warp/examples/fem/example_apic_fluid.py +432 -389
- warp/examples/fem/example_burgers.py +262 -0
- warp/examples/fem/example_convection_diffusion.py +180 -168
- warp/examples/fem/example_convection_diffusion_dg.py +217 -209
- warp/examples/fem/example_deformed_geometry.py +175 -159
- warp/examples/fem/example_diffusion.py +199 -173
- warp/examples/fem/example_diffusion_3d.py +178 -152
- warp/examples/fem/example_diffusion_mgpu.py +219 -214
- warp/examples/fem/example_mixed_elasticity.py +242 -222
- warp/examples/fem/example_navier_stokes.py +257 -243
- warp/examples/fem/example_stokes.py +218 -192
- warp/examples/fem/example_stokes_transfer.py +263 -249
- warp/examples/fem/mesh_utils.py +133 -109
- warp/examples/fem/plot_utils.py +292 -287
- warp/examples/optim/example_bounce.py +258 -246
- warp/examples/optim/example_cloth_throw.py +220 -209
- warp/examples/optim/example_diffray.py +564 -536
- warp/examples/optim/example_drone.py +862 -835
- warp/examples/optim/example_inverse_kinematics.py +174 -168
- warp/examples/optim/example_inverse_kinematics_torch.py +183 -169
- warp/examples/optim/example_spring_cage.py +237 -231
- warp/examples/optim/example_trajectory.py +221 -199
- warp/examples/optim/example_walker.py +304 -293
- warp/examples/sim/example_cartpole.py +137 -129
- warp/examples/sim/example_cloth.py +194 -186
- warp/examples/sim/example_granular.py +122 -111
- warp/examples/sim/example_granular_collision_sdf.py +195 -186
- warp/examples/sim/example_jacobian_ik.py +234 -214
- warp/examples/sim/example_particle_chain.py +116 -105
- warp/examples/sim/example_quadruped.py +191 -180
- warp/examples/sim/example_rigid_chain.py +195 -187
- warp/examples/sim/example_rigid_contact.py +187 -177
- warp/examples/sim/example_rigid_force.py +125 -125
- warp/examples/sim/example_rigid_gyroscopic.py +107 -95
- warp/examples/sim/example_rigid_soft_contact.py +132 -122
- warp/examples/sim/example_soft_body.py +188 -177
- warp/fabric.py +337 -335
- warp/fem/__init__.py +61 -27
- warp/fem/cache.py +403 -388
- warp/fem/dirichlet.py +178 -179
- warp/fem/domain.py +262 -263
- warp/fem/field/__init__.py +100 -101
- warp/fem/field/field.py +148 -149
- warp/fem/field/nodal_field.py +298 -299
- warp/fem/field/restriction.py +22 -21
- warp/fem/field/test.py +180 -181
- warp/fem/field/trial.py +183 -183
- warp/fem/geometry/__init__.py +16 -19
- warp/fem/geometry/closest_point.py +69 -70
- warp/fem/geometry/deformed_geometry.py +270 -271
- warp/fem/geometry/element.py +748 -744
- warp/fem/geometry/geometry.py +184 -186
- warp/fem/geometry/grid_2d.py +380 -373
- warp/fem/geometry/grid_3d.py +437 -435
- warp/fem/geometry/hexmesh.py +953 -953
- warp/fem/geometry/nanogrid.py +455 -0
- warp/fem/geometry/partition.py +374 -376
- warp/fem/geometry/quadmesh_2d.py +532 -532
- warp/fem/geometry/tetmesh.py +840 -840
- warp/fem/geometry/trimesh_2d.py +577 -577
- warp/fem/integrate.py +1684 -1615
- warp/fem/operator.py +190 -191
- warp/fem/polynomial.py +214 -213
- warp/fem/quadrature/__init__.py +2 -2
- warp/fem/quadrature/pic_quadrature.py +243 -245
- warp/fem/quadrature/quadrature.py +295 -294
- warp/fem/space/__init__.py +179 -292
- warp/fem/space/basis_space.py +522 -489
- warp/fem/space/collocated_function_space.py +100 -105
- warp/fem/space/dof_mapper.py +236 -236
- warp/fem/space/function_space.py +148 -145
- warp/fem/space/grid_2d_function_space.py +148 -267
- warp/fem/space/grid_3d_function_space.py +167 -306
- warp/fem/space/hexmesh_function_space.py +253 -352
- warp/fem/space/nanogrid_function_space.py +202 -0
- warp/fem/space/partition.py +350 -350
- warp/fem/space/quadmesh_2d_function_space.py +261 -369
- warp/fem/space/restriction.py +161 -160
- warp/fem/space/shape/__init__.py +90 -15
- warp/fem/space/shape/cube_shape_function.py +728 -738
- warp/fem/space/shape/shape_function.py +102 -103
- warp/fem/space/shape/square_shape_function.py +611 -611
- warp/fem/space/shape/tet_shape_function.py +565 -567
- warp/fem/space/shape/triangle_shape_function.py +429 -429
- warp/fem/space/tetmesh_function_space.py +224 -292
- warp/fem/space/topology.py +297 -295
- warp/fem/space/trimesh_2d_function_space.py +153 -221
- warp/fem/types.py +77 -77
- warp/fem/utils.py +495 -495
- warp/jax.py +166 -141
- warp/jax_experimental.py +341 -339
- warp/native/array.h +1081 -1025
- warp/native/builtin.h +1603 -1560
- warp/native/bvh.cpp +402 -398
- warp/native/bvh.cu +533 -525
- warp/native/bvh.h +430 -429
- warp/native/clang/clang.cpp +496 -464
- warp/native/crt.cpp +42 -32
- warp/native/crt.h +352 -335
- warp/native/cuda_crt.h +1049 -1049
- warp/native/cuda_util.cpp +549 -540
- warp/native/cuda_util.h +288 -203
- warp/native/cutlass_gemm.cpp +34 -34
- warp/native/cutlass_gemm.cu +372 -372
- warp/native/error.cpp +66 -66
- warp/native/error.h +27 -27
- warp/native/exports.h +187 -0
- warp/native/fabric.h +228 -228
- warp/native/hashgrid.cpp +301 -278
- warp/native/hashgrid.cu +78 -77
- warp/native/hashgrid.h +227 -227
- warp/native/initializer_array.h +32 -32
- warp/native/intersect.h +1204 -1204
- warp/native/intersect_adj.h +365 -365
- warp/native/intersect_tri.h +322 -322
- warp/native/marching.cpp +2 -2
- warp/native/marching.cu +497 -497
- warp/native/marching.h +2 -2
- warp/native/mat.h +1545 -1498
- warp/native/matnn.h +333 -333
- warp/native/mesh.cpp +203 -203
- warp/native/mesh.cu +292 -293
- warp/native/mesh.h +1887 -1887
- warp/native/nanovdb/GridHandle.h +366 -0
- warp/native/nanovdb/HostBuffer.h +590 -0
- warp/native/nanovdb/NanoVDB.h +6624 -4782
- warp/native/nanovdb/PNanoVDB.h +3390 -2553
- warp/native/noise.h +850 -850
- warp/native/quat.h +1112 -1085
- warp/native/rand.h +303 -299
- warp/native/range.h +108 -108
- warp/native/reduce.cpp +156 -156
- warp/native/reduce.cu +348 -348
- warp/native/runlength_encode.cpp +61 -61
- warp/native/runlength_encode.cu +46 -46
- warp/native/scan.cpp +30 -30
- warp/native/scan.cu +36 -36
- warp/native/scan.h +7 -7
- warp/native/solid_angle.h +442 -442
- warp/native/sort.cpp +94 -94
- warp/native/sort.cu +97 -97
- warp/native/sort.h +14 -14
- warp/native/sparse.cpp +337 -337
- warp/native/sparse.cu +544 -544
- warp/native/spatial.h +630 -630
- warp/native/svd.h +562 -562
- warp/native/temp_buffer.h +30 -30
- warp/native/vec.h +1177 -1133
- warp/native/volume.cpp +529 -297
- warp/native/volume.cu +58 -32
- warp/native/volume.h +960 -538
- warp/native/volume_builder.cu +446 -425
- warp/native/volume_builder.h +34 -19
- warp/native/volume_impl.h +61 -0
- warp/native/warp.cpp +1057 -1052
- warp/native/warp.cu +2949 -2828
- warp/native/warp.h +321 -305
- warp/optim/__init__.py +9 -9
- warp/optim/adam.py +120 -120
- warp/optim/linear.py +1104 -939
- warp/optim/sgd.py +104 -92
- warp/render/__init__.py +10 -10
- warp/render/render_opengl.py +3356 -3204
- warp/render/render_usd.py +768 -749
- warp/render/utils.py +152 -150
- warp/sim/__init__.py +52 -59
- warp/sim/articulation.py +685 -685
- warp/sim/collide.py +1594 -1590
- warp/sim/import_mjcf.py +489 -481
- warp/sim/import_snu.py +220 -221
- warp/sim/import_urdf.py +536 -516
- warp/sim/import_usd.py +887 -881
- warp/sim/inertia.py +316 -317
- warp/sim/integrator.py +234 -233
- warp/sim/integrator_euler.py +1956 -1956
- warp/sim/integrator_featherstone.py +1917 -1991
- warp/sim/integrator_xpbd.py +3288 -3312
- warp/sim/model.py +4473 -4314
- warp/sim/particles.py +113 -112
- warp/sim/render.py +417 -403
- warp/sim/utils.py +413 -410
- warp/sparse.py +1289 -1227
- warp/stubs.py +2192 -2469
- warp/tape.py +1162 -225
- warp/tests/__init__.py +1 -1
- warp/tests/__main__.py +4 -4
- warp/tests/assets/test_index_grid.nvdb +0 -0
- warp/tests/assets/torus.usda +105 -105
- warp/tests/aux_test_class_kernel.py +26 -26
- warp/tests/aux_test_compile_consts_dummy.py +10 -10
- warp/tests/aux_test_conditional_unequal_types_kernels.py +21 -21
- warp/tests/aux_test_dependent.py +20 -22
- warp/tests/aux_test_grad_customs.py +21 -23
- warp/tests/aux_test_reference.py +9 -11
- warp/tests/aux_test_reference_reference.py +8 -10
- warp/tests/aux_test_square.py +15 -17
- warp/tests/aux_test_unresolved_func.py +14 -14
- warp/tests/aux_test_unresolved_symbol.py +14 -14
- warp/tests/disabled_kinematics.py +237 -239
- warp/tests/run_coverage_serial.py +31 -31
- warp/tests/test_adam.py +155 -157
- warp/tests/test_arithmetic.py +1088 -1124
- warp/tests/test_array.py +2415 -2326
- warp/tests/test_array_reduce.py +148 -150
- warp/tests/test_async.py +666 -656
- warp/tests/test_atomic.py +139 -141
- warp/tests/test_bool.py +212 -149
- warp/tests/test_builtins_resolution.py +1290 -1292
- warp/tests/test_bvh.py +162 -171
- warp/tests/test_closest_point_edge_edge.py +227 -228
- warp/tests/test_codegen.py +562 -553
- warp/tests/test_compile_consts.py +217 -101
- warp/tests/test_conditional.py +244 -246
- warp/tests/test_copy.py +230 -215
- warp/tests/test_ctypes.py +630 -632
- warp/tests/test_dense.py +65 -67
- warp/tests/test_devices.py +89 -98
- warp/tests/test_dlpack.py +528 -529
- warp/tests/test_examples.py +403 -378
- warp/tests/test_fabricarray.py +952 -955
- warp/tests/test_fast_math.py +60 -54
- warp/tests/test_fem.py +1298 -1278
- warp/tests/test_fp16.py +128 -130
- warp/tests/test_func.py +336 -337
- warp/tests/test_generics.py +596 -571
- warp/tests/test_grad.py +885 -640
- warp/tests/test_grad_customs.py +331 -336
- warp/tests/test_hash_grid.py +208 -164
- warp/tests/test_import.py +37 -39
- warp/tests/test_indexedarray.py +1132 -1134
- warp/tests/test_intersect.py +65 -67
- warp/tests/test_jax.py +305 -307
- warp/tests/test_large.py +169 -164
- warp/tests/test_launch.py +352 -354
- warp/tests/test_lerp.py +217 -261
- warp/tests/test_linear_solvers.py +189 -171
- warp/tests/test_lvalue.py +419 -493
- warp/tests/test_marching_cubes.py +63 -65
- warp/tests/test_mat.py +1799 -1827
- warp/tests/test_mat_lite.py +113 -115
- warp/tests/test_mat_scalar_ops.py +2905 -2889
- warp/tests/test_math.py +124 -193
- warp/tests/test_matmul.py +498 -499
- warp/tests/test_matmul_lite.py +408 -410
- warp/tests/test_mempool.py +186 -190
- warp/tests/test_mesh.py +281 -324
- warp/tests/test_mesh_query_aabb.py +226 -241
- warp/tests/test_mesh_query_point.py +690 -702
- warp/tests/test_mesh_query_ray.py +290 -303
- warp/tests/test_mlp.py +274 -276
- warp/tests/test_model.py +108 -110
- warp/tests/test_module_hashing.py +111 -0
- warp/tests/test_modules_lite.py +36 -39
- warp/tests/test_multigpu.py +161 -163
- warp/tests/test_noise.py +244 -248
- warp/tests/test_operators.py +248 -250
- warp/tests/test_options.py +121 -125
- warp/tests/test_peer.py +131 -137
- warp/tests/test_pinned.py +76 -78
- warp/tests/test_print.py +52 -54
- warp/tests/test_quat.py +2084 -2086
- warp/tests/test_rand.py +324 -288
- warp/tests/test_reload.py +207 -217
- warp/tests/test_rounding.py +177 -179
- warp/tests/test_runlength_encode.py +188 -190
- warp/tests/test_sim_grad.py +241 -0
- warp/tests/test_sim_kinematics.py +89 -97
- warp/tests/test_smoothstep.py +166 -168
- warp/tests/test_snippet.py +303 -266
- warp/tests/test_sparse.py +466 -460
- warp/tests/test_spatial.py +2146 -2148
- warp/tests/test_special_values.py +362 -0
- warp/tests/test_streams.py +484 -473
- warp/tests/test_struct.py +708 -675
- warp/tests/test_tape.py +171 -148
- warp/tests/test_torch.py +741 -743
- warp/tests/test_transient_module.py +85 -87
- warp/tests/test_types.py +554 -659
- warp/tests/test_utils.py +488 -499
- warp/tests/test_vec.py +1262 -1268
- warp/tests/test_vec_lite.py +71 -73
- warp/tests/test_vec_scalar_ops.py +2097 -2099
- warp/tests/test_verify_fp.py +92 -94
- warp/tests/test_volume.py +961 -736
- warp/tests/test_volume_write.py +338 -265
- warp/tests/unittest_serial.py +38 -37
- warp/tests/unittest_suites.py +367 -359
- warp/tests/unittest_utils.py +434 -578
- warp/tests/unused_test_misc.py +69 -71
- warp/tests/walkthrough_debug.py +85 -85
- warp/thirdparty/appdirs.py +598 -598
- warp/thirdparty/dlpack.py +143 -143
- warp/thirdparty/unittest_parallel.py +563 -561
- warp/torch.py +321 -295
- warp/types.py +4941 -4450
- warp/utils.py +1008 -821
- {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/LICENSE.md +126 -126
- {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/METADATA +365 -400
- warp_lang-1.2.0.dist-info/RECORD +359 -0
- warp/examples/assets/cube.usda +0 -42
- warp/examples/assets/sphere.usda +0 -56
- warp/examples/assets/torus.usda +0 -105
- warp/examples/fem/example_convection_diffusion_dg0.py +0 -194
- warp/native/nanovdb/PNanoVDBWrite.h +0 -295
- warp_lang-1.0.2.dist-info/RECORD +0 -352
- {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/WHEEL +0 -0
- {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/top_level.txt +0 -0
warp/utils.py
CHANGED
@@ -1,821 +1,1008 @@

The 821 lines of the previous version are all removed and are not reproduced individually here. The new 1,008-line version of warp/utils.py follows; the listing below ends partway through the file, in the TimingResult docstring, where this capture stops.

# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

import cProfile
import ctypes
import os
import sys
import time
import warnings
from typing import Any

import numpy as np

import warp as wp
import warp.context
import warp.types

warnings_seen = set()


def warp_showwarning(message, category, filename, lineno, file=None, line=None):
    """Version of warnings.showwarning that always prints to sys.stdout."""

    if warp.config.verbose_warnings:
        s = f"Warp {category.__name__}: {message} ({filename}:{lineno})\n"

        if line is None:
            try:
                import linecache

                line = linecache.getline(filename, lineno)
            except Exception:
                # When a warning is logged during Python shutdown, linecache
                # and the import machinery don't work anymore
                line = None
                linecache = None
        else:
            line = line
        if line:
            line = line.strip()
            s += " %s\n" % line
    else:
        # simple warning
        s = f"Warp {category.__name__}: {message}\n"

    sys.stdout.write(s)


def warn(message, category=None, stacklevel=1):
    if (category, message) in warnings_seen:
        return

    with warnings.catch_warnings():
        warnings.simplefilter("default")  # Change the filter in this process
        warnings.showwarning = warp_showwarning
        warnings.warn(
            message,
            category,
            stacklevel=stacklevel + 1,  # Increment stacklevel by 1 since we are in a wrapper
        )

    if category is DeprecationWarning:
        warnings_seen.add((category, message))


# expand a 7-vec to a tuple of arrays
def transform_expand(t):
    return wp.transform(np.array(t[0:3]), np.array(t[3:7]))


@wp.func
def quat_between_vectors(a: wp.vec3, b: wp.vec3) -> wp.quat:
    """
    Compute the quaternion that rotates vector a to vector b
    """
    a = wp.normalize(a)
    b = wp.normalize(b)
    c = wp.cross(a, b)
    d = wp.dot(a, b)
    q = wp.quat(c[0], c[1], c[2], 1.0 + d)
    return wp.normalize(q)


def array_scan(in_array, out_array, inclusive=True):
    if in_array.device != out_array.device:
        raise RuntimeError("Array storage devices do not match")

    if in_array.size != out_array.size:
        raise RuntimeError("Array storage sizes do not match")

    if in_array.dtype != out_array.dtype:
        raise RuntimeError("Array data types do not match")

    if in_array.size == 0:
        return

    from warp.context import runtime

    if in_array.device.is_cpu:
        if in_array.dtype == wp.int32:
            runtime.core.array_scan_int_host(in_array.ptr, out_array.ptr, in_array.size, inclusive)
        elif in_array.dtype == wp.float32:
            runtime.core.array_scan_float_host(in_array.ptr, out_array.ptr, in_array.size, inclusive)
        else:
            raise RuntimeError("Unsupported data type")
    elif in_array.device.is_cuda:
        if in_array.dtype == wp.int32:
            runtime.core.array_scan_int_device(in_array.ptr, out_array.ptr, in_array.size, inclusive)
        elif in_array.dtype == wp.float32:
            runtime.core.array_scan_float_device(in_array.ptr, out_array.ptr, in_array.size, inclusive)
        else:
            raise RuntimeError("Unsupported data type")
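
A minimal usage sketch for array_scan (editor's illustration, not part of the package source; it assumes an initialized Warp runtime and a CUDA device named "cuda:0", but "cpu" works the same way):

import warp as wp
import warp.utils

wp.init()

values = wp.array([1, 2, 3, 4], dtype=wp.int32, device="cuda:0")
prefix = wp.zeros_like(values)

# inclusive prefix sum: [1, 3, 6, 10]
wp.utils.array_scan(values, prefix, inclusive=True)

# exclusive prefix sum: [0, 1, 3, 6]
wp.utils.array_scan(values, prefix, inclusive=False)
print(prefix.numpy())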


def radix_sort_pairs(keys, values, count: int):
    if keys.device != values.device:
        raise RuntimeError("Array storage devices do not match")

    if count == 0:
        return

    if keys.size < 2 * count or values.size < 2 * count:
        raise RuntimeError("Array storage must be large enough to contain 2*count elements")

    from warp.context import runtime

    if keys.device.is_cpu:
        if keys.dtype == wp.int32 and values.dtype == wp.int32:
            runtime.core.radix_sort_pairs_int_host(keys.ptr, values.ptr, count)
        else:
            raise RuntimeError("Unsupported data type")
    elif keys.device.is_cuda:
        if keys.dtype == wp.int32 and values.dtype == wp.int32:
            runtime.core.radix_sort_pairs_int_device(keys.ptr, values.ptr, count)
        else:
            raise RuntimeError("Unsupported data type")
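
An illustrative call to radix_sort_pairs (editor's sketch, not package source). Both arrays must provide 2*count elements of storage; the second half is scratch space used by the sort:

import warp as wp
import warp.utils

wp.init()

n = 4
# only the first n entries are meaningful; the remaining n entries are scratch
keys = wp.array([3, 1, 2, 0, 0, 0, 0, 0], dtype=wp.int32, device="cuda:0")
vals = wp.array([30, 10, 20, 0, 0, 0, 0, 0], dtype=wp.int32, device="cuda:0")

wp.utils.radix_sort_pairs(keys, vals, n)
# keys.numpy()[:n] -> [0, 1, 2, 3], vals.numpy()[:n] -> [0, 10, 20, 30]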


def runlength_encode(values, run_values, run_lengths, run_count=None, value_count=None):
    if run_values.device != values.device or run_lengths.device != values.device:
        raise RuntimeError("Array storage devices do not match")

    if value_count is None:
        value_count = values.size

    if run_values.size < value_count or run_lengths.size < value_count:
        raise RuntimeError("Output array storage sizes must be at least equal to value_count")

    if values.dtype != run_values.dtype:
        raise RuntimeError("values and run_values data types do not match")

    if run_lengths.dtype != wp.int32:
        raise RuntimeError("run_lengths array must be of type int32")

    # User can provide a device output array for storing the number of runs
    # For convenience, if no such array is provided, number of runs is returned on host
    if run_count is None:
        if value_count == 0:
            return 0
        run_count = wp.empty(shape=(1,), dtype=int, device=values.device)
        host_return = True
    else:
        if run_count.device != values.device:
            raise RuntimeError("run_count storage device does not match other arrays")
        if run_count.dtype != wp.int32:
            raise RuntimeError("run_count array must be of type int32")
        if value_count == 0:
            run_count.zero_()
            return 0
        host_return = False

    from warp.context import runtime

    if values.device.is_cpu:
        if values.dtype == wp.int32:
            runtime.core.runlength_encode_int_host(
                values.ptr, run_values.ptr, run_lengths.ptr, run_count.ptr, value_count
            )
        else:
            raise RuntimeError("Unsupported data type")
    elif values.device.is_cuda:
        if values.dtype == wp.int32:
            runtime.core.runlength_encode_int_device(
                values.ptr, run_values.ptr, run_lengths.ptr, run_count.ptr, value_count
            )
        else:
            raise RuntimeError("Unsupported data type")

    if host_return:
        return int(run_count.numpy()[0])
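
A sketch of runlength_encode (editor's illustration, not package source). When no run_count array is supplied, the number of runs is returned to the host:

import warp as wp
import warp.utils

wp.init()

values = wp.array([7, 7, 2, 2, 2, 5], dtype=wp.int32, device="cuda:0")
run_values = wp.empty(values.size, dtype=wp.int32, device="cuda:0")
run_lengths = wp.empty(values.size, dtype=wp.int32, device="cuda:0")

num_runs = wp.utils.runlength_encode(values, run_values, run_lengths)
# num_runs == 3, run_values[:3] -> [7, 2, 5], run_lengths[:3] -> [2, 3, 1]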


def array_sum(values, out=None, value_count=None, axis=None):
    if value_count is None:
        if axis is None:
            value_count = values.size
        else:
            value_count = values.shape[axis]

    if axis is None:
        output_shape = (1,)
    else:

        def output_dim(ax, dim):
            return 1 if ax == axis else dim

        output_shape = tuple(output_dim(ax, dim) for ax, dim in enumerate(values.shape))

    type_length = wp.types.type_length(values.dtype)
    scalar_type = wp.types.type_scalar_type(values.dtype)

    # User can provide a device output array for storing the number of runs
    # For convenience, if no such array is provided, number of runs is returned on host
    if out is None:
        host_return = True
        out = wp.empty(shape=output_shape, dtype=values.dtype, device=values.device)
    else:
        host_return = False
        if out.device != values.device:
            raise RuntimeError("out storage device should match values array")
        if out.dtype != values.dtype:
            raise RuntimeError(f"out array should have type {values.dtype.__name__}")
        if out.shape != output_shape:
            raise RuntimeError(f"out array should have shape {output_shape}")

    if value_count == 0:
        out.zero_()
        if axis is None and host_return:
            return out.numpy()[0]
        return out

    from warp.context import runtime

    if values.device.is_cpu:
        if scalar_type == wp.float32:
            native_func = runtime.core.array_sum_float_host
        elif scalar_type == wp.float64:
            native_func = runtime.core.array_sum_double_host
        else:
            raise RuntimeError("Unsupported data type")
    elif values.device.is_cuda:
        if scalar_type == wp.float32:
            native_func = runtime.core.array_sum_float_device
        elif scalar_type == wp.float64:
            native_func = runtime.core.array_sum_double_device
        else:
            raise RuntimeError("Unsupported data type")

    if axis is None:
        stride = wp.types.type_size_in_bytes(values.dtype)
        native_func(values.ptr, out.ptr, value_count, stride, type_length)

        if host_return:
            return out.numpy()[0]
    else:
        stride = values.strides[axis]
        for idx in np.ndindex(output_shape):
            out_offset = sum(i * s for i, s in zip(idx, out.strides))
            val_offset = sum(i * s for i, s in zip(idx, values.strides))

            native_func(
                values.ptr + val_offset,
                out.ptr + out_offset,
                value_count,
                stride,
                type_length,
            )

        if host_return:
            return out


def array_inner(a, b, out=None, count=None, axis=None):
    if a.size != b.size:
        raise RuntimeError("Array storage sizes do not match")

    if a.device != b.device:
        raise RuntimeError("Array storage devices do not match")

    if a.dtype != b.dtype:
        raise RuntimeError("Array data types do not match")

    if count is None:
        if axis is None:
            count = a.size
        else:
            count = a.shape[axis]

    if axis is None:
        output_shape = (1,)
    else:

        def output_dim(ax, dim):
            return 1 if ax == axis else dim

        output_shape = tuple(output_dim(ax, dim) for ax, dim in enumerate(a.shape))

    type_length = wp.types.type_length(a.dtype)
    scalar_type = wp.types.type_scalar_type(a.dtype)

    # User can provide a device output array for storing the number of runs
    # For convenience, if no such array is provided, number of runs is returned on host
    if out is None:
        host_return = True
        out = wp.empty(shape=output_shape, dtype=scalar_type, device=a.device)
    else:
        host_return = False
        if out.device != a.device:
            raise RuntimeError("out storage device should match values array")
        if out.dtype != scalar_type:
            raise RuntimeError(f"out array should have type {scalar_type.__name__}")
        if out.shape != output_shape:
            raise RuntimeError(f"out array should have shape {output_shape}")

    if count == 0:
        if axis is None and host_return:
            return 0.0
        out.zero_()
        return out

    from warp.context import runtime

    if a.device.is_cpu:
        if scalar_type == wp.float32:
            native_func = runtime.core.array_inner_float_host
        elif scalar_type == wp.float64:
            native_func = runtime.core.array_inner_double_host
        else:
            raise RuntimeError("Unsupported data type")
    elif a.device.is_cuda:
        if scalar_type == wp.float32:
            native_func = runtime.core.array_inner_float_device
        elif scalar_type == wp.float64:
            native_func = runtime.core.array_inner_double_device
        else:
            raise RuntimeError("Unsupported data type")

    if axis is None:
        stride_a = wp.types.type_size_in_bytes(a.dtype)
        stride_b = wp.types.type_size_in_bytes(b.dtype)
        native_func(a.ptr, b.ptr, out.ptr, count, stride_a, stride_b, type_length)

        if host_return:
            return out.numpy()[0]
    else:
        stride_a = a.strides[axis]
        stride_b = b.strides[axis]

        for idx in np.ndindex(output_shape):
            out_offset = sum(i * s for i, s in zip(idx, out.strides))
            a_offset = sum(i * s for i, s in zip(idx, a.strides))
            b_offset = sum(i * s for i, s in zip(idx, b.strides))

            native_func(
                a.ptr + a_offset,
                b.ptr + b_offset,
                out.ptr + out_offset,
                count,
                stride_a,
                stride_b,
                type_length,
            )

        if host_return:
            return out
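
A sketch of array_sum and array_inner (editor's illustration, not package source). Passing axis reduces along one dimension and returns a device array; omitting it reduces everything and returns a host scalar:

import numpy as np
import warp as wp
import warp.utils

wp.init()

a = wp.array(np.arange(6, dtype=np.float32).reshape(2, 3), dtype=wp.float32, device="cuda:0")
total = wp.utils.array_sum(a)          # host scalar: 15.0
cols = wp.utils.array_sum(a, axis=0)   # wp.array of shape (1, 3): [[3., 5., 7.]]

x = wp.array([1.0, 2.0, 3.0], dtype=wp.float32, device="cuda:0")
y = wp.array([4.0, 5.0, 6.0], dtype=wp.float32, device="cuda:0")
dot = wp.utils.array_inner(x, y)       # host scalar: 32.0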


@wp.kernel
def _array_cast_kernel(
    dest: Any,
    src: Any,
):
    i = wp.tid()
    dest[i] = dest.dtype(src[i])


def array_cast(in_array, out_array, count=None):
    if in_array.device != out_array.device:
        raise RuntimeError("Array storage devices do not match")

    in_array_data_shape = getattr(in_array.dtype, "_shape_", ())
    out_array_data_shape = getattr(out_array.dtype, "_shape_", ())

    if in_array.ndim != out_array.ndim or in_array_data_shape != out_array_data_shape:
        # Number of dimensions or data type shape do not match.
        # Flatten arrays and do cast at the scalar level
        in_array = in_array.flatten()
        out_array = out_array.flatten()

        in_array_data_length = warp.types.type_length(in_array.dtype)
        out_array_data_length = warp.types.type_length(out_array.dtype)
        in_array_scalar_type = wp.types.type_scalar_type(in_array.dtype)
        out_array_scalar_type = wp.types.type_scalar_type(out_array.dtype)

        in_array = wp.array(
            data=None,
            ptr=in_array.ptr,
            capacity=in_array.capacity,
            device=in_array.device,
            dtype=in_array_scalar_type,
            shape=in_array.shape[0] * in_array_data_length,
        )

        out_array = wp.array(
            data=None,
            ptr=out_array.ptr,
            capacity=out_array.capacity,
            device=out_array.device,
            dtype=out_array_scalar_type,
            shape=out_array.shape[0] * out_array_data_length,
        )

        if count is not None:
            count *= in_array_data_length

    if count is None:
        count = in_array.size

    if in_array.ndim == 1:
        dim = count
    elif count < in_array.size:
        raise RuntimeError("Partial cast is not supported for arrays with more than one dimension")
    else:
        dim = in_array.shape

    if in_array.dtype == out_array.dtype:
        # Same data type, can simply copy
        wp.copy(dest=out_array, src=in_array, count=count)
    else:
        wp.launch(kernel=_array_cast_kernel, dim=dim, inputs=[out_array, in_array], device=out_array.device)
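
A sketch of array_cast (editor's illustration, not package source); it copies when the dtypes already match and otherwise launches the per-element cast kernel above:

import warp as wp
import warp.utils

wp.init()

src = wp.array([1, 2, 3], dtype=wp.int32, device="cuda:0")
dst = wp.empty(src.shape, dtype=wp.float32, device="cuda:0")

wp.utils.array_cast(src, dst)
# dst.numpy() -> [1.0, 2.0, 3.0]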


# code snippet for invoking cProfile
# cp = cProfile.Profile()
# cp.enable()
# for i in range(1000):
#     self.state = self.integrator.forward(self.model, self.state, self.sim_dt)

# cp.disable()
# cp.print_stats(sort='tottime')
# exit(0)


# helper kernels for initializing NVDB volumes from a dense array
@wp.kernel
def copy_dense_volume_to_nano_vdb_v(volume: wp.uint64, values: wp.array(dtype=wp.vec3, ndim=3)):
    i, j, k = wp.tid()
    wp.volume_store_v(volume, i, j, k, values[i, j, k])


@wp.kernel
def copy_dense_volume_to_nano_vdb_f(volume: wp.uint64, values: wp.array(dtype=wp.float32, ndim=3)):
    i, j, k = wp.tid()
    wp.volume_store_f(volume, i, j, k, values[i, j, k])


@wp.kernel
def copy_dense_volume_to_nano_vdb_i(volume: wp.uint64, values: wp.array(dtype=wp.int32, ndim=3)):
    i, j, k = wp.tid()
    wp.volume_store_i(volume, i, j, k, values[i, j, k])
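
A sketch of filling a NanoVDB volume from a dense array using the helper kernels above (editor's illustration, not package source; it assumes a wp.Volume.allocate classmethod with min/max/voxel_size/bg_value keyword arguments, as in this Warp release):

import warp as wp
import warp.utils

wp.init()

values = wp.full((8, 8, 8), 1.5, dtype=wp.float32, device="cuda:0")
volume = wp.Volume.allocate(min=[0, 0, 0], max=[7, 7, 7], voxel_size=0.1, bg_value=0.0, device="cuda:0")

# write every dense voxel into the sparse volume
wp.launch(
    wp.utils.copy_dense_volume_to_nano_vdb_f,
    dim=values.shape,
    inputs=[volume.id, values],
    device="cuda:0",
)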


# represent an edge between v0, v1 with connected faces f0, f1, and opposite vertex o0, and o1
# winding is such that first tri can be reconstructed as {v0, v1, o0}, and second tri as { v1, v0, o1 }
class MeshEdge:
    def __init__(self, v0, v1, o0, o1, f0, f1):
        self.v0 = v0  # vertex 0
        self.v1 = v1  # vertex 1
        self.o0 = o0  # opposite vertex 1
        self.o1 = o1  # opposite vertex 2
        self.f0 = f0  # index of tri1
        self.f1 = f1  # index of tri2


class MeshAdjacency:
    def __init__(self, indices, num_tris):
        # map edges (v0, v1) to faces (f0, f1)
        self.edges = {}
        self.indices = indices

        for index, tri in enumerate(indices):
            self.add_edge(tri[0], tri[1], tri[2], index)
            self.add_edge(tri[1], tri[2], tri[0], index)
            self.add_edge(tri[2], tri[0], tri[1], index)

    def add_edge(self, i0, i1, o, f):  # index1, index2, index3, index of triangle
        key = (min(i0, i1), max(i0, i1))
        edge = None

        if key in self.edges:
            edge = self.edges[key]

            if edge.f1 != -1:
                print("Detected non-manifold edge")
                return
            else:
                # update other side of the edge
                edge.o1 = o
                edge.f1 = f
        else:
            # create new edge with opposite yet to be filled
            edge = MeshEdge(i0, i1, o, -1, f, -1)

        self.edges[key] = edge


def mem_report():  # pragma: no cover
    def _mem_report(tensors, mem_type):
        """Print the selected tensors of type
        There are two major storage types in our major concern:
            - GPU: tensors transferred to CUDA devices
            - CPU: tensors remaining on the system memory (usually unimportant)
        Args:
            - tensors: the tensors of specified type
            - mem_type: 'CPU' or 'GPU' in current implementation"""
        total_numel = 0
        total_mem = 0
        visited_data = []
        for tensor in tensors:
            if tensor.is_sparse:
                continue
            # a data_ptr indicates a memory block allocated
            data_ptr = tensor.storage().data_ptr()
            if data_ptr in visited_data:
                continue
            visited_data.append(data_ptr)

            numel = tensor.storage().size()
            total_numel += numel
            element_size = tensor.storage().element_size()
            mem = numel * element_size / 1024 / 1024  # 32bit=4Byte, MByte
            total_mem += mem
        print("Type: %s Total Tensors: %d \tUsed Memory Space: %.2f MBytes" % (mem_type, total_numel, total_mem))

    import gc

    import torch

    gc.collect()

    LEN = 65
    objects = gc.get_objects()
    # print('%s\t%s\t\t\t%s' %('Element type', 'Size', 'Used MEM(MBytes)') )
    tensors = [obj for obj in objects if torch.is_tensor(obj)]
    cuda_tensors = [t for t in tensors if t.is_cuda]
    host_tensors = [t for t in tensors if not t.is_cuda]
    _mem_report(cuda_tensors, "GPU")
    _mem_report(host_tensors, "CPU")
    print("=" * LEN)


class ScopedDevice:
    def __init__(self, device):
        self.device = wp.get_device(device)

    def __enter__(self):
        # save the previous default device
        self.saved_device = self.device.runtime.default_device

        # make this the default device
        self.device.runtime.default_device = self.device

        # make it the current CUDA device so that device alias "cuda" will evaluate to this device
        self.device.context_guard.__enter__()

        return self.device

    def __exit__(self, exc_type, exc_value, traceback):
        # restore original CUDA context
        self.device.context_guard.__exit__(exc_type, exc_value, traceback)

        # restore original target device
        self.device.runtime.default_device = self.saved_device


class ScopedStream:
    def __init__(self, stream, sync_enter=True, sync_exit=False):
        self.stream = stream
        self.sync_enter = sync_enter
        self.sync_exit = sync_exit
        if stream is not None:
            self.device = stream.device
            self.device_scope = ScopedDevice(self.device)

    def __enter__(self):
        if self.stream is not None:
            self.device_scope.__enter__()
            self.saved_stream = self.device.stream
            self.device.set_stream(self.stream, self.sync_enter)

        return self.stream

    def __exit__(self, exc_type, exc_value, traceback):
        if self.stream is not None:
            self.device.set_stream(self.saved_stream, self.sync_exit)
            self.device_scope.__exit__(exc_type, exc_value, traceback)
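
A sketch of ScopedDevice and ScopedStream (editor's illustration, not package source; the `scale` kernel and the "cuda:0" device are assumptions, and both classes are also re-exported as wp.ScopedDevice and wp.ScopedStream):

import warp as wp

wp.init()

@wp.kernel
def scale(a: wp.array(dtype=float), s: float):
    i = wp.tid()
    a[i] = a[i] * s

with wp.ScopedDevice("cuda:0"):
    a = wp.full(8, 2.0, dtype=float)      # allocated on cuda:0, the scoped default device

    stream = wp.Stream("cuda:0")
    with wp.ScopedStream(stream):
        wp.launch(scale, dim=a.size, inputs=[a, 3.0])   # enqueued on the scoped stream

wp.synchronize_device("cuda:0")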


TIMING_KERNEL = 1
TIMING_KERNEL_BUILTIN = 2
TIMING_MEMCPY = 4
TIMING_MEMSET = 8
TIMING_GRAPH = 16
TIMING_ALL = 0xFFFFFFFF


# timer utils
class ScopedTimer:
    indent = -1

    enabled = True

    def __init__(
        self,
        name,
        active=True,
        print=True,
        detailed=False,
        dict=None,
        use_nvtx=False,
        color="rapids",
        synchronize=False,
        cuda_filter=0,
        report_func=None,
        skip_tape=False,
    ):
        """Context manager object for a timer

        Parameters:
            name (str): Name of timer
            active (bool): Enables this timer
            print (bool): At context manager exit, print elapsed time to sys.stdout
            detailed (bool): Collects additional profiling data using cProfile and calls ``print_stats()`` at context exit
            dict (dict): A dictionary of lists to which the elapsed time will be appended using ``name`` as a key
            use_nvtx (bool): If true, timing functionality is replaced by an NVTX range
            color (int or str): ARGB value (e.g. 0x00FFFF) or color name (e.g. 'cyan') associated with the NVTX range
            synchronize (bool): Synchronize the CPU thread with any outstanding CUDA work to return accurate GPU timings
            cuda_filter (int): Filter flags for CUDA activity timing, e.g. ``warp.TIMING_KERNEL`` or ``warp.TIMING_ALL``
            report_func (Callable): A callback function to print the activity report (``wp.timing_print()`` is used by default)
            skip_tape (bool): If true, the timer will not be recorded in the tape

        Attributes:
            elapsed (float): The duration of the ``with`` block used with this object
            timing_results (list[TimingResult]): The list of activity timing results, if collection was requested using ``cuda_filter``
        """
        self.name = name
        self.active = active and self.enabled
        self.print = print
        self.detailed = detailed
        self.dict = dict
        self.use_nvtx = use_nvtx
        self.color = color
        self.synchronize = synchronize
        self.skip_tape = skip_tape
        self.elapsed = 0.0
        self.cuda_filter = cuda_filter
        self.report_func = report_func or wp.timing_print

        if self.dict is not None:
            if name not in self.dict:
                self.dict[name] = []

    def __enter__(self):
        if not self.skip_tape and warp.context.runtime is not None and warp.context.runtime.tape is not None:
            warp.context.runtime.tape.record_scope_begin(self.name)
        if self.active:
            if self.synchronize:
                wp.synchronize()

            if self.cuda_filter:
                # begin CUDA activity collection, synchronizing if needed
                timing_begin(self.cuda_filter, synchronize=not self.synchronize)

            if self.detailed:
                self.cp = cProfile.Profile()
                self.cp.clear()
                self.cp.enable()

            if self.use_nvtx:
                import nvtx

                self.nvtx_range_id = nvtx.start_range(self.name, color=self.color)

            if self.print:
                ScopedTimer.indent += 1

            self.start = time.perf_counter_ns()

        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if not self.skip_tape and warp.context.runtime is not None and warp.context.runtime.tape is not None:
            warp.context.runtime.tape.record_scope_end()
        if self.active:
            if self.synchronize:
                wp.synchronize()

            self.elapsed = (time.perf_counter_ns() - self.start) / 1000000.0

            if self.use_nvtx:
                import nvtx

                nvtx.end_range(self.nvtx_range_id)

            if self.detailed:
                self.cp.disable()
                self.cp.print_stats(sort="tottime")

            if self.cuda_filter:
                # end CUDA activity collection, synchronizing if needed
                self.timing_results = timing_end(synchronize=not self.synchronize)
            else:
                self.timing_results = []

            if self.dict is not None:
                self.dict[self.name].append(self.elapsed)

            if self.print:
                indent = "\t" * ScopedTimer.indent

                if self.timing_results:
                    self.report_func(self.timing_results, indent=indent)
                    print()

                print(f"{indent}{self.name} took {self.elapsed :.2f} ms")

                ScopedTimer.indent -= 1
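
A sketch of ScopedTimer with CUDA activity timing (editor's illustration, not package source; the `saxpy` kernel is an assumption). cuda_filter uses the TIMING_* flags defined above, and synchronize=True makes the wall-clock figure meaningful for GPU work:

import warp as wp

wp.init()

@wp.kernel
def saxpy(x: wp.array(dtype=float), y: wp.array(dtype=float), a: float):
    i = wp.tid()
    y[i] = a * x[i] + y[i]

x = wp.full(1024, 1.0, dtype=float, device="cuda:0")
y = wp.full(1024, 2.0, dtype=float, device="cuda:0")

with wp.ScopedTimer("saxpy", synchronize=True, cuda_filter=wp.TIMING_KERNEL) as timer:
    wp.launch(saxpy, dim=x.size, inputs=[x, y, 0.5], device="cuda:0")

print(timer.elapsed)          # elapsed milliseconds for the block
print(timer.timing_results)   # per-activity TimingResult entries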


# Allow temporarily enabling/disabling mempool allocators
class ScopedMempool:
    def __init__(self, device, enable: bool):
        self.device = wp.get_device(device)
        self.enable = enable

    def __enter__(self):
        self.saved_setting = wp.is_mempool_enabled(self.device)
        wp.set_mempool_enabled(self.device, self.enable)

    def __exit__(self, exc_type, exc_value, traceback):
        wp.set_mempool_enabled(self.device, self.saved_setting)


# Allow temporarily enabling/disabling mempool access
class ScopedMempoolAccess:
    def __init__(self, target_device, peer_device, enable: bool):
        self.target_device = target_device
        self.peer_device = peer_device
        self.enable = enable

    def __enter__(self):
        self.saved_setting = wp.is_mempool_access_enabled(self.target_device, self.peer_device)
        wp.set_mempool_access_enabled(self.target_device, self.peer_device, self.enable)

    def __exit__(self, exc_type, exc_value, traceback):
        wp.set_mempool_access_enabled(self.target_device, self.peer_device, self.saved_setting)


# Allow temporarily enabling/disabling peer access
class ScopedPeerAccess:
    def __init__(self, target_device, peer_device, enable: bool):
        self.target_device = target_device
        self.peer_device = peer_device
        self.enable = enable

    def __enter__(self):
        self.saved_setting = wp.is_peer_access_enabled(self.target_device, self.peer_device)
        wp.set_peer_access_enabled(self.target_device, self.peer_device, self.enable)

    def __exit__(self, exc_type, exc_value, traceback):
        wp.set_peer_access_enabled(self.target_device, self.peer_device, self.saved_setting)
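
A sketch of the mempool and peer-access guards (editor's illustration, not package source; the peer-access part assumes two CUDA devices, "cuda:0" and "cuda:1", that support direct peer access):

import warp as wp
from warp.utils import ScopedMempool, ScopedPeerAccess

wp.init()

# temporarily force the stream-ordered mempool allocator on for cuda:0
with ScopedMempool("cuda:0", True):
    a = wp.zeros(1 << 20, dtype=wp.float32, device="cuda:0")

# temporarily let cuda:1 access cuda:0 allocations directly while copying
with ScopedPeerAccess("cuda:0", "cuda:1", True):
    b = wp.zeros(1024, dtype=wp.float32, device="cuda:0")
    c = wp.empty_like(b, device="cuda:1")
    wp.copy(dest=c, src=b)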
|
|
776
|
+
|
|
777
|
+
|
|
778
|
+
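ScopedMempoolAccess and ScopedPeerAccess follow the same save/apply/restore pattern for inter-GPU access. The sketch below is an editorial illustration only; it assumes a machine with `cuda:0` and `cuda:1`, that direct access between them is supported, and that both context managers are re-exported at the top level like the other Scoped* helpers.

target, peer = wp.get_device("cuda:0"), wp.get_device("cuda:1")

# temporarily allow cuda:1 to access cuda:0's mempool allocations and enable classic peer access
with wp.ScopedMempoolAccess(target, peer, True), wp.ScopedPeerAccess(target, peer, True):
    src = wp.zeros(1024, dtype=wp.float32, device=target)
    dst = wp.empty(1024, dtype=wp.float32, device=peer)
    wp.copy(dst, src)
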
class ScopedCapture:
    def __init__(self, device=None, stream=None, force_module_load=None, external=False):
        self.device = device
        self.stream = stream
        self.force_module_load = force_module_load
        self.external = external
        self.active = False
        self.graph = None

    def __enter__(self):
        try:
            wp.capture_begin(
                device=self.device, stream=self.stream, force_module_load=self.force_module_load, external=self.external
            )
            self.active = True
            return self
        except:
            raise

    def __exit__(self, exc_type, exc_value, traceback):
        if self.active:
            try:
                self.graph = wp.capture_end(device=self.device, stream=self.stream)
            finally:
                self.active = False

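ScopedCapture stores the captured CUDA graph on the instance (`self.graph`), so it can be replayed after the `with` block. A short sketch (editorial illustration; reuses the `scale` kernel and array `a` from the first sketch and assumes a CUDA device):

with wp.ScopedCapture(device="cuda:0") as capture:
    wp.launch(scale, dim=a.shape[0], inputs=[a, 2.0])

# replay the captured graph repeatedly without re-recording the launches
for _ in range(10):
    wp.capture_launch(capture.graph)
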
# helper kernels for adj_matmul
@wp.kernel
def add_kernel_2d(x: wp.array2d(dtype=Any), acc: wp.array2d(dtype=Any), beta: Any):
    i, j = wp.tid()

    x[i, j] = x[i, j] + beta * acc[i, j]


@wp.kernel
def add_kernel_3d(x: wp.array3d(dtype=Any), acc: wp.array3d(dtype=Any), beta: Any):
    i, j, k = wp.tid()

    x[i, j, k] = x[i, j, k] + beta * acc[i, j, k]


# explicit instantiations of generic kernels for adj_matmul
for T in [wp.float16, wp.float32, wp.float64]:
    wp.overload(add_kernel_2d, [wp.array2d(dtype=T), wp.array2d(dtype=T), T])
    wp.overload(add_kernel_3d, [wp.array3d(dtype=T), wp.array3d(dtype=T), T])

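The explicit `wp.overload` declarations instantiate the generic kernels for concrete dtypes, so they can be launched directly with typed arguments. A small sketch (editorial, not part of the file; these helpers are internal to warp.utils and the array values are arbitrary):

x = wp.full((4, 4), 1.0, dtype=wp.float32, device="cuda:0")
acc = wp.full((4, 4), 2.0, dtype=wp.float32, device="cuda:0")

# computes x += 0.5 * acc elementwise via the float32 instantiation of add_kernel_2d
wp.launch(add_kernel_2d, dim=x.shape, inputs=[x, acc, wp.float32(0.5)])
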
def check_iommu():
    """Check if IOMMU is enabled on Linux, which can affect peer-to-peer transfers.

    Returns:
        A Boolean indicating whether IOMMU is configured properly for peer-to-peer transfers.
        On Linux, this function attempts to determine if IOMMU is enabled and will return `False` if IOMMU is detected.
        On other operating systems, it always returns `True`.
    """

    if sys.platform == "linux":
        # On modern Linux, there should be IOMMU-related entries in the /sys file system.
        # This should be more reliable than checking kernel logs like dmesg.
        if os.path.isdir("/sys/class/iommu") and os.listdir("/sys/class/iommu"):
            return False
        if os.path.isdir("/sys/kernel/iommu_groups") and os.listdir("/sys/kernel/iommu_groups"):
            return False

        # HACK: disable P2P tests on misbehaving agents
        disable_p2p_tests = os.getenv("WARP_DISABLE_P2P_TESTS", default="0")
        if int(disable_p2p_tests):
            return False

        return True
    else:
        # doesn't matter
        return True

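check_iommu is intended to decide whether peer-to-peer transfers (and tests that rely on them) are worth attempting. A sketch of how a caller might guard on it (editorial; `run_p2p_benchmarks` is a hypothetical caller-defined routine):

from warp.utils import check_iommu

if check_iommu():
    # no IOMMU detected (or not Linux): peer-to-peer transfers are expected to work
    run_p2p_benchmarks()  # hypothetical
else:
    print("Skipping peer-to-peer tests: IOMMU detected or WARP_DISABLE_P2P_TESTS is set")
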
class timing_result_t(ctypes.Structure):
    """CUDA timing struct for fetching values from C++"""

    _fields_ = [
        ("context", ctypes.c_void_p),
        ("name", ctypes.c_char_p),
        ("filter", ctypes.c_int),
        ("elapsed", ctypes.c_float),
    ]


class TimingResult:
    """Timing result for a single activity.

    Parameters:
        raw_result (warp.utils.timing_result_t): The result structure obtained from C++ (internal use only)

    Attributes:
        device (warp.Device): The device where the activity was recorded.
        name (str): The activity name.
        filter (int): The type of activity (e.g., ``warp.TIMING_KERNEL``).
        elapsed (float): The elapsed time in milliseconds.
    """

    def __init__(self, device, name, filter, elapsed):
        self.device = device
        self.name = name
        self.filter = filter
        self.elapsed = elapsed

def timing_begin(cuda_filter=TIMING_ALL, synchronize=True):
    """Begin detailed activity timing.

    Parameters:
        cuda_filter (int): Filter flags for CUDA activity timing, e.g. ``warp.TIMING_KERNEL`` or ``warp.TIMING_ALL``
        synchronize (bool): Whether to synchronize all CUDA devices before timing starts
    """

    if synchronize:
        warp.synchronize()

    warp.context.runtime.core.cuda_timing_begin(cuda_filter)

def timing_end(synchronize=True):
    """End detailed activity timing.

    Parameters:
        synchronize (bool): Whether to synchronize all CUDA devices before timing ends

    Returns:
        list[TimingResult]: A list of ``TimingResult`` objects for all recorded activities.
    """

    if synchronize:
        warp.synchronize()

    # get result count
    count = warp.context.runtime.core.cuda_timing_get_result_count()

    # get result array from C++
    result_buffer = (timing_result_t * count)()
    warp.context.runtime.core.cuda_timing_end(ctypes.byref(result_buffer), count)

    # prepare Python result list
    results = []
    for r in result_buffer:
        device = warp.context.runtime.context_map.get(r.context)
        filter = r.filter
        elapsed = r.elapsed

        name = r.name.decode()
        if filter == TIMING_KERNEL:
            if name.endswith("forward"):
                # strip trailing "_cuda_kernel_forward"
                name = f"forward kernel {name[:-20]}"
            else:
                # strip trailing "_cuda_kernel_backward"
                name = f"backward kernel {name[:-21]}"
        elif filter == TIMING_KERNEL_BUILTIN:
            if name.startswith("wp::"):
                name = f"builtin kernel {name[4:]}"
            else:
                name = f"builtin kernel {name}"

        results.append(TimingResult(device, name, filter, elapsed))

    return results

def timing_print(results, indent=""):
    """Print timing results.

    Parameters:
        results (list[TimingResult]): List of ``TimingResult`` objects.
        indent (str): Optional indentation for the output.
    """

    if not results:
        print("No activity")
        return

    class Aggregate:
        def __init__(self, count=0, elapsed=0):
            self.count = count
            self.elapsed = elapsed

    device_totals = {}
    activity_totals = {}

    max_name_len = len("Activity")
    for r in results:
        name_len = len(r.name)
        max_name_len = max(max_name_len, name_len)

    activity_width = max_name_len + 1
    activity_dashes = "-" * activity_width

    print(f"{indent}CUDA timeline:")
    print(f"{indent}----------------+---------+{activity_dashes}")
    print(f"{indent}Time            | Device  | Activity")
    print(f"{indent}----------------+---------+{activity_dashes}")
    for r in results:
        device_agg = device_totals.get(r.device.alias)
        if device_agg is None:
            device_totals[r.device.alias] = Aggregate(count=1, elapsed=r.elapsed)
        else:
            device_agg.count += 1
            device_agg.elapsed += r.elapsed

        activity_agg = activity_totals.get(r.name)
        if activity_agg is None:
            activity_totals[r.name] = Aggregate(count=1, elapsed=r.elapsed)
        else:
            activity_agg.count += 1
            activity_agg.elapsed += r.elapsed

        print(f"{indent}{r.elapsed :12.6f} ms | {r.device.alias :7s} | {r.name}")

    print()
    print(f"{indent}CUDA activity summary:")
    print(f"{indent}----------------+---------+{activity_dashes}")
    print(f"{indent}Total time      | Count   | Activity")
    print(f"{indent}----------------+---------+{activity_dashes}")
    for name, agg in activity_totals.items():
        print(f"{indent}{agg.elapsed :12.6f} ms | {agg.count :7d} | {name}")

    print()
    print(f"{indent}CUDA device summary:")
    print(f"{indent}----------------+---------+{activity_dashes}")
    print(f"{indent}Total time      | Count   | Device")
    print(f"{indent}----------------+---------+{activity_dashes}")
    for device, agg in device_totals.items():
        print(f"{indent}{agg.elapsed :12.6f} ms | {agg.count :7d} | {device}")
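Taken together, timing_begin / timing_end / timing_print form the standalone path to the same CUDA activity report that ScopedTimer produces when `cuda_filter` is set. A closing sketch (editorial illustration; assumes these functions are re-exported as `wp.timing_begin`, `wp.timing_end`, and `wp.timing_print`, and reuses the `scale` kernel and array `a` from the first sketch):

wp.timing_begin(cuda_filter=wp.TIMING_ALL)

for _ in range(3):
    wp.launch(scale, dim=a.shape[0], inputs=[a, 2.0])

results = wp.timing_end()  # list[TimingResult], one entry per recorded activity
wp.timing_print(results)   # timeline plus per-activity and per-device summaries

total_ms = sum(r.elapsed for r in results)
print(f"total recorded GPU time: {total_ms:.3f} ms")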