warp-lang 1.9.1__py3-none-win_amd64.whl → 1.10.0rc2__py3-none-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of warp-lang might be problematic.
- warp/__init__.py +301 -287
- warp/__init__.pyi +794 -305
- warp/_src/__init__.py +14 -0
- warp/_src/autograd.py +1075 -0
- warp/_src/build.py +618 -0
- warp/_src/build_dll.py +640 -0
- warp/{builtins.py → _src/builtins.py} +1382 -377
- warp/_src/codegen.py +4359 -0
- warp/{config.py → _src/config.py} +178 -169
- warp/_src/constants.py +57 -0
- warp/_src/context.py +8294 -0
- warp/_src/dlpack.py +462 -0
- warp/_src/fabric.py +355 -0
- warp/_src/fem/__init__.py +14 -0
- warp/_src/fem/adaptivity.py +508 -0
- warp/_src/fem/cache.py +687 -0
- warp/_src/fem/dirichlet.py +188 -0
- warp/{fem → _src/fem}/domain.py +40 -30
- warp/_src/fem/field/__init__.py +131 -0
- warp/_src/fem/field/field.py +701 -0
- warp/{fem → _src/fem}/field/nodal_field.py +30 -15
- warp/{fem → _src/fem}/field/restriction.py +1 -1
- warp/{fem → _src/fem}/field/virtual.py +53 -27
- warp/_src/fem/geometry/__init__.py +32 -0
- warp/{fem → _src/fem}/geometry/adaptive_nanogrid.py +77 -163
- warp/_src/fem/geometry/closest_point.py +97 -0
- warp/{fem → _src/fem}/geometry/deformed_geometry.py +14 -22
- warp/{fem → _src/fem}/geometry/element.py +32 -10
- warp/{fem → _src/fem}/geometry/geometry.py +48 -20
- warp/{fem → _src/fem}/geometry/grid_2d.py +12 -23
- warp/{fem → _src/fem}/geometry/grid_3d.py +12 -23
- warp/{fem → _src/fem}/geometry/hexmesh.py +40 -63
- warp/{fem → _src/fem}/geometry/nanogrid.py +255 -248
- warp/{fem → _src/fem}/geometry/partition.py +121 -63
- warp/{fem → _src/fem}/geometry/quadmesh.py +26 -45
- warp/{fem → _src/fem}/geometry/tetmesh.py +40 -63
- warp/{fem → _src/fem}/geometry/trimesh.py +26 -45
- warp/{fem → _src/fem}/integrate.py +164 -158
- warp/_src/fem/linalg.py +383 -0
- warp/_src/fem/operator.py +396 -0
- warp/_src/fem/polynomial.py +229 -0
- warp/{fem → _src/fem}/quadrature/pic_quadrature.py +15 -20
- warp/{fem → _src/fem}/quadrature/quadrature.py +95 -47
- warp/_src/fem/space/__init__.py +248 -0
- warp/{fem → _src/fem}/space/basis_function_space.py +20 -11
- warp/_src/fem/space/basis_space.py +679 -0
- warp/{fem → _src/fem}/space/dof_mapper.py +3 -3
- warp/{fem → _src/fem}/space/function_space.py +14 -13
- warp/{fem → _src/fem}/space/grid_2d_function_space.py +4 -7
- warp/{fem → _src/fem}/space/grid_3d_function_space.py +4 -4
- warp/{fem → _src/fem}/space/hexmesh_function_space.py +4 -10
- warp/{fem → _src/fem}/space/nanogrid_function_space.py +3 -9
- warp/{fem → _src/fem}/space/partition.py +117 -60
- warp/{fem → _src/fem}/space/quadmesh_function_space.py +4 -10
- warp/{fem → _src/fem}/space/restriction.py +66 -33
- warp/_src/fem/space/shape/__init__.py +152 -0
- warp/{fem → _src/fem}/space/shape/cube_shape_function.py +9 -9
- warp/{fem → _src/fem}/space/shape/shape_function.py +8 -9
- warp/{fem → _src/fem}/space/shape/square_shape_function.py +6 -6
- warp/{fem → _src/fem}/space/shape/tet_shape_function.py +3 -3
- warp/{fem → _src/fem}/space/shape/triangle_shape_function.py +3 -3
- warp/{fem → _src/fem}/space/tetmesh_function_space.py +3 -9
- warp/_src/fem/space/topology.py +459 -0
- warp/{fem → _src/fem}/space/trimesh_function_space.py +3 -9
- warp/_src/fem/types.py +112 -0
- warp/_src/fem/utils.py +486 -0
- warp/_src/jax.py +186 -0
- warp/_src/jax_experimental/__init__.py +14 -0
- warp/_src/jax_experimental/custom_call.py +387 -0
- warp/_src/jax_experimental/ffi.py +1284 -0
- warp/_src/jax_experimental/xla_ffi.py +656 -0
- warp/_src/marching_cubes.py +708 -0
- warp/_src/math.py +414 -0
- warp/_src/optim/__init__.py +14 -0
- warp/_src/optim/adam.py +163 -0
- warp/_src/optim/linear.py +1606 -0
- warp/_src/optim/sgd.py +112 -0
- warp/_src/paddle.py +406 -0
- warp/_src/render/__init__.py +14 -0
- warp/_src/render/imgui_manager.py +289 -0
- warp/_src/render/render_opengl.py +3636 -0
- warp/_src/render/render_usd.py +937 -0
- warp/_src/render/utils.py +160 -0
- warp/_src/sparse.py +2716 -0
- warp/_src/tape.py +1206 -0
- warp/{thirdparty → _src/thirdparty}/unittest_parallel.py +9 -2
- warp/_src/torch.py +391 -0
- warp/_src/types.py +5870 -0
- warp/_src/utils.py +1693 -0
- warp/autograd.py +12 -1054
- warp/bin/warp-clang.dll +0 -0
- warp/bin/warp.dll +0 -0
- warp/build.py +8 -588
- warp/build_dll.py +6 -721
- warp/codegen.py +6 -4251
- warp/constants.py +6 -39
- warp/context.py +12 -8062
- warp/dlpack.py +6 -444
- warp/examples/distributed/example_jacobi_mpi.py +4 -5
- warp/examples/fem/example_adaptive_grid.py +1 -1
- warp/examples/fem/example_apic_fluid.py +1 -1
- warp/examples/fem/example_burgers.py +8 -8
- warp/examples/fem/example_diffusion.py +1 -1
- warp/examples/fem/example_distortion_energy.py +1 -1
- warp/examples/fem/example_mixed_elasticity.py +2 -2
- warp/examples/fem/example_navier_stokes.py +1 -1
- warp/examples/fem/example_nonconforming_contact.py +7 -7
- warp/examples/fem/example_stokes.py +1 -1
- warp/examples/fem/example_stokes_transfer.py +1 -1
- warp/examples/fem/utils.py +2 -2
- warp/examples/interop/example_jax_callable.py +1 -1
- warp/examples/interop/example_jax_ffi_callback.py +1 -1
- warp/examples/interop/example_jax_kernel.py +1 -1
- warp/examples/tile/example_tile_mcgp.py +191 -0
- warp/fabric.py +6 -337
- warp/fem/__init__.py +159 -97
- warp/fem/adaptivity.py +7 -489
- warp/fem/cache.py +9 -648
- warp/fem/dirichlet.py +6 -184
- warp/fem/field/__init__.py +8 -109
- warp/fem/field/field.py +7 -652
- warp/fem/geometry/__init__.py +7 -18
- warp/fem/geometry/closest_point.py +11 -77
- warp/fem/linalg.py +18 -366
- warp/fem/operator.py +11 -369
- warp/fem/polynomial.py +9 -209
- warp/fem/space/__init__.py +5 -211
- warp/fem/space/basis_space.py +6 -662
- warp/fem/space/shape/__init__.py +41 -118
- warp/fem/space/topology.py +6 -437
- warp/fem/types.py +6 -81
- warp/fem/utils.py +11 -444
- warp/jax.py +8 -165
- warp/jax_experimental/__init__.py +14 -1
- warp/jax_experimental/custom_call.py +8 -365
- warp/jax_experimental/ffi.py +17 -873
- warp/jax_experimental/xla_ffi.py +5 -605
- warp/marching_cubes.py +5 -689
- warp/math.py +16 -393
- warp/native/array.h +385 -37
- warp/native/builtin.h +314 -37
- warp/native/bvh.cpp +43 -9
- warp/native/bvh.cu +62 -27
- warp/native/bvh.h +310 -309
- warp/native/clang/clang.cpp +102 -97
- warp/native/coloring.cpp +0 -1
- warp/native/crt.h +208 -0
- warp/native/exports.h +156 -0
- warp/native/hashgrid.cu +2 -0
- warp/native/intersect.h +24 -1
- warp/native/intersect_tri.h +44 -35
- warp/native/mat.h +1456 -276
- warp/native/mesh.cpp +4 -4
- warp/native/mesh.cu +4 -2
- warp/native/mesh.h +176 -61
- warp/native/quat.h +0 -52
- warp/native/scan.cu +2 -0
- warp/native/sparse.cu +7 -3
- warp/native/spatial.h +12 -0
- warp/native/tile.h +681 -89
- warp/native/tile_radix_sort.h +1 -1
- warp/native/tile_reduce.h +394 -46
- warp/native/tile_scan.h +4 -4
- warp/native/vec.h +469 -0
- warp/native/version.h +23 -0
- warp/native/volume.cpp +1 -1
- warp/native/volume.cu +1 -0
- warp/native/volume.h +1 -1
- warp/native/volume_builder.cu +2 -0
- warp/native/warp.cpp +57 -29
- warp/native/warp.cu +253 -171
- warp/native/warp.h +11 -8
- warp/optim/__init__.py +6 -3
- warp/optim/adam.py +6 -145
- warp/optim/linear.py +14 -1585
- warp/optim/sgd.py +6 -94
- warp/paddle.py +6 -388
- warp/render/__init__.py +8 -4
- warp/render/imgui_manager.py +7 -267
- warp/render/render_opengl.py +6 -3618
- warp/render/render_usd.py +6 -919
- warp/render/utils.py +6 -142
- warp/sparse.py +37 -2563
- warp/tape.py +6 -1188
- warp/tests/__main__.py +1 -1
- warp/tests/cuda/test_async.py +4 -4
- warp/tests/cuda/test_conditional_captures.py +1 -1
- warp/tests/cuda/test_multigpu.py +1 -1
- warp/tests/cuda/test_streams.py +58 -1
- warp/tests/geometry/test_bvh.py +157 -22
- warp/tests/geometry/test_marching_cubes.py +0 -1
- warp/tests/geometry/test_mesh.py +5 -3
- warp/tests/geometry/test_mesh_query_aabb.py +5 -12
- warp/tests/geometry/test_mesh_query_point.py +5 -2
- warp/tests/geometry/test_mesh_query_ray.py +15 -3
- warp/tests/geometry/test_volume_write.py +5 -5
- warp/tests/interop/test_dlpack.py +14 -14
- warp/tests/interop/test_jax.py +772 -49
- warp/tests/interop/test_paddle.py +1 -1
- warp/tests/test_adam.py +0 -1
- warp/tests/test_arithmetic.py +9 -9
- warp/tests/test_array.py +527 -100
- warp/tests/test_array_reduce.py +3 -3
- warp/tests/test_atomic.py +12 -8
- warp/tests/test_atomic_bitwise.py +209 -0
- warp/tests/test_atomic_cas.py +4 -4
- warp/tests/test_bool.py +2 -2
- warp/tests/test_builtins_resolution.py +5 -571
- warp/tests/test_codegen.py +33 -14
- warp/tests/test_conditional.py +1 -1
- warp/tests/test_context.py +6 -6
- warp/tests/test_copy.py +242 -161
- warp/tests/test_ctypes.py +3 -3
- warp/tests/test_devices.py +24 -2
- warp/tests/test_examples.py +16 -84
- warp/tests/test_fabricarray.py +35 -35
- warp/tests/test_fast_math.py +0 -2
- warp/tests/test_fem.py +56 -10
- warp/tests/test_fixedarray.py +3 -3
- warp/tests/test_func.py +8 -5
- warp/tests/test_generics.py +1 -1
- warp/tests/test_indexedarray.py +24 -24
- warp/tests/test_intersect.py +39 -9
- warp/tests/test_large.py +1 -1
- warp/tests/test_lerp.py +3 -1
- warp/tests/test_linear_solvers.py +1 -1
- warp/tests/test_map.py +35 -4
- warp/tests/test_mat.py +52 -62
- warp/tests/test_mat_constructors.py +4 -5
- warp/tests/test_mat_lite.py +1 -1
- warp/tests/test_mat_scalar_ops.py +121 -121
- warp/tests/test_math.py +34 -0
- warp/tests/test_module_aot.py +4 -4
- warp/tests/test_modules_lite.py +28 -2
- warp/tests/test_print.py +11 -11
- warp/tests/test_quat.py +93 -58
- warp/tests/test_runlength_encode.py +1 -1
- warp/tests/test_scalar_ops.py +38 -10
- warp/tests/test_smoothstep.py +1 -1
- warp/tests/test_sparse.py +126 -15
- warp/tests/test_spatial.py +105 -87
- warp/tests/test_special_values.py +6 -6
- warp/tests/test_static.py +7 -7
- warp/tests/test_struct.py +13 -2
- warp/tests/test_triangle_closest_point.py +48 -1
- warp/tests/test_types.py +27 -15
- warp/tests/test_utils.py +52 -52
- warp/tests/test_vec.py +29 -29
- warp/tests/test_vec_constructors.py +5 -5
- warp/tests/test_vec_scalar_ops.py +97 -97
- warp/tests/test_version.py +75 -0
- warp/tests/tile/test_tile.py +178 -0
- warp/tests/tile/test_tile_atomic_bitwise.py +403 -0
- warp/tests/tile/test_tile_cholesky.py +7 -4
- warp/tests/tile/test_tile_load.py +26 -2
- warp/tests/tile/test_tile_mathdx.py +3 -3
- warp/tests/tile/test_tile_matmul.py +1 -1
- warp/tests/tile/test_tile_mlp.py +2 -4
- warp/tests/tile/test_tile_reduce.py +214 -13
- warp/tests/unittest_suites.py +6 -14
- warp/tests/unittest_utils.py +10 -9
- warp/tests/walkthrough_debug.py +3 -1
- warp/torch.py +6 -373
- warp/types.py +29 -5764
- warp/utils.py +10 -1659
- {warp_lang-1.9.1.dist-info → warp_lang-1.10.0rc2.dist-info}/METADATA +46 -99
- warp_lang-1.10.0rc2.dist-info/RECORD +468 -0
- warp_lang-1.10.0rc2.dist-info/licenses/licenses/Gaia-LICENSE.txt +6 -0
- warp_lang-1.10.0rc2.dist-info/licenses/licenses/appdirs-LICENSE.txt +22 -0
- warp_lang-1.10.0rc2.dist-info/licenses/licenses/asset_pixel_jpg-LICENSE.txt +3 -0
- warp_lang-1.10.0rc2.dist-info/licenses/licenses/cuda-LICENSE.txt +1582 -0
- warp_lang-1.10.0rc2.dist-info/licenses/licenses/dlpack-LICENSE.txt +201 -0
- warp_lang-1.10.0rc2.dist-info/licenses/licenses/fp16-LICENSE.txt +28 -0
- warp_lang-1.10.0rc2.dist-info/licenses/licenses/libmathdx-LICENSE.txt +220 -0
- warp_lang-1.10.0rc2.dist-info/licenses/licenses/llvm-LICENSE.txt +279 -0
- warp_lang-1.10.0rc2.dist-info/licenses/licenses/moller-LICENSE.txt +16 -0
- warp_lang-1.10.0rc2.dist-info/licenses/licenses/nanovdb-LICENSE.txt +2 -0
- warp_lang-1.10.0rc2.dist-info/licenses/licenses/nvrtc-LICENSE.txt +1592 -0
- warp_lang-1.10.0rc2.dist-info/licenses/licenses/svd-LICENSE.txt +23 -0
- warp_lang-1.10.0rc2.dist-info/licenses/licenses/unittest_parallel-LICENSE.txt +21 -0
- warp_lang-1.10.0rc2.dist-info/licenses/licenses/usd-LICENSE.txt +213 -0
- warp_lang-1.10.0rc2.dist-info/licenses/licenses/windingnumber-LICENSE.txt +21 -0
- warp/examples/assets/cartpole.urdf +0 -110
- warp/examples/assets/crazyflie.usd +0 -0
- warp/examples/assets/nv_ant.xml +0 -92
- warp/examples/assets/nv_humanoid.xml +0 -183
- warp/examples/assets/quadruped.urdf +0 -268
- warp/examples/optim/example_bounce.py +0 -266
- warp/examples/optim/example_cloth_throw.py +0 -228
- warp/examples/optim/example_drone.py +0 -870
- warp/examples/optim/example_inverse_kinematics.py +0 -182
- warp/examples/optim/example_inverse_kinematics_torch.py +0 -191
- warp/examples/optim/example_softbody_properties.py +0 -400
- warp/examples/optim/example_spring_cage.py +0 -245
- warp/examples/optim/example_trajectory.py +0 -227
- warp/examples/sim/example_cartpole.py +0 -143
- warp/examples/sim/example_cloth.py +0 -225
- warp/examples/sim/example_cloth_self_contact.py +0 -316
- warp/examples/sim/example_granular.py +0 -130
- warp/examples/sim/example_granular_collision_sdf.py +0 -202
- warp/examples/sim/example_jacobian_ik.py +0 -244
- warp/examples/sim/example_particle_chain.py +0 -124
- warp/examples/sim/example_quadruped.py +0 -203
- warp/examples/sim/example_rigid_chain.py +0 -203
- warp/examples/sim/example_rigid_contact.py +0 -195
- warp/examples/sim/example_rigid_force.py +0 -133
- warp/examples/sim/example_rigid_gyroscopic.py +0 -115
- warp/examples/sim/example_rigid_soft_contact.py +0 -140
- warp/examples/sim/example_soft_body.py +0 -196
- warp/examples/tile/example_tile_walker.py +0 -327
- warp/sim/__init__.py +0 -74
- warp/sim/articulation.py +0 -793
- warp/sim/collide.py +0 -2570
- warp/sim/graph_coloring.py +0 -307
- warp/sim/import_mjcf.py +0 -791
- warp/sim/import_snu.py +0 -227
- warp/sim/import_urdf.py +0 -579
- warp/sim/import_usd.py +0 -898
- warp/sim/inertia.py +0 -357
- warp/sim/integrator.py +0 -245
- warp/sim/integrator_euler.py +0 -2000
- warp/sim/integrator_featherstone.py +0 -2101
- warp/sim/integrator_vbd.py +0 -2487
- warp/sim/integrator_xpbd.py +0 -3295
- warp/sim/model.py +0 -4821
- warp/sim/particles.py +0 -121
- warp/sim/render.py +0 -431
- warp/sim/utils.py +0 -431
- warp/tests/sim/disabled_kinematics.py +0 -244
- warp/tests/sim/test_cloth.py +0 -863
- warp/tests/sim/test_collision.py +0 -743
- warp/tests/sim/test_coloring.py +0 -347
- warp/tests/sim/test_inertia.py +0 -161
- warp/tests/sim/test_model.py +0 -226
- warp/tests/sim/test_sim_grad.py +0 -287
- warp/tests/sim/test_sim_grad_bounce_linear.py +0 -212
- warp/tests/sim/test_sim_kinematics.py +0 -98
- warp/thirdparty/__init__.py +0 -0
- warp_lang-1.9.1.dist-info/RECORD +0 -456
- /warp/{fem → _src/fem}/quadrature/__init__.py +0 -0
- /warp/{tests/sim → _src/thirdparty}/__init__.py +0 -0
- /warp/{thirdparty → _src/thirdparty}/appdirs.py +0 -0
- /warp/{thirdparty → _src/thirdparty}/dlpack.py +0 -0
- {warp_lang-1.9.1.dist-info → warp_lang-1.10.0rc2.dist-info}/WHEEL +0 -0
- {warp_lang-1.9.1.dist-info → warp_lang-1.10.0rc2.dist-info}/licenses/LICENSE.md +0 -0
- {warp_lang-1.9.1.dist-info → warp_lang-1.10.0rc2.dist-info}/top_level.txt +0 -0
warp/_src/utils.py
ADDED
@@ -0,0 +1,1693 @@
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

import cProfile
import ctypes
import os
import sys
import time
import warnings
from types import ModuleType
from typing import Any, Callable

import numpy as np

import warp as wp
import warp._src.context
import warp._src.types
from warp._src.context import Devicelike
from warp._src.types import Array, DType, type_repr, types_equal

warnings_seen = set()


def warp_showwarning(message, category, filename, lineno, file=None, line=None):
    """Version of warnings.showwarning that always prints to sys.stdout."""

    if warp.config.verbose_warnings:
        s = f"Warp {category.__name__}: {message} ({filename}:{lineno})\n"

        if line is None:
            try:
                import linecache

                line = linecache.getline(filename, lineno)
            except Exception:
                # When a warning is logged during Python shutdown, linecache
                # and the import machinery don't work anymore
                line = None
                linecache = None

        if line:
            line = line.strip()
            s += f"  {line}\n"
    else:
        # simple warning
        s = f"Warp {category.__name__}: {message}\n"

    sys.stdout.write(s)


def warn(message, category=None, stacklevel=1, once=False):
    if (category, message) in warnings_seen:
        return

    with warnings.catch_warnings():
        warnings.simplefilter("default")  # Change the filter in this process
        warnings.showwarning = warp_showwarning
        warnings.warn(
            message,
            category,
            stacklevel=stacklevel + 1,  # Increment stacklevel by 1 since we are in a wrapper
        )

    if category is DeprecationWarning or once:
        warnings_seen.add((category, message))


# expand a 7-vec to a tuple of arrays
def transform_expand(t):
    return wp.transform(np.array(t[0:3]), np.array(t[3:7]))


@wp.func
def quat_between_vectors(a: wp.vec3, b: wp.vec3) -> wp.quat:
    """
    Compute the quaternion that rotates vector a to vector b
    """
    a = wp.normalize(a)
    b = wp.normalize(b)
    c = wp.cross(a, b)
    d = wp.dot(a, b)
    q = wp.quat(c[0], c[1], c[2], 1.0 + d)
    return wp.normalize(q)


def array_scan(in_array, out_array, inclusive=True):
    """Perform a scan (prefix sum) operation on an array.

    This function computes the inclusive or exclusive scan of the input array and stores the result in the output array.
    The scan operation computes a running sum of elements in the array.

    Args:
        in_array (wp.array): Input array to scan. Must be of type int32 or float32.
        out_array (wp.array): Output array to store scan results. Must match input array type and size.
        inclusive (bool, optional): If True, performs an inclusive scan (includes current element in sum).
            If False, performs an exclusive scan (excludes current element). Defaults to True.

    Raises:
        RuntimeError: If array storage devices don't match, if storage size is insufficient, or if data types are unsupported.
    """

    if in_array.device != out_array.device:
        raise RuntimeError(f"In and out array storage devices do not match ({in_array.device} vs {out_array.device})")

    if in_array.size != out_array.size:
        raise RuntimeError(f"In and out array storage sizes do not match ({in_array.size} vs {out_array.size})")

    if not types_equal(in_array.dtype, out_array.dtype):
        raise RuntimeError(
            f"In and out array data types do not match ({type_repr(in_array.dtype)} vs {type_repr(out_array.dtype)})"
        )

    if in_array.size == 0:
        return

    from warp._src.context import runtime

    if in_array.device.is_cpu:
        if in_array.dtype == wp.int32:
            runtime.core.wp_array_scan_int_host(in_array.ptr, out_array.ptr, in_array.size, inclusive)
        elif in_array.dtype == wp.float32:
            runtime.core.wp_array_scan_float_host(in_array.ptr, out_array.ptr, in_array.size, inclusive)
        else:
            raise RuntimeError(f"Unsupported data type: {type_repr(in_array.dtype)}")
    elif in_array.device.is_cuda:
        if in_array.dtype == wp.int32:
            runtime.core.wp_array_scan_int_device(in_array.ptr, out_array.ptr, in_array.size, inclusive)
        elif in_array.dtype == wp.float32:
            runtime.core.wp_array_scan_float_device(in_array.ptr, out_array.ptr, in_array.size, inclusive)
        else:
            raise RuntimeError(f"Unsupported data type: {type_repr(in_array.dtype)}")
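
A minimal usage sketch for array_scan (not part of the package diff), assuming these helpers remain re-exported through the public warp.utils module:

    # Hypothetical example: inclusive and exclusive prefix sums on int32 data.
    import warp as wp

    values = wp.array([1, 2, 3, 4], dtype=wp.int32)
    result = wp.zeros_like(values)
    wp.utils.array_scan(values, result, inclusive=True)   # result -> [1, 3, 6, 10]
    wp.utils.array_scan(values, result, inclusive=False)  # result -> [0, 1, 3, 6]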


def radix_sort_pairs(keys, values, count: int):
    """Sort key-value pairs using radix sort.

    This function sorts pairs of arrays based on the keys array, maintaining the key-value
    relationship. The sort is stable and operates in linear time.
    The `keys` and `values` arrays must be large enough to accommodate 2*`count` elements.

    Args:
        keys (wp.array): Array of keys to sort. Must be of type int32, float32, or int64.
        values (wp.array): Array of values to sort along with keys. Must be of type int32.
        count (int): Number of elements to sort.

    Raises:
        RuntimeError: If array storage devices don't match, if storage size is insufficient, or if data types are unsupported.
    """
    if keys.device != values.device:
        raise RuntimeError(f"Keys and values array storage devices do not match ({keys.device} vs {values.device})")

    if count == 0:
        return

    if keys.size < 2 * count or values.size < 2 * count:
        raise RuntimeError("Keys and values array storage must be large enough to contain 2*count elements")

    from warp._src.context import runtime

    if keys.device.is_cpu:
        if keys.dtype == wp.int32 and values.dtype == wp.int32:
            runtime.core.wp_radix_sort_pairs_int_host(keys.ptr, values.ptr, count)
        elif keys.dtype == wp.float32 and values.dtype == wp.int32:
            runtime.core.wp_radix_sort_pairs_float_host(keys.ptr, values.ptr, count)
        elif keys.dtype == wp.int64 and values.dtype == wp.int32:
            runtime.core.wp_radix_sort_pairs_int64_host(keys.ptr, values.ptr, count)
        else:
            raise RuntimeError(
                f"Unsupported keys and values data types: {type_repr(keys.dtype)}, {type_repr(values.dtype)}"
            )
    elif keys.device.is_cuda:
        if keys.dtype == wp.int32 and values.dtype == wp.int32:
            runtime.core.wp_radix_sort_pairs_int_device(keys.ptr, values.ptr, count)
        elif keys.dtype == wp.float32 and values.dtype == wp.int32:
            runtime.core.wp_radix_sort_pairs_float_device(keys.ptr, values.ptr, count)
        elif keys.dtype == wp.int64 and values.dtype == wp.int32:
            runtime.core.wp_radix_sort_pairs_int64_device(keys.ptr, values.ptr, count)
        else:
            raise RuntimeError(
                f"Unsupported keys and values data types: {type_repr(keys.dtype)}, {type_repr(values.dtype)}"
            )
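
A usage sketch for radix_sort_pairs (not part of the package diff); the arrays are allocated with 2*count elements of scratch space and only the first count entries are meaningful afterwards. Assumes the warp.utils re-export:

    # Hypothetical example: sort keys and carry original indices along as values.
    import warp as wp

    count = 4
    keys = wp.array([7, 2, 9, 4, 0, 0, 0, 0], dtype=wp.int32)    # 2*count storage
    values = wp.array([0, 1, 2, 3, 0, 0, 0, 0], dtype=wp.int32)  # original indices
    wp.utils.radix_sort_pairs(keys, values, count)
    # keys.numpy()[:count]   -> [2, 4, 7, 9]
    # values.numpy()[:count] -> [1, 3, 0, 2]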


def segmented_sort_pairs(
    keys,
    values,
    count: int,
    segment_start_indices: wp.array(dtype=wp.int32),
    segment_end_indices: wp.array(dtype=wp.int32) = None,
):
    """Sort key-value pairs within segments.

    This function performs a segmented sort of key-value pairs, where the sorting is done independently within each segment.
    The segments are defined by their start and optionally end indices.
    The `keys` and `values` arrays must be large enough to accommodate 2*`count` elements.

    Args:
        keys: Array of keys to sort. Must be of type int32 or float32.
        values: Array of values to sort along with keys. Must be of type int32.
        count: Number of elements to sort.
        segment_start_indices: Array containing start index of each segment. Must be of type int32.
            If segment_end_indices is None, this array must have length at least num_segments + 1,
            and segment_end_indices will be inferred as segment_start_indices[1:].
            If segment_end_indices is provided, this array must have length at least num_segments.
        segment_end_indices: Optional array containing end index of each segment. Must be of type int32 if provided.
            If None, segment_end_indices will be inferred from segment_start_indices[1:].
            If provided, must have length at least num_segments.

    Raises:
        RuntimeError: If array storage devices don't match, if storage size is insufficient,
            if segment_start_indices is not of type int32, or if data types are unsupported.
    """
    if keys.device != values.device:
        raise RuntimeError(f"Array storage devices do not match ({keys.device} vs {values.device})")

    if count == 0:
        return

    if keys.size < 2 * count or values.size < 2 * count:
        raise RuntimeError("Array storage must be large enough to contain 2*count elements")

    from warp._src.context import runtime

    if segment_start_indices.dtype != wp.int32:
        raise RuntimeError("segment_start_indices array must be of type int32")

    # Handle case where segment_end_indices is not provided
    if segment_end_indices is None:
        num_segments = max(0, segment_start_indices.size - 1)

        segment_end_indices = segment_start_indices[1:]
        segment_end_indices_ptr = segment_end_indices.ptr
        segment_start_indices_ptr = segment_start_indices.ptr
    else:
        if segment_end_indices.dtype != wp.int32:
            raise RuntimeError("segment_end_indices array must be of type int32")

        num_segments = segment_start_indices.size

        segment_end_indices_ptr = segment_end_indices.ptr
        segment_start_indices_ptr = segment_start_indices.ptr

    if keys.device.is_cpu:
        if keys.dtype == wp.int32 and values.dtype == wp.int32:
            runtime.core.wp_segmented_sort_pairs_int_host(
                keys.ptr,
                values.ptr,
                count,
                segment_start_indices_ptr,
                segment_end_indices_ptr,
                num_segments,
            )
        elif keys.dtype == wp.float32 and values.dtype == wp.int32:
            runtime.core.wp_segmented_sort_pairs_float_host(
                keys.ptr,
                values.ptr,
                count,
                segment_start_indices_ptr,
                segment_end_indices_ptr,
                num_segments,
            )
        else:
            raise RuntimeError(f"Unsupported data type: {type_repr(keys.dtype)}")
    elif keys.device.is_cuda:
        if keys.dtype == wp.int32 and values.dtype == wp.int32:
            runtime.core.wp_segmented_sort_pairs_int_device(
                keys.ptr,
                values.ptr,
                count,
                segment_start_indices_ptr,
                segment_end_indices_ptr,
                num_segments,
            )
        elif keys.dtype == wp.float32 and values.dtype == wp.int32:
            runtime.core.wp_segmented_sort_pairs_float_device(
                keys.ptr,
                values.ptr,
                count,
                segment_start_indices_ptr,
                segment_end_indices_ptr,
                num_segments,
            )
        else:
            raise RuntimeError(f"Unsupported data type: {type_repr(keys.dtype)}")
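
A usage sketch for segmented_sort_pairs (not part of the package diff), assuming the warp.utils re-export; two segments of three keys each are sorted independently, with end indices inferred from segment_start_indices[1:]:

    # Hypothetical example: segments [0, 3) and [3, 6) sorted separately.
    import warp as wp

    count = 6
    keys = wp.array([3, 1, 2, 9, 7, 8, 0, 0, 0, 0, 0, 0], dtype=wp.int32)    # 2*count storage
    values = wp.array([0, 1, 2, 3, 4, 5, 0, 0, 0, 0, 0, 0], dtype=wp.int32)
    segment_starts = wp.array([0, 3, 6], dtype=wp.int32)  # num_segments + 1 entries
    wp.utils.segmented_sort_pairs(keys, values, count, segment_starts)
    # keys.numpy()[:count]   -> [1, 2, 3, 7, 8, 9]
    # values.numpy()[:count] -> [1, 2, 0, 4, 5, 3]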


def runlength_encode(values, run_values, run_lengths, run_count=None, value_count=None):
    """Perform run-length encoding on an array.

    This function compresses an array by replacing consecutive identical values with a single value
    and its count. For example, [1,1,1,2,2,3] becomes values=[1,2,3] and lengths=[3,2,1].

    Args:
        values (wp.array): Input array to encode. Must be of type int32.
        run_values (wp.array): Output array to store unique values. Must be at least value_count in size.
        run_lengths (wp.array): Output array to store run lengths. Must be at least value_count in size.
        run_count (wp.array, optional): Optional output array to store the number of runs.
            If None, returns the count as an integer.
        value_count (int, optional): Number of values to process. If None, processes entire array.

    Returns:
        int or wp.array: Number of runs if run_count is None, otherwise returns run_count array.

    Raises:
        RuntimeError: If array storage devices don't match, if storage size is insufficient, or if data types are unsupported.
    """
    if run_values.device != values.device or run_lengths.device != values.device:
        raise RuntimeError("run_values, run_lengths and values storage devices do not match")

    if value_count is None:
        value_count = values.size

    if run_values.size < value_count or run_lengths.size < value_count:
        raise RuntimeError(f"Output array storage sizes must be at least equal to value_count ({value_count})")

    if not types_equal(values.dtype, run_values.dtype):
        raise RuntimeError(
            f"values and run_values data types do not match ({type_repr(values.dtype)} vs {type_repr(run_values.dtype)})"
        )

    if run_lengths.dtype != wp.int32:
        raise RuntimeError("run_lengths array must be of type int32")

    # User can provide a device output array for storing the number of runs
    # For convenience, if no such array is provided, number of runs is returned on host
    if run_count is None:
        if value_count == 0:
            return 0
        run_count = wp.empty(shape=(1,), dtype=int, device=values.device)
        host_return = True
    else:
        if run_count.device != values.device:
            raise RuntimeError("run_count storage device does not match other arrays")
        if run_count.dtype != wp.int32:
            raise RuntimeError("run_count array must be of type int32")
        if value_count == 0:
            run_count.zero_()
            return run_count
        host_return = False

    from warp._src.context import runtime

    if values.device.is_cpu:
        if values.dtype == wp.int32:
            runtime.core.wp_runlength_encode_int_host(
                values.ptr, run_values.ptr, run_lengths.ptr, run_count.ptr, value_count
            )
        else:
            raise RuntimeError(f"Unsupported data type: {type_repr(values.dtype)}")
    elif values.device.is_cuda:
        if values.dtype == wp.int32:
            runtime.core.wp_runlength_encode_int_device(
                values.ptr, run_values.ptr, run_lengths.ptr, run_count.ptr, value_count
            )
        else:
            raise RuntimeError(f"Unsupported data type: {type_repr(values.dtype)}")

    if host_return:
        return int(run_count.numpy()[0])
    return run_count
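
A usage sketch for runlength_encode (not part of the package diff), mirroring the docstring example and assuming the warp.utils re-export:

    # Hypothetical example: compress consecutive repeats into (value, length) pairs.
    import warp as wp

    values = wp.array([1, 1, 1, 2, 2, 3], dtype=wp.int32)
    run_values = wp.empty(values.size, dtype=wp.int32)
    run_lengths = wp.empty(values.size, dtype=wp.int32)
    num_runs = wp.utils.runlength_encode(values, run_values, run_lengths)
    # num_runs -> 3, run_values[:3] -> [1, 2, 3], run_lengths[:3] -> [3, 2, 1]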


def array_sum(values, out=None, value_count=None, axis=None):
    """Compute the sum of array elements.

    This function computes the sum of array elements, optionally along a specified axis.
    The operation can be performed on the entire array or along a specific dimension.

    Args:
        values (wp.array): Input array to sum. Must be of type float32 or float64.
        out (wp.array, optional): Output array to store results. If None, a new array is created.
        value_count (int, optional): Number of elements to process. If None, processes entire array.
        axis (int, optional): Axis along which to compute sum. If None, computes sum of all elements.

    Returns:
        wp.array or float: The sum result. Returns a float if axis is None and out is None,
            otherwise returns the output array.

    Raises:
        RuntimeError: If output array storage device or data type is incompatible with input array.
    """
    if value_count is None:
        if axis is None:
            value_count = values.size
        else:
            value_count = values.shape[axis]

    if axis is None:
        output_shape = (1,)
    else:

        def output_dim(ax, dim):
            return 1 if ax == axis else dim

        output_shape = tuple(output_dim(ax, dim) for ax, dim in enumerate(values.shape))

    type_size = wp._src.types.type_size(values.dtype)
    scalar_type = wp._src.types.type_scalar_type(values.dtype)

    # User can provide a device output array for storing the number of runs
    # For convenience, if no such array is provided, number of runs is returned on host
    if out is None:
        host_return = True
        out = wp.empty(shape=output_shape, dtype=values.dtype, device=values.device)
    else:
        host_return = False
        if out.device != values.device:
            raise RuntimeError("out storage device should match values array")
        if out.dtype != values.dtype:
            raise RuntimeError(f"out array should have type {values.dtype.__name__}")
        if out.shape != output_shape:
            raise RuntimeError(f"out array should have shape {output_shape}")

    if value_count == 0:
        out.zero_()
        if axis is None and host_return:
            return out.numpy()[0]
        return out

    from warp._src.context import runtime

    if values.device.is_cpu:
        if scalar_type == wp.float32:
            native_func = runtime.core.wp_array_sum_float_host
        elif scalar_type == wp.float64:
            native_func = runtime.core.wp_array_sum_double_host
        else:
            raise RuntimeError(f"Unsupported data type: {type_repr(values.dtype)}")
    elif values.device.is_cuda:
        if scalar_type == wp.float32:
            native_func = runtime.core.wp_array_sum_float_device
        elif scalar_type == wp.float64:
            native_func = runtime.core.wp_array_sum_double_device
        else:
            raise RuntimeError(f"Unsupported data type: {type_repr(values.dtype)}")

    if axis is None:
        stride = wp._src.types.type_size_in_bytes(values.dtype)
        native_func(values.ptr, out.ptr, value_count, stride, type_size)

        if host_return:
            return out.numpy()[0]
        return out

    stride = values.strides[axis]
    for idx in np.ndindex(output_shape):
        out_offset = sum(i * s for i, s in zip(idx, out.strides))
        val_offset = sum(i * s for i, s in zip(idx, values.strides))

        native_func(
            values.ptr + val_offset,
            out.ptr + out_offset,
            value_count,
            stride,
            type_size,
        )

    return out
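
A usage sketch for array_sum (not part of the package diff), assuming the warp.utils re-export; with no out array the reduction is returned on the host, otherwise it stays in the provided device array:

    # Hypothetical example: host-returned total and device-resident total.
    import warp as wp

    v = wp.array([1.0, 2.0, 3.0, 4.0], dtype=wp.float32)
    total = wp.utils.array_sum(v)   # 10.0, returned on the host
    out = wp.zeros(1, dtype=wp.float32)
    wp.utils.array_sum(v, out=out)  # result left in the device array `out`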


def array_inner(a, b, out=None, count=None, axis=None):
    """Compute the inner product of two arrays.

    This function computes the dot product between two arrays, optionally along a specified axis.
    The operation can be performed on the entire arrays or along a specific dimension.

    Args:
        a (wp.array): First input array.
        b (wp.array): Second input array. Must match shape and type of a.
        out (wp.array, optional): Output array to store results. If None, a new array is created.
        count (int, optional): Number of elements to process. If None, processes entire arrays.
        axis (int, optional): Axis along which to compute inner product. If None, computes on flattened arrays.

    Returns:
        wp.array or float: The inner product result. Returns a float if axis is None and out is None,
            otherwise returns the output array.

    Raises:
        RuntimeError: If array storage devices, sizes, or data types are incompatible.
    """
    if a.size != b.size:
        raise RuntimeError(f"A and b array storage sizes do not match ({a.size} vs {b.size})")

    if a.device != b.device:
        raise RuntimeError(f"A and b array storage devices do not match ({a.device} vs {b.device})")

    if not types_equal(a.dtype, b.dtype):
        raise RuntimeError(f"A and b array data types do not match ({type_repr(a.dtype)} vs {type_repr(b.dtype)})")

    if count is None:
        if axis is None:
            count = a.size
        else:
            count = a.shape[axis]

    if axis is None:
        output_shape = (1,)
    else:

        def output_dim(ax, dim):
            return 1 if ax == axis else dim

        output_shape = tuple(output_dim(ax, dim) for ax, dim in enumerate(a.shape))

    type_size = wp._src.types.type_size(a.dtype)
    scalar_type = wp._src.types.type_scalar_type(a.dtype)

    # User can provide a device output array for storing the number of runs
    # For convenience, if no such array is provided, number of runs is returned on host
    if out is None:
        host_return = True
        out = wp.empty(shape=output_shape, dtype=scalar_type, device=a.device)
    else:
        host_return = False
        if out.device != a.device:
            raise RuntimeError("out storage device should match values array")
        if out.dtype != scalar_type:
            raise RuntimeError(f"out array should have type {scalar_type.__name__}")
        if out.shape != output_shape:
            raise RuntimeError(f"out array should have shape {output_shape}")

    if count == 0:
        if axis is None and host_return:
            return 0.0
        out.zero_()
        return out

    from warp._src.context import runtime

    if a.device.is_cpu:
        if scalar_type == wp.float32:
            native_func = runtime.core.wp_array_inner_float_host
        elif scalar_type == wp.float64:
            native_func = runtime.core.wp_array_inner_double_host
        else:
            raise RuntimeError(f"Unsupported data type: {type_repr(a.dtype)}")
    elif a.device.is_cuda:
        if scalar_type == wp.float32:
            native_func = runtime.core.wp_array_inner_float_device
        elif scalar_type == wp.float64:
            native_func = runtime.core.wp_array_inner_double_device
        else:
            raise RuntimeError(f"Unsupported data type: {type_repr(a.dtype)}")

    if axis is None:
        stride_a = wp._src.types.type_size_in_bytes(a.dtype)
        stride_b = wp._src.types.type_size_in_bytes(b.dtype)
        native_func(a.ptr, b.ptr, out.ptr, count, stride_a, stride_b, type_size)

        if host_return:
            return out.numpy()[0]
        return out

    stride_a = a.strides[axis]
    stride_b = b.strides[axis]

    for idx in np.ndindex(output_shape):
        out_offset = sum(i * s for i, s in zip(idx, out.strides))
        a_offset = sum(i * s for i, s in zip(idx, a.strides))
        b_offset = sum(i * s for i, s in zip(idx, b.strides))

        native_func(
            a.ptr + a_offset,
            b.ptr + b_offset,
            out.ptr + out_offset,
            count,
            stride_a,
            stride_b,
            type_size,
        )

    return out
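
A usage sketch for array_inner (not part of the package diff), assuming the warp.utils re-export:

    # Hypothetical example: dot product of two float32 arrays.
    import warp as wp

    a = wp.array([1.0, 2.0, 3.0], dtype=wp.float32)
    b = wp.array([4.0, 5.0, 6.0], dtype=wp.float32)
    dot = wp.utils.array_inner(a, b)  # 1*4 + 2*5 + 3*6 = 32.0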


@wp.kernel
def _array_cast_kernel(
    dest: Any,
    src: Any,
):
    i = wp.tid()
    dest[i] = dest.dtype(src[i])


def array_cast(in_array, out_array, count=None):
    """Cast elements from one array to another array with a different data type.

    This function performs element-wise casting from the input array to the output array.
    The arrays must have the same number of dimensions and data type shapes. If they don't match,
    the arrays will be flattened and casting will be performed at the scalar level.

    Args:
        in_array (wp.array): Input array to cast from.
        out_array (wp.array): Output array to cast to. Must have the same device as in_array.
        count (int, optional): Number of elements to process. If None, processes entire array.
            For multi-dimensional arrays, partial casting is not supported.

    Raises:
        RuntimeError: If arrays have different devices or if attempting partial casting
            on multi-dimensional arrays.

    Note:
        If the input and output arrays have the same data type, this function will
        simply copy the data without any conversion.
    """
    if in_array.device != out_array.device:
        raise RuntimeError(f"Array storage devices do not match ({in_array.device} vs {out_array.device})")

    in_array_data_shape = getattr(in_array.dtype, "_shape_", ())
    out_array_data_shape = getattr(out_array.dtype, "_shape_", ())

    if in_array.ndim != out_array.ndim or in_array_data_shape != out_array_data_shape:
        # Number of dimensions or data type shape do not match.
        # Flatten arrays and do cast at the scalar level
        in_array = in_array.flatten()
        out_array = out_array.flatten()

        in_array_data_length = warp._src.types.type_size(in_array.dtype)
        out_array_data_length = warp._src.types.type_size(out_array.dtype)
        in_array_scalar_type = wp._src.types.type_scalar_type(in_array.dtype)
        out_array_scalar_type = wp._src.types.type_scalar_type(out_array.dtype)

        in_array = wp.array(
            data=None,
            ptr=in_array.ptr,
            capacity=in_array.capacity,
            device=in_array.device,
            dtype=in_array_scalar_type,
            shape=in_array.shape[0] * in_array_data_length,
        )

        out_array = wp.array(
            data=None,
            ptr=out_array.ptr,
            capacity=out_array.capacity,
            device=out_array.device,
            dtype=out_array_scalar_type,
            shape=out_array.shape[0] * out_array_data_length,
        )

        if count is not None:
            count *= in_array_data_length

    if count is None:
        count = in_array.size

    if in_array.ndim == 1:
        dim = count
    elif count < in_array.size:
        raise RuntimeError("Partial cast is not supported for arrays with more than one dimension")
    else:
        dim = in_array.shape

    if in_array.dtype == out_array.dtype:
        # Same data type, can simply copy
        wp.copy(dest=out_array, src=in_array, count=count)
    else:
        wp.launch(kernel=_array_cast_kernel, dim=dim, inputs=[out_array, in_array], device=out_array.device)
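
A usage sketch for array_cast (not part of the package diff), assuming the warp.utils re-export; casting float32 to int32 truncates via the element-wise kernel above:

    # Hypothetical example: element-wise cast between dtypes of the same shape.
    import warp as wp

    src = wp.array([1.5, 2.5, 3.5], dtype=wp.float32)
    dst = wp.empty(3, dtype=wp.int32)
    wp.utils.array_cast(src, dst)  # dst -> [1, 2, 3]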


def create_warp_function(func: Callable) -> tuple[wp.Function, warp._src.context.Module]:
    """Create a Warp function from a Python function.

    Args:
        func (Callable): A Python function to be converted to a Warp function.

    Returns:
        wp.Function: A Warp function created from the input function.
    """

    from .codegen import Adjoint, get_full_arg_spec

    def unique_name(code: str):
        return "func_" + hex(hash(code))[-8:]

    # Create a Warp function from the input function
    source = None
    argspec = get_full_arg_spec(func)
    key = getattr(func, "__name__", None)
    if key is None:
        source, _ = Adjoint.extract_function_source(func)
        key = unique_name(source)
    elif key == "<lambda>":
        body = Adjoint.extract_lambda_source(func, only_body=True)
        if body is None:
            raise ValueError("Could not extract lambda source code")
        key = unique_name(body)
        source = f"def {key}({', '.join(argspec.args)}):\n    return {body}"
    else:
        # use the qualname of the function as the key
        key = getattr(func, "__qualname__", key)
        key = key.replace(".", "_").replace(" ", "_").replace("<", "").replace(">", "_")

    module = warp._src.context.get_module(f"map_{key}")
    func = wp.Function(
        func,
        namespace="",
        module=module,
        key=key,
        source=source,
        overloaded_annotations=dict.fromkeys(argspec.args, Any),
    )
    return func, module


def broadcast_shapes(shapes: list[tuple[int]]) -> tuple[int]:
    """Broadcast a list of shapes to a common shape.

    Following the broadcasting rules of NumPy, two shapes are compatible when:
    starting from the trailing dimension,
    1. the two dimensions are equal, or
    2. one of the dimensions is 1.

    Example:
        >>> broadcast_shapes([(3, 1, 4), (5, 4)])
        (3, 5, 4)

    Returns:
        tuple[int]: The broadcasted shape.

    Raises:
        ValueError: If the shapes are not broadcastable.
    """
    ref = shapes[0]
    for shape in shapes[1:]:
        broad = []
        for j in range(1, max(len(ref), len(shape)) + 1):
            if j <= len(ref) and j <= len(shape):
                s = shape[-j]
                r = ref[-j]
                if s == r:
                    broad.append(s)
                elif s == 1 or r == 1:
                    broad.append(max(s, r))
                else:
                    raise ValueError(f"Shapes {ref} and {shape} are not broadcastable")
            elif j <= len(ref):
                broad.append(ref[-j])
            else:
                broad.append(shape[-j])
        ref = tuple(reversed(broad))
    return ref
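
A short sketch of the broadcasting rule implemented above (not part of the package diff); broadcast_shapes is plain Python, so it can be exercised directly via the module path added in this diff:

    # Hypothetical example: compatible and incompatible shapes.
    from warp._src.utils import broadcast_shapes

    print(broadcast_shapes([(3, 1, 4), (5, 4)]))  # (3, 5, 4)
    try:
        broadcast_shapes([(3, 2), (4, 2, 5)])     # trailing dims 2 and 5 conflict
    except ValueError as e:
        print(e)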
|
|
756
|
+
|
|
757
|
+
|
|
758
|
+
def map(
|
|
759
|
+
func: Callable | wp.Function,
|
|
760
|
+
*inputs: Array[DType] | Any,
|
|
761
|
+
out: Array[DType] | list[Array[DType]] | None = None,
|
|
762
|
+
return_kernel: bool = False,
|
|
763
|
+
block_dim=256,
|
|
764
|
+
device: Devicelike = None,
|
|
765
|
+
) -> Array[DType] | list[Array[DType]] | wp.Kernel:
|
|
766
|
+
"""
|
|
767
|
+
Map a function over the elements of one or more arrays.
|
|
768
|
+
|
|
769
|
+
You can use a Warp function, a regular Python function, or a lambda expression to map it to a set of arrays.
|
|
770
|
+
|
|
771
|
+
.. testcode::
|
|
772
|
+
|
|
773
|
+
a = wp.array([1, 2, 3], dtype=wp.float32)
|
|
774
|
+
b = wp.array([4, 5, 6], dtype=wp.float32)
|
|
775
|
+
c = wp.array([7, 8, 9], dtype=wp.float32)
|
|
776
|
+
result = wp.map(lambda x, y, z: x + 2.0 * y - z, a, b, c)
|
|
777
|
+
print(result)
|
|
778
|
+
|
|
779
|
+
.. testoutput::
|
|
780
|
+
|
|
781
|
+
[2. 4. 6.]
|
|
782
|
+
|
|
783
|
+
Clamp values in an array in place:
|
|
784
|
+
|
|
785
|
+
.. testcode::
|
|
786
|
+
|
|
787
|
+
xs = wp.array([-1.0, 0.0, 1.0], dtype=wp.float32)
|
|
788
|
+
wp.map(wp.clamp, xs, -0.5, 0.5, out=xs)
|
|
789
|
+
print(xs)
|
|
790
|
+
|
|
791
|
+
.. testoutput::
|
|
792
|
+
|
|
793
|
+
[-0.5 0. 0.5]
|
|
794
|
+
|
|
795
|
+
Note that only one of the inputs must be a Warp array. For example, it is possible
|
|
796
|
+
vectorize the function :func:`warp.transform_point` over a collection of points
|
|
797
|
+
with a given input transform as follows:
|
|
798
|
+
|
|
799
|
+
.. code-block:: python
|
|
800
|
+
|
|
801
|
+
tf = wp.transform((1.0, 2.0, 3.0), wp.quat_rpy(0.2, -0.6, 0.1))
|
|
802
|
+
points = wp.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=wp.vec3)
|
|
803
|
+
transformed = wp.map(wp.transform_point, tf, points)
|
|
804
|
+
|
|
805
|
+
Besides regular Warp arrays, other array types, such as the ``indexedarray``, are supported as well:
|
|
806
|
+
|
|
807
|
+
.. testcode::
|
|
808
|
+
|
|
809
|
+
arr = wp.array(data=np.arange(10, dtype=np.float32))
|
|
810
|
+
indices = wp.array([1, 3, 5, 7, 9], dtype=int)
|
|
811
|
+
iarr = wp.indexedarray1d(arr, [indices])
|
|
812
|
+
out = wp.map(lambda x: x * 10.0, iarr)
|
|
813
|
+
print(out)
|
|
814
|
+
|
|
815
|
+
.. testoutput::
|
|
816
|
+
|
|
817
|
+
[10. 30. 50. 70. 90.]
|
|
818
|
+
|
|
819
|
+
If multiple arrays are provided, the
|
|
820
|
+
`NumPy broadcasting rules <https://numpy.org/doc/stable/user/basics.broadcasting.html>`_
|
|
821
|
+
are applied to determine the shape of the output array.
|
|
822
|
+
Two shapes are compatible when:
|
|
823
|
+
starting from the trailing dimension,
|
|
824
|
+
|
|
825
|
+
1. the two dimensions are equal, or
|
|
826
|
+
2. one of the dimensions is 1.
|
|
827
|
+
|
|
828
|
+
For example, given arrays of shapes ``(3, 1, 4)`` and ``(5, 4)``, the broadcasted
|
|
829
|
+
shape is ``(3, 5, 4)``.
|
|
830
|
+
|
|
831
|
+
If no array(s) are provided to the ``out`` argument, the output array(s) are created automatically.
|
|
832
|
+
The data type(s) of the output array(s) are determined by the type of the return value(s) of
|
|
833
|
+
the function. The ``requires_grad`` flag for an automatically created output array is set to ``True``
|
|
834
|
+
if any of the input arrays have it set to ``True`` and the respective output array's ``dtype`` is a type that
|
|
835
|
+
supports differentiation.
|
|
836
|
+
|
|
837
|
+
Args:
|
|
838
|
+
func (Callable | Function): The function to map over the arrays.
|
|
839
|
+
*inputs (array | Any): The input arrays or values to pass to the function.
|
|
840
|
+
out (array | list[array] | None): Optional output array(s) to store the result(s). If None, the output array(s) will be created automatically.
|
|
841
|
+
return_kernel (bool): If True, only return the generated kernel without performing the mapping operation.
|
|
842
|
+
block_dim (int): The block dimension for the kernel launch.
|
|
843
|
+
device (Devicelike): The device on which to run the kernel.
|
|
844
|
+
|
|
845
|
+
Returns:
|
|
846
|
+
array | list[array] | Kernel:
|
|
847
|
+
The resulting array(s) of the mapping. If ``return_kernel`` is True, only returns the kernel used for mapping.
|
|
848
|
+
"""
|
|
849
|
+
|
|
850
|
+
import builtins
|
|
851
|
+
|
|
852
|
+
from .codegen import Adjoint, Struct, StructInstance
|
|
853
|
+
from .types import (
|
|
854
|
+
is_array,
|
|
855
|
+
type_is_matrix,
|
|
856
|
+
type_is_quaternion,
|
|
857
|
+
type_is_transformation,
|
|
858
|
+
type_is_vector,
|
|
859
|
+
type_repr,
|
|
860
|
+
type_to_warp,
|
|
861
|
+
types_equal,
|
|
862
|
+
)
|
|
863
|
+
|
|
864
|
+
# mapping from struct name to its Python definition
|
|
865
|
+
referenced_modules: dict[str, ModuleType] = {}
|
|
866
|
+
|
|
867
|
+
def type_to_code(wp_type) -> str:
|
|
868
|
+
"""Returns the string representation of a given Warp type."""
|
|
869
|
+
if is_array(wp_type):
|
|
870
|
+
return f"warp.array(ndim={wp_type.ndim}, dtype={type_to_code(wp_type.dtype)})"
|
|
871
|
+
if isinstance(wp_type, Struct):
|
|
872
|
+
key = f"{wp_type.__module__}.{wp_type.key}"
|
|
873
|
+
module = sys.modules.get(wp_type.__module__, None)
|
|
874
|
+
if module is not None:
|
|
875
|
+
referenced_modules[wp_type.__module__] = module
|
|
876
|
+
return key
|
|
877
|
+
if type_is_transformation(wp_type):
|
|
878
|
+
return f"warp._src.types.transformation(dtype={type_to_code(wp_type._wp_scalar_type_)})"
|
|
879
|
+
if type_is_quaternion(wp_type):
|
|
880
|
+
return f"warp._src.types.quaternion(dtype={type_to_code(wp_type._wp_scalar_type_)})"
|
|
881
|
+
if type_is_vector(wp_type):
|
|
882
|
+
return (
|
|
883
|
+
f"warp._src.types.vector(length={wp_type._shape_[0]}, dtype={type_to_code(wp_type._wp_scalar_type_)})"
|
|
884
|
+
)
|
|
885
|
+
if type_is_matrix(wp_type):
|
|
886
|
+
return f"warp._src.types.matrix(shape=({wp_type._shape_[0]}, {wp_type._shape_[1]}), dtype={type_to_code(wp_type._wp_scalar_type_)})"
|
|
887
|
+
if wp_type == builtins.bool:
|
|
888
|
+
return "bool"
|
|
889
|
+
if wp_type == builtins.float:
|
|
890
|
+
return "float"
|
|
891
|
+
if wp_type == builtins.int:
|
|
892
|
+
return "int"
|
|
893
|
+
|
|
894
|
+
name = getattr(wp_type, "__name__", None)
|
|
895
|
+
if name is None:
|
|
896
|
+
return type_repr(wp_type)
|
|
897
|
+
name = getattr(wp_type, "__qualname__", name)
|
|
898
|
+
module = getattr(wp_type, "__module__", None)
|
|
899
|
+
if module is not None:
|
|
900
|
+
referenced_modules[wp_type.__module__] = module
|
|
901
|
+
return wp_type.__module__ + "." + name
|
|
902
|
+
|
|
903
|
+
def get_warp_type(value):
|
|
904
|
+
dtype = type(value)
|
|
905
|
+
if issubclass(dtype, StructInstance):
|
|
906
|
+
# a struct
|
|
907
|
+
return value._cls
|
|
908
|
+
return type_to_warp(dtype)
|
|
909
|
+
|
|
910
|
+
    # gather the arrays in the inputs
    array_shapes = [a.shape for a in inputs if is_array(a)]
    if len(array_shapes) == 0:
        raise ValueError("map requires at least one warp.array input")
    # broadcast the shapes of the arrays
    out_shape = broadcast_shapes(array_shapes)

    module = None
    out_dtypes = None
    if isinstance(func, wp.Function):
        func_name = func.key
        wp_func = func
    else:
        # check if op is a callable function
        if not callable(func):
            raise TypeError("func must be a callable function or a warp.Function")
        wp_func, module = create_warp_function(func)
        func_name = wp_func.key
    if module is None:
        module = warp._src.context.get_module(f"map_{func_name}")

    arg_names = list(wp_func.input_types.keys())

    if len(inputs) != len(arg_names):
        raise TypeError(
            f"Number of input arguments ({len(inputs)}) does not match expected number of function arguments ({len(arg_names)})"
        )

    # determine output dtype
    arg_types = {}
    arg_values = {}
    for i, arg_name in enumerate(arg_names):
        if is_array(inputs[i]):
            # we will pass an element of the array to the function
            arg_types[arg_name] = inputs[i].dtype
            if device is None:
                device = inputs[i].device
        else:
            # we pass the input value directly to the function
            arg_types[arg_name] = get_warp_type(inputs[i])
    func_or_none = wp_func.get_overload(list(arg_types.values()), {})
    if func_or_none is None:
        raise TypeError(
            f"Function {func_name} does not support the provided argument types {', '.join(type_repr(t) for t in arg_types.values())}"
        )
    func = func_or_none

    if func.value_type is not None:
        out_dtype = func.value_type
    elif func.value_func is not None:
        out_dtype = func.value_func(arg_types, arg_values)
    else:
        func.build(None)
        out_dtype = func.value_func(arg_types, arg_values)

    if out_dtype is None:
        raise TypeError("The provided function must return a value")

    if isinstance(out_dtype, tuple) or isinstance(out_dtype, list):
        out_dtypes = out_dtype
    else:
        out_dtypes = (out_dtype,)

    if out is None:
        requires_grad = any(getattr(a, "requires_grad", False) for a in inputs if is_array(a))
        outputs = []
        for dtype in out_dtypes:
            rg = requires_grad and Adjoint.is_differentiable_value_type(dtype)
            outputs.append(wp.empty(out_shape, dtype=dtype, requires_grad=rg, device=device))
    elif len(out_dtypes) == 1 and is_array(out):
        if not types_equal(out.dtype, out_dtypes[0]):
            raise TypeError(
                f"Output array dtype {type_repr(out.dtype)} does not match expected dtype {type_repr(out_dtypes[0])}"
            )
        if out.shape != out_shape:
            raise TypeError(f"Output array shape {out.shape} does not match expected shape {out_shape}")
        outputs = [out]
    elif len(out_dtypes) > 1:
        if isinstance(out, tuple) or isinstance(out, list):
            if len(out) != len(out_dtypes):
                raise TypeError(
                    f"Number of provided output arrays ({len(out)}) does not match expected number of function outputs ({len(out_dtypes)})"
                )
            for i, a in enumerate(out):
                if not types_equal(a.dtype, out_dtypes[i]):
                    raise TypeError(
                        f"Output array {i} dtype {type_repr(a.dtype)} does not match expected dtype {type_repr(out_dtypes[i])}"
                    )
                if a.shape != out_shape:
                    raise TypeError(f"Output array {i} shape {a.shape} does not match expected shape {out_shape}")
            outputs = list(out)
        else:
            raise TypeError(
                f"Invalid output provided, expected {len(out_dtypes)} Warp arrays with shape {out_shape} and dtypes ({', '.join(type_repr(t) for t in out_dtypes)})"
            )

    # create code for a kernel (the generated body is indented so that it parses
    # as valid Python inside the emitted map_kernel function)
    code = """def map_kernel({kernel_args}):
    {tids} = wp.tid()
    {load_args}
"""
    if len(outputs) == 1:
        code += "    __out_0[{tids}] = {func_name}({arg_names})"
    else:
        code += "    " + ", ".join(f"__o_{i}" for i in range(len(outputs)))
        code += " = {func_name}({arg_names})\n"
        for i in range(len(outputs)):
            code += f"    __out_{i}" + "[{tids}]" + f" = __o_{i}\n"

    tids = [f"__tid_{i}" for i in range(len(out_shape))]

    load_args = []
    kernel_args = []
    for arg_name, input in zip(arg_names, inputs):
        if is_array(input):
            arr_name = f"{arg_name}_array"
            array_type_name = type(input).__name__
            kernel_args.append(
                f"{arr_name}: wp.{array_type_name}(dtype={type_to_code(input.dtype)}, ndim={input.ndim})"
            )
            shape = input.shape
            indices = []
            for i in range(1, len(shape) + 1):
                if shape[-i] == 1:
                    indices.append("0")
                else:
                    indices.append(tids[-i])

            load_args.append(f"{arg_name} = {arr_name}[{', '.join(reversed(indices))}]")
        else:
            kernel_args.append(f"{arg_name}: {type_to_code(type(input))}")
    for i, o in enumerate(outputs):
        array_type_name = type(o).__name__
        kernel_args.append(f"__out_{i}: wp.{array_type_name}(dtype={type_to_code(o.dtype)}, ndim={o.ndim})")
    code = code.format(
        func_name=func_name,
        kernel_args=", ".join(kernel_args),
        arg_names=", ".join(arg_names),
        tids=", ".join(tids),
        load_args="\n    ".join(load_args),
    )
    namespace = {}
    namespace.update({"wp": wp, "warp": wp, func_name: wp_func, "Any": Any})
    namespace.update(referenced_modules)
    exec(code, namespace)

    kernel = wp.Kernel(namespace["map_kernel"], key="map_kernel", source=code, module=module)
    if return_kernel:
        return kernel

    wp.launch(
        kernel,
        dim=out_shape,
        inputs=inputs,
        outputs=outputs,
        block_dim=block_dim,
        device=device,
    )

    if len(outputs) == 1:
        o = outputs[0]
    else:
        o = outputs

    return o


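The helper above generates an element-wise kernel on the fly, broadcasts the shapes of its array inputs, and launches it. A minimal usage sketch follows; it assumes the helper is exposed publicly as `wp.map` and that a plain Python callable (here a lambda) is accepted, neither of which is confirmed by this diff:

import warp as wp

a = wp.array([1.0, 2.0, 3.0], dtype=float)
b = wp.array([10.0, 20.0, 30.0], dtype=float)

# element-wise evaluation of the callable over the broadcast input shapes
c = wp.map(lambda x, y: x + 2.0 * y, a, b)
print(c.numpy())  # expected: [21. 42. 63.]
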
# code snippet for invoking cProfile
# cp = cProfile.Profile()
# cp.enable()
# for i in range(1000):
#     self.state = self.integrator.forward(self.model, self.state, self.sim_dt)

# cp.disable()
# cp.print_stats(sort='tottime')
# exit(0)


# helper kernels for initializing NVDB volumes from a dense array
@wp.kernel
def copy_dense_volume_to_nano_vdb_v(volume: wp.uint64, values: wp.array(dtype=wp.vec3, ndim=3)):
    i, j, k = wp.tid()
    wp.volume_store_v(volume, i, j, k, values[i, j, k])


@wp.kernel
def copy_dense_volume_to_nano_vdb_f(volume: wp.uint64, values: wp.array(dtype=wp.float32, ndim=3)):
    i, j, k = wp.tid()
    wp.volume_store_f(volume, i, j, k, values[i, j, k])


@wp.kernel
def copy_dense_volume_to_nano_vdb_i(volume: wp.uint64, values: wp.array(dtype=wp.int32, ndim=3)):
    i, j, k = wp.tid()
    wp.volume_store_i(volume, i, j, k, values[i, j, k])


# represent an edge between v0, v1 with connected faces f0, f1, and opposite vertex o0, and o1
# winding is such that first tri can be reconstructed as {v0, v1, o0}, and second tri as { v1, v0, o1 }
class MeshEdge:
    def __init__(self, v0, v1, o0, o1, f0, f1):
        self.v0 = v0  # vertex 0
        self.v1 = v1  # vertex 1
        self.o0 = o0  # opposite vertex 1
        self.o1 = o1  # opposite vertex 2
        self.f0 = f0  # index of tri1
        self.f1 = f1  # index of tri2


class MeshAdjacency:
    def __init__(self, indices, num_tris):
        # map edges (v0, v1) to faces (f0, f1)
        self.edges = {}
        self.indices = indices

        for index, tri in enumerate(indices):
            self.add_edge(tri[0], tri[1], tri[2], index)
            self.add_edge(tri[1], tri[2], tri[0], index)
            self.add_edge(tri[2], tri[0], tri[1], index)

    def add_edge(self, i0, i1, o, f):  # index1, index2, index3, index of triangle
        key = (min(i0, i1), max(i0, i1))
        edge = None

        if key in self.edges:
            edge = self.edges[key]

            if edge.f1 != -1:
                print("Detected non-manifold edge")
                return
            else:
                # update other side of the edge
                edge.o1 = o
                edge.f1 = f
        else:
            # create new edge with opposite yet to be filled
            edge = MeshEdge(i0, i1, o, -1, f, -1)

        self.edges[key] = edge


def mem_report():  # pragma: no cover
    def _mem_report(tensors, mem_type):
        """Print a summary of the selected tensors of the given type.

        There are two major storage types of concern:
        - GPU: tensors transferred to CUDA devices
        - CPU: tensors remaining on the system memory (usually unimportant)
        Args:
            - tensors: the tensors of specified type
            - mem_type: 'CPU' or 'GPU' in current implementation"""
        total_numel = 0
        total_mem = 0
        visited_data = []
        for tensor in tensors:
            if tensor.is_sparse:
                continue
            # a data_ptr indicates a memory block allocated
            data_ptr = tensor.storage().data_ptr()
            if data_ptr in visited_data:
                continue
            visited_data.append(data_ptr)

            numel = tensor.storage().size()
            total_numel += numel
            element_size = tensor.storage().element_size()
            mem = numel * element_size / 1024 / 1024  # 32bit=4Byte, MByte
            total_mem += mem
        print(f"Type: {mem_type:<4} | Total Tensors: {total_numel:>8} | Used Memory: {total_mem:>8.2f} MB")

    import gc

    import torch

    gc.collect()

    LEN = 65
    objects = gc.get_objects()
    # print('%s\t%s\t\t\t%s' %('Element type', 'Size', 'Used MEM(MBytes)') )
    tensors = [obj for obj in objects if torch.is_tensor(obj)]
    cuda_tensors = [t for t in tensors if t.is_cuda]
    host_tensors = [t for t in tensors if not t.is_cuda]
    _mem_report(cuda_tensors, "GPU")
    _mem_report(host_tensors, "CPU")
    print("=" * LEN)


class ScopedDevice:
    """A context manager to temporarily change the current default device.

    For CUDA devices, this context manager makes the device's CUDA context
    current and restores the previous CUDA context on exit. This is handy when
    running Warp scripts as part of a bigger pipeline because it avoids any side
    effects of changing the CUDA context in the enclosed code.

    Attributes:
        device (Device): The device that will temporarily become the default
            device within the context.
        saved_device (Device): The previous default device. This is restored as
            the default device on exiting the context.
    """

    def __init__(self, device: Devicelike):
        """Initializes the context manager with a device.

        Args:
            device: The device that will temporarily become the default device
                within the context.
        """
        self.device = wp.get_device(device)

    def __enter__(self):
        # save the previous default device
        self.saved_device = self.device.runtime.default_device

        # make this the default device
        self.device.runtime.default_device = self.device

        # make it the current CUDA device so that device alias "cuda" will evaluate to this device
        self.device.context_guard.__enter__()

        return self.device

    def __exit__(self, exc_type, exc_value, traceback):
        # restore original CUDA context
        self.device.context_guard.__exit__(exc_type, exc_value, traceback)

        # restore original target device
        self.device.runtime.default_device = self.saved_device


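A brief usage sketch for the context manager above, assuming a CUDA device aliased "cuda:0" is available:

import warp as wp

with wp.ScopedDevice("cuda:0"):
    # allocations and launches inside the block default to cuda:0,
    # and the previous default device is restored on exit
    a = wp.zeros(1024, dtype=wp.float32)
    b = wp.ones(1024, dtype=wp.float32)
    wp.copy(a, b)
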
class ScopedStream:
    """A context manager to temporarily change the current stream on a device.

    Attributes:
        stream (Stream or None): The stream that will temporarily become the device's
            default stream within the context.
        saved_stream (Stream): The device's previous current stream. This is
            restored as the device's current stream on exiting the context.
        sync_enter (bool): Whether to synchronize this context's stream with
            the device's previous current stream on entering the context.
        sync_exit (bool): Whether to synchronize the device's previous current
            stream with this context's stream on exiting the context.
        device (Device): The device associated with the stream.
    """

    def __init__(self, stream: wp.Stream | None, sync_enter: bool = True, sync_exit: bool = False):
        """Initializes the context manager with a stream and synchronization options.

        Args:
            stream: The stream that will temporarily become the device's
                default stream within the context.
            sync_enter (bool): Whether to synchronize this context's stream with
                the device's previous current stream on entering the context.
            sync_exit (bool): Whether to synchronize the device's previous current
                stream with this context's stream on exiting the context.
        """

        self.stream = stream
        self.sync_enter = sync_enter
        self.sync_exit = sync_exit
        if stream is not None:
            self.device = stream.device
            self.device_scope = ScopedDevice(self.device)

    def __enter__(self):
        if self.stream is not None:
            self.device_scope.__enter__()
            self.saved_stream = self.device.stream
            self.device.set_stream(self.stream, self.sync_enter)

        return self.stream

    def __exit__(self, exc_type, exc_value, traceback):
        if self.stream is not None:
            self.device.set_stream(self.saved_stream, self.sync_exit)
            self.device_scope.__exit__(exc_type, exc_value, traceback)


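A brief usage sketch, assuming a CUDA device is available; work issued inside the block targets the given stream, and the device's previous stream is restored on exit:

import warp as wp

device = wp.get_device("cuda:0")
stream = wp.Stream(device)

with wp.ScopedStream(stream):
    # allocations and launches here are issued on `stream`
    a = wp.zeros(1 << 20, dtype=wp.float32, device=device)

wp.synchronize_stream(stream)
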
TIMING_KERNEL = 1
TIMING_KERNEL_BUILTIN = 2
TIMING_MEMCPY = 4
TIMING_MEMSET = 8
TIMING_GRAPH = 16
TIMING_ALL = 0xFFFFFFFF


# timer utils
class ScopedTimer:
    indent = -1

    enabled = True

    def __init__(
        self,
        name: str,
        active: bool = True,
        print: bool = True,
        detailed: bool = False,
        dict: dict[str, list[float]] | None = None,
        use_nvtx: bool = False,
        color: int | str = "rapids",
        synchronize: bool = False,
        cuda_filter: int = 0,
        report_func: Callable[[list[TimingResult], str], None] | None = None,
        skip_tape: bool = False,
    ):
        """Context manager object for a timer

        Parameters:
            name: Name of timer
            active: Enables this timer
            print: At context manager exit, print elapsed time to ``sys.stdout``
            detailed: Collects additional profiling data using cProfile and calls ``print_stats()`` at context exit
            dict: A dictionary of lists to which the elapsed time will be appended using ``name`` as a key
            use_nvtx: If true, timing functionality is replaced by an NVTX range
            color: ARGB value (e.g. 0x00FFFF) or color name (e.g. 'cyan') associated with the NVTX range
            synchronize: Synchronize the CPU thread with any outstanding CUDA work to return accurate GPU timings
            cuda_filter: Filter flags for CUDA activity timing, e.g. ``warp.TIMING_KERNEL`` or ``warp.TIMING_ALL``
            report_func: A callback function to print the activity report.
                If ``None``, :func:`wp.timing_print() <timing_print>` will be used.
            skip_tape: If true, the timer will not be recorded in the tape

        Attributes:
            extra_msg (str): Can be set to a string that will be added to the printout at context exit.
            elapsed (float): The duration of the ``with`` block used with this object
            timing_results (list[TimingResult]): The list of activity timing results, if collection was requested using ``cuda_filter``
        """
        self.name = name
        self.active = active and self.enabled
        self.print = print
        self.detailed = detailed
        self.dict = dict
        self.use_nvtx = use_nvtx
        self.color = color
        self.synchronize = synchronize
        self.skip_tape = skip_tape
        self.elapsed = 0.0
        self.cuda_filter = cuda_filter
        self.report_func = report_func or wp.timing_print
        self.extra_msg = ""  # Can be used to add to the message printed at manager exit

        if self.dict is not None:
            if name not in self.dict:
                self.dict[name] = []

    def __enter__(self):
        if not self.skip_tape and warp._src.context.runtime is not None and warp._src.context.runtime.tape is not None:
            warp._src.context.runtime.tape.record_scope_begin(self.name)
        if self.active:
            if self.synchronize:
                wp.synchronize()

            if self.cuda_filter:
                # begin CUDA activity collection, synchronizing if needed
                timing_begin(self.cuda_filter, synchronize=not self.synchronize)

            if self.detailed:
                self.cp = cProfile.Profile()
                self.cp.clear()
                self.cp.enable()

            if self.use_nvtx:
                import nvtx

                self.nvtx_range_id = nvtx.start_range(self.name, color=self.color)

            if self.print:
                ScopedTimer.indent += 1

                if warp.config.verbose:
                    indent = "    " * ScopedTimer.indent
                    print(f"{indent}{self.name} ...", flush=True)

            self.start = time.perf_counter_ns()

        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if not self.skip_tape and warp._src.context.runtime is not None and warp._src.context.runtime.tape is not None:
            warp._src.context.runtime.tape.record_scope_end()
        if self.active:
            if self.synchronize:
                wp.synchronize()

            self.elapsed = (time.perf_counter_ns() - self.start) / 1000000.0

            if self.use_nvtx:
                import nvtx

                nvtx.end_range(self.nvtx_range_id)

            if self.detailed:
                self.cp.disable()
                self.cp.print_stats(sort="tottime")

            if self.cuda_filter:
                # end CUDA activity collection, synchronizing if needed
                self.timing_results = timing_end(synchronize=not self.synchronize)
            else:
                self.timing_results = []

            if self.dict is not None:
                self.dict[self.name].append(self.elapsed)

            if self.print:
                indent = "    " * ScopedTimer.indent

                if self.timing_results:
                    self.report_func(self.timing_results, indent=indent)
                    print()

                if self.extra_msg:
                    print(f"{indent}{self.name} took {self.elapsed:.2f} ms {self.extra_msg}")
                else:
                    print(f"{indent}{self.name} took {self.elapsed:.2f} ms")

            ScopedTimer.indent -= 1


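A brief usage sketch combining the wall-clock timer with CUDA activity collection, assuming a CUDA device aliased "cuda:0":

import warp as wp

@wp.kernel
def scale(a: wp.array(dtype=float), s: float):
    i = wp.tid()
    a[i] = a[i] * s

a = wp.ones(1_000_000, dtype=float, device="cuda:0")

timings = {}
with wp.ScopedTimer("scale", dict=timings, synchronize=True, cuda_filter=wp.TIMING_KERNEL):
    wp.launch(scale, dim=a.shape, inputs=[a, 2.0], device="cuda:0")

print(timings["scale"])  # list of elapsed times in milliseconds, keyed by the timer name
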
# Allow temporarily enabling/disabling mempool allocators
class ScopedMempool:
    def __init__(self, device: Devicelike, enable: bool):
        self.device = wp.get_device(device)
        self.enable = enable

    def __enter__(self):
        self.saved_setting = wp.is_mempool_enabled(self.device)
        wp.set_mempool_enabled(self.device, self.enable)

    def __exit__(self, exc_type, exc_value, traceback):
        wp.set_mempool_enabled(self.device, self.saved_setting)


# Allow temporarily enabling/disabling mempool access
class ScopedMempoolAccess:
    def __init__(self, target_device: Devicelike, peer_device: Devicelike, enable: bool):
        self.target_device = target_device
        self.peer_device = peer_device
        self.enable = enable

    def __enter__(self):
        self.saved_setting = wp.is_mempool_access_enabled(self.target_device, self.peer_device)
        wp.set_mempool_access_enabled(self.target_device, self.peer_device, self.enable)

    def __exit__(self, exc_type, exc_value, traceback):
        wp.set_mempool_access_enabled(self.target_device, self.peer_device, self.saved_setting)


# Allow temporarily enabling/disabling peer access
class ScopedPeerAccess:
    def __init__(self, target_device: Devicelike, peer_device: Devicelike, enable: bool):
        self.target_device = target_device
        self.peer_device = peer_device
        self.enable = enable

    def __enter__(self):
        self.saved_setting = wp.is_peer_access_enabled(self.target_device, self.peer_device)
        wp.set_peer_access_enabled(self.target_device, self.peer_device, self.enable)

    def __exit__(self, exc_type, exc_value, traceback):
        wp.set_peer_access_enabled(self.target_device, self.peer_device, self.saved_setting)


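The three managers above simply save the current setting through the corresponding `wp.is_*_enabled` query, apply the requested value, and restore the saved setting on exit. A brief sketch, assuming a CUDA device aliased "cuda:0":

import warp as wp

# temporarily disable the memory pool allocator on one device,
# e.g. to compare allocation behaviour against the default allocator
with wp.ScopedMempool("cuda:0", False):
    a = wp.zeros(1024, dtype=float, device="cuda:0")
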
class ScopedCapture:
    def __init__(self, device: Devicelike = None, stream=None, force_module_load=None, external=False):
        self.device = device
        self.stream = stream
        self.force_module_load = force_module_load
        self.external = external
        self.active = False
        self.graph = None

    def __enter__(self):
        try:
            wp.capture_begin(
                device=self.device, stream=self.stream, force_module_load=self.force_module_load, external=self.external
            )
            self.active = True
            return self
        except:
            raise

    def __exit__(self, exc_type, exc_value, traceback):
        if self.active:
            try:
                self.graph = wp.capture_end(device=self.device, stream=self.stream)
            except Exception:
                # Only report this exception if __exit__() was reached without an exception,
                # otherwise re-raise the original exception.
                if exc_type is None:
                    raise
            finally:
                self.active = False


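A brief usage sketch: the capture records launches into a CUDA graph exposed as the manager's `graph` attribute, which can then be replayed with `wp.capture_launch`. Assumes a CUDA device aliased "cuda:0":

import warp as wp

@wp.kernel
def inc(a: wp.array(dtype=float)):
    i = wp.tid()
    a[i] = a[i] + 1.0

a = wp.zeros(1024, dtype=float, device="cuda:0")

with wp.ScopedCapture(device="cuda:0") as capture:
    wp.launch(inc, dim=a.shape, inputs=[a], device="cuda:0")

# replay the captured CUDA graph without re-incurring launch overhead
for _ in range(10):
    wp.capture_launch(capture.graph)
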
def check_p2p():
    """Check if the machine is configured properly for peer-to-peer transfers.

    Returns:
        A Boolean indicating whether the machine is configured properly for peer-to-peer transfers.
        On Linux, this function attempts to determine if IOMMU is enabled and will return `False` if IOMMU is detected.
        On other operating systems, it always returns `True`.
    """

    # HACK: allow disabling P2P tests using an environment variable
    disable_p2p_tests = os.getenv("WARP_DISABLE_P2P_TESTS", default="0")
    if int(disable_p2p_tests):
        return False

    if sys.platform == "linux":
        # IOMMU enablement can affect peer-to-peer transfers.
        # On modern Linux, there should be IOMMU-related entries in the /sys file system.
        # This should be more reliable than checking kernel logs like dmesg.
        if os.path.isdir("/sys/class/iommu") and os.listdir("/sys/class/iommu"):
            return False
        if os.path.isdir("/sys/kernel/iommu_groups") and os.listdir("/sys/kernel/iommu_groups"):
            return False

    return True


class timing_result_t(ctypes.Structure):
    """CUDA timing struct for fetching values from C++"""

    _fields_ = (
        ("context", ctypes.c_void_p),
        ("name", ctypes.c_char_p),
        ("filter", ctypes.c_int),
        ("elapsed", ctypes.c_float),
    )


class TimingResult:
    """Timing result for a single activity."""

    def __init__(self, device, name, filter, elapsed):
        self.device: warp._src.context.Device = device
        """The device where the activity was recorded."""

        self.name: str = name
        """The activity name."""

        self.filter: int = filter
        """The type of activity (e.g., ``warp.TIMING_KERNEL``)."""

        self.elapsed: float = elapsed
        """The elapsed time in milliseconds."""


def timing_begin(cuda_filter: int = TIMING_ALL, synchronize: bool = True) -> None:
    """Begin detailed activity timing.

    Parameters:
        cuda_filter: Filter flags for CUDA activity timing, e.g. ``warp.TIMING_KERNEL`` or ``warp.TIMING_ALL``
        synchronize: Whether to synchronize all CUDA devices before timing starts
    """

    if synchronize:
        warp.synchronize()

    warp._src.context.runtime.core.wp_cuda_timing_begin(cuda_filter)


def timing_end(synchronize: bool = True) -> list[TimingResult]:
    """End detailed activity timing.

    Parameters:
        synchronize: Whether to synchronize all CUDA devices before timing ends

    Returns:
        A list of :class:`TimingResult` objects for all recorded activities.
    """

    if synchronize:
        warp.synchronize()

    # get result count
    count = warp._src.context.runtime.core.wp_cuda_timing_get_result_count()

    # get result array from C++
    result_buffer = (timing_result_t * count)()
    warp._src.context.runtime.core.wp_cuda_timing_end(ctypes.byref(result_buffer), count)

    # prepare Python result list
    results = []
    for r in result_buffer:
        device = warp._src.context.runtime.context_map.get(r.context)
        filter = r.filter
        elapsed = r.elapsed

        name = r.name.decode()
        if filter == TIMING_KERNEL:
            if name.endswith("forward"):
                # strip trailing "_cuda_kernel_forward"
                name = f"forward kernel {name[:-20]}"
            else:
                # strip trailing "_cuda_kernel_backward"
                name = f"backward kernel {name[:-21]}"
        elif filter == TIMING_KERNEL_BUILTIN:
            if name.startswith("wp::"):
                name = f"builtin kernel {name[4:]}"
            else:
                name = f"builtin kernel {name}"

        results.append(TimingResult(device, name, filter, elapsed))

    return results


def timing_print(results: list[TimingResult], indent: str = "") -> None:
    """Print timing results.

    Parameters:
        results: List of :class:`TimingResult` objects to print.
        indent: Optional indentation to prepend to all output lines.
    """

    if not results:
        print("No activity")
        return

    class Aggregate:
        def __init__(self, count=0, elapsed=0):
            self.count = count
            self.elapsed = elapsed

    device_totals = {}
    activity_totals = {}

    max_name_len = len("Activity")
    for r in results:
        name_len = len(r.name)
        max_name_len = max(max_name_len, name_len)

    activity_width = max_name_len + 1
    activity_dashes = "-" * activity_width

    print(f"{indent}CUDA timeline:")
    print(f"{indent}----------------+---------+{activity_dashes}")
    print(f"{indent}Time            | Device  | Activity")
    print(f"{indent}----------------+---------+{activity_dashes}")
    for r in results:
        device_agg = device_totals.get(r.device.alias)
        if device_agg is None:
            device_totals[r.device.alias] = Aggregate(count=1, elapsed=r.elapsed)
        else:
            device_agg.count += 1
            device_agg.elapsed += r.elapsed

        activity_agg = activity_totals.get(r.name)
        if activity_agg is None:
            activity_totals[r.name] = Aggregate(count=1, elapsed=r.elapsed)
        else:
            activity_agg.count += 1
            activity_agg.elapsed += r.elapsed

        print(f"{indent}{r.elapsed:12.6f} ms | {r.device.alias:7s} | {r.name}")

    print()
    print(f"{indent}CUDA activity summary:")
    print(f"{indent}----------------+---------+{activity_dashes}")
    print(f"{indent}Total time      | Count   | Activity")
    print(f"{indent}----------------+---------+{activity_dashes}")
    for name, agg in activity_totals.items():
        print(f"{indent}{agg.elapsed:12.6f} ms | {agg.count:7d} | {name}")

    print()
    print(f"{indent}CUDA device summary:")
    print(f"{indent}----------------+---------+{activity_dashes}")
    print(f"{indent}Total time      | Count   | Device")
    print(f"{indent}----------------+---------+{activity_dashes}")
    for device, agg in device_totals.items():
        print(f"{indent}{agg.elapsed:12.6f} ms | {agg.count:7d} | {device}")


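A brief usage sketch of the activity-timing API defined above, assuming a CUDA device aliased "cuda:0":

import warp as wp

@wp.kernel
def axpy(x: wp.array(dtype=float), y: wp.array(dtype=float), a: float):
    i = wp.tid()
    y[i] = a * x[i] + y[i]

x = wp.ones(1_000_000, dtype=float, device="cuda:0")
y = wp.zeros_like(x)

wp.timing_begin(cuda_filter=wp.TIMING_ALL)
for _ in range(4):
    wp.launch(axpy, dim=x.shape, inputs=[x, y, 2.0], device="cuda:0")
results = wp.timing_end()
wp.timing_print(results)
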
def get_deprecated_api(module, namespace, attr_name):
    # if not attr_name.startswith("__"):
    #     module_name = module.__name__.split(".")[-1]
    #     warn(
    #         f"The symbol `{namespace}.{module_name}.{attr_name}` is internal and will be removed from the public API.",
    #         DeprecationWarning,
    #     )

    return getattr(module, attr_name)