warp-lang 1.9.1-py3-none-manylinux_2_34_aarch64.whl → 1.10.0rc2-py3-none-manylinux_2_34_aarch64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of warp-lang might be problematic.
- warp/__init__.py +301 -287
- warp/__init__.pyi +794 -305
- warp/_src/__init__.py +14 -0
- warp/_src/autograd.py +1075 -0
- warp/_src/build.py +618 -0
- warp/_src/build_dll.py +640 -0
- warp/{builtins.py → _src/builtins.py} +1382 -377
- warp/_src/codegen.py +4359 -0
- warp/{config.py → _src/config.py} +178 -169
- warp/_src/constants.py +57 -0
- warp/_src/context.py +8294 -0
- warp/_src/dlpack.py +462 -0
- warp/_src/fabric.py +355 -0
- warp/_src/fem/__init__.py +14 -0
- warp/_src/fem/adaptivity.py +508 -0
- warp/_src/fem/cache.py +687 -0
- warp/_src/fem/dirichlet.py +188 -0
- warp/{fem → _src/fem}/domain.py +40 -30
- warp/_src/fem/field/__init__.py +131 -0
- warp/_src/fem/field/field.py +701 -0
- warp/{fem → _src/fem}/field/nodal_field.py +30 -15
- warp/{fem → _src/fem}/field/restriction.py +1 -1
- warp/{fem → _src/fem}/field/virtual.py +53 -27
- warp/_src/fem/geometry/__init__.py +32 -0
- warp/{fem → _src/fem}/geometry/adaptive_nanogrid.py +77 -163
- warp/_src/fem/geometry/closest_point.py +97 -0
- warp/{fem → _src/fem}/geometry/deformed_geometry.py +14 -22
- warp/{fem → _src/fem}/geometry/element.py +32 -10
- warp/{fem → _src/fem}/geometry/geometry.py +48 -20
- warp/{fem → _src/fem}/geometry/grid_2d.py +12 -23
- warp/{fem → _src/fem}/geometry/grid_3d.py +12 -23
- warp/{fem → _src/fem}/geometry/hexmesh.py +40 -63
- warp/{fem → _src/fem}/geometry/nanogrid.py +255 -248
- warp/{fem → _src/fem}/geometry/partition.py +121 -63
- warp/{fem → _src/fem}/geometry/quadmesh.py +26 -45
- warp/{fem → _src/fem}/geometry/tetmesh.py +40 -63
- warp/{fem → _src/fem}/geometry/trimesh.py +26 -45
- warp/{fem → _src/fem}/integrate.py +164 -158
- warp/_src/fem/linalg.py +383 -0
- warp/_src/fem/operator.py +396 -0
- warp/_src/fem/polynomial.py +229 -0
- warp/{fem → _src/fem}/quadrature/pic_quadrature.py +15 -20
- warp/{fem → _src/fem}/quadrature/quadrature.py +95 -47
- warp/_src/fem/space/__init__.py +248 -0
- warp/{fem → _src/fem}/space/basis_function_space.py +20 -11
- warp/_src/fem/space/basis_space.py +679 -0
- warp/{fem → _src/fem}/space/dof_mapper.py +3 -3
- warp/{fem → _src/fem}/space/function_space.py +14 -13
- warp/{fem → _src/fem}/space/grid_2d_function_space.py +4 -7
- warp/{fem → _src/fem}/space/grid_3d_function_space.py +4 -4
- warp/{fem → _src/fem}/space/hexmesh_function_space.py +4 -10
- warp/{fem → _src/fem}/space/nanogrid_function_space.py +3 -9
- warp/{fem → _src/fem}/space/partition.py +117 -60
- warp/{fem → _src/fem}/space/quadmesh_function_space.py +4 -10
- warp/{fem → _src/fem}/space/restriction.py +66 -33
- warp/_src/fem/space/shape/__init__.py +152 -0
- warp/{fem → _src/fem}/space/shape/cube_shape_function.py +9 -9
- warp/{fem → _src/fem}/space/shape/shape_function.py +8 -9
- warp/{fem → _src/fem}/space/shape/square_shape_function.py +6 -6
- warp/{fem → _src/fem}/space/shape/tet_shape_function.py +3 -3
- warp/{fem → _src/fem}/space/shape/triangle_shape_function.py +3 -3
- warp/{fem → _src/fem}/space/tetmesh_function_space.py +3 -9
- warp/_src/fem/space/topology.py +459 -0
- warp/{fem → _src/fem}/space/trimesh_function_space.py +3 -9
- warp/_src/fem/types.py +112 -0
- warp/_src/fem/utils.py +486 -0
- warp/_src/jax.py +186 -0
- warp/_src/jax_experimental/__init__.py +14 -0
- warp/_src/jax_experimental/custom_call.py +387 -0
- warp/_src/jax_experimental/ffi.py +1284 -0
- warp/_src/jax_experimental/xla_ffi.py +656 -0
- warp/_src/marching_cubes.py +708 -0
- warp/_src/math.py +414 -0
- warp/_src/optim/__init__.py +14 -0
- warp/_src/optim/adam.py +163 -0
- warp/_src/optim/linear.py +1606 -0
- warp/_src/optim/sgd.py +112 -0
- warp/_src/paddle.py +406 -0
- warp/_src/render/__init__.py +14 -0
- warp/_src/render/imgui_manager.py +289 -0
- warp/_src/render/render_opengl.py +3636 -0
- warp/_src/render/render_usd.py +937 -0
- warp/_src/render/utils.py +160 -0
- warp/_src/sparse.py +2716 -0
- warp/_src/tape.py +1206 -0
- warp/{thirdparty → _src/thirdparty}/unittest_parallel.py +9 -2
- warp/_src/torch.py +391 -0
- warp/_src/types.py +5870 -0
- warp/_src/utils.py +1693 -0
- warp/autograd.py +12 -1054
- warp/bin/warp-clang.so +0 -0
- warp/bin/warp.so +0 -0
- warp/build.py +8 -588
- warp/build_dll.py +6 -721
- warp/codegen.py +6 -4251
- warp/constants.py +6 -39
- warp/context.py +12 -8062
- warp/dlpack.py +6 -444
- warp/examples/distributed/example_jacobi_mpi.py +4 -5
- warp/examples/fem/example_adaptive_grid.py +1 -1
- warp/examples/fem/example_apic_fluid.py +1 -1
- warp/examples/fem/example_burgers.py +8 -8
- warp/examples/fem/example_diffusion.py +1 -1
- warp/examples/fem/example_distortion_energy.py +1 -1
- warp/examples/fem/example_mixed_elasticity.py +2 -2
- warp/examples/fem/example_navier_stokes.py +1 -1
- warp/examples/fem/example_nonconforming_contact.py +7 -7
- warp/examples/fem/example_stokes.py +1 -1
- warp/examples/fem/example_stokes_transfer.py +1 -1
- warp/examples/fem/utils.py +2 -2
- warp/examples/interop/example_jax_callable.py +1 -1
- warp/examples/interop/example_jax_ffi_callback.py +1 -1
- warp/examples/interop/example_jax_kernel.py +1 -1
- warp/examples/tile/example_tile_mcgp.py +191 -0
- warp/fabric.py +6 -337
- warp/fem/__init__.py +159 -97
- warp/fem/adaptivity.py +7 -489
- warp/fem/cache.py +9 -648
- warp/fem/dirichlet.py +6 -184
- warp/fem/field/__init__.py +8 -109
- warp/fem/field/field.py +7 -652
- warp/fem/geometry/__init__.py +7 -18
- warp/fem/geometry/closest_point.py +11 -77
- warp/fem/linalg.py +18 -366
- warp/fem/operator.py +11 -369
- warp/fem/polynomial.py +9 -209
- warp/fem/space/__init__.py +5 -211
- warp/fem/space/basis_space.py +6 -662
- warp/fem/space/shape/__init__.py +41 -118
- warp/fem/space/topology.py +6 -437
- warp/fem/types.py +6 -81
- warp/fem/utils.py +11 -444
- warp/jax.py +8 -165
- warp/jax_experimental/__init__.py +14 -1
- warp/jax_experimental/custom_call.py +8 -365
- warp/jax_experimental/ffi.py +17 -873
- warp/jax_experimental/xla_ffi.py +5 -605
- warp/marching_cubes.py +5 -689
- warp/math.py +16 -393
- warp/native/array.h +385 -37
- warp/native/builtin.h +314 -37
- warp/native/bvh.cpp +43 -9
- warp/native/bvh.cu +62 -27
- warp/native/bvh.h +310 -309
- warp/native/clang/clang.cpp +102 -97
- warp/native/coloring.cpp +0 -1
- warp/native/crt.h +208 -0
- warp/native/exports.h +156 -0
- warp/native/hashgrid.cu +2 -0
- warp/native/intersect.h +24 -1
- warp/native/intersect_tri.h +44 -35
- warp/native/mat.h +1456 -276
- warp/native/mesh.cpp +4 -4
- warp/native/mesh.cu +4 -2
- warp/native/mesh.h +176 -61
- warp/native/quat.h +0 -52
- warp/native/scan.cu +2 -0
- warp/native/sparse.cu +7 -3
- warp/native/spatial.h +12 -0
- warp/native/tile.h +681 -89
- warp/native/tile_radix_sort.h +1 -1
- warp/native/tile_reduce.h +394 -46
- warp/native/tile_scan.h +4 -4
- warp/native/vec.h +469 -0
- warp/native/version.h +23 -0
- warp/native/volume.cpp +1 -1
- warp/native/volume.cu +1 -0
- warp/native/volume.h +1 -1
- warp/native/volume_builder.cu +2 -0
- warp/native/warp.cpp +57 -29
- warp/native/warp.cu +253 -171
- warp/native/warp.h +11 -8
- warp/optim/__init__.py +6 -3
- warp/optim/adam.py +6 -145
- warp/optim/linear.py +14 -1585
- warp/optim/sgd.py +6 -94
- warp/paddle.py +6 -388
- warp/render/__init__.py +8 -4
- warp/render/imgui_manager.py +7 -267
- warp/render/render_opengl.py +6 -3618
- warp/render/render_usd.py +6 -919
- warp/render/utils.py +6 -142
- warp/sparse.py +37 -2563
- warp/tape.py +6 -1188
- warp/tests/__main__.py +1 -1
- warp/tests/cuda/test_async.py +4 -4
- warp/tests/cuda/test_conditional_captures.py +1 -1
- warp/tests/cuda/test_multigpu.py +1 -1
- warp/tests/cuda/test_streams.py +58 -1
- warp/tests/geometry/test_bvh.py +157 -22
- warp/tests/geometry/test_marching_cubes.py +0 -1
- warp/tests/geometry/test_mesh.py +5 -3
- warp/tests/geometry/test_mesh_query_aabb.py +5 -12
- warp/tests/geometry/test_mesh_query_point.py +5 -2
- warp/tests/geometry/test_mesh_query_ray.py +15 -3
- warp/tests/geometry/test_volume_write.py +5 -5
- warp/tests/interop/test_dlpack.py +14 -14
- warp/tests/interop/test_jax.py +772 -49
- warp/tests/interop/test_paddle.py +1 -1
- warp/tests/test_adam.py +0 -1
- warp/tests/test_arithmetic.py +9 -9
- warp/tests/test_array.py +527 -100
- warp/tests/test_array_reduce.py +3 -3
- warp/tests/test_atomic.py +12 -8
- warp/tests/test_atomic_bitwise.py +209 -0
- warp/tests/test_atomic_cas.py +4 -4
- warp/tests/test_bool.py +2 -2
- warp/tests/test_builtins_resolution.py +5 -571
- warp/tests/test_codegen.py +33 -14
- warp/tests/test_conditional.py +1 -1
- warp/tests/test_context.py +6 -6
- warp/tests/test_copy.py +242 -161
- warp/tests/test_ctypes.py +3 -3
- warp/tests/test_devices.py +24 -2
- warp/tests/test_examples.py +16 -84
- warp/tests/test_fabricarray.py +35 -35
- warp/tests/test_fast_math.py +0 -2
- warp/tests/test_fem.py +56 -10
- warp/tests/test_fixedarray.py +3 -3
- warp/tests/test_func.py +8 -5
- warp/tests/test_generics.py +1 -1
- warp/tests/test_indexedarray.py +24 -24
- warp/tests/test_intersect.py +39 -9
- warp/tests/test_large.py +1 -1
- warp/tests/test_lerp.py +3 -1
- warp/tests/test_linear_solvers.py +1 -1
- warp/tests/test_map.py +35 -4
- warp/tests/test_mat.py +52 -62
- warp/tests/test_mat_constructors.py +4 -5
- warp/tests/test_mat_lite.py +1 -1
- warp/tests/test_mat_scalar_ops.py +121 -121
- warp/tests/test_math.py +34 -0
- warp/tests/test_module_aot.py +4 -4
- warp/tests/test_modules_lite.py +28 -2
- warp/tests/test_print.py +11 -11
- warp/tests/test_quat.py +93 -58
- warp/tests/test_runlength_encode.py +1 -1
- warp/tests/test_scalar_ops.py +38 -10
- warp/tests/test_smoothstep.py +1 -1
- warp/tests/test_sparse.py +126 -15
- warp/tests/test_spatial.py +105 -87
- warp/tests/test_special_values.py +6 -6
- warp/tests/test_static.py +7 -7
- warp/tests/test_struct.py +13 -2
- warp/tests/test_triangle_closest_point.py +48 -1
- warp/tests/test_types.py +27 -15
- warp/tests/test_utils.py +52 -52
- warp/tests/test_vec.py +29 -29
- warp/tests/test_vec_constructors.py +5 -5
- warp/tests/test_vec_scalar_ops.py +97 -97
- warp/tests/test_version.py +75 -0
- warp/tests/tile/test_tile.py +178 -0
- warp/tests/tile/test_tile_atomic_bitwise.py +403 -0
- warp/tests/tile/test_tile_cholesky.py +7 -4
- warp/tests/tile/test_tile_load.py +26 -2
- warp/tests/tile/test_tile_mathdx.py +3 -3
- warp/tests/tile/test_tile_matmul.py +1 -1
- warp/tests/tile/test_tile_mlp.py +2 -4
- warp/tests/tile/test_tile_reduce.py +214 -13
- warp/tests/unittest_suites.py +6 -14
- warp/tests/unittest_utils.py +10 -9
- warp/tests/walkthrough_debug.py +3 -1
- warp/torch.py +6 -373
- warp/types.py +29 -5764
- warp/utils.py +10 -1659
- {warp_lang-1.9.1.dist-info → warp_lang-1.10.0rc2.dist-info}/METADATA +46 -99
- warp_lang-1.10.0rc2.dist-info/RECORD +468 -0
- warp_lang-1.10.0rc2.dist-info/licenses/licenses/Gaia-LICENSE.txt +6 -0
- warp_lang-1.10.0rc2.dist-info/licenses/licenses/appdirs-LICENSE.txt +22 -0
- warp_lang-1.10.0rc2.dist-info/licenses/licenses/asset_pixel_jpg-LICENSE.txt +3 -0
- warp_lang-1.10.0rc2.dist-info/licenses/licenses/cuda-LICENSE.txt +1582 -0
- warp_lang-1.10.0rc2.dist-info/licenses/licenses/dlpack-LICENSE.txt +201 -0
- warp_lang-1.10.0rc2.dist-info/licenses/licenses/fp16-LICENSE.txt +28 -0
- warp_lang-1.10.0rc2.dist-info/licenses/licenses/libmathdx-LICENSE.txt +220 -0
- warp_lang-1.10.0rc2.dist-info/licenses/licenses/llvm-LICENSE.txt +279 -0
- warp_lang-1.10.0rc2.dist-info/licenses/licenses/moller-LICENSE.txt +16 -0
- warp_lang-1.10.0rc2.dist-info/licenses/licenses/nanovdb-LICENSE.txt +2 -0
- warp_lang-1.10.0rc2.dist-info/licenses/licenses/nvrtc-LICENSE.txt +1592 -0
- warp_lang-1.10.0rc2.dist-info/licenses/licenses/svd-LICENSE.txt +23 -0
- warp_lang-1.10.0rc2.dist-info/licenses/licenses/unittest_parallel-LICENSE.txt +21 -0
- warp_lang-1.10.0rc2.dist-info/licenses/licenses/usd-LICENSE.txt +213 -0
- warp_lang-1.10.0rc2.dist-info/licenses/licenses/windingnumber-LICENSE.txt +21 -0
- warp/examples/assets/cartpole.urdf +0 -110
- warp/examples/assets/crazyflie.usd +0 -0
- warp/examples/assets/nv_ant.xml +0 -92
- warp/examples/assets/nv_humanoid.xml +0 -183
- warp/examples/assets/quadruped.urdf +0 -268
- warp/examples/optim/example_bounce.py +0 -266
- warp/examples/optim/example_cloth_throw.py +0 -228
- warp/examples/optim/example_drone.py +0 -870
- warp/examples/optim/example_inverse_kinematics.py +0 -182
- warp/examples/optim/example_inverse_kinematics_torch.py +0 -191
- warp/examples/optim/example_softbody_properties.py +0 -400
- warp/examples/optim/example_spring_cage.py +0 -245
- warp/examples/optim/example_trajectory.py +0 -227
- warp/examples/sim/example_cartpole.py +0 -143
- warp/examples/sim/example_cloth.py +0 -225
- warp/examples/sim/example_cloth_self_contact.py +0 -316
- warp/examples/sim/example_granular.py +0 -130
- warp/examples/sim/example_granular_collision_sdf.py +0 -202
- warp/examples/sim/example_jacobian_ik.py +0 -244
- warp/examples/sim/example_particle_chain.py +0 -124
- warp/examples/sim/example_quadruped.py +0 -203
- warp/examples/sim/example_rigid_chain.py +0 -203
- warp/examples/sim/example_rigid_contact.py +0 -195
- warp/examples/sim/example_rigid_force.py +0 -133
- warp/examples/sim/example_rigid_gyroscopic.py +0 -115
- warp/examples/sim/example_rigid_soft_contact.py +0 -140
- warp/examples/sim/example_soft_body.py +0 -196
- warp/examples/tile/example_tile_walker.py +0 -327
- warp/sim/__init__.py +0 -74
- warp/sim/articulation.py +0 -793
- warp/sim/collide.py +0 -2570
- warp/sim/graph_coloring.py +0 -307
- warp/sim/import_mjcf.py +0 -791
- warp/sim/import_snu.py +0 -227
- warp/sim/import_urdf.py +0 -579
- warp/sim/import_usd.py +0 -898
- warp/sim/inertia.py +0 -357
- warp/sim/integrator.py +0 -245
- warp/sim/integrator_euler.py +0 -2000
- warp/sim/integrator_featherstone.py +0 -2101
- warp/sim/integrator_vbd.py +0 -2487
- warp/sim/integrator_xpbd.py +0 -3295
- warp/sim/model.py +0 -4821
- warp/sim/particles.py +0 -121
- warp/sim/render.py +0 -431
- warp/sim/utils.py +0 -431
- warp/tests/sim/disabled_kinematics.py +0 -244
- warp/tests/sim/test_cloth.py +0 -863
- warp/tests/sim/test_collision.py +0 -743
- warp/tests/sim/test_coloring.py +0 -347
- warp/tests/sim/test_inertia.py +0 -161
- warp/tests/sim/test_model.py +0 -226
- warp/tests/sim/test_sim_grad.py +0 -287
- warp/tests/sim/test_sim_grad_bounce_linear.py +0 -212
- warp/tests/sim/test_sim_kinematics.py +0 -98
- warp/thirdparty/__init__.py +0 -0
- warp_lang-1.9.1.dist-info/RECORD +0 -456
- /warp/{fem → _src/fem}/quadrature/__init__.py +0 -0
- /warp/{tests/sim → _src/thirdparty}/__init__.py +0 -0
- /warp/{thirdparty → _src/thirdparty}/appdirs.py +0 -0
- /warp/{thirdparty → _src/thirdparty}/dlpack.py +0 -0
- {warp_lang-1.9.1.dist-info → warp_lang-1.10.0rc2.dist-info}/WHEEL +0 -0
- {warp_lang-1.9.1.dist-info → warp_lang-1.10.0rc2.dist-info}/licenses/LICENSE.md +0 -0
- {warp_lang-1.9.1.dist-info → warp_lang-1.10.0rc2.dist-info}/top_level.txt +0 -0
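The pattern throughout the listing above appears to be a source-layout refactor: implementations move into a new private package under warp/_src/, while each old top-level module shrinks to a small stub (for example, warp/dlpack.py drops from 444 lines to 6 while warp/_src/dlpack.py is added with 462). The stub below is an illustrative sketch only, not the actual contents of any file in 1.10.0rc2:

# Hypothetical re-export stub, sketching what a ~6-line public warp/dlpack.py might look like.
# from_dlpack and to_dlpack are the public entry points defined in warp/_src/dlpack.py (shown below).
from warp._src.dlpack import from_dlpack, to_dlpack

__all__ = ["from_dlpack", "to_dlpack"]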
warp/_src/dlpack.py
ADDED
@@ -0,0 +1,462 @@
+# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Python specification for DLpack:
+# https://dmlc.github.io/dlpack/latest/python_spec.html
+
+import ctypes
+
+import warp
+from warp._src.thirdparty.dlpack import (
+    DLDataType,
+    DLDataTypeCode,
+    DLDevice,
+    DLDeviceType,
+    DLManagedTensor,
+    _c_str_dltensor,
+)
+
+_c_str_used_dltensor = b"used_dltensor"
+
+PyMem_RawMalloc = ctypes.pythonapi.PyMem_RawMalloc
+PyMem_RawMalloc.argtypes = [ctypes.c_size_t]
+PyMem_RawMalloc.restype = ctypes.c_void_p
+
+PyMem_RawFree = ctypes.pythonapi.PyMem_RawFree
+PyMem_RawFree.argtypes = [ctypes.c_void_p]
+PyMem_RawFree.restype = None
+
+Py_IncRef = ctypes.pythonapi.Py_IncRef
+Py_IncRef.argtypes = [ctypes.py_object]
+Py_IncRef.restype = None
+
+Py_DecRef = ctypes.pythonapi.Py_DecRef
+Py_DecRef.argtypes = [ctypes.py_object]
+Py_DecRef.restype = None
+
+PyCapsule_Destructor = ctypes.CFUNCTYPE(None, ctypes.c_void_p)
+
+PyCapsule_IsValid = ctypes.pythonapi.PyCapsule_IsValid
+PyCapsule_IsValid.argtypes = [ctypes.py_object, ctypes.c_char_p]
+PyCapsule_IsValid.restype = ctypes.c_int
+
+PyCapsule_GetPointer = ctypes.pythonapi.PyCapsule_GetPointer
+PyCapsule_GetPointer.argtypes = [ctypes.py_object, ctypes.c_char_p]
+PyCapsule_GetPointer.restype = ctypes.c_void_p
+
+PyCapsule_SetName = ctypes.pythonapi.PyCapsule_SetName
+PyCapsule_SetName.argtypes = [ctypes.py_object, ctypes.c_char_p]
+PyCapsule_SetName.restype = ctypes.c_int
+
+
+class _DLPackTensorHolder:
+    """Class responsible for deleting DLManagedTensor memory after ownership is transferred from a capsule."""
+
+    def __new__(cls, *args, **kwargs):
+        instance = super().__new__(cls)
+        instance.mem_ptr = None
+        return instance
+
+    def __init__(self, mem_ptr):
+        self.mem_ptr = mem_ptr
+
+    def __del__(self):
+        if not self.mem_ptr:
+            return
+
+        managed_tensor = DLManagedTensor.from_address(self.mem_ptr)
+        if managed_tensor.deleter:
+            managed_tensor.deleter(self.mem_ptr)
+
+
+@ctypes.CFUNCTYPE(None, ctypes.c_void_p)
+def _dlpack_tensor_deleter(managed_ptr) -> None:
+    """A function to deallocate a DLManagedTensor."""
+
+    managed_tensor = DLManagedTensor.from_address(managed_ptr)
+
+    # unreference the source array
+    manager = ctypes.cast(managed_tensor.manager_ctx, ctypes.py_object)
+    ctypes.pythonapi.Py_DecRef(manager)
+
+    # free the DLManagedTensor memory, including shape and strides
+    PyMem_RawFree(ctypes.c_void_p(managed_ptr))
+
+
+@PyCapsule_Destructor
+def _dlpack_capsule_deleter(ptr) -> None:
+    """Destructor for a capsule holding a DLManagedTensor."""
+
+    capsule = ctypes.cast(ptr, ctypes.py_object)
+
+    if PyCapsule_IsValid(capsule, _c_str_dltensor):
+        managed_ptr = PyCapsule_GetPointer(capsule, _c_str_dltensor)
+        managed_tensor = DLManagedTensor.from_address(managed_ptr)
+        if managed_tensor.deleter:
+            managed_tensor.deleter(managed_ptr)
+
+
+def _device_to_dlpack(wp_device: warp._src.context.Device) -> DLDevice:
+    dl_device = DLDevice()
+
+    if wp_device.is_cpu:
+        dl_device.device_type = DLDeviceType.kDLCPU
+        dl_device.device_id = 0
+    elif wp_device.is_cuda:
+        dl_device.device_type = DLDeviceType.kDLCUDA
+        dl_device.device_id = wp_device.ordinal
+    else:
+        raise RuntimeError(f"Invalid device type converting to DLPack: {wp_device}")
+
+    return dl_device
+
+
+def device_to_dlpack(wp_device) -> DLDevice:
+    return _device_to_dlpack(warp.get_device(wp_device))
+
+
+def dtype_to_dlpack(wp_dtype) -> DLDataType:
+    if wp_dtype == warp.bool:
+        return (DLDataTypeCode.kDLBool, 8, 1)
+    if wp_dtype == warp.int8:
+        return (DLDataTypeCode.kDLInt, 8, 1)
+    elif wp_dtype == warp.uint8:
+        return (DLDataTypeCode.kDLUInt, 8, 1)
+    elif wp_dtype == warp.int16:
+        return (DLDataTypeCode.kDLInt, 16, 1)
+    elif wp_dtype == warp.uint16:
+        return (DLDataTypeCode.kDLUInt, 16, 1)
+    elif wp_dtype == warp.int32:
+        return (DLDataTypeCode.kDLInt, 32, 1)
+    elif wp_dtype == warp.uint32:
+        return (DLDataTypeCode.kDLUInt, 32, 1)
+    elif wp_dtype == warp.int64:
+        return (DLDataTypeCode.kDLInt, 64, 1)
+    elif wp_dtype == warp.uint64:
+        return (DLDataTypeCode.kDLUInt, 64, 1)
+    elif wp_dtype == warp.float16:
+        return (DLDataTypeCode.kDLFloat, 16, 1)
+    elif wp_dtype == warp.float32:
+        return (DLDataTypeCode.kDLFloat, 32, 1)
+    elif wp_dtype == warp.float64:
+        return (DLDataTypeCode.kDLFloat, 64, 1)
+    else:
+        raise RuntimeError(f"No conversion from Warp type {wp_dtype} to DLPack type")
+
+
+def dtype_from_dlpack(dl_dtype):
+    # unpack to tuple for easier comparison
+    dl_dtype = (dl_dtype.type_code.value, dl_dtype.bits)
+
+    if dl_dtype == (DLDataTypeCode.kDLUInt, 1):
+        raise RuntimeError("Warp does not support bit boolean types")
+    elif dl_dtype == (DLDataTypeCode.kDLInt, 8):
+        return warp._src.types.int8
+    elif dl_dtype == (DLDataTypeCode.kDLInt, 16):
+        return warp._src.types.int16
+    elif dl_dtype == (DLDataTypeCode.kDLInt, 32):
+        return warp._src.types.int32
+    elif dl_dtype == (DLDataTypeCode.kDLInt, 64):
+        return warp._src.types.int64
+    elif dl_dtype == (DLDataTypeCode.kDLUInt, 8):
+        return warp._src.types.uint8
+    elif dl_dtype == (DLDataTypeCode.kDLUInt, 16):
+        return warp._src.types.uint16
+    elif dl_dtype == (DLDataTypeCode.kDLUInt, 32):
+        return warp._src.types.uint32
+    elif dl_dtype == (DLDataTypeCode.kDLUInt, 64):
+        return warp._src.types.uint64
+    elif dl_dtype == (DLDataTypeCode.kDLFloat, 16):
+        return warp._src.types.float16
+    elif dl_dtype == (DLDataTypeCode.kDLFloat, 32):
+        return warp._src.types.float32
+    elif dl_dtype == (DLDataTypeCode.kDLFloat, 64):
+        return warp._src.types.float64
+    elif dl_dtype == (DLDataTypeCode.kDLComplex, 64):
+        raise RuntimeError("Warp does not support complex types")
+    elif dl_dtype == (DLDataTypeCode.kDLComplex, 128):
+        raise RuntimeError("Warp does not support complex types")
+    else:
+        raise RuntimeError(f"Unknown DLPack datatype {dl_dtype}")
+
+
+def device_from_dlpack(dl_device):
+    assert warp._src.context.runtime is not None, "Warp not initialized, call wp.init() before use"
+
+    if dl_device.device_type.value == DLDeviceType.kDLCPU or dl_device.device_type.value == DLDeviceType.kDLCUDAHost:
+        return warp._src.context.runtime.cpu_device
+    elif (
+        dl_device.device_type.value == DLDeviceType.kDLCUDA
+        or dl_device.device_type.value == DLDeviceType.kDLCUDAManaged
+    ):
+        return warp._src.context.runtime.cuda_devices[dl_device.device_id]
+    else:
+        raise RuntimeError(f"Unknown device type from DLPack: {dl_device.device_type.value}")
+
+
+def shape_to_dlpack(shape):
+    a = (ctypes.c_int64 * len(shape))(*shape)
+    return a
+
+
+def strides_to_dlpack(strides, dtype):
+    # convert from byte count to element count
+    ndim = len(strides)
+    a = (ctypes.c_int64 * ndim)()
+    dtype_size = warp._src.types.type_size_in_bytes(dtype)
+    for i in range(ndim):
+        a[i] = strides[i] // dtype_size
+    return a
+
+
+def to_dlpack(wp_array: warp.array):
+    """Convert a Warp array to another type of DLPack-compatible array.
+
+    Args:
+        wp_array: The source Warp array that will be converted.
+
+    Returns:
+        A capsule containing a DLManagedTensor that can be converted
+        to another array type without copying the underlying memory.
+    """
+
+    # DLPack does not support structured arrays
+    if isinstance(wp_array.dtype, warp._src.codegen.Struct):
+        raise RuntimeError("Cannot convert structured Warp arrays to DLPack.")
+
+    # handle vector types
+    if hasattr(wp_array.dtype, "_wp_scalar_type_"):
+        # vector type, flatten the dimensions into one tuple
+        target_dtype = wp_array.dtype._wp_scalar_type_
+        target_ndim = wp_array.ndim + len(wp_array.dtype._shape_)
+        target_shape = (*wp_array.shape, *wp_array.dtype._shape_)
+        dtype_strides = warp._src.types.strides_from_shape(wp_array.dtype._shape_, wp_array.dtype._wp_scalar_type_)
+        target_strides = (*wp_array.strides, *dtype_strides)
+    else:
+        # scalar type
+        target_dtype = wp_array.dtype
+        target_ndim = wp_array.ndim
+        target_shape = wp_array.shape
+        target_strides = wp_array.strides
+
+    if wp_array.pinned:
+        dl_device = DLDevice()
+        dl_device.device_type = DLDeviceType.kDLCUDAHost
+        dl_device.device_id = 0
+    else:
+        dl_device = _device_to_dlpack(wp_array.device)
+
+    # allocate DLManagedTensor, shape, and strides together
+    managed_tensor_size = ctypes.sizeof(DLManagedTensor)
+    padding = managed_tensor_size & 7
+    shape_size = target_ndim * 8
+    mem_size = managed_tensor_size + padding + 2 * shape_size
+    mem_ptr = PyMem_RawMalloc(mem_size)
+    assert mem_ptr, "Failed to allocate memory for DLManagedTensor"
+
+    # set managed tensor attributes
+    managed_tensor = DLManagedTensor.from_address(mem_ptr)
+    managed_tensor.dl_tensor.data = wp_array.ptr
+    managed_tensor.dl_tensor.device = dl_device
+    managed_tensor.dl_tensor.ndim = target_ndim
+    managed_tensor.dl_tensor.dtype = dtype_to_dlpack(target_dtype)
+    managed_tensor.dl_tensor.byte_offset = 0
+
+    # shape
+    shape_offset = managed_tensor_size + padding
+    shape_ptr = ctypes.cast(mem_ptr + shape_offset, ctypes.POINTER(ctypes.c_int64))
+    for i in range(target_ndim):
+        shape_ptr[i] = target_shape[i]
+    managed_tensor.dl_tensor.shape = shape_ptr
+
+    # strides, if not contiguous
+    if wp_array.is_contiguous:
+        managed_tensor.dl_tensor.strides = None
+    else:
+        stride_offset = shape_offset + shape_size
+        stride_ptr = ctypes.cast(mem_ptr + stride_offset, ctypes.POINTER(ctypes.c_int64))
+        dtype_size = warp._src.types.type_size_in_bytes(target_dtype)
+        for i in range(target_ndim):
+            stride_ptr[i] = target_strides[i] // dtype_size
+        managed_tensor.dl_tensor.strides = stride_ptr
+
+    # DLManagedTensor holds a reference to the source array
+    managed_tensor.manager_ctx = id(wp_array)
+    Py_IncRef(wp_array)
+
+    managed_tensor.deleter = _dlpack_tensor_deleter
+
+    # NOTE: jax.ffi.pycapsule() defines the PyCapsule_New() argtypes incorrectly, which causes problems.
+    # Here we make sure that the PyCapsule_Destructor callback is correctly defined.
+    PyCapsule_New = ctypes.pythonapi.PyCapsule_New
+    PyCapsule_New.argtypes = [ctypes.c_void_p, ctypes.c_char_p, PyCapsule_Destructor]
+    PyCapsule_New.restype = ctypes.py_object
+
+    capsule = PyCapsule_New(
+        ctypes.byref(managed_tensor),
+        _c_str_dltensor,
+        _dlpack_capsule_deleter,
+    )
+
+    return capsule
+
+
+def dtype_is_compatible(dl_dtype, wp_dtype):
+    if dl_dtype.bits % 8 != 0:
+        raise RuntimeError("Data types with less than 8 bits are not supported")
+
+    if dl_dtype.type_code.value == DLDataTypeCode.kDLFloat:
+        if dl_dtype.bits == 16:
+            return wp_dtype == warp.float16
+        elif dl_dtype.bits == 32:
+            return wp_dtype == warp.float32
+        elif dl_dtype.bits == 64:
+            return wp_dtype == warp.float64
+    elif dl_dtype.type_code.value == DLDataTypeCode.kDLInt or dl_dtype.type_code.value == DLDataTypeCode.kDLUInt:
+        if dl_dtype.bits == 8:
+            return wp_dtype == warp.int8 or wp_dtype == warp.uint8
+        elif dl_dtype.bits == 16:
+            return wp_dtype == warp.int16 or wp_dtype == warp.uint16
+        elif dl_dtype.bits == 32:
+            return wp_dtype == warp.int32 or wp_dtype == warp.uint32
+        elif dl_dtype.bits == 64:
+            return wp_dtype == warp.int64 or wp_dtype == warp.uint64
+    elif dl_dtype.type_code.value == DLDataTypeCode.kDLBfloat:
+        raise RuntimeError("Bfloat data type is not supported")
+    elif dl_dtype.type_code.value == DLDataTypeCode.kDLComplex:
+        raise RuntimeError("Complex data types are not supported")
+    else:
+        raise RuntimeError(f"Unsupported DLPack dtype {(str(dl_dtype.type_code), dl_dtype.bits)}")
+
+
+def _from_dlpack(capsule, dtype=None) -> warp.array:
+    """Convert a DLPack capsule into a Warp array without copying.
+
+    Args:
+        capsule: A DLPack capsule wrapping an external array or tensor.
+        dtype: An optional Warp data type to interpret the source data.
+
+    Returns:
+        A new Warp array that uses the same underlying memory as the input capsule.
+    """
+
+    assert PyCapsule_IsValid(capsule, _c_str_dltensor), "Invalid capsule"
+    mem_ptr = PyCapsule_GetPointer(capsule, _c_str_dltensor)
+    managed_tensor = DLManagedTensor.from_address(mem_ptr)
+
+    dlt = managed_tensor.dl_tensor
+
+    device = device_from_dlpack(dlt.device)
+    pinned = dlt.device.device_type.value == DLDeviceType.kDLCUDAHost
+    shape = tuple(dlt.shape[dim] for dim in range(dlt.ndim))
+
+    # strides, if not contiguous
+    itemsize = dlt.dtype.bits // 8
+    if dlt.strides:
+        strides = tuple(dlt.strides[dim] * itemsize for dim in range(dlt.ndim))
+    else:
+        strides = None
+
+    # handle multi-lane dtypes as another dimension
+    if dlt.dtype.lanes > 1:
+        shape = (*shape, dlt.dtype.lanes)
+        if strides is not None:
+            strides = (*strides, itemsize)
+
+    if dtype is None:
+        # automatically detect dtype
+        dtype = dtype_from_dlpack(dlt.dtype)
+
+    elif hasattr(dtype, "_wp_scalar_type_"):
+        # handle vector/matrix types
+
+        if not dtype_is_compatible(dlt.dtype, dtype._wp_scalar_type_):
+            raise RuntimeError(f"Incompatible data types: {dlt.dtype} and {dtype}")
+
+        dtype_shape = dtype._shape_
+        dtype_dims = len(dtype._shape_)
+        if dtype_dims > len(shape) or dtype_shape != shape[-dtype_dims:]:
+            raise RuntimeError(
+                f"Could not convert DLPack tensor with shape {shape} to Warp array with dtype={dtype}, ensure that source inner shape is {dtype_shape}"
+            )
+
+        if strides is not None:
+            # ensure the inner strides are contiguous
+            stride = itemsize
+            for i in range(dtype_dims):
+                if strides[-i - 1] != stride:
+                    raise RuntimeError(
+                        f"Could not convert DLPack tensor with shape {shape} to Warp array with dtype={dtype}, because the source inner strides are not contiguous"
+                    )
+                stride *= dtype_shape[-i - 1]
+            strides = tuple(strides[:-dtype_dims]) or (itemsize,)
+
+        shape = tuple(shape[:-dtype_dims]) or (1,)
+
+    elif not dtype_is_compatible(dlt.dtype, dtype):
+        # incompatible dtype requested
+        raise RuntimeError(f"Incompatible data types: {dlt.dtype} and {dtype}")
+
+    a = warp._src.types.array(
+        ptr=dlt.data, dtype=dtype, shape=shape, strides=strides, copy=False, device=device, pinned=pinned
+    )
+
+    # take ownership of the DLManagedTensor
+    a._dlpack_tensor_holder = _DLPackTensorHolder(mem_ptr)
+
+    # rename the capsule so that it no longer owns the DLManagedTensor
+    PyCapsule_SetName(capsule, _c_str_used_dltensor)
+
+    return a
+
+
+def from_dlpack(source, dtype=None) -> warp.array:
+    """Convert a source array or DLPack capsule into a Warp array without copying.
+
+    Args:
+        source: A DLPack-compatible array or PyCapsule
+        dtype: An optional Warp data type to interpret the source data.
+
+    Returns:
+        A new Warp array that uses the same underlying memory as the input
+        pycapsule.
+    """
+
+    # See https://data-apis.org/array-api/2022.12/API_specification/generated/array_api.array.__dlpack__.html
+
+    if hasattr(source, "__dlpack__"):
+        device_type, device_id = source.__dlpack_device__()
+        # Check if the source lives on a CUDA device
+        if device_type in (DLDeviceType.kDLCUDA, DLDeviceType.kDLCUDAManaged):
+            # Assume that the caller will use the array on its device's current stream.
+            # Note that we pass 1 for the null stream, per DLPack spec.
+            cuda_stream = warp.get_cuda_device(device_id).stream.cuda_stream or 1
+        elif device_type == DLDeviceType.kDLCPU:
+            # No stream sync for CPU arrays.
+            cuda_stream = None
+        elif device_type == DLDeviceType.kDLCUDAHost:
+            # For pinned memory, we sync with the current CUDA device's stream.
+            # Note that we pass 1 for the null stream, per DLPack spec.
+            cuda_stream = warp.get_cuda_device().stream.cuda_stream or 1
+        else:
+            raise TypeError("Unsupported source device")
+
+        capsule = source.__dlpack__(stream=cuda_stream)
+
+    else:
+        # legacy behaviour, assume source is a capsule
+        capsule = source
+
+    return _from_dlpack(capsule, dtype=dtype)
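For reference, a minimal usage sketch of the two entry points defined above, to_dlpack() and from_dlpack(). It assumes the top-level wp.to_dlpack/wp.from_dlpack aliases remain exposed in 1.10.0rc2 and uses a CPU array for simplicity:

import warp as wp

wp.init()

a = wp.zeros(8, dtype=wp.float32, device="cpu")

# Warp array -> DLPack capsule; no copy, the capsule keeps a reference to `a` alive.
capsule = wp.to_dlpack(a)

# Capsule -> Warp array viewing the same memory; the capsule is renamed to
# "used_dltensor" and ownership of the DLManagedTensor moves to the new array.
b = wp.from_dlpack(capsule)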