warp-lang 1.0.2-py3-none-win_amd64.whl → 1.2.0-py3-none-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of warp-lang might be problematic.
- warp/__init__.py +108 -97
- warp/__init__.pyi +1 -1
- warp/bin/warp-clang.dll +0 -0
- warp/bin/warp.dll +0 -0
- warp/build.py +88 -113
- warp/build_dll.py +383 -375
- warp/builtins.py +3693 -3354
- warp/codegen.py +2925 -2792
- warp/config.py +40 -36
- warp/constants.py +49 -45
- warp/context.py +5409 -5102
- warp/dlpack.py +442 -442
- warp/examples/__init__.py +16 -16
- warp/examples/assets/bear.usd +0 -0
- warp/examples/assets/bunny.usd +0 -0
- warp/examples/assets/cartpole.urdf +110 -110
- warp/examples/assets/crazyflie.usd +0 -0
- warp/examples/assets/cube.usd +0 -0
- warp/examples/assets/nv_ant.xml +92 -92
- warp/examples/assets/nv_humanoid.xml +183 -183
- warp/examples/assets/quadruped.urdf +267 -267
- warp/examples/assets/rocks.nvdb +0 -0
- warp/examples/assets/rocks.usd +0 -0
- warp/examples/assets/sphere.usd +0 -0
- warp/examples/benchmarks/benchmark_api.py +381 -383
- warp/examples/benchmarks/benchmark_cloth.py +278 -277
- warp/examples/benchmarks/benchmark_cloth_cupy.py +88 -88
- warp/examples/benchmarks/benchmark_cloth_jax.py +97 -100
- warp/examples/benchmarks/benchmark_cloth_numba.py +146 -142
- warp/examples/benchmarks/benchmark_cloth_numpy.py +77 -77
- warp/examples/benchmarks/benchmark_cloth_pytorch.py +86 -86
- warp/examples/benchmarks/benchmark_cloth_taichi.py +112 -112
- warp/examples/benchmarks/benchmark_cloth_warp.py +145 -146
- warp/examples/benchmarks/benchmark_launches.py +293 -295
- warp/examples/browse.py +29 -29
- warp/examples/core/example_dem.py +232 -219
- warp/examples/core/example_fluid.py +291 -267
- warp/examples/core/example_graph_capture.py +142 -126
- warp/examples/core/example_marching_cubes.py +186 -174
- warp/examples/core/example_mesh.py +172 -155
- warp/examples/core/example_mesh_intersect.py +203 -193
- warp/examples/core/example_nvdb.py +174 -170
- warp/examples/core/example_raycast.py +103 -90
- warp/examples/core/example_raymarch.py +197 -178
- warp/examples/core/example_render_opengl.py +183 -141
- warp/examples/core/example_sph.py +403 -387
- warp/examples/core/example_torch.py +219 -181
- warp/examples/core/example_wave.py +261 -248
- warp/examples/fem/bsr_utils.py +378 -380
- warp/examples/fem/example_apic_fluid.py +432 -389
- warp/examples/fem/example_burgers.py +262 -0
- warp/examples/fem/example_convection_diffusion.py +180 -168
- warp/examples/fem/example_convection_diffusion_dg.py +217 -209
- warp/examples/fem/example_deformed_geometry.py +175 -159
- warp/examples/fem/example_diffusion.py +199 -173
- warp/examples/fem/example_diffusion_3d.py +178 -152
- warp/examples/fem/example_diffusion_mgpu.py +219 -214
- warp/examples/fem/example_mixed_elasticity.py +242 -222
- warp/examples/fem/example_navier_stokes.py +257 -243
- warp/examples/fem/example_stokes.py +218 -192
- warp/examples/fem/example_stokes_transfer.py +263 -249
- warp/examples/fem/mesh_utils.py +133 -109
- warp/examples/fem/plot_utils.py +292 -287
- warp/examples/optim/example_bounce.py +258 -246
- warp/examples/optim/example_cloth_throw.py +220 -209
- warp/examples/optim/example_diffray.py +564 -536
- warp/examples/optim/example_drone.py +862 -835
- warp/examples/optim/example_inverse_kinematics.py +174 -168
- warp/examples/optim/example_inverse_kinematics_torch.py +183 -169
- warp/examples/optim/example_spring_cage.py +237 -231
- warp/examples/optim/example_trajectory.py +221 -199
- warp/examples/optim/example_walker.py +304 -293
- warp/examples/sim/example_cartpole.py +137 -129
- warp/examples/sim/example_cloth.py +194 -186
- warp/examples/sim/example_granular.py +122 -111
- warp/examples/sim/example_granular_collision_sdf.py +195 -186
- warp/examples/sim/example_jacobian_ik.py +234 -214
- warp/examples/sim/example_particle_chain.py +116 -105
- warp/examples/sim/example_quadruped.py +191 -180
- warp/examples/sim/example_rigid_chain.py +195 -187
- warp/examples/sim/example_rigid_contact.py +187 -177
- warp/examples/sim/example_rigid_force.py +125 -125
- warp/examples/sim/example_rigid_gyroscopic.py +107 -95
- warp/examples/sim/example_rigid_soft_contact.py +132 -122
- warp/examples/sim/example_soft_body.py +188 -177
- warp/fabric.py +337 -335
- warp/fem/__init__.py +61 -27
- warp/fem/cache.py +403 -388
- warp/fem/dirichlet.py +178 -179
- warp/fem/domain.py +262 -263
- warp/fem/field/__init__.py +100 -101
- warp/fem/field/field.py +148 -149
- warp/fem/field/nodal_field.py +298 -299
- warp/fem/field/restriction.py +22 -21
- warp/fem/field/test.py +180 -181
- warp/fem/field/trial.py +183 -183
- warp/fem/geometry/__init__.py +16 -19
- warp/fem/geometry/closest_point.py +69 -70
- warp/fem/geometry/deformed_geometry.py +270 -271
- warp/fem/geometry/element.py +748 -744
- warp/fem/geometry/geometry.py +184 -186
- warp/fem/geometry/grid_2d.py +380 -373
- warp/fem/geometry/grid_3d.py +437 -435
- warp/fem/geometry/hexmesh.py +953 -953
- warp/fem/geometry/nanogrid.py +455 -0
- warp/fem/geometry/partition.py +374 -376
- warp/fem/geometry/quadmesh_2d.py +532 -532
- warp/fem/geometry/tetmesh.py +840 -840
- warp/fem/geometry/trimesh_2d.py +577 -577
- warp/fem/integrate.py +1684 -1615
- warp/fem/operator.py +190 -191
- warp/fem/polynomial.py +214 -213
- warp/fem/quadrature/__init__.py +2 -2
- warp/fem/quadrature/pic_quadrature.py +243 -245
- warp/fem/quadrature/quadrature.py +295 -294
- warp/fem/space/__init__.py +179 -292
- warp/fem/space/basis_space.py +522 -489
- warp/fem/space/collocated_function_space.py +100 -105
- warp/fem/space/dof_mapper.py +236 -236
- warp/fem/space/function_space.py +148 -145
- warp/fem/space/grid_2d_function_space.py +148 -267
- warp/fem/space/grid_3d_function_space.py +167 -306
- warp/fem/space/hexmesh_function_space.py +253 -352
- warp/fem/space/nanogrid_function_space.py +202 -0
- warp/fem/space/partition.py +350 -350
- warp/fem/space/quadmesh_2d_function_space.py +261 -369
- warp/fem/space/restriction.py +161 -160
- warp/fem/space/shape/__init__.py +90 -15
- warp/fem/space/shape/cube_shape_function.py +728 -738
- warp/fem/space/shape/shape_function.py +102 -103
- warp/fem/space/shape/square_shape_function.py +611 -611
- warp/fem/space/shape/tet_shape_function.py +565 -567
- warp/fem/space/shape/triangle_shape_function.py +429 -429
- warp/fem/space/tetmesh_function_space.py +224 -292
- warp/fem/space/topology.py +297 -295
- warp/fem/space/trimesh_2d_function_space.py +153 -221
- warp/fem/types.py +77 -77
- warp/fem/utils.py +495 -495
- warp/jax.py +166 -141
- warp/jax_experimental.py +341 -339
- warp/native/array.h +1081 -1025
- warp/native/builtin.h +1603 -1560
- warp/native/bvh.cpp +402 -398
- warp/native/bvh.cu +533 -525
- warp/native/bvh.h +430 -429
- warp/native/clang/clang.cpp +496 -464
- warp/native/crt.cpp +42 -32
- warp/native/crt.h +352 -335
- warp/native/cuda_crt.h +1049 -1049
- warp/native/cuda_util.cpp +549 -540
- warp/native/cuda_util.h +288 -203
- warp/native/cutlass_gemm.cpp +34 -34
- warp/native/cutlass_gemm.cu +372 -372
- warp/native/error.cpp +66 -66
- warp/native/error.h +27 -27
- warp/native/exports.h +187 -0
- warp/native/fabric.h +228 -228
- warp/native/hashgrid.cpp +301 -278
- warp/native/hashgrid.cu +78 -77
- warp/native/hashgrid.h +227 -227
- warp/native/initializer_array.h +32 -32
- warp/native/intersect.h +1204 -1204
- warp/native/intersect_adj.h +365 -365
- warp/native/intersect_tri.h +322 -322
- warp/native/marching.cpp +2 -2
- warp/native/marching.cu +497 -497
- warp/native/marching.h +2 -2
- warp/native/mat.h +1545 -1498
- warp/native/matnn.h +333 -333
- warp/native/mesh.cpp +203 -203
- warp/native/mesh.cu +292 -293
- warp/native/mesh.h +1887 -1887
- warp/native/nanovdb/GridHandle.h +366 -0
- warp/native/nanovdb/HostBuffer.h +590 -0
- warp/native/nanovdb/NanoVDB.h +6624 -4782
- warp/native/nanovdb/PNanoVDB.h +3390 -2553
- warp/native/noise.h +850 -850
- warp/native/quat.h +1112 -1085
- warp/native/rand.h +303 -299
- warp/native/range.h +108 -108
- warp/native/reduce.cpp +156 -156
- warp/native/reduce.cu +348 -348
- warp/native/runlength_encode.cpp +61 -61
- warp/native/runlength_encode.cu +46 -46
- warp/native/scan.cpp +30 -30
- warp/native/scan.cu +36 -36
- warp/native/scan.h +7 -7
- warp/native/solid_angle.h +442 -442
- warp/native/sort.cpp +94 -94
- warp/native/sort.cu +97 -97
- warp/native/sort.h +14 -14
- warp/native/sparse.cpp +337 -337
- warp/native/sparse.cu +544 -544
- warp/native/spatial.h +630 -630
- warp/native/svd.h +562 -562
- warp/native/temp_buffer.h +30 -30
- warp/native/vec.h +1177 -1133
- warp/native/volume.cpp +529 -297
- warp/native/volume.cu +58 -32
- warp/native/volume.h +960 -538
- warp/native/volume_builder.cu +446 -425
- warp/native/volume_builder.h +34 -19
- warp/native/volume_impl.h +61 -0
- warp/native/warp.cpp +1057 -1052
- warp/native/warp.cu +2949 -2828
- warp/native/warp.h +321 -305
- warp/optim/__init__.py +9 -9
- warp/optim/adam.py +120 -120
- warp/optim/linear.py +1104 -939
- warp/optim/sgd.py +104 -92
- warp/render/__init__.py +10 -10
- warp/render/render_opengl.py +3356 -3204
- warp/render/render_usd.py +768 -749
- warp/render/utils.py +152 -150
- warp/sim/__init__.py +52 -59
- warp/sim/articulation.py +685 -685
- warp/sim/collide.py +1594 -1590
- warp/sim/import_mjcf.py +489 -481
- warp/sim/import_snu.py +220 -221
- warp/sim/import_urdf.py +536 -516
- warp/sim/import_usd.py +887 -881
- warp/sim/inertia.py +316 -317
- warp/sim/integrator.py +234 -233
- warp/sim/integrator_euler.py +1956 -1956
- warp/sim/integrator_featherstone.py +1917 -1991
- warp/sim/integrator_xpbd.py +3288 -3312
- warp/sim/model.py +4473 -4314
- warp/sim/particles.py +113 -112
- warp/sim/render.py +417 -403
- warp/sim/utils.py +413 -410
- warp/sparse.py +1289 -1227
- warp/stubs.py +2192 -2469
- warp/tape.py +1162 -225
- warp/tests/__init__.py +1 -1
- warp/tests/__main__.py +4 -4
- warp/tests/assets/test_index_grid.nvdb +0 -0
- warp/tests/assets/torus.usda +105 -105
- warp/tests/aux_test_class_kernel.py +26 -26
- warp/tests/aux_test_compile_consts_dummy.py +10 -10
- warp/tests/aux_test_conditional_unequal_types_kernels.py +21 -21
- warp/tests/aux_test_dependent.py +20 -22
- warp/tests/aux_test_grad_customs.py +21 -23
- warp/tests/aux_test_reference.py +9 -11
- warp/tests/aux_test_reference_reference.py +8 -10
- warp/tests/aux_test_square.py +15 -17
- warp/tests/aux_test_unresolved_func.py +14 -14
- warp/tests/aux_test_unresolved_symbol.py +14 -14
- warp/tests/disabled_kinematics.py +237 -239
- warp/tests/run_coverage_serial.py +31 -31
- warp/tests/test_adam.py +155 -157
- warp/tests/test_arithmetic.py +1088 -1124
- warp/tests/test_array.py +2415 -2326
- warp/tests/test_array_reduce.py +148 -150
- warp/tests/test_async.py +666 -656
- warp/tests/test_atomic.py +139 -141
- warp/tests/test_bool.py +212 -149
- warp/tests/test_builtins_resolution.py +1290 -1292
- warp/tests/test_bvh.py +162 -171
- warp/tests/test_closest_point_edge_edge.py +227 -228
- warp/tests/test_codegen.py +562 -553
- warp/tests/test_compile_consts.py +217 -101
- warp/tests/test_conditional.py +244 -246
- warp/tests/test_copy.py +230 -215
- warp/tests/test_ctypes.py +630 -632
- warp/tests/test_dense.py +65 -67
- warp/tests/test_devices.py +89 -98
- warp/tests/test_dlpack.py +528 -529
- warp/tests/test_examples.py +403 -378
- warp/tests/test_fabricarray.py +952 -955
- warp/tests/test_fast_math.py +60 -54
- warp/tests/test_fem.py +1298 -1278
- warp/tests/test_fp16.py +128 -130
- warp/tests/test_func.py +336 -337
- warp/tests/test_generics.py +596 -571
- warp/tests/test_grad.py +885 -640
- warp/tests/test_grad_customs.py +331 -336
- warp/tests/test_hash_grid.py +208 -164
- warp/tests/test_import.py +37 -39
- warp/tests/test_indexedarray.py +1132 -1134
- warp/tests/test_intersect.py +65 -67
- warp/tests/test_jax.py +305 -307
- warp/tests/test_large.py +169 -164
- warp/tests/test_launch.py +352 -354
- warp/tests/test_lerp.py +217 -261
- warp/tests/test_linear_solvers.py +189 -171
- warp/tests/test_lvalue.py +419 -493
- warp/tests/test_marching_cubes.py +63 -65
- warp/tests/test_mat.py +1799 -1827
- warp/tests/test_mat_lite.py +113 -115
- warp/tests/test_mat_scalar_ops.py +2905 -2889
- warp/tests/test_math.py +124 -193
- warp/tests/test_matmul.py +498 -499
- warp/tests/test_matmul_lite.py +408 -410
- warp/tests/test_mempool.py +186 -190
- warp/tests/test_mesh.py +281 -324
- warp/tests/test_mesh_query_aabb.py +226 -241
- warp/tests/test_mesh_query_point.py +690 -702
- warp/tests/test_mesh_query_ray.py +290 -303
- warp/tests/test_mlp.py +274 -276
- warp/tests/test_model.py +108 -110
- warp/tests/test_module_hashing.py +111 -0
- warp/tests/test_modules_lite.py +36 -39
- warp/tests/test_multigpu.py +161 -163
- warp/tests/test_noise.py +244 -248
- warp/tests/test_operators.py +248 -250
- warp/tests/test_options.py +121 -125
- warp/tests/test_peer.py +131 -137
- warp/tests/test_pinned.py +76 -78
- warp/tests/test_print.py +52 -54
- warp/tests/test_quat.py +2084 -2086
- warp/tests/test_rand.py +324 -288
- warp/tests/test_reload.py +207 -217
- warp/tests/test_rounding.py +177 -179
- warp/tests/test_runlength_encode.py +188 -190
- warp/tests/test_sim_grad.py +241 -0
- warp/tests/test_sim_kinematics.py +89 -97
- warp/tests/test_smoothstep.py +166 -168
- warp/tests/test_snippet.py +303 -266
- warp/tests/test_sparse.py +466 -460
- warp/tests/test_spatial.py +2146 -2148
- warp/tests/test_special_values.py +362 -0
- warp/tests/test_streams.py +484 -473
- warp/tests/test_struct.py +708 -675
- warp/tests/test_tape.py +171 -148
- warp/tests/test_torch.py +741 -743
- warp/tests/test_transient_module.py +85 -87
- warp/tests/test_types.py +554 -659
- warp/tests/test_utils.py +488 -499
- warp/tests/test_vec.py +1262 -1268
- warp/tests/test_vec_lite.py +71 -73
- warp/tests/test_vec_scalar_ops.py +2097 -2099
- warp/tests/test_verify_fp.py +92 -94
- warp/tests/test_volume.py +961 -736
- warp/tests/test_volume_write.py +338 -265
- warp/tests/unittest_serial.py +38 -37
- warp/tests/unittest_suites.py +367 -359
- warp/tests/unittest_utils.py +434 -578
- warp/tests/unused_test_misc.py +69 -71
- warp/tests/walkthrough_debug.py +85 -85
- warp/thirdparty/appdirs.py +598 -598
- warp/thirdparty/dlpack.py +143 -143
- warp/thirdparty/unittest_parallel.py +563 -561
- warp/torch.py +321 -295
- warp/types.py +4941 -4450
- warp/utils.py +1008 -821
- {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/LICENSE.md +126 -126
- {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/METADATA +365 -400
- warp_lang-1.2.0.dist-info/RECORD +359 -0
- warp/examples/assets/cube.usda +0 -42
- warp/examples/assets/sphere.usda +0 -56
- warp/examples/assets/torus.usda +0 -105
- warp/examples/fem/example_convection_diffusion_dg0.py +0 -194
- warp/native/nanovdb/PNanoVDBWrite.h +0 -295
- warp_lang-1.0.2.dist-info/RECORD +0 -352
- {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/WHEEL +0 -0
- {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/top_level.txt +0 -0
warp/dlpack.py
CHANGED
@@ -1,442 +1,442 @@
+# Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
+# NVIDIA CORPORATION and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION is strictly prohibited.
+
+# Python specification for DLpack:
+# https://dmlc.github.io/dlpack/latest/python_spec.html
+
+import ctypes
+
+import warp
+from warp.thirdparty.dlpack import (
+    DLDataType,
+    DLDataTypeCode,
+    DLDevice,
+    DLDeviceType,
+    DLManagedTensor,
+    _c_str_dltensor,
+)
+
+_c_str_used_dltensor = b"used_dltensor"
+
+PyMem_RawMalloc = ctypes.pythonapi.PyMem_RawMalloc
+PyMem_RawMalloc.argtypes = [ctypes.c_size_t]
+PyMem_RawMalloc.restype = ctypes.c_void_p
+
+PyMem_RawFree = ctypes.pythonapi.PyMem_RawFree
+PyMem_RawFree.argtypes = [ctypes.c_void_p]
+PyMem_RawFree.restype = None
+
+Py_IncRef = ctypes.pythonapi.Py_IncRef
+Py_IncRef.argtypes = [ctypes.py_object]
+Py_IncRef.restype = None
+
+Py_DecRef = ctypes.pythonapi.Py_DecRef
+Py_DecRef.argtypes = [ctypes.py_object]
+Py_DecRef.restype = None
+
+PyCapsule_Destructor = ctypes.CFUNCTYPE(None, ctypes.c_void_p)
+
+PyCapsule_New = ctypes.pythonapi.PyCapsule_New
+PyCapsule_New.argtypes = [ctypes.c_void_p, ctypes.c_char_p, PyCapsule_Destructor]
+PyCapsule_New.restype = ctypes.py_object
+
+PyCapsule_IsValid = ctypes.pythonapi.PyCapsule_IsValid
+PyCapsule_IsValid.argtypes = [ctypes.py_object, ctypes.c_char_p]
+PyCapsule_IsValid.restype = ctypes.c_int
+
+PyCapsule_GetPointer = ctypes.pythonapi.PyCapsule_GetPointer
+PyCapsule_GetPointer.argtypes = [ctypes.py_object, ctypes.c_char_p]
+PyCapsule_GetPointer.restype = ctypes.c_void_p
+
+PyCapsule_SetName = ctypes.pythonapi.PyCapsule_SetName
+PyCapsule_SetName.argtypes = [ctypes.py_object, ctypes.c_char_p]
+PyCapsule_SetName.restype = ctypes.c_int
+
+
+class _DLPackTensorHolder:
+    """Class responsible for deleting DLManagedTensor memory after ownership is transferred from a capsule."""
+
+    def __init__(self, mem_ptr):
+        self.mem_ptr = mem_ptr
+
+    def __del__(self):
+        managed_tensor = DLManagedTensor.from_address(self.mem_ptr)
+        if managed_tensor.deleter:
+            managed_tensor.deleter(self.mem_ptr)
+
+
+@ctypes.CFUNCTYPE(None, ctypes.c_void_p)
+def _dlpack_tensor_deleter(managed_ptr) -> None:
+    """A function to deallocate a DLManagedTensor."""
+
+    managed_tensor = DLManagedTensor.from_address(managed_ptr)
+
+    # unreference the source array
+    manager = ctypes.cast(managed_tensor.manager_ctx, ctypes.py_object)
+    ctypes.pythonapi.Py_DecRef(manager)
+
+    # free the DLManagedTensor memory, including shape and strides
+    PyMem_RawFree(ctypes.c_void_p(managed_ptr))
+
+
+@PyCapsule_Destructor
+def _dlpack_capsule_deleter(ptr) -> None:
+    """Destructor for a capsule holding a DLManagedTensor."""
+
+    capsule = ctypes.cast(ptr, ctypes.py_object)
+
+    if ctypes.pythonapi.PyCapsule_IsValid(capsule, _c_str_dltensor):
+        managed_ptr = ctypes.pythonapi.PyCapsule_GetPointer(capsule, _c_str_dltensor)
+        managed_tensor = DLManagedTensor.from_address(managed_ptr)
+        if managed_tensor.deleter:
+            managed_tensor.deleter(managed_ptr)
+
+
+def _device_to_dlpack(wp_device: warp.context.Device) -> DLDevice:
+    dl_device = DLDevice()
+
+    if wp_device.is_cpu:
+        dl_device.device_type = DLDeviceType.kDLCPU
+        dl_device.device_id = 0
+    elif wp_device.is_cuda:
+        dl_device.device_type = DLDeviceType.kDLCUDA
+        dl_device.device_id = wp_device.ordinal
+    else:
+        raise RuntimeError(f"Invalid device type converting to DLPack: {wp_device}")
+
+    return dl_device
+
+
+def device_to_dlpack(wp_device) -> DLDevice:
+    return _device_to_dlpack(warp.get_device(wp_device))
+
+
+def dtype_to_dlpack(wp_dtype) -> DLDataType:
+    if wp_dtype == warp.int8:
+        return (DLDataTypeCode.kDLInt, 8, 1)
+    elif wp_dtype == warp.uint8:
+        return (DLDataTypeCode.kDLUInt, 8, 1)
+    elif wp_dtype == warp.int16:
+        return (DLDataTypeCode.kDLInt, 16, 1)
+    elif wp_dtype == warp.uint16:
+        return (DLDataTypeCode.kDLUInt, 16, 1)
+    elif wp_dtype == warp.int32:
+        return (DLDataTypeCode.kDLInt, 32, 1)
+    elif wp_dtype == warp.uint32:
+        return (DLDataTypeCode.kDLUInt, 32, 1)
+    elif wp_dtype == warp.int64:
+        return (DLDataTypeCode.kDLInt, 64, 1)
+    elif wp_dtype == warp.uint64:
+        return (DLDataTypeCode.kDLUInt, 64, 1)
+    elif wp_dtype == warp.float16:
+        return (DLDataTypeCode.kDLFloat, 16, 1)
+    elif wp_dtype == warp.float32:
+        return (DLDataTypeCode.kDLFloat, 32, 1)
+    elif wp_dtype == warp.float64:
+        return (DLDataTypeCode.kDLFloat, 64, 1)
+    else:
+        raise RuntimeError(f"No conversion from Warp type {wp_dtype} to DLPack type")
+
+
+def dtype_from_dlpack(dl_dtype):
+    # unpack to tuple for easier comparison
+    dl_dtype = (dl_dtype.type_code.value, dl_dtype.bits)
+
+    if dl_dtype == (DLDataTypeCode.kDLUInt, 1):
+        raise RuntimeError("Warp does not support bit boolean types")
+    elif dl_dtype == (DLDataTypeCode.kDLInt, 8):
+        return warp.types.int8
+    elif dl_dtype == (DLDataTypeCode.kDLInt, 16):
+        return warp.types.int16
+    elif dl_dtype == (DLDataTypeCode.kDLInt, 32):
+        return warp.types.int32
+    elif dl_dtype == (DLDataTypeCode.kDLInt, 64):
+        return warp.types.int64
+    elif dl_dtype == (DLDataTypeCode.kDLUInt, 8):
+        return warp.types.uint8
+    elif dl_dtype == (DLDataTypeCode.kDLUInt, 16):
+        return warp.types.uint16
+    elif dl_dtype == (DLDataTypeCode.kDLUInt, 32):
+        return warp.types.uint32
+    elif dl_dtype == (DLDataTypeCode.kDLUInt, 64):
+        return warp.types.uint64
+    elif dl_dtype == (DLDataTypeCode.kDLFloat, 16):
+        return warp.types.float16
+    elif dl_dtype == (DLDataTypeCode.kDLFloat, 32):
+        return warp.types.float32
+    elif dl_dtype == (DLDataTypeCode.kDLFloat, 64):
+        return warp.types.float64
+    elif dl_dtype == (DLDataTypeCode.kDLComplex, 64):
+        raise RuntimeError("Warp does not support complex types")
+    elif dl_dtype == (DLDataTypeCode.kDLComplex, 128):
+        raise RuntimeError("Warp does not support complex types")
+    else:
+        raise RuntimeError(f"Unknown DLPack datatype {dl_dtype}")
+
+
+def device_from_dlpack(dl_device):
+    assert warp.context.runtime is not None, "Warp not initialized, call wp.init() before use"
+
+    if dl_device.device_type.value == DLDeviceType.kDLCPU or dl_device.device_type.value == DLDeviceType.kDLCUDAHost:
+        return warp.context.runtime.cpu_device
+    elif (
+        dl_device.device_type.value == DLDeviceType.kDLCUDA
+        or dl_device.device_type.value == DLDeviceType.kDLCUDAManaged
+    ):
+        return warp.context.runtime.cuda_devices[dl_device.device_id]
+    else:
+        raise RuntimeError(f"Unknown device type from DLPack: {dl_device.device_type.value}")
+
+
+def shape_to_dlpack(shape):
+    a = (ctypes.c_int64 * len(shape))(*shape)
+    return a
+
+
+def strides_to_dlpack(strides, dtype):
+    # convert from byte count to element count
+    ndim = len(strides)
+    a = (ctypes.c_int64 * ndim)()
+    dtype_size = warp.types.type_size_in_bytes(dtype)
+    for i in range(ndim):
+        a[i] = strides[i] // dtype_size
+    return a
+
+
+def to_dlpack(wp_array: warp.array):
+    """Convert a Warp array to another type of DLPack-compatible array.
+
+    Args:
+        wp_array: The source Warp array that will be converted.
+
+    Returns:
+        A capsule containing a DLManagedTensor that can be converted
+        to another array type without copying the underlying memory.
+    """
+
+    # DLPack does not support structured arrays
+    if isinstance(wp_array.dtype, warp.codegen.Struct):
+        raise RuntimeError("Cannot convert structured Warp arrays to DLPack.")
+
+    # handle vector types
+    if hasattr(wp_array.dtype, "_wp_scalar_type_"):
+        # vector type, flatten the dimensions into one tuple
+        target_dtype = wp_array.dtype._wp_scalar_type_
+        target_ndim = wp_array.ndim + len(wp_array.dtype._shape_)
+        target_shape = (*wp_array.shape, *wp_array.dtype._shape_)
+        dtype_strides = warp.types.strides_from_shape(wp_array.dtype._shape_, wp_array.dtype._wp_scalar_type_)
+        target_strides = (*wp_array.strides, *dtype_strides)
+    else:
+        # scalar type
+        target_dtype = wp_array.dtype
+        target_ndim = wp_array.ndim
+        target_shape = wp_array.shape
+        target_strides = wp_array.strides
+
+    if wp_array.pinned:
+        dl_device = DLDevice()
+        dl_device.device_type = DLDeviceType.kDLCUDAHost
+        dl_device.device_id = 0
+    else:
+        dl_device = _device_to_dlpack(wp_array.device)
+
+    # allocate DLManagedTensor, shape, and strides together
+    managed_tensor_size = ctypes.sizeof(DLManagedTensor)
+    padding = managed_tensor_size & 7
+    shape_size = target_ndim * 8
+    mem_size = managed_tensor_size + padding + 2 * shape_size
+    mem_ptr = PyMem_RawMalloc(mem_size)
+    assert mem_ptr, "Failed to allocate memory for DLManagedTensor"
+
+    # set managed tensor attributes
+    managed_tensor = DLManagedTensor.from_address(mem_ptr)
+    managed_tensor.dl_tensor.data = wp_array.ptr
+    managed_tensor.dl_tensor.device = dl_device
+    managed_tensor.dl_tensor.ndim = target_ndim
+    managed_tensor.dl_tensor.dtype = dtype_to_dlpack(target_dtype)
+    managed_tensor.dl_tensor.byte_offset = 0
+
+    # shape
+    shape_offset = managed_tensor_size + padding
+    shape_ptr = ctypes.cast(mem_ptr + shape_offset, ctypes.POINTER(ctypes.c_int64))
+    for i in range(target_ndim):
+        shape_ptr[i] = target_shape[i]
+    managed_tensor.dl_tensor.shape = shape_ptr
+
+    # strides, if not contiguous
+    if wp_array.is_contiguous:
+        managed_tensor.dl_tensor.strides = None
+    else:
+        stride_offset = shape_offset + shape_size
+        stride_ptr = ctypes.cast(mem_ptr + stride_offset, ctypes.POINTER(ctypes.c_int64))
+        dtype_size = warp.types.type_size_in_bytes(target_dtype)
+        for i in range(target_ndim):
+            stride_ptr[i] = target_strides[i] // dtype_size
+        managed_tensor.dl_tensor.strides = stride_ptr
+
+    # DLManagedTensor holds a reference to the source array
+    managed_tensor.manager_ctx = id(wp_array)
+    Py_IncRef(wp_array)
+
+    managed_tensor.deleter = _dlpack_tensor_deleter
+
+    capsule = PyCapsule_New(
+        ctypes.byref(managed_tensor),
+        _c_str_dltensor,
+        _dlpack_capsule_deleter,
+    )
+
+    return capsule
+
+
+def dtype_is_compatible(dl_dtype, wp_dtype):
+    if dl_dtype.bits % 8 != 0:
+        raise RuntimeError("Data types with less than 8 bits are not supported")
+
+    if dl_dtype.type_code.value == DLDataTypeCode.kDLFloat:
+        if dl_dtype.bits == 16:
+            return wp_dtype == warp.float16
+        elif dl_dtype.bits == 32:
+            return wp_dtype == warp.float32
+        elif dl_dtype.bits == 64:
+            return wp_dtype == warp.float64
+    elif dl_dtype.type_code.value == DLDataTypeCode.kDLInt or dl_dtype.type_code.value == DLDataTypeCode.kDLUInt:
+        if dl_dtype.bits == 8:
+            return wp_dtype == warp.int8 or wp_dtype == warp.uint8
+        elif dl_dtype.bits == 16:
+            return wp_dtype == warp.int16 or wp_dtype == warp.uint16
+        elif dl_dtype.bits == 32:
+            return wp_dtype == warp.int32 or wp_dtype == warp.uint32
+        elif dl_dtype.bits == 64:
+            return wp_dtype == warp.int64 or wp_dtype == warp.uint64
+    elif dl_dtype.type_code.value == DLDataTypeCode.kDLBfloat:
+        raise RuntimeError("Bfloat data type is not supported")
+    elif dl_dtype.type_code.value == DLDataTypeCode.kDLComplex:
+        raise RuntimeError("Complex data types are not supported")
+    else:
+        raise RuntimeError(f"Unsupported DLPack dtype {(str(dl_dtype.type_code), dl_dtype.bits)}")
+
+
+def _from_dlpack(capsule, dtype=None) -> warp.array:
+    """Convert a DLPack capsule into a Warp array without copying.
+
+    Args:
+        capsule: A DLPack capsule wrapping an external array or tensor.
+        dtype: An optional Warp data type to interpret the source data.
+
+    Returns:
+        A new Warp array that uses the same underlying memory as the input capsule.
+    """
+
+    assert PyCapsule_IsValid(capsule, _c_str_dltensor), "Invalid capsule"
+    mem_ptr = PyCapsule_GetPointer(capsule, _c_str_dltensor)
+    managed_tensor = DLManagedTensor.from_address(mem_ptr)
+
+    dlt = managed_tensor.dl_tensor
+
+    device = device_from_dlpack(dlt.device)
+    pinned = dlt.device.device_type.value == DLDeviceType.kDLCUDAHost
+    shape = tuple(dlt.shape[dim] for dim in range(dlt.ndim))
+
+    # strides, if not contiguous
+    itemsize = dlt.dtype.bits // 8
+    if dlt.strides:
+        strides = tuple(dlt.strides[dim] * itemsize for dim in range(dlt.ndim))
+    else:
+        strides = None
+
+    # handle multi-lane dtypes as another dimension
+    if dlt.dtype.lanes > 1:
+        shape = (*shape, dlt.dtype.lanes)
+        if strides is not None:
+            strides = (*strides, itemsize)
+
+    if dtype is None:
+        # automatically detect dtype
+        dtype = dtype_from_dlpack(dlt.dtype)
+
+    elif hasattr(dtype, "_wp_scalar_type_"):
+        # handle vector/matrix types
+
+        if not dtype_is_compatible(dlt.dtype, dtype._wp_scalar_type_):
+            raise RuntimeError(f"Incompatible data types: {dlt.dtype} and {dtype}")
+
+        dtype_shape = dtype._shape_
+        dtype_dims = len(dtype._shape_)
+        if dtype_dims > len(shape) or dtype_shape != shape[-dtype_dims:]:
+            raise RuntimeError(
+                f"Could not convert DLPack tensor with shape {shape} to Warp array with dtype={dtype}, ensure that source inner shape is {dtype_shape}"
+            )
+
+        if strides is not None:
+            # ensure the inner strides are contiguous
+            stride = itemsize
+            for i in range(dtype_dims):
+                if strides[-i - 1] != stride:
+                    raise RuntimeError(
+                        f"Could not convert DLPack tensor with shape {shape} to Warp array with dtype={dtype}, because the source inner strides are not contiguous"
+                    )
+                stride *= dtype_shape[-i - 1]
+            strides = tuple(strides[:-dtype_dims]) or (itemsize,)
+
+        shape = tuple(shape[:-dtype_dims]) or (1,)
+
+    elif not dtype_is_compatible(dlt.dtype, dtype):
+        # incompatible dtype requested
+        raise RuntimeError(f"Incompatible data types: {dlt.dtype} and {dtype}")
+
+    a = warp.types.array(
+        ptr=dlt.data, dtype=dtype, shape=shape, strides=strides, copy=False, device=device, pinned=pinned
+    )
+
+    # take ownership of the DLManagedTensor
+    a._dlpack_tensor_holder = _DLPackTensorHolder(mem_ptr)
+
+    # rename the capsule so that it no longer owns the DLManagedTensor
+    PyCapsule_SetName(capsule, _c_str_used_dltensor)
+
+    return a
+
+
+def from_dlpack(source, dtype=None) -> warp.array:
+    """Convert a source array or DLPack capsule into a Warp array without copying.
+
+    Args:
+        source: A DLPack-compatible array or PyCapsule
+        dtype: An optional Warp data type to interpret the source data.
+
+    Returns:
+        A new Warp array that uses the same underlying memory as the input
+        pycapsule.
+    """
+
+    # See https://data-apis.org/array-api/2022.12/API_specification/generated/array_api.array.__dlpack__.html
+
+    if hasattr(source, "__dlpack__"):
+        device_type, device_id = source.__dlpack_device__()
+        # Check if the source lives on a CUDA device
+        if device_type in (DLDeviceType.kDLCUDA, DLDeviceType.kDLCUDAManaged):
+            # Assume that the caller will use the array on its device's current stream.
+            # Note that we pass 1 for the null stream, per DLPack spec.
+            cuda_stream = warp.get_cuda_device(device_id).stream.cuda_stream or 1
+        elif device_type == DLDeviceType.kDLCPU:
+            # No stream sync for CPU arrays.
+            cuda_stream = None
+        elif device_type == DLDeviceType.kDLCUDAHost:
+            # For pinned memory, we sync with the current CUDA device's stream.
+            # Note that we pass 1 for the null stream, per DLPack spec.
+            cuda_stream = warp.get_cuda_device().stream.cuda_stream or 1
+        else:
+            raise TypeError("Unsupported source device")
+
+        capsule = source.__dlpack__(stream=cuda_stream)
+
+    else:
+        # legacy behaviour, assume source is a capsule
+        capsule = source
+
+    return _from_dlpack(capsule, dtype=dtype)
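Taken together, to_dlpack() and from_dlpack() above form Warp's zero-copy DLPack interop layer. The following round-trip sketch is not part of the package diff; it assumes the public wp.init() and wp.zeros() helpers (not shown here) behave as in the 1.x API:

# Round-trip sketch (illustrative, not from the diff): export a Warp array
# through DLPack and re-import it without copying. to_dlpack()/from_dlpack()
# are the functions defined in warp/dlpack.py above; wp.init() and wp.zeros()
# are assumed from the public Warp API.
import warp as wp
from warp.dlpack import from_dlpack, to_dlpack

wp.init()

a = wp.zeros(16, dtype=wp.float32, device="cpu")

# Wrap the array in a PyCapsule holding a DLManagedTensor (no copy is made).
capsule = to_dlpack(a)

# Re-import the capsule; the resulting array aliases the same memory as `a`.
b = from_dlpack(capsule)

assert b.ptr == a.ptr and b.shape == a.shape and b.dtype == a.dtype

The capsule produced by to_dlpack() is the same kind of object other DLPack consumers accept; re-importing it into Warp here simply makes the shared-memory behaviour easy to verify via the ptr comparison.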