warp-lang 1.0.2-py3-none-manylinux2014_x86_64.whl → 1.1.0-py3-none-manylinux2014_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of warp-lang might be problematic. More details are available on the package's registry page.
- warp/__init__.py +108 -97
- warp/__init__.pyi +1 -1
- warp/bin/warp-clang.so +0 -0
- warp/bin/warp.so +0 -0
- warp/build.py +115 -113
- warp/build_dll.py +383 -375
- warp/builtins.py +3425 -3354
- warp/codegen.py +2878 -2792
- warp/config.py +40 -36
- warp/constants.py +45 -45
- warp/context.py +5194 -5102
- warp/dlpack.py +442 -442
- warp/examples/__init__.py +16 -16
- warp/examples/assets/bear.usd +0 -0
- warp/examples/assets/bunny.usd +0 -0
- warp/examples/assets/cartpole.urdf +110 -110
- warp/examples/assets/crazyflie.usd +0 -0
- warp/examples/assets/cube.usd +0 -0
- warp/examples/assets/nv_ant.xml +92 -92
- warp/examples/assets/nv_humanoid.xml +183 -183
- warp/examples/assets/quadruped.urdf +267 -267
- warp/examples/assets/rocks.nvdb +0 -0
- warp/examples/assets/rocks.usd +0 -0
- warp/examples/assets/sphere.usd +0 -0
- warp/examples/benchmarks/benchmark_api.py +383 -383
- warp/examples/benchmarks/benchmark_cloth.py +278 -277
- warp/examples/benchmarks/benchmark_cloth_cupy.py +88 -88
- warp/examples/benchmarks/benchmark_cloth_jax.py +97 -100
- warp/examples/benchmarks/benchmark_cloth_numba.py +146 -142
- warp/examples/benchmarks/benchmark_cloth_numpy.py +77 -77
- warp/examples/benchmarks/benchmark_cloth_pytorch.py +86 -86
- warp/examples/benchmarks/benchmark_cloth_taichi.py +112 -112
- warp/examples/benchmarks/benchmark_cloth_warp.py +146 -146
- warp/examples/benchmarks/benchmark_launches.py +295 -295
- warp/examples/browse.py +29 -29
- warp/examples/core/example_dem.py +234 -219
- warp/examples/core/example_fluid.py +293 -267
- warp/examples/core/example_graph_capture.py +144 -126
- warp/examples/core/example_marching_cubes.py +188 -174
- warp/examples/core/example_mesh.py +174 -155
- warp/examples/core/example_mesh_intersect.py +205 -193
- warp/examples/core/example_nvdb.py +176 -170
- warp/examples/core/example_raycast.py +105 -90
- warp/examples/core/example_raymarch.py +199 -178
- warp/examples/core/example_render_opengl.py +185 -141
- warp/examples/core/example_sph.py +405 -387
- warp/examples/core/example_torch.py +222 -181
- warp/examples/core/example_wave.py +263 -248
- warp/examples/fem/bsr_utils.py +378 -380
- warp/examples/fem/example_apic_fluid.py +407 -389
- warp/examples/fem/example_convection_diffusion.py +182 -168
- warp/examples/fem/example_convection_diffusion_dg.py +219 -209
- warp/examples/fem/example_convection_diffusion_dg0.py +204 -194
- warp/examples/fem/example_deformed_geometry.py +177 -159
- warp/examples/fem/example_diffusion.py +201 -173
- warp/examples/fem/example_diffusion_3d.py +177 -152
- warp/examples/fem/example_diffusion_mgpu.py +221 -214
- warp/examples/fem/example_mixed_elasticity.py +244 -222
- warp/examples/fem/example_navier_stokes.py +259 -243
- warp/examples/fem/example_stokes.py +220 -192
- warp/examples/fem/example_stokes_transfer.py +265 -249
- warp/examples/fem/mesh_utils.py +133 -109
- warp/examples/fem/plot_utils.py +292 -287
- warp/examples/optim/example_bounce.py +260 -246
- warp/examples/optim/example_cloth_throw.py +222 -209
- warp/examples/optim/example_diffray.py +566 -536
- warp/examples/optim/example_drone.py +864 -835
- warp/examples/optim/example_inverse_kinematics.py +176 -168
- warp/examples/optim/example_inverse_kinematics_torch.py +185 -169
- warp/examples/optim/example_spring_cage.py +239 -231
- warp/examples/optim/example_trajectory.py +223 -199
- warp/examples/optim/example_walker.py +306 -293
- warp/examples/sim/example_cartpole.py +139 -129
- warp/examples/sim/example_cloth.py +196 -186
- warp/examples/sim/example_granular.py +124 -111
- warp/examples/sim/example_granular_collision_sdf.py +197 -186
- warp/examples/sim/example_jacobian_ik.py +236 -214
- warp/examples/sim/example_particle_chain.py +118 -105
- warp/examples/sim/example_quadruped.py +193 -180
- warp/examples/sim/example_rigid_chain.py +197 -187
- warp/examples/sim/example_rigid_contact.py +189 -177
- warp/examples/sim/example_rigid_force.py +127 -125
- warp/examples/sim/example_rigid_gyroscopic.py +109 -95
- warp/examples/sim/example_rigid_soft_contact.py +134 -122
- warp/examples/sim/example_soft_body.py +190 -177
- warp/fabric.py +337 -335
- warp/fem/__init__.py +60 -27
- warp/fem/cache.py +401 -388
- warp/fem/dirichlet.py +178 -179
- warp/fem/domain.py +262 -263
- warp/fem/field/__init__.py +100 -101
- warp/fem/field/field.py +148 -149
- warp/fem/field/nodal_field.py +298 -299
- warp/fem/field/restriction.py +22 -21
- warp/fem/field/test.py +180 -181
- warp/fem/field/trial.py +183 -183
- warp/fem/geometry/__init__.py +15 -19
- warp/fem/geometry/closest_point.py +69 -70
- warp/fem/geometry/deformed_geometry.py +270 -271
- warp/fem/geometry/element.py +744 -744
- warp/fem/geometry/geometry.py +184 -186
- warp/fem/geometry/grid_2d.py +380 -373
- warp/fem/geometry/grid_3d.py +441 -435
- warp/fem/geometry/hexmesh.py +953 -953
- warp/fem/geometry/partition.py +374 -376
- warp/fem/geometry/quadmesh_2d.py +532 -532
- warp/fem/geometry/tetmesh.py +840 -840
- warp/fem/geometry/trimesh_2d.py +577 -577
- warp/fem/integrate.py +1630 -1615
- warp/fem/operator.py +190 -191
- warp/fem/polynomial.py +214 -213
- warp/fem/quadrature/__init__.py +2 -2
- warp/fem/quadrature/pic_quadrature.py +243 -245
- warp/fem/quadrature/quadrature.py +295 -294
- warp/fem/space/__init__.py +294 -292
- warp/fem/space/basis_space.py +488 -489
- warp/fem/space/collocated_function_space.py +100 -105
- warp/fem/space/dof_mapper.py +236 -236
- warp/fem/space/function_space.py +148 -145
- warp/fem/space/grid_2d_function_space.py +267 -267
- warp/fem/space/grid_3d_function_space.py +305 -306
- warp/fem/space/hexmesh_function_space.py +350 -352
- warp/fem/space/partition.py +350 -350
- warp/fem/space/quadmesh_2d_function_space.py +368 -369
- warp/fem/space/restriction.py +158 -160
- warp/fem/space/shape/__init__.py +13 -15
- warp/fem/space/shape/cube_shape_function.py +738 -738
- warp/fem/space/shape/shape_function.py +102 -103
- warp/fem/space/shape/square_shape_function.py +611 -611
- warp/fem/space/shape/tet_shape_function.py +565 -567
- warp/fem/space/shape/triangle_shape_function.py +429 -429
- warp/fem/space/tetmesh_function_space.py +294 -292
- warp/fem/space/topology.py +297 -295
- warp/fem/space/trimesh_2d_function_space.py +223 -221
- warp/fem/types.py +77 -77
- warp/fem/utils.py +495 -495
- warp/jax.py +166 -141
- warp/jax_experimental.py +341 -339
- warp/native/array.h +1072 -1025
- warp/native/builtin.h +1560 -1560
- warp/native/bvh.cpp +398 -398
- warp/native/bvh.cu +525 -525
- warp/native/bvh.h +429 -429
- warp/native/clang/clang.cpp +495 -464
- warp/native/crt.cpp +31 -31
- warp/native/crt.h +334 -334
- warp/native/cuda_crt.h +1049 -1049
- warp/native/cuda_util.cpp +549 -540
- warp/native/cuda_util.h +288 -203
- warp/native/cutlass_gemm.cpp +34 -34
- warp/native/cutlass_gemm.cu +372 -372
- warp/native/error.cpp +66 -66
- warp/native/error.h +27 -27
- warp/native/fabric.h +228 -228
- warp/native/hashgrid.cpp +301 -278
- warp/native/hashgrid.cu +78 -77
- warp/native/hashgrid.h +227 -227
- warp/native/initializer_array.h +32 -32
- warp/native/intersect.h +1204 -1204
- warp/native/intersect_adj.h +365 -365
- warp/native/intersect_tri.h +322 -322
- warp/native/marching.cpp +2 -2
- warp/native/marching.cu +497 -497
- warp/native/marching.h +2 -2
- warp/native/mat.h +1498 -1498
- warp/native/matnn.h +333 -333
- warp/native/mesh.cpp +203 -203
- warp/native/mesh.cu +293 -293
- warp/native/mesh.h +1887 -1887
- warp/native/nanovdb/NanoVDB.h +4782 -4782
- warp/native/nanovdb/PNanoVDB.h +2553 -2553
- warp/native/nanovdb/PNanoVDBWrite.h +294 -294
- warp/native/noise.h +850 -850
- warp/native/quat.h +1084 -1084
- warp/native/rand.h +299 -299
- warp/native/range.h +108 -108
- warp/native/reduce.cpp +156 -156
- warp/native/reduce.cu +348 -348
- warp/native/runlength_encode.cpp +61 -61
- warp/native/runlength_encode.cu +46 -46
- warp/native/scan.cpp +30 -30
- warp/native/scan.cu +36 -36
- warp/native/scan.h +7 -7
- warp/native/solid_angle.h +442 -442
- warp/native/sort.cpp +94 -94
- warp/native/sort.cu +97 -97
- warp/native/sort.h +14 -14
- warp/native/sparse.cpp +337 -337
- warp/native/sparse.cu +544 -544
- warp/native/spatial.h +630 -630
- warp/native/svd.h +562 -562
- warp/native/temp_buffer.h +30 -30
- warp/native/vec.h +1132 -1132
- warp/native/volume.cpp +297 -297
- warp/native/volume.cu +32 -32
- warp/native/volume.h +538 -538
- warp/native/volume_builder.cu +425 -425
- warp/native/volume_builder.h +19 -19
- warp/native/warp.cpp +1057 -1052
- warp/native/warp.cu +2943 -2828
- warp/native/warp.h +313 -305
- warp/optim/__init__.py +9 -9
- warp/optim/adam.py +120 -120
- warp/optim/linear.py +1104 -939
- warp/optim/sgd.py +104 -92
- warp/render/__init__.py +10 -10
- warp/render/render_opengl.py +3217 -3204
- warp/render/render_usd.py +768 -749
- warp/render/utils.py +152 -150
- warp/sim/__init__.py +52 -59
- warp/sim/articulation.py +685 -685
- warp/sim/collide.py +1594 -1590
- warp/sim/import_mjcf.py +489 -481
- warp/sim/import_snu.py +220 -221
- warp/sim/import_urdf.py +536 -516
- warp/sim/import_usd.py +887 -881
- warp/sim/inertia.py +316 -317
- warp/sim/integrator.py +234 -233
- warp/sim/integrator_euler.py +1956 -1956
- warp/sim/integrator_featherstone.py +1910 -1991
- warp/sim/integrator_xpbd.py +3294 -3312
- warp/sim/model.py +4473 -4314
- warp/sim/particles.py +113 -112
- warp/sim/render.py +417 -403
- warp/sim/utils.py +413 -410
- warp/sparse.py +1227 -1227
- warp/stubs.py +2109 -2469
- warp/tape.py +1162 -225
- warp/tests/__init__.py +1 -1
- warp/tests/__main__.py +4 -4
- warp/tests/assets/torus.usda +105 -105
- warp/tests/aux_test_class_kernel.py +26 -26
- warp/tests/aux_test_compile_consts_dummy.py +10 -10
- warp/tests/aux_test_conditional_unequal_types_kernels.py +21 -21
- warp/tests/aux_test_dependent.py +22 -22
- warp/tests/aux_test_grad_customs.py +23 -23
- warp/tests/aux_test_reference.py +11 -11
- warp/tests/aux_test_reference_reference.py +10 -10
- warp/tests/aux_test_square.py +17 -17
- warp/tests/aux_test_unresolved_func.py +14 -14
- warp/tests/aux_test_unresolved_symbol.py +14 -14
- warp/tests/disabled_kinematics.py +239 -239
- warp/tests/run_coverage_serial.py +31 -31
- warp/tests/test_adam.py +157 -157
- warp/tests/test_arithmetic.py +1124 -1124
- warp/tests/test_array.py +2417 -2326
- warp/tests/test_array_reduce.py +150 -150
- warp/tests/test_async.py +668 -656
- warp/tests/test_atomic.py +141 -141
- warp/tests/test_bool.py +204 -149
- warp/tests/test_builtins_resolution.py +1292 -1292
- warp/tests/test_bvh.py +164 -171
- warp/tests/test_closest_point_edge_edge.py +228 -228
- warp/tests/test_codegen.py +566 -553
- warp/tests/test_compile_consts.py +97 -101
- warp/tests/test_conditional.py +246 -246
- warp/tests/test_copy.py +232 -215
- warp/tests/test_ctypes.py +632 -632
- warp/tests/test_dense.py +67 -67
- warp/tests/test_devices.py +91 -98
- warp/tests/test_dlpack.py +530 -529
- warp/tests/test_examples.py +400 -378
- warp/tests/test_fabricarray.py +955 -955
- warp/tests/test_fast_math.py +62 -54
- warp/tests/test_fem.py +1277 -1278
- warp/tests/test_fp16.py +130 -130
- warp/tests/test_func.py +338 -337
- warp/tests/test_generics.py +571 -571
- warp/tests/test_grad.py +746 -640
- warp/tests/test_grad_customs.py +333 -336
- warp/tests/test_hash_grid.py +210 -164
- warp/tests/test_import.py +39 -39
- warp/tests/test_indexedarray.py +1134 -1134
- warp/tests/test_intersect.py +67 -67
- warp/tests/test_jax.py +307 -307
- warp/tests/test_large.py +167 -164
- warp/tests/test_launch.py +354 -354
- warp/tests/test_lerp.py +261 -261
- warp/tests/test_linear_solvers.py +191 -171
- warp/tests/test_lvalue.py +421 -493
- warp/tests/test_marching_cubes.py +65 -65
- warp/tests/test_mat.py +1801 -1827
- warp/tests/test_mat_lite.py +115 -115
- warp/tests/test_mat_scalar_ops.py +2907 -2889
- warp/tests/test_math.py +126 -193
- warp/tests/test_matmul.py +500 -499
- warp/tests/test_matmul_lite.py +410 -410
- warp/tests/test_mempool.py +188 -190
- warp/tests/test_mesh.py +284 -324
- warp/tests/test_mesh_query_aabb.py +228 -241
- warp/tests/test_mesh_query_point.py +692 -702
- warp/tests/test_mesh_query_ray.py +292 -303
- warp/tests/test_mlp.py +276 -276
- warp/tests/test_model.py +110 -110
- warp/tests/test_modules_lite.py +39 -39
- warp/tests/test_multigpu.py +163 -163
- warp/tests/test_noise.py +248 -248
- warp/tests/test_operators.py +250 -250
- warp/tests/test_options.py +123 -125
- warp/tests/test_peer.py +133 -137
- warp/tests/test_pinned.py +78 -78
- warp/tests/test_print.py +54 -54
- warp/tests/test_quat.py +2086 -2086
- warp/tests/test_rand.py +288 -288
- warp/tests/test_reload.py +217 -217
- warp/tests/test_rounding.py +179 -179
- warp/tests/test_runlength_encode.py +190 -190
- warp/tests/test_sim_grad.py +243 -0
- warp/tests/test_sim_kinematics.py +91 -97
- warp/tests/test_smoothstep.py +168 -168
- warp/tests/test_snippet.py +305 -266
- warp/tests/test_sparse.py +468 -460
- warp/tests/test_spatial.py +2148 -2148
- warp/tests/test_streams.py +486 -473
- warp/tests/test_struct.py +710 -675
- warp/tests/test_tape.py +173 -148
- warp/tests/test_torch.py +743 -743
- warp/tests/test_transient_module.py +87 -87
- warp/tests/test_types.py +556 -659
- warp/tests/test_utils.py +490 -499
- warp/tests/test_vec.py +1264 -1268
- warp/tests/test_vec_lite.py +73 -73
- warp/tests/test_vec_scalar_ops.py +2099 -2099
- warp/tests/test_verify_fp.py +94 -94
- warp/tests/test_volume.py +737 -736
- warp/tests/test_volume_write.py +255 -265
- warp/tests/unittest_serial.py +37 -37
- warp/tests/unittest_suites.py +363 -359
- warp/tests/unittest_utils.py +603 -578
- warp/tests/unused_test_misc.py +71 -71
- warp/tests/walkthrough_debug.py +85 -85
- warp/thirdparty/appdirs.py +598 -598
- warp/thirdparty/dlpack.py +143 -143
- warp/thirdparty/unittest_parallel.py +566 -561
- warp/torch.py +321 -295
- warp/types.py +4504 -4450
- warp/utils.py +1008 -821
- {warp_lang-1.0.2.dist-info → warp_lang-1.1.0.dist-info}/LICENSE.md +126 -126
- {warp_lang-1.0.2.dist-info → warp_lang-1.1.0.dist-info}/METADATA +338 -400
- warp_lang-1.1.0.dist-info/RECORD +352 -0
- warp/examples/assets/cube.usda +0 -42
- warp/examples/assets/sphere.usda +0 -56
- warp/examples/assets/torus.usda +0 -105
- warp_lang-1.0.2.dist-info/RECORD +0 -352
- {warp_lang-1.0.2.dist-info → warp_lang-1.1.0.dist-info}/WHEEL +0 -0
- {warp_lang-1.0.2.dist-info → warp_lang-1.1.0.dist-info}/top_level.txt +0 -0
warp/jax_experimental.py
CHANGED
@@ -1,339 +1,341 @@
+# Copyright (c) 2024 NVIDIA CORPORATION. All rights reserved.
+# NVIDIA CORPORATION and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION is strictly prohibited.
+
+import ctypes
+
+import jax
+
+import warp as wp
+from warp.context import type_str
+from warp.types import array_t, launch_bounds_t, strides_from_shape
+
+_jax_warp_p = None
+
+# Holder for the custom callback to keep it alive.
+_cc_callback = None
+_registered_kernels = [None]
+_registered_kernel_to_id = {}
+
+
+def jax_kernel(wp_kernel):
+    """Create a Jax primitive from a Warp kernel.
+
+    NOTE: This is an experimental feature under development.
+
+    Current limitations:
+    - All kernel arguments must be arrays.
+    - Kernel launch dimensions are inferred from the shape of the first argument.
+    - Input arguments are followed by output arguments in the Warp kernel definition.
+    - There must be at least one input argument and at least one output argument.
+    - Output shapes must match the launch dimensions (i.e., output shapes must match the shape of the first argument).
+    - All arrays must be contiguous.
+    - Only the CUDA backend is supported.
+    """
+
+    if _jax_warp_p is None:
+        # Create and register the primitive
+        _create_jax_warp_primitive()
+    if wp_kernel not in _registered_kernel_to_id:
+        id = len(_registered_kernels)
+        _registered_kernels.append(wp_kernel)
+        _registered_kernel_to_id[wp_kernel] = id
+    else:
+        id = _registered_kernel_to_id[wp_kernel]
+
+    def bind(*args):
+        return _jax_warp_p.bind(*args, kernel=id)
+
+    return bind
+
+
+def _warp_custom_callback(stream, buffers, opaque, opaque_len):
+    # The descriptor is the form
+    # <kernel-id>|<launch-dims>|<arg-dims-list>
+    # Example: 42|16,32|16,32;100;16,32
+    kernel_id_str, dim_str, args_str = opaque.decode().split("|")
+
+    # Get the kernel from the registry.
+    kernel_id = int(kernel_id_str)
+    kernel = _registered_kernels[kernel_id]
+
+    # Parse launch dimensions.
+    dims = [int(d) for d in dim_str.split(",")]
+    bounds = launch_bounds_t(dims)
+
+    # Parse arguments.
+    arg_strings = args_str.split(";")
+    num_args = len(arg_strings)
+    assert num_args == len(kernel.adj.args), "Incorrect number of arguments"
+
+    # First param is the launch bounds.
+    kernel_params = (ctypes.c_void_p * (1 + num_args))()
+    kernel_params[0] = ctypes.addressof(bounds)
+
+    # Parse array descriptors.
+    args = []
+    for i in range(num_args):
+        dtype = kernel.adj.args[i].type.dtype
+        shape = [int(d) for d in arg_strings[i].split(",")]
+        strides = strides_from_shape(shape, dtype)
+
+        arr = array_t(buffers[i], 0, len(shape), shape, strides)
+        args.append(arr)  # keep a reference
+        arg_ptr = ctypes.addressof(arr)
+
+        kernel_params[i + 1] = arg_ptr
+
+    # Get current device.
+    device = wp.device_from_jax(_get_jax_device())
+
+    # Get kernel hooks.
+    # Note: module was loaded during jit lowering.
+    hooks = kernel.module.get_kernel_hooks(kernel, device)
+    assert hooks.forward, "Failed to find kernel entry point"
+
+    # Launch the kernel.
+    wp.context.runtime.core.cuda_launch_kernel(device.context, hooks.forward, bounds.size, 0, kernel_params, stream)
+
+
+# TODO: is there a simpler way of getting the Jax "current" device?
+def _get_jax_device():
+    # check if jax.default_device() context manager is active
+    device = jax.config.jax_default_device
+    # if default device is not set, use first device
+    if device is None:
+        device = jax.devices()[0]
+    return device
+
+
+def _create_jax_warp_primitive():
+    from functools import reduce
+
+    import jax
+    from jax._src.interpreters import batching
+    from jax.interpreters import mlir
+    from jax.interpreters.mlir import ir
+    from jaxlib.hlo_helpers import custom_call
+
+    global _jax_warp_p
+    global _cc_callback
+
+    # Create and register the primitive.
+    # TODO add default implementation that calls the kernel via warp.
+    _jax_warp_p = jax.core.Primitive("jax_warp")
+    _jax_warp_p.multiple_results = True
+
+    # TODO Just launch the kernel directly, but make sure the argument
+    # shapes are massaged the same way as below so that vmap works.
+    def impl(*args):
+        raise Exception("Not implemented")
+
+    _jax_warp_p.def_impl(impl)
+
+    # Auto-batching. Make sure all the arguments are fully broadcasted
+    # so that Warp is not confused about dimensions.
+    def vectorized_multi_batcher(args, dims, **params):
+        # Figure out the number of outputs.
+        wp_kernel = _registered_kernels[params["kernel"]]
+        output_count = len(wp_kernel.adj.args) - len(args)
+        shape, dim = next((a.shape, d) for a, d in zip(args, dims) if d is not None)
+        size = shape[dim]
+        args = [batching.bdim_at_front(a, d, size) if len(a.shape) else a for a, d in zip(args, dims)]
+        # Create the batched primitive.
+        return _jax_warp_p.bind(*args, **params), [dims[0]] * output_count
+
+    batching.primitive_batchers[_jax_warp_p] = vectorized_multi_batcher
+
+    def get_vecmat_shape(warp_type):
+        if hasattr(warp_type.dtype, "_shape_"):
+            return warp_type.dtype._shape_
+        return []
+
+    def strip_vecmat_dimensions(warp_arg, actual_shape):
+        shape = get_vecmat_shape(warp_arg.type)
+        for i, s in enumerate(reversed(shape)):
+            item = actual_shape[-i - 1]
+            if s != item:
+                raise Exception(f"The vector/matrix shape for argument {warp_arg.label} does not match")
+        return actual_shape[: len(actual_shape) - len(shape)]
+
+    def collapse_into_leading_dimension(warp_arg, actual_shape):
+        if len(actual_shape) < warp_arg.type.ndim:
+            raise Exception(f"Argument {warp_arg.label} has too few non-matrix/vector dimensions")
+        index_rest = len(actual_shape) - warp_arg.type.ndim + 1
+        leading_size = reduce(lambda x, y: x * y, actual_shape[:index_rest])
+        return [leading_size] + actual_shape[index_rest:]
+
+    # Infer array dimensions from input type.
+    def infer_dimensions(warp_arg, actual_shape):
+        actual_shape = strip_vecmat_dimensions(warp_arg, actual_shape)
+        return collapse_into_leading_dimension(warp_arg, actual_shape)
+
+    def base_type_to_jax(warp_dtype):
+        if hasattr(warp_dtype, "_wp_scalar_type_"):
+            return wp.dtype_to_jax(warp_dtype._wp_scalar_type_)
+        return wp.dtype_to_jax(warp_dtype)
+
+    def base_type_to_jax_ir(warp_dtype):
+        warp_to_jax_dict = {
+            wp.float16: ir.F16Type.get(),
+            wp.float32: ir.F32Type.get(),
+            wp.float64: ir.F64Type.get(),
+            wp.int8: ir.IntegerType.get_signless(8),
+            wp.int16: ir.IntegerType.get_signless(16),
+            wp.int32: ir.IntegerType.get_signless(32),
+            wp.int64: ir.IntegerType.get_signless(64),
+            wp.uint8: ir.IntegerType.get_unsigned(8),
+            wp.uint16: ir.IntegerType.get_unsigned(16),
+            wp.uint32: ir.IntegerType.get_unsigned(32),
+            wp.uint64: ir.IntegerType.get_unsigned(64),
+        }
+        if hasattr(warp_dtype, "_wp_scalar_type_"):
+            warp_dtype = warp_dtype._wp_scalar_type_
+        jax_dtype = warp_to_jax_dict.get(warp_dtype)
+        if jax_dtype is None:
+            raise TypeError(f"Invalid or unsupported data type: {warp_dtype}")
+        return jax_dtype
+
+    def base_type_is_compatible(warp_type, jax_ir_type):
+        jax_ir_to_warp = {
+            "f16": wp.float16,
+            "f32": wp.float32,
+            "f64": wp.float64,
+            "i8": wp.int8,
+            "i16": wp.int16,
+            "i32": wp.int32,
+            "i64": wp.int64,
+            "ui8": wp.uint8,
+            "ui16": wp.uint16,
+            "ui32": wp.uint32,
+            "ui64": wp.uint64,
+        }
+        expected_warp_type = jax_ir_to_warp.get(str(jax_ir_type))
+        if expected_warp_type is not None:
+            if hasattr(warp_type, "_wp_scalar_type_"):
+                return warp_type._wp_scalar_type_ == expected_warp_type
+            else:
+                return warp_type == expected_warp_type
+        else:
+            raise TypeError(f"Invalid or unsupported data type: {jax_ir_type}")
+
+    # Abstract evaluation.
+    def jax_warp_abstract(*args, kernel=None):
+        wp_kernel = _registered_kernels[kernel]
+        # All the extra arguments to the warp kernel are outputs.
+        warp_outputs = [o.type for o in wp_kernel.adj.args[len(args) :]]
+        # TODO. Let's just use the first input dimension to infer the output's dimensions.
+        dims = strip_vecmat_dimensions(wp_kernel.adj.args[0], list(args[0].shape))
+        jax_outputs = []
+        for o in warp_outputs:
+            shape = list(dims) + list(get_vecmat_shape(o))
+            dtype = base_type_to_jax(o.dtype)
+            jax_outputs.append(jax.core.ShapedArray(shape, dtype))
+        return jax_outputs
+
+    _jax_warp_p.def_abstract_eval(jax_warp_abstract)
+
+    # Lowering to MLIR.
+
+    # Create python-land custom call target.
+    CCALLFUNC = ctypes.CFUNCTYPE(
+        ctypes.c_voidp, ctypes.c_void_p, ctypes.POINTER(ctypes.c_void_p), ctypes.c_char_p, ctypes.c_size_t
+    )
+    _cc_callback = CCALLFUNC(_warp_custom_callback)
+    ccall_address = ctypes.cast(_cc_callback, ctypes.c_void_p)
+
+    # Put the custom call into a capsule, as required by XLA.
+    PyCapsule_Destructor = ctypes.CFUNCTYPE(None, ctypes.py_object)
+    PyCapsule_New = ctypes.pythonapi.PyCapsule_New
+    PyCapsule_New.restype = ctypes.py_object
+    PyCapsule_New.argtypes = (ctypes.c_void_p, ctypes.c_char_p, PyCapsule_Destructor)
+    capsule = PyCapsule_New(ccall_address.value, b"xla._CUSTOM_CALL_TARGET", PyCapsule_Destructor(0))
+
+    # Register the callback in XLA.
+    jax.lib.xla_client.register_custom_call_target("warp_call", capsule, platform="gpu")
+
+    def default_layout(shape):
+        return range(len(shape) - 1, -1, -1)
+
+    def warp_call_lowering(ctx, *args, kernel=None):
+        if not kernel:
+            raise Exception("Unknown kernel id " + str(kernel))
+        wp_kernel = _registered_kernels[kernel]
+
+        # TODO This may not be necessary, but it is perhaps better not to be
+        # mucking with kernel loading while already running the workload.
+        module = wp_kernel.module
+        device = wp.device_from_jax(_get_jax_device())
+        if not module.load(device):
+            raise Exception("Could not load kernel on device")
+
+        # Infer dimensions from the first input.
+        warp_arg0 = wp_kernel.adj.args[0]
+        actual_shape0 = ir.RankedTensorType(args[0].type).shape
+        dims = strip_vecmat_dimensions(warp_arg0, actual_shape0)
+        warp_dims = collapse_into_leading_dimension(warp_arg0, dims)
+
+        # Figure out the types and shapes of the input arrays.
+        arg_strings = []
+        operand_layouts = []
+        for actual, warg in zip(args, wp_kernel.adj.args):
+            wtype = warg.type
+            rtt = ir.RankedTensorType(actual.type)
+
+            if not isinstance(wtype, wp.array):
+                raise Exception("Only contiguous arrays are supported for Jax kernel arguments")
+
+            if not base_type_is_compatible(wtype.dtype, rtt.element_type):
+                raise TypeError(
+                    f"Incompatible data type for argument '{warg.label}', expected {type_str(wtype.dtype)}, got {rtt.element_type}"
+                )
+
+            # Infer array dimension (by removing the vector/matrix dimensions and
+            # collapsing the initial dimensions).
+            shape = infer_dimensions(warg, rtt.shape)
+
+            if len(shape) != wtype.ndim:
+                raise TypeError(f"Incompatible array dimensionality for argument '{warg.label}'")
+
+            arg_strings.append(",".join([str(d) for d in shape]))
+            operand_layouts.append(default_layout(rtt.shape))
+
+        # Figure out the types and shapes of the output arrays.
+        result_types = []
+        result_layouts = []
+        for warg in wp_kernel.adj.args[len(args) :]:
+            wtype = warg.type
+
+            if not isinstance(wtype, wp.array):
+                raise Exception("Only contiguous arrays are supported for Jax kernel arguments")
+
+            # Infer dimensions from the first input.
+            arg_strings.append(",".join([str(d) for d in warp_dims]))
+
+            result_shape = list(dims) + list(get_vecmat_shape(wtype))
+            result_types.append(ir.RankedTensorType.get(result_shape, base_type_to_jax_ir(wtype.dtype)))
+            result_layouts.append(default_layout(result_shape))
+
+        # Build opaque descriptor for callback.
+        shape_str = ",".join([str(d) for d in warp_dims])
+        args_str = ";".join(arg_strings)
+        descriptor = f"{kernel}|{shape_str}|{args_str}"
+
+        out = custom_call(
+            b"warp_call",
+            result_types=result_types,
+            operands=args,
+            backend_config=descriptor.encode("utf-8"),
+            operand_layouts=operand_layouts,
+            result_layouts=result_layouts,
+        ).results
+        return out
+
+    mlir.register_lowering(
+        _jax_warp_p,
+        warp_call_lowering,
+        platform="gpu",
+    )