warp-lang 1.10.0__py3-none-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of warp-lang might be problematic; see the release advisory for more details.
- warp/__init__.py +334 -0
- warp/__init__.pyi +5856 -0
- warp/_src/__init__.py +14 -0
- warp/_src/autograd.py +1077 -0
- warp/_src/build.py +620 -0
- warp/_src/build_dll.py +642 -0
- warp/_src/builtins.py +10555 -0
- warp/_src/codegen.py +4361 -0
- warp/_src/config.py +178 -0
- warp/_src/constants.py +59 -0
- warp/_src/context.py +8352 -0
- warp/_src/dlpack.py +464 -0
- warp/_src/fabric.py +362 -0
- warp/_src/fem/__init__.py +14 -0
- warp/_src/fem/adaptivity.py +510 -0
- warp/_src/fem/cache.py +689 -0
- warp/_src/fem/dirichlet.py +190 -0
- warp/_src/fem/domain.py +553 -0
- warp/_src/fem/field/__init__.py +131 -0
- warp/_src/fem/field/field.py +703 -0
- warp/_src/fem/field/nodal_field.py +403 -0
- warp/_src/fem/field/restriction.py +39 -0
- warp/_src/fem/field/virtual.py +1021 -0
- warp/_src/fem/geometry/__init__.py +32 -0
- warp/_src/fem/geometry/adaptive_nanogrid.py +782 -0
- warp/_src/fem/geometry/closest_point.py +99 -0
- warp/_src/fem/geometry/deformed_geometry.py +277 -0
- warp/_src/fem/geometry/element.py +854 -0
- warp/_src/fem/geometry/geometry.py +693 -0
- warp/_src/fem/geometry/grid_2d.py +478 -0
- warp/_src/fem/geometry/grid_3d.py +539 -0
- warp/_src/fem/geometry/hexmesh.py +956 -0
- warp/_src/fem/geometry/nanogrid.py +660 -0
- warp/_src/fem/geometry/partition.py +483 -0
- warp/_src/fem/geometry/quadmesh.py +597 -0
- warp/_src/fem/geometry/tetmesh.py +762 -0
- warp/_src/fem/geometry/trimesh.py +588 -0
- warp/_src/fem/integrate.py +2507 -0
- warp/_src/fem/linalg.py +385 -0
- warp/_src/fem/operator.py +398 -0
- warp/_src/fem/polynomial.py +231 -0
- warp/_src/fem/quadrature/__init__.py +17 -0
- warp/_src/fem/quadrature/pic_quadrature.py +318 -0
- warp/_src/fem/quadrature/quadrature.py +665 -0
- warp/_src/fem/space/__init__.py +248 -0
- warp/_src/fem/space/basis_function_space.py +499 -0
- warp/_src/fem/space/basis_space.py +681 -0
- warp/_src/fem/space/dof_mapper.py +253 -0
- warp/_src/fem/space/function_space.py +312 -0
- warp/_src/fem/space/grid_2d_function_space.py +179 -0
- warp/_src/fem/space/grid_3d_function_space.py +229 -0
- warp/_src/fem/space/hexmesh_function_space.py +255 -0
- warp/_src/fem/space/nanogrid_function_space.py +199 -0
- warp/_src/fem/space/partition.py +435 -0
- warp/_src/fem/space/quadmesh_function_space.py +222 -0
- warp/_src/fem/space/restriction.py +221 -0
- warp/_src/fem/space/shape/__init__.py +152 -0
- warp/_src/fem/space/shape/cube_shape_function.py +1107 -0
- warp/_src/fem/space/shape/shape_function.py +134 -0
- warp/_src/fem/space/shape/square_shape_function.py +928 -0
- warp/_src/fem/space/shape/tet_shape_function.py +829 -0
- warp/_src/fem/space/shape/triangle_shape_function.py +674 -0
- warp/_src/fem/space/tetmesh_function_space.py +270 -0
- warp/_src/fem/space/topology.py +461 -0
- warp/_src/fem/space/trimesh_function_space.py +193 -0
- warp/_src/fem/types.py +114 -0
- warp/_src/fem/utils.py +488 -0
- warp/_src/jax.py +188 -0
- warp/_src/jax_experimental/__init__.py +14 -0
- warp/_src/jax_experimental/custom_call.py +389 -0
- warp/_src/jax_experimental/ffi.py +1286 -0
- warp/_src/jax_experimental/xla_ffi.py +658 -0
- warp/_src/marching_cubes.py +710 -0
- warp/_src/math.py +416 -0
- warp/_src/optim/__init__.py +14 -0
- warp/_src/optim/adam.py +165 -0
- warp/_src/optim/linear.py +1608 -0
- warp/_src/optim/sgd.py +114 -0
- warp/_src/paddle.py +408 -0
- warp/_src/render/__init__.py +14 -0
- warp/_src/render/imgui_manager.py +291 -0
- warp/_src/render/render_opengl.py +3638 -0
- warp/_src/render/render_usd.py +939 -0
- warp/_src/render/utils.py +162 -0
- warp/_src/sparse.py +2718 -0
- warp/_src/tape.py +1208 -0
- warp/_src/thirdparty/__init__.py +0 -0
- warp/_src/thirdparty/appdirs.py +598 -0
- warp/_src/thirdparty/dlpack.py +145 -0
- warp/_src/thirdparty/unittest_parallel.py +676 -0
- warp/_src/torch.py +393 -0
- warp/_src/types.py +5888 -0
- warp/_src/utils.py +1695 -0
- warp/autograd.py +33 -0
- warp/bin/libwarp-clang.dylib +0 -0
- warp/bin/libwarp.dylib +0 -0
- warp/build.py +29 -0
- warp/build_dll.py +24 -0
- warp/codegen.py +24 -0
- warp/constants.py +24 -0
- warp/context.py +33 -0
- warp/dlpack.py +24 -0
- warp/examples/__init__.py +24 -0
- warp/examples/assets/bear.usd +0 -0
- warp/examples/assets/bunny.usd +0 -0
- warp/examples/assets/cube.usd +0 -0
- warp/examples/assets/nonuniform.usd +0 -0
- warp/examples/assets/nvidia_logo.png +0 -0
- warp/examples/assets/pixel.jpg +0 -0
- warp/examples/assets/rocks.nvdb +0 -0
- warp/examples/assets/rocks.usd +0 -0
- warp/examples/assets/sphere.usd +0 -0
- warp/examples/assets/square_cloth.usd +0 -0
- warp/examples/benchmarks/benchmark_api.py +389 -0
- warp/examples/benchmarks/benchmark_cloth.py +296 -0
- warp/examples/benchmarks/benchmark_cloth_cupy.py +96 -0
- warp/examples/benchmarks/benchmark_cloth_jax.py +105 -0
- warp/examples/benchmarks/benchmark_cloth_numba.py +161 -0
- warp/examples/benchmarks/benchmark_cloth_numpy.py +85 -0
- warp/examples/benchmarks/benchmark_cloth_paddle.py +94 -0
- warp/examples/benchmarks/benchmark_cloth_pytorch.py +94 -0
- warp/examples/benchmarks/benchmark_cloth_taichi.py +120 -0
- warp/examples/benchmarks/benchmark_cloth_warp.py +153 -0
- warp/examples/benchmarks/benchmark_gemm.py +164 -0
- warp/examples/benchmarks/benchmark_interop_paddle.py +166 -0
- warp/examples/benchmarks/benchmark_interop_torch.py +166 -0
- warp/examples/benchmarks/benchmark_launches.py +301 -0
- warp/examples/benchmarks/benchmark_tile_load_store.py +103 -0
- warp/examples/benchmarks/benchmark_tile_sort.py +155 -0
- warp/examples/browse.py +37 -0
- warp/examples/core/example_cupy.py +86 -0
- warp/examples/core/example_dem.py +241 -0
- warp/examples/core/example_fluid.py +299 -0
- warp/examples/core/example_graph_capture.py +150 -0
- warp/examples/core/example_marching_cubes.py +195 -0
- warp/examples/core/example_mesh.py +180 -0
- warp/examples/core/example_mesh_intersect.py +211 -0
- warp/examples/core/example_nvdb.py +182 -0
- warp/examples/core/example_raycast.py +111 -0
- warp/examples/core/example_raymarch.py +205 -0
- warp/examples/core/example_render_opengl.py +290 -0
- warp/examples/core/example_sample_mesh.py +300 -0
- warp/examples/core/example_sph.py +411 -0
- warp/examples/core/example_spin_lock.py +93 -0
- warp/examples/core/example_torch.py +211 -0
- warp/examples/core/example_wave.py +269 -0
- warp/examples/core/example_work_queue.py +118 -0
- warp/examples/distributed/example_jacobi_mpi.py +506 -0
- warp/examples/fem/example_adaptive_grid.py +286 -0
- warp/examples/fem/example_apic_fluid.py +469 -0
- warp/examples/fem/example_burgers.py +261 -0
- warp/examples/fem/example_convection_diffusion.py +181 -0
- warp/examples/fem/example_convection_diffusion_dg.py +225 -0
- warp/examples/fem/example_darcy_ls_optimization.py +489 -0
- warp/examples/fem/example_deformed_geometry.py +172 -0
- warp/examples/fem/example_diffusion.py +196 -0
- warp/examples/fem/example_diffusion_3d.py +225 -0
- warp/examples/fem/example_diffusion_mgpu.py +225 -0
- warp/examples/fem/example_distortion_energy.py +228 -0
- warp/examples/fem/example_elastic_shape_optimization.py +387 -0
- warp/examples/fem/example_magnetostatics.py +242 -0
- warp/examples/fem/example_mixed_elasticity.py +293 -0
- warp/examples/fem/example_navier_stokes.py +263 -0
- warp/examples/fem/example_nonconforming_contact.py +300 -0
- warp/examples/fem/example_stokes.py +213 -0
- warp/examples/fem/example_stokes_transfer.py +262 -0
- warp/examples/fem/example_streamlines.py +357 -0
- warp/examples/fem/utils.py +1047 -0
- warp/examples/interop/example_jax_callable.py +146 -0
- warp/examples/interop/example_jax_ffi_callback.py +132 -0
- warp/examples/interop/example_jax_kernel.py +232 -0
- warp/examples/optim/example_diffray.py +561 -0
- warp/examples/optim/example_fluid_checkpoint.py +497 -0
- warp/examples/tile/example_tile_block_cholesky.py +502 -0
- warp/examples/tile/example_tile_cholesky.py +88 -0
- warp/examples/tile/example_tile_convolution.py +66 -0
- warp/examples/tile/example_tile_fft.py +55 -0
- warp/examples/tile/example_tile_filtering.py +113 -0
- warp/examples/tile/example_tile_matmul.py +85 -0
- warp/examples/tile/example_tile_mcgp.py +191 -0
- warp/examples/tile/example_tile_mlp.py +385 -0
- warp/examples/tile/example_tile_nbody.py +199 -0
- warp/fabric.py +24 -0
- warp/fem/__init__.py +173 -0
- warp/fem/adaptivity.py +26 -0
- warp/fem/cache.py +30 -0
- warp/fem/dirichlet.py +24 -0
- warp/fem/field/__init__.py +24 -0
- warp/fem/field/field.py +26 -0
- warp/fem/geometry/__init__.py +21 -0
- warp/fem/geometry/closest_point.py +31 -0
- warp/fem/linalg.py +38 -0
- warp/fem/operator.py +32 -0
- warp/fem/polynomial.py +29 -0
- warp/fem/space/__init__.py +22 -0
- warp/fem/space/basis_space.py +24 -0
- warp/fem/space/shape/__init__.py +68 -0
- warp/fem/space/topology.py +24 -0
- warp/fem/types.py +24 -0
- warp/fem/utils.py +32 -0
- warp/jax.py +29 -0
- warp/jax_experimental/__init__.py +29 -0
- warp/jax_experimental/custom_call.py +29 -0
- warp/jax_experimental/ffi.py +39 -0
- warp/jax_experimental/xla_ffi.py +24 -0
- warp/marching_cubes.py +24 -0
- warp/math.py +37 -0
- warp/native/array.h +1687 -0
- warp/native/builtin.h +2327 -0
- warp/native/bvh.cpp +562 -0
- warp/native/bvh.cu +826 -0
- warp/native/bvh.h +555 -0
- warp/native/clang/clang.cpp +541 -0
- warp/native/coloring.cpp +622 -0
- warp/native/crt.cpp +51 -0
- warp/native/crt.h +568 -0
- warp/native/cuda_crt.h +1058 -0
- warp/native/cuda_util.cpp +677 -0
- warp/native/cuda_util.h +313 -0
- warp/native/error.cpp +77 -0
- warp/native/error.h +36 -0
- warp/native/exports.h +2023 -0
- warp/native/fabric.h +246 -0
- warp/native/hashgrid.cpp +311 -0
- warp/native/hashgrid.cu +89 -0
- warp/native/hashgrid.h +240 -0
- warp/native/initializer_array.h +41 -0
- warp/native/intersect.h +1253 -0
- warp/native/intersect_adj.h +375 -0
- warp/native/intersect_tri.h +348 -0
- warp/native/mat.h +5189 -0
- warp/native/mathdx.cpp +93 -0
- warp/native/matnn.h +221 -0
- warp/native/mesh.cpp +266 -0
- warp/native/mesh.cu +406 -0
- warp/native/mesh.h +2097 -0
- warp/native/nanovdb/GridHandle.h +533 -0
- warp/native/nanovdb/HostBuffer.h +591 -0
- warp/native/nanovdb/NanoVDB.h +6246 -0
- warp/native/nanovdb/NodeManager.h +323 -0
- warp/native/nanovdb/PNanoVDB.h +3390 -0
- warp/native/noise.h +859 -0
- warp/native/quat.h +1664 -0
- warp/native/rand.h +342 -0
- warp/native/range.h +145 -0
- warp/native/reduce.cpp +174 -0
- warp/native/reduce.cu +363 -0
- warp/native/runlength_encode.cpp +79 -0
- warp/native/runlength_encode.cu +61 -0
- warp/native/scan.cpp +47 -0
- warp/native/scan.cu +55 -0
- warp/native/scan.h +23 -0
- warp/native/solid_angle.h +466 -0
- warp/native/sort.cpp +251 -0
- warp/native/sort.cu +286 -0
- warp/native/sort.h +35 -0
- warp/native/sparse.cpp +241 -0
- warp/native/sparse.cu +435 -0
- warp/native/spatial.h +1306 -0
- warp/native/svd.h +727 -0
- warp/native/temp_buffer.h +46 -0
- warp/native/tile.h +4124 -0
- warp/native/tile_radix_sort.h +1112 -0
- warp/native/tile_reduce.h +838 -0
- warp/native/tile_scan.h +240 -0
- warp/native/tuple.h +189 -0
- warp/native/vec.h +2199 -0
- warp/native/version.h +23 -0
- warp/native/volume.cpp +501 -0
- warp/native/volume.cu +68 -0
- warp/native/volume.h +970 -0
- warp/native/volume_builder.cu +483 -0
- warp/native/volume_builder.h +52 -0
- warp/native/volume_impl.h +70 -0
- warp/native/warp.cpp +1143 -0
- warp/native/warp.cu +4604 -0
- warp/native/warp.h +358 -0
- warp/optim/__init__.py +20 -0
- warp/optim/adam.py +24 -0
- warp/optim/linear.py +35 -0
- warp/optim/sgd.py +24 -0
- warp/paddle.py +24 -0
- warp/py.typed +0 -0
- warp/render/__init__.py +22 -0
- warp/render/imgui_manager.py +29 -0
- warp/render/render_opengl.py +24 -0
- warp/render/render_usd.py +24 -0
- warp/render/utils.py +24 -0
- warp/sparse.py +51 -0
- warp/tape.py +24 -0
- warp/tests/__init__.py +1 -0
- warp/tests/__main__.py +4 -0
- warp/tests/assets/curlnoise_golden.npy +0 -0
- warp/tests/assets/mlp_golden.npy +0 -0
- warp/tests/assets/pixel.npy +0 -0
- warp/tests/assets/pnoise_golden.npy +0 -0
- warp/tests/assets/spiky.usd +0 -0
- warp/tests/assets/test_grid.nvdb +0 -0
- warp/tests/assets/test_index_grid.nvdb +0 -0
- warp/tests/assets/test_int32_grid.nvdb +0 -0
- warp/tests/assets/test_vec_grid.nvdb +0 -0
- warp/tests/assets/torus.nvdb +0 -0
- warp/tests/assets/torus.usda +105 -0
- warp/tests/aux_test_class_kernel.py +34 -0
- warp/tests/aux_test_compile_consts_dummy.py +18 -0
- warp/tests/aux_test_conditional_unequal_types_kernels.py +29 -0
- warp/tests/aux_test_dependent.py +29 -0
- warp/tests/aux_test_grad_customs.py +29 -0
- warp/tests/aux_test_instancing_gc.py +26 -0
- warp/tests/aux_test_module_aot.py +7 -0
- warp/tests/aux_test_module_unload.py +23 -0
- warp/tests/aux_test_name_clash1.py +40 -0
- warp/tests/aux_test_name_clash2.py +40 -0
- warp/tests/aux_test_reference.py +9 -0
- warp/tests/aux_test_reference_reference.py +8 -0
- warp/tests/aux_test_square.py +16 -0
- warp/tests/aux_test_unresolved_func.py +22 -0
- warp/tests/aux_test_unresolved_symbol.py +22 -0
- warp/tests/cuda/__init__.py +0 -0
- warp/tests/cuda/test_async.py +676 -0
- warp/tests/cuda/test_conditional_captures.py +1147 -0
- warp/tests/cuda/test_ipc.py +124 -0
- warp/tests/cuda/test_mempool.py +233 -0
- warp/tests/cuda/test_multigpu.py +169 -0
- warp/tests/cuda/test_peer.py +139 -0
- warp/tests/cuda/test_pinned.py +84 -0
- warp/tests/cuda/test_streams.py +691 -0
- warp/tests/geometry/__init__.py +0 -0
- warp/tests/geometry/test_bvh.py +335 -0
- warp/tests/geometry/test_hash_grid.py +259 -0
- warp/tests/geometry/test_marching_cubes.py +294 -0
- warp/tests/geometry/test_mesh.py +318 -0
- warp/tests/geometry/test_mesh_query_aabb.py +392 -0
- warp/tests/geometry/test_mesh_query_point.py +935 -0
- warp/tests/geometry/test_mesh_query_ray.py +323 -0
- warp/tests/geometry/test_volume.py +1103 -0
- warp/tests/geometry/test_volume_write.py +346 -0
- warp/tests/interop/__init__.py +0 -0
- warp/tests/interop/test_dlpack.py +730 -0
- warp/tests/interop/test_jax.py +1673 -0
- warp/tests/interop/test_paddle.py +800 -0
- warp/tests/interop/test_torch.py +1001 -0
- warp/tests/run_coverage_serial.py +39 -0
- warp/tests/test_adam.py +162 -0
- warp/tests/test_arithmetic.py +1096 -0
- warp/tests/test_array.py +3756 -0
- warp/tests/test_array_reduce.py +156 -0
- warp/tests/test_assert.py +303 -0
- warp/tests/test_atomic.py +336 -0
- warp/tests/test_atomic_bitwise.py +209 -0
- warp/tests/test_atomic_cas.py +312 -0
- warp/tests/test_bool.py +220 -0
- warp/tests/test_builtins_resolution.py +732 -0
- warp/tests/test_closest_point_edge_edge.py +327 -0
- warp/tests/test_codegen.py +974 -0
- warp/tests/test_codegen_instancing.py +1495 -0
- warp/tests/test_compile_consts.py +215 -0
- warp/tests/test_conditional.py +298 -0
- warp/tests/test_context.py +35 -0
- warp/tests/test_copy.py +319 -0
- warp/tests/test_ctypes.py +618 -0
- warp/tests/test_dense.py +73 -0
- warp/tests/test_devices.py +127 -0
- warp/tests/test_enum.py +136 -0
- warp/tests/test_examples.py +424 -0
- warp/tests/test_fabricarray.py +998 -0
- warp/tests/test_fast_math.py +72 -0
- warp/tests/test_fem.py +2204 -0
- warp/tests/test_fixedarray.py +229 -0
- warp/tests/test_fp16.py +136 -0
- warp/tests/test_func.py +501 -0
- warp/tests/test_future_annotations.py +100 -0
- warp/tests/test_generics.py +656 -0
- warp/tests/test_grad.py +893 -0
- warp/tests/test_grad_customs.py +339 -0
- warp/tests/test_grad_debug.py +341 -0
- warp/tests/test_implicit_init.py +411 -0
- warp/tests/test_import.py +45 -0
- warp/tests/test_indexedarray.py +1140 -0
- warp/tests/test_intersect.py +103 -0
- warp/tests/test_iter.py +76 -0
- warp/tests/test_large.py +177 -0
- warp/tests/test_launch.py +411 -0
- warp/tests/test_lerp.py +151 -0
- warp/tests/test_linear_solvers.py +223 -0
- warp/tests/test_lvalue.py +427 -0
- warp/tests/test_map.py +526 -0
- warp/tests/test_mat.py +3515 -0
- warp/tests/test_mat_assign_copy.py +178 -0
- warp/tests/test_mat_constructors.py +573 -0
- warp/tests/test_mat_lite.py +122 -0
- warp/tests/test_mat_scalar_ops.py +2913 -0
- warp/tests/test_math.py +212 -0
- warp/tests/test_module_aot.py +287 -0
- warp/tests/test_module_hashing.py +258 -0
- warp/tests/test_modules_lite.py +70 -0
- warp/tests/test_noise.py +252 -0
- warp/tests/test_operators.py +299 -0
- warp/tests/test_options.py +129 -0
- warp/tests/test_overwrite.py +551 -0
- warp/tests/test_print.py +408 -0
- warp/tests/test_quat.py +2653 -0
- warp/tests/test_quat_assign_copy.py +145 -0
- warp/tests/test_rand.py +339 -0
- warp/tests/test_reload.py +303 -0
- warp/tests/test_rounding.py +157 -0
- warp/tests/test_runlength_encode.py +196 -0
- warp/tests/test_scalar_ops.py +133 -0
- warp/tests/test_smoothstep.py +108 -0
- warp/tests/test_snippet.py +318 -0
- warp/tests/test_sparse.py +845 -0
- warp/tests/test_spatial.py +2859 -0
- warp/tests/test_spatial_assign_copy.py +160 -0
- warp/tests/test_special_values.py +361 -0
- warp/tests/test_static.py +640 -0
- warp/tests/test_struct.py +901 -0
- warp/tests/test_tape.py +242 -0
- warp/tests/test_transient_module.py +93 -0
- warp/tests/test_triangle_closest_point.py +192 -0
- warp/tests/test_tuple.py +361 -0
- warp/tests/test_types.py +615 -0
- warp/tests/test_utils.py +594 -0
- warp/tests/test_vec.py +1408 -0
- warp/tests/test_vec_assign_copy.py +143 -0
- warp/tests/test_vec_constructors.py +325 -0
- warp/tests/test_vec_lite.py +80 -0
- warp/tests/test_vec_scalar_ops.py +2327 -0
- warp/tests/test_verify_fp.py +100 -0
- warp/tests/test_version.py +75 -0
- warp/tests/tile/__init__.py +0 -0
- warp/tests/tile/test_tile.py +1519 -0
- warp/tests/tile/test_tile_atomic_bitwise.py +403 -0
- warp/tests/tile/test_tile_cholesky.py +608 -0
- warp/tests/tile/test_tile_load.py +724 -0
- warp/tests/tile/test_tile_mathdx.py +156 -0
- warp/tests/tile/test_tile_matmul.py +179 -0
- warp/tests/tile/test_tile_mlp.py +400 -0
- warp/tests/tile/test_tile_reduce.py +950 -0
- warp/tests/tile/test_tile_shared_memory.py +376 -0
- warp/tests/tile/test_tile_sort.py +121 -0
- warp/tests/tile/test_tile_view.py +173 -0
- warp/tests/unittest_serial.py +47 -0
- warp/tests/unittest_suites.py +430 -0
- warp/tests/unittest_utils.py +469 -0
- warp/tests/walkthrough_debug.py +95 -0
- warp/torch.py +24 -0
- warp/types.py +51 -0
- warp/utils.py +31 -0
- warp_lang-1.10.0.dist-info/METADATA +459 -0
- warp_lang-1.10.0.dist-info/RECORD +468 -0
- warp_lang-1.10.0.dist-info/WHEEL +5 -0
- warp_lang-1.10.0.dist-info/licenses/LICENSE.md +176 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/Gaia-LICENSE.txt +6 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/appdirs-LICENSE.txt +22 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/asset_pixel_jpg-LICENSE.txt +3 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/cuda-LICENSE.txt +1582 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/dlpack-LICENSE.txt +201 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/fp16-LICENSE.txt +28 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/libmathdx-LICENSE.txt +220 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/llvm-LICENSE.txt +279 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/moller-LICENSE.txt +16 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/nanovdb-LICENSE.txt +2 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/nvrtc-LICENSE.txt +1592 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/svd-LICENSE.txt +23 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/unittest_parallel-LICENSE.txt +21 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/usd-LICENSE.txt +213 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/windingnumber-LICENSE.txt +21 -0
- warp_lang-1.10.0.dist-info/top_level.txt +1 -0
warp/_src/jax.py
ADDED
|
@@ -0,0 +1,188 @@
|
|
|
1
|
+
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
|
2
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
#
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
#
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
#
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
15
|
+
|
|
16
|
+
import warp
|
|
17
|
+
|
|
18
|
+
_wp_module_name_ = "warp.jax"
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def device_to_jax(warp_device: warp._src.context.Devicelike):
    """Return the Jax device corresponding to a Warp device.

    Returns:
        :class:`jax.Device`

    Raises:
        RuntimeError: Failed to find the corresponding Jax device.
    """
    import jax

    device = warp.get_device(warp_device)

    if not device.is_cuda:
        # Warp's CPU device maps onto Jax's first CPU device.
        candidates = jax.devices("cpu")
        if not candidates:
            raise RuntimeError(f"Jax device corresponding to '{warp_device}' is not available")
        return candidates[0]

    # CUDA devices are matched by ordinal.
    candidates = jax.devices("cuda")
    if device.ordinal >= len(candidates):
        raise RuntimeError(f"Jax device corresponding to '{warp_device}' is not available")
    return candidates[device.ordinal]
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def device_from_jax(jax_device) -> warp._src.context.Device:
    """Return the Warp device corresponding to a Jax device.

    Args:
        jax_device (jax.Device): A Jax device descriptor.

    Raises:
        RuntimeError: The Jax device is neither a CPU nor GPU device.
    """
    platform = jax_device.platform

    if platform == "cpu":
        return warp.get_device("cpu")

    if platform == "gpu":
        # Jax GPU device ids line up with Warp's CUDA device ordinals.
        return warp.get_cuda_device(jax_device.id)

    raise RuntimeError(f"Unsupported Jax device platform '{jax_device.platform}'")
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
def get_jax_device():
    """Get the current Jax device."""
    import jax

    # TODO: is there a simpler way of getting the Jax "current" device?
    # The jax.default_device() context manager publishes its device through
    # this config entry; it is None when no such context is active.
    current = jax.config.jax_default_device
    if current is not None:
        return current

    # No default device configured: fall back to the first local device.
    return jax.local_devices()[0]
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
def dtype_to_jax(warp_dtype):
    """Return the Jax dtype corresponding to a Warp dtype.

    Args:
        warp_dtype: A Warp data type that has a corresponding Jax data type.

    Raises:
        TypeError: Unable to find a corresponding Jax data type.
    """
    # Build the lookup table on first use so that importing this module
    # does not require jax to be installed.
    if dtype_to_jax.type_map is None:
        import jax.numpy as jp

        pairs = (
            (warp.float16, jp.float16),
            (warp.float32, jp.float32),
            (warp.float64, jp.float64),
            (warp.int8, jp.int8),
            (warp.int16, jp.int16),
            (warp.int32, jp.int32),
            (warp.int64, jp.int64),
            (warp.uint8, jp.uint8),
            (warp.uint16, jp.uint16),
            (warp.uint32, jp.uint32),
            (warp.uint64, jp.uint64),
            (warp.bool, jp.bool_),
        )
        dtype_to_jax.type_map = dict(pairs)

    try:
        return dtype_to_jax.type_map[warp_dtype]
    except KeyError:
        raise TypeError(f"Cannot convert {warp_dtype} to a Jax type") from None
|
|
109
|
+
|
|
110
|
+
|
|
111
|
+
def dtype_from_jax(jax_dtype):
    """Return the Warp dtype corresponding to a Jax dtype.

    Raises:
        TypeError: Unable to find a corresponding Warp data type.
    """
    # Build the lookup table on first use so that importing this module
    # does not require jax to be installed.
    if dtype_from_jax.type_map is None:
        import jax.numpy as jp

        scalar_map = {
            jp.float16: warp.float16,
            jp.float32: warp.float32,
            jp.float64: warp.float64,
            jp.int8: warp.int8,
            jp.int16: warp.int16,
            jp.int32: warp.int32,
            jp.int64: warp.int64,
            jp.uint8: warp.uint8,
            jp.uint16: warp.uint16,
            jp.uint32: warp.uint32,
            jp.uint64: warp.uint64,
            jp.bool_: warp.bool,
        }

        # Register each entry twice: once under the Jax scalar type and once
        # under the corresponding Jax dtype object.
        table = {}
        for jax_scalar, wp_type in scalar_map.items():
            table[jax_scalar] = wp_type
            table[jp.dtype(jax_scalar)] = wp_type
        dtype_from_jax.type_map = table

    try:
        return dtype_from_jax.type_map[jax_dtype]
    except KeyError:
        raise TypeError(f"Cannot convert {jax_dtype} to a Warp type") from None
|
|
155
|
+
|
|
156
|
+
|
|
157
|
+
# lookup tables initialized when needed; populated lazily inside each function
# so that importing this module does not require jax to be installed
dtype_from_jax.type_map = None
dtype_to_jax.type_map = None
|
|
160
|
+
|
|
161
|
+
|
|
162
|
+
def to_jax(warp_array):
    """Convert a Warp array to a Jax array without copying the data.

    Args:
        warp_array (warp.array): The Warp array to convert.

    Returns:
        jax.Array: The converted Jax array.
    """
    import jax.dlpack

    # Hand the array straight to Jax via the DLPack protocol (zero-copy).
    return jax.dlpack.from_dlpack(warp_array)
|
|
175
|
+
|
|
176
|
+
|
|
177
|
+
def from_jax(jax_array, dtype=None) -> warp.array:
    """Convert a Jax array to a Warp array without copying the data.

    Args:
        jax_array (jax.Array): The Jax array to convert.
        dtype (optional): The target data type of the resulting Warp array. Defaults to the Jax array's data type mapped to a Warp data type.

    Returns:
        warp.array: The converted Warp array.
    """
    # Delegate to Warp's generic DLPack importer (zero-copy).
    return warp.from_dlpack(jax_array, dtype=dtype)
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
|
2
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
#
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
#
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
#
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
@@ -0,0 +1,389 @@
|
|
|
1
|
+
# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
|
2
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
#
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
#
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
#
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
15
|
+
|
|
16
|
+
import ctypes
|
|
17
|
+
|
|
18
|
+
import warp as wp
|
|
19
|
+
from warp._src.context import type_str
|
|
20
|
+
from warp._src.jax import get_jax_device
|
|
21
|
+
from warp._src.types import array_t, launch_bounds_t, strides_from_shape
|
|
22
|
+
from warp._src.utils import warn
|
|
23
|
+
|
|
24
|
+
_wp_module_name_ = "warp.jax_experimental.custom_call"

# The Jax primitive wrapping Warp kernels; created lazily on first use.
_jax_warp_p = None

# Holder for the custom callback to keep it alive.
_cc_callback = None
# Registered kernels indexed by integer id (slot 0 is a placeholder, so real ids start at 1).
_registered_kernels = [None]
# Reverse mapping from kernel object to its integer id.
_registered_kernel_to_id = {}
|
+
|
|
33
|
+
|
|
34
|
+
def jax_kernel(kernel, launch_dims=None, quiet=False):
    """Create a Jax primitive from a Warp kernel.

    NOTE: This is an experimental feature under development.

    Args:
        kernel: The Warp kernel to be wrapped.
        launch_dims: Optional. Specify the kernel launch dimensions. If None,
            dimensions are inferred from the shape of the first argument.
            This option when set will specify the output dimensions.
        quiet: Optional. If True, suppress deprecation warnings with newer JAX versions.

    Limitations:
        - All kernel arguments must be contiguous arrays.
        - Input arguments are followed by output arguments in the Warp kernel definition.
        - There must be at least one input argument and at least one output argument.
        - Only the CUDA backend is supported.
    """

    import jax

    # check if JAX version supports this
    if jax.__version_info__ < (0, 4, 25) or jax.__version_info__ >= (0, 8, 0):
        msg = (
            "This version of jax_kernel() requires JAX version 0.4.25 - 0.7.x, "
            f"but installed JAX version is {jax.__version_info__}."
        )
        if jax.__version_info__ >= (0, 8, 0):
            msg += " Please use warp.jax_experimental.ffi.jax_kernel instead."
        raise RuntimeError(msg)

    # deprecation warning
    if jax.__version_info__ >= (0, 5, 0) and not quiet:
        warn(
            "This version of jax_kernel() is deprecated and will not be supported with newer JAX versions. "
            "Please use the newer FFI version instead (warp.jax_experimental.ffi.jax_kernel). "
            "As of Warp release 1.10, the FFI version is the default implementation of jax_kernel(). "
            "Pass quiet=True to disable this warning.",
            DeprecationWarning,
        )

    if _jax_warp_p is None:
        # Create and register the primitive
        _create_jax_warp_primitive()

    # Assign a stable integer id per kernel so the primitive can reference it
    # through the registry. (Renamed from `id` to avoid shadowing the builtin.)
    if kernel not in _registered_kernel_to_id:
        kernel_id = len(_registered_kernels)
        _registered_kernels.append(kernel)
        _registered_kernel_to_id[kernel] = kernel_id
    else:
        kernel_id = _registered_kernel_to_id[kernel]

    def bind(*args):
        return _jax_warp_p.bind(*args, kernel=kernel_id, launch_dims=launch_dims)

    return bind
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
def _warp_custom_callback(stream, buffers, opaque, opaque_len):
    """XLA custom-call entry point: decode the opaque descriptor and launch the Warp kernel.

    Invoked by XLA (via the ctypes trampoline registered in ``_create_jax_warp_primitive``)
    with a CUDA stream, an array of buffer pointers (inputs followed by outputs,
    in kernel-argument order), and an opaque byte string that encodes the kernel id,
    launch dimensions, and per-argument shapes.
    """
    # The descriptor is the form
    # <kernel-id>|<launch-dims>|<arg-dims-list>
    # Example: 42|16,32|16,32;100;16,32
    kernel_id_str, dim_str, args_str = opaque.decode().split("|")

    # Get the kernel from the registry.
    kernel_id = int(kernel_id_str)
    kernel = _registered_kernels[kernel_id]

    # Parse launch dimensions.
    dims = [int(d) for d in dim_str.split(",")]
    bounds = launch_bounds_t(dims)

    # Parse arguments.
    arg_strings = args_str.split(";")
    num_args = len(arg_strings)
    assert num_args == len(kernel.adj.args), "Incorrect number of arguments"

    # First param is the launch bounds.
    kernel_params = (ctypes.c_void_p * (1 + num_args))()
    kernel_params[0] = ctypes.addressof(bounds)

    # Parse array descriptors.
    args = []
    for i in range(num_args):
        dtype = kernel.adj.args[i].type.dtype
        shape = [int(d) for d in arg_strings[i].split(",")]
        # Contiguous strides are recomputed from the shape; non-contiguous
        # layouts are not representable in this path.
        strides = strides_from_shape(shape, dtype)

        # buffers[i] is taken as the array's data pointer (presumably a device
        # pointer supplied by XLA — confirm against the custom-call contract).
        arr = array_t(buffers[i], 0, len(shape), shape, strides)
        args.append(arr)  # keep a reference
        # addressof() is only valid while 'arr' stays alive — hence the list above.
        arg_ptr = ctypes.addressof(arr)

        kernel_params[i + 1] = arg_ptr

    # Get current device.
    device = wp.device_from_jax(get_jax_device())

    # Get kernel hooks.
    # Note: module was loaded during jit lowering.
    hooks = kernel.module.get_kernel_hooks(kernel, device)
    assert hooks.forward, "Failed to find kernel entry point"

    # Launch the kernel.
    # Launch config: total threads = bounds.size, block size 256 (the 0 and 256
    # arguments), plus the kernel's required dynamic shared memory.
    wp._src.context.runtime.core.wp_cuda_launch_kernel(
        device.context, hooks.forward, bounds.size, 0, 256, hooks.forward_smem_bytes, kernel_params, stream
    )
|
|
139
|
+
|
|
140
|
+
|
|
141
|
+
def _create_jax_warp_primitive():
    """Create and register the ``jax_warp`` JAX primitive (one-time setup).

    Sets the module globals ``_jax_warp_p`` (the primitive) and ``_cc_callback``
    (a ctypes trampoline kept alive for the process lifetime), then wires up:
    a vmap batching rule, abstract evaluation, an XLA custom-call target named
    ``"warp_call"`` backed by ``_warp_custom_callback``, and an MLIR lowering
    for the GPU platform.
    """
    from functools import reduce

    import jax
    from jax._src.interpreters import batching
    from jax.interpreters import mlir
    from jax.interpreters.mlir import ir
    from jaxlib.hlo_helpers import custom_call

    global _jax_warp_p
    global _cc_callback

    # Create and register the primitive.
    # TODO add default implementation that calls the kernel via warp.
    try:
        # newer JAX versions
        import jax.extend

        _jax_warp_p = jax.extend.core.Primitive("jax_warp")
    except (ImportError, AttributeError):
        # older JAX versions
        _jax_warp_p = jax.core.Primitive("jax_warp")
    # Warp kernels can write several outputs, so the primitive returns a list.
    _jax_warp_p.multiple_results = True

    # TODO Just launch the kernel directly, but make sure the argument
    # shapes are massaged the same way as below so that vmap works.
    def impl(*args):
        # Eager (non-jit) evaluation is intentionally unsupported.
        raise Exception("Not implemented")

    _jax_warp_p.def_impl(impl)

    # Auto-batching. Make sure all the arguments are fully broadcasted
    # so that Warp is not confused about dimensions.
    def vectorized_multi_batcher(args, dims, **params):
        # Figure out the number of outputs.
        wp_kernel = _registered_kernels[params["kernel"]]
        output_count = len(wp_kernel.adj.args) - len(args)
        # Take the batch size from the first argument that actually has a batch dim.
        shape, dim = next((a.shape, d) for a, d in zip(args, dims) if d is not None)
        size = shape[dim]
        # Move every batched dim to the front; scalars (rank 0) pass through as-is.
        args = [batching.bdim_at_front(a, d, size) if len(a.shape) else a for a, d in zip(args, dims)]
        # Create the batched primitive.
        return _jax_warp_p.bind(*args, **params), [dims[0]] * output_count

    batching.primitive_batchers[_jax_warp_p] = vectorized_multi_batcher

    def get_vecmat_shape(warp_type):
        # Inner shape of a Warp vector/matrix dtype (e.g. vec3 -> (3,)), else [].
        if hasattr(warp_type.dtype, "_shape_"):
            return warp_type.dtype._shape_
        return []

    def strip_vecmat_dimensions(warp_arg, actual_shape):
        # Validate and drop the trailing vector/matrix dims from a JAX shape,
        # leaving only the array (launch) dimensions.
        shape = get_vecmat_shape(warp_arg.type)
        for i, s in enumerate(reversed(shape)):
            item = actual_shape[-i - 1]
            if s != item:
                raise Exception(f"The vector/matrix shape for argument {warp_arg.label} does not match")
        return actual_shape[: len(actual_shape) - len(shape)]

    def collapse_into_leading_dimension(warp_arg, actual_shape):
        # Fold extra leading dims (e.g. from vmap) into one, so the rank matches
        # the Warp array's ndim.
        if len(actual_shape) < warp_arg.type.ndim:
            raise Exception(f"Argument {warp_arg.label} has too few non-matrix/vector dimensions")
        index_rest = len(actual_shape) - warp_arg.type.ndim + 1
        leading_size = reduce(lambda x, y: x * y, actual_shape[:index_rest])
        return [leading_size] + actual_shape[index_rest:]

    # Infer array dimensions from input type.
    def infer_dimensions(warp_arg, actual_shape):
        actual_shape = strip_vecmat_dimensions(warp_arg, actual_shape)
        return collapse_into_leading_dimension(warp_arg, actual_shape)

    def base_type_to_jax(warp_dtype):
        # Map a Warp dtype (scalar or vec/mat) to the corresponding JAX scalar dtype.
        if hasattr(warp_dtype, "_wp_scalar_type_"):
            return wp.dtype_to_jax(warp_dtype._wp_scalar_type_)
        return wp.dtype_to_jax(warp_dtype)

    def base_type_to_jax_ir(warp_dtype):
        # Map a Warp dtype to an MLIR element type; raises TypeError if unsupported.
        warp_to_jax_dict = {
            wp.float16: ir.F16Type.get(),
            wp.float32: ir.F32Type.get(),
            wp.float64: ir.F64Type.get(),
            wp.int8: ir.IntegerType.get_signless(8),
            wp.int16: ir.IntegerType.get_signless(16),
            wp.int32: ir.IntegerType.get_signless(32),
            wp.int64: ir.IntegerType.get_signless(64),
            wp.uint8: ir.IntegerType.get_unsigned(8),
            wp.uint16: ir.IntegerType.get_unsigned(16),
            wp.uint32: ir.IntegerType.get_unsigned(32),
            wp.uint64: ir.IntegerType.get_unsigned(64),
        }
        if hasattr(warp_dtype, "_wp_scalar_type_"):
            warp_dtype = warp_dtype._wp_scalar_type_
        jax_dtype = warp_to_jax_dict.get(warp_dtype)
        if jax_dtype is None:
            raise TypeError(f"Invalid or unsupported data type: {warp_dtype}")
        return jax_dtype

    def base_type_is_compatible(warp_type, jax_ir_type):
        # Compare a Warp dtype against an MLIR element type by its string form
        # (e.g. "f32", "ui8"); raises TypeError for unknown MLIR types.
        jax_ir_to_warp = {
            "f16": wp.float16,
            "f32": wp.float32,
            "f64": wp.float64,
            "i8": wp.int8,
            "i16": wp.int16,
            "i32": wp.int32,
            "i64": wp.int64,
            "ui8": wp.uint8,
            "ui16": wp.uint16,
            "ui32": wp.uint32,
            "ui64": wp.uint64,
        }
        expected_warp_type = jax_ir_to_warp.get(str(jax_ir_type))
        if expected_warp_type is not None:
            if hasattr(warp_type, "_wp_scalar_type_"):
                return warp_type._wp_scalar_type_ == expected_warp_type
            else:
                return warp_type == expected_warp_type
        else:
            raise TypeError(f"Invalid or unsupported data type: {jax_ir_type}")

    # Abstract evaluation.
    def jax_warp_abstract(*args, kernel=None, launch_dims=None):
        wp_kernel = _registered_kernels[kernel]
        # All the extra arguments to the warp kernel are outputs.
        warp_outputs = [o.type for o in wp_kernel.adj.args[len(args) :]]

        if launch_dims is None:
            # Use the first input dimension to infer the output's dimensions if launch_dims is not provided
            dims = strip_vecmat_dimensions(wp_kernel.adj.args[0], list(args[0].shape))
        else:
            dims = launch_dims

        jax_outputs = []
        for o in warp_outputs:
            # Output shape = launch dims + the dtype's vector/matrix inner shape.
            shape = list(dims) + list(get_vecmat_shape(o))
            dtype = base_type_to_jax(o.dtype)
            jax_outputs.append(jax.core.ShapedArray(shape, dtype))
        return jax_outputs

    _jax_warp_p.def_abstract_eval(jax_warp_abstract)

    # Lowering to MLIR.

    # Create python-land custom call target.
    CCALLFUNC = ctypes.CFUNCTYPE(
        ctypes.c_voidp, ctypes.c_void_p, ctypes.POINTER(ctypes.c_void_p), ctypes.c_char_p, ctypes.c_size_t
    )
    # Stored in a module global so the trampoline is never garbage collected
    # while XLA may still call it.
    _cc_callback = CCALLFUNC(_warp_custom_callback)
    ccall_address = ctypes.cast(_cc_callback, ctypes.c_void_p)

    # Put the custom call into a capsule, as required by XLA.
    PyCapsule_Destructor = ctypes.CFUNCTYPE(None, ctypes.py_object)
    PyCapsule_New = ctypes.pythonapi.PyCapsule_New
    PyCapsule_New.restype = ctypes.py_object
    PyCapsule_New.argtypes = (ctypes.c_void_p, ctypes.c_char_p, PyCapsule_Destructor)
    capsule = PyCapsule_New(ccall_address.value, b"xla._CUSTOM_CALL_TARGET", PyCapsule_Destructor(0))

    # Register the callback in XLA.
    try:
        # newer JAX versions
        jax.ffi.register_ffi_target("warp_call", capsule, platform="gpu", api_version=0)
    except AttributeError:
        # older JAX versions
        jax.lib.xla_client.register_custom_call_target("warp_call", capsule, platform="gpu")

    def default_layout(shape):
        # Row-major (descending) dimension order, as expected by custom_call layouts.
        return range(len(shape) - 1, -1, -1)

    def warp_call_lowering(ctx, *args, kernel=None, launch_dims=None):
        # NOTE(review): this rejects kernel id 0 as well as None, since the check
        # is falsy-based — presumably ids start at a value where this is safe;
        # confirm against the registration code.
        if not kernel:
            raise Exception("Unknown kernel id " + str(kernel))
        wp_kernel = _registered_kernels[kernel]

        # TODO This may not be necessary, but it is perhaps better not to be
        # mucking with kernel loading while already running the workload.
        module = wp_kernel.module
        device = wp.device_from_jax(get_jax_device())
        if not module.load(device):
            raise Exception("Could not load kernel on device")

        if launch_dims is None:
            # Infer dimensions from the first input.
            warp_arg0 = wp_kernel.adj.args[0]
            actual_shape0 = ir.RankedTensorType(args[0].type).shape
            dims = strip_vecmat_dimensions(warp_arg0, actual_shape0)
            warp_dims = collapse_into_leading_dimension(warp_arg0, dims)
        else:
            dims = launch_dims
            warp_dims = launch_dims
        # Figure out the types and shapes of the input arrays.
        arg_strings = []
        operand_layouts = []
        for actual, warg in zip(args, wp_kernel.adj.args):
            wtype = warg.type
            rtt = ir.RankedTensorType(actual.type)

            if not isinstance(wtype, wp.array):
                raise Exception("Only contiguous arrays are supported for Jax kernel arguments")

            if not base_type_is_compatible(wtype.dtype, rtt.element_type):
                raise TypeError(
                    f"Incompatible data type for argument '{warg.label}', expected {type_str(wtype.dtype)}, got {rtt.element_type}"
                )

            # Infer array dimension (by removing the vector/matrix dimensions and
            # collapsing the initial dimensions).
            shape = infer_dimensions(warg, rtt.shape)

            if len(shape) != wtype.ndim:
                raise TypeError(f"Incompatible array dimensionality for argument '{warg.label}'")

            arg_strings.append(",".join([str(d) for d in shape]))
            operand_layouts.append(default_layout(rtt.shape))

        # Figure out the types and shapes of the output arrays.
        result_types = []
        result_layouts = []
        for warg in wp_kernel.adj.args[len(args) :]:
            wtype = warg.type

            if not isinstance(wtype, wp.array):
                raise Exception("Only contiguous arrays are supported for Jax kernel arguments")

            # Infer dimensions from the first input.
            arg_strings.append(",".join([str(d) for d in warp_dims]))

            result_shape = list(dims) + list(get_vecmat_shape(wtype))
            result_types.append(ir.RankedTensorType.get(result_shape, base_type_to_jax_ir(wtype.dtype)))
            result_layouts.append(default_layout(result_shape))

        # Build opaque descriptor for callback.
        # Format must match what _warp_custom_callback parses:
        # <kernel-id>|<launch-dims>|<arg-dims-list>
        shape_str = ",".join([str(d) for d in warp_dims])
        args_str = ";".join(arg_strings)
        descriptor = f"{kernel}|{shape_str}|{args_str}"

        out = custom_call(
            b"warp_call",
            result_types=result_types,
            operands=args,
            backend_config=descriptor.encode("utf-8"),
            operand_layouts=operand_layouts,
            result_layouts=result_layouts,
        ).results
        return out

    mlir.register_lowering(
        _jax_warp_p,
        warp_call_lowering,
        platform="gpu",
    )
|