warp_lang-1.10.0-py3-none-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of warp-lang might be problematic.
- warp/__init__.py +334 -0
- warp/__init__.pyi +5856 -0
- warp/_src/__init__.py +14 -0
- warp/_src/autograd.py +1077 -0
- warp/_src/build.py +620 -0
- warp/_src/build_dll.py +642 -0
- warp/_src/builtins.py +10555 -0
- warp/_src/codegen.py +4361 -0
- warp/_src/config.py +178 -0
- warp/_src/constants.py +59 -0
- warp/_src/context.py +8352 -0
- warp/_src/dlpack.py +464 -0
- warp/_src/fabric.py +362 -0
- warp/_src/fem/__init__.py +14 -0
- warp/_src/fem/adaptivity.py +510 -0
- warp/_src/fem/cache.py +689 -0
- warp/_src/fem/dirichlet.py +190 -0
- warp/_src/fem/domain.py +553 -0
- warp/_src/fem/field/__init__.py +131 -0
- warp/_src/fem/field/field.py +703 -0
- warp/_src/fem/field/nodal_field.py +403 -0
- warp/_src/fem/field/restriction.py +39 -0
- warp/_src/fem/field/virtual.py +1021 -0
- warp/_src/fem/geometry/__init__.py +32 -0
- warp/_src/fem/geometry/adaptive_nanogrid.py +782 -0
- warp/_src/fem/geometry/closest_point.py +99 -0
- warp/_src/fem/geometry/deformed_geometry.py +277 -0
- warp/_src/fem/geometry/element.py +854 -0
- warp/_src/fem/geometry/geometry.py +693 -0
- warp/_src/fem/geometry/grid_2d.py +478 -0
- warp/_src/fem/geometry/grid_3d.py +539 -0
- warp/_src/fem/geometry/hexmesh.py +956 -0
- warp/_src/fem/geometry/nanogrid.py +660 -0
- warp/_src/fem/geometry/partition.py +483 -0
- warp/_src/fem/geometry/quadmesh.py +597 -0
- warp/_src/fem/geometry/tetmesh.py +762 -0
- warp/_src/fem/geometry/trimesh.py +588 -0
- warp/_src/fem/integrate.py +2507 -0
- warp/_src/fem/linalg.py +385 -0
- warp/_src/fem/operator.py +398 -0
- warp/_src/fem/polynomial.py +231 -0
- warp/_src/fem/quadrature/__init__.py +17 -0
- warp/_src/fem/quadrature/pic_quadrature.py +318 -0
- warp/_src/fem/quadrature/quadrature.py +665 -0
- warp/_src/fem/space/__init__.py +248 -0
- warp/_src/fem/space/basis_function_space.py +499 -0
- warp/_src/fem/space/basis_space.py +681 -0
- warp/_src/fem/space/dof_mapper.py +253 -0
- warp/_src/fem/space/function_space.py +312 -0
- warp/_src/fem/space/grid_2d_function_space.py +179 -0
- warp/_src/fem/space/grid_3d_function_space.py +229 -0
- warp/_src/fem/space/hexmesh_function_space.py +255 -0
- warp/_src/fem/space/nanogrid_function_space.py +199 -0
- warp/_src/fem/space/partition.py +435 -0
- warp/_src/fem/space/quadmesh_function_space.py +222 -0
- warp/_src/fem/space/restriction.py +221 -0
- warp/_src/fem/space/shape/__init__.py +152 -0
- warp/_src/fem/space/shape/cube_shape_function.py +1107 -0
- warp/_src/fem/space/shape/shape_function.py +134 -0
- warp/_src/fem/space/shape/square_shape_function.py +928 -0
- warp/_src/fem/space/shape/tet_shape_function.py +829 -0
- warp/_src/fem/space/shape/triangle_shape_function.py +674 -0
- warp/_src/fem/space/tetmesh_function_space.py +270 -0
- warp/_src/fem/space/topology.py +461 -0
- warp/_src/fem/space/trimesh_function_space.py +193 -0
- warp/_src/fem/types.py +114 -0
- warp/_src/fem/utils.py +488 -0
- warp/_src/jax.py +188 -0
- warp/_src/jax_experimental/__init__.py +14 -0
- warp/_src/jax_experimental/custom_call.py +389 -0
- warp/_src/jax_experimental/ffi.py +1286 -0
- warp/_src/jax_experimental/xla_ffi.py +658 -0
- warp/_src/marching_cubes.py +710 -0
- warp/_src/math.py +416 -0
- warp/_src/optim/__init__.py +14 -0
- warp/_src/optim/adam.py +165 -0
- warp/_src/optim/linear.py +1608 -0
- warp/_src/optim/sgd.py +114 -0
- warp/_src/paddle.py +408 -0
- warp/_src/render/__init__.py +14 -0
- warp/_src/render/imgui_manager.py +291 -0
- warp/_src/render/render_opengl.py +3638 -0
- warp/_src/render/render_usd.py +939 -0
- warp/_src/render/utils.py +162 -0
- warp/_src/sparse.py +2718 -0
- warp/_src/tape.py +1208 -0
- warp/_src/thirdparty/__init__.py +0 -0
- warp/_src/thirdparty/appdirs.py +598 -0
- warp/_src/thirdparty/dlpack.py +145 -0
- warp/_src/thirdparty/unittest_parallel.py +676 -0
- warp/_src/torch.py +393 -0
- warp/_src/types.py +5888 -0
- warp/_src/utils.py +1695 -0
- warp/autograd.py +33 -0
- warp/bin/libwarp-clang.dylib +0 -0
- warp/bin/libwarp.dylib +0 -0
- warp/build.py +29 -0
- warp/build_dll.py +24 -0
- warp/codegen.py +24 -0
- warp/constants.py +24 -0
- warp/context.py +33 -0
- warp/dlpack.py +24 -0
- warp/examples/__init__.py +24 -0
- warp/examples/assets/bear.usd +0 -0
- warp/examples/assets/bunny.usd +0 -0
- warp/examples/assets/cube.usd +0 -0
- warp/examples/assets/nonuniform.usd +0 -0
- warp/examples/assets/nvidia_logo.png +0 -0
- warp/examples/assets/pixel.jpg +0 -0
- warp/examples/assets/rocks.nvdb +0 -0
- warp/examples/assets/rocks.usd +0 -0
- warp/examples/assets/sphere.usd +0 -0
- warp/examples/assets/square_cloth.usd +0 -0
- warp/examples/benchmarks/benchmark_api.py +389 -0
- warp/examples/benchmarks/benchmark_cloth.py +296 -0
- warp/examples/benchmarks/benchmark_cloth_cupy.py +96 -0
- warp/examples/benchmarks/benchmark_cloth_jax.py +105 -0
- warp/examples/benchmarks/benchmark_cloth_numba.py +161 -0
- warp/examples/benchmarks/benchmark_cloth_numpy.py +85 -0
- warp/examples/benchmarks/benchmark_cloth_paddle.py +94 -0
- warp/examples/benchmarks/benchmark_cloth_pytorch.py +94 -0
- warp/examples/benchmarks/benchmark_cloth_taichi.py +120 -0
- warp/examples/benchmarks/benchmark_cloth_warp.py +153 -0
- warp/examples/benchmarks/benchmark_gemm.py +164 -0
- warp/examples/benchmarks/benchmark_interop_paddle.py +166 -0
- warp/examples/benchmarks/benchmark_interop_torch.py +166 -0
- warp/examples/benchmarks/benchmark_launches.py +301 -0
- warp/examples/benchmarks/benchmark_tile_load_store.py +103 -0
- warp/examples/benchmarks/benchmark_tile_sort.py +155 -0
- warp/examples/browse.py +37 -0
- warp/examples/core/example_cupy.py +86 -0
- warp/examples/core/example_dem.py +241 -0
- warp/examples/core/example_fluid.py +299 -0
- warp/examples/core/example_graph_capture.py +150 -0
- warp/examples/core/example_marching_cubes.py +195 -0
- warp/examples/core/example_mesh.py +180 -0
- warp/examples/core/example_mesh_intersect.py +211 -0
- warp/examples/core/example_nvdb.py +182 -0
- warp/examples/core/example_raycast.py +111 -0
- warp/examples/core/example_raymarch.py +205 -0
- warp/examples/core/example_render_opengl.py +290 -0
- warp/examples/core/example_sample_mesh.py +300 -0
- warp/examples/core/example_sph.py +411 -0
- warp/examples/core/example_spin_lock.py +93 -0
- warp/examples/core/example_torch.py +211 -0
- warp/examples/core/example_wave.py +269 -0
- warp/examples/core/example_work_queue.py +118 -0
- warp/examples/distributed/example_jacobi_mpi.py +506 -0
- warp/examples/fem/example_adaptive_grid.py +286 -0
- warp/examples/fem/example_apic_fluid.py +469 -0
- warp/examples/fem/example_burgers.py +261 -0
- warp/examples/fem/example_convection_diffusion.py +181 -0
- warp/examples/fem/example_convection_diffusion_dg.py +225 -0
- warp/examples/fem/example_darcy_ls_optimization.py +489 -0
- warp/examples/fem/example_deformed_geometry.py +172 -0
- warp/examples/fem/example_diffusion.py +196 -0
- warp/examples/fem/example_diffusion_3d.py +225 -0
- warp/examples/fem/example_diffusion_mgpu.py +225 -0
- warp/examples/fem/example_distortion_energy.py +228 -0
- warp/examples/fem/example_elastic_shape_optimization.py +387 -0
- warp/examples/fem/example_magnetostatics.py +242 -0
- warp/examples/fem/example_mixed_elasticity.py +293 -0
- warp/examples/fem/example_navier_stokes.py +263 -0
- warp/examples/fem/example_nonconforming_contact.py +300 -0
- warp/examples/fem/example_stokes.py +213 -0
- warp/examples/fem/example_stokes_transfer.py +262 -0
- warp/examples/fem/example_streamlines.py +357 -0
- warp/examples/fem/utils.py +1047 -0
- warp/examples/interop/example_jax_callable.py +146 -0
- warp/examples/interop/example_jax_ffi_callback.py +132 -0
- warp/examples/interop/example_jax_kernel.py +232 -0
- warp/examples/optim/example_diffray.py +561 -0
- warp/examples/optim/example_fluid_checkpoint.py +497 -0
- warp/examples/tile/example_tile_block_cholesky.py +502 -0
- warp/examples/tile/example_tile_cholesky.py +88 -0
- warp/examples/tile/example_tile_convolution.py +66 -0
- warp/examples/tile/example_tile_fft.py +55 -0
- warp/examples/tile/example_tile_filtering.py +113 -0
- warp/examples/tile/example_tile_matmul.py +85 -0
- warp/examples/tile/example_tile_mcgp.py +191 -0
- warp/examples/tile/example_tile_mlp.py +385 -0
- warp/examples/tile/example_tile_nbody.py +199 -0
- warp/fabric.py +24 -0
- warp/fem/__init__.py +173 -0
- warp/fem/adaptivity.py +26 -0
- warp/fem/cache.py +30 -0
- warp/fem/dirichlet.py +24 -0
- warp/fem/field/__init__.py +24 -0
- warp/fem/field/field.py +26 -0
- warp/fem/geometry/__init__.py +21 -0
- warp/fem/geometry/closest_point.py +31 -0
- warp/fem/linalg.py +38 -0
- warp/fem/operator.py +32 -0
- warp/fem/polynomial.py +29 -0
- warp/fem/space/__init__.py +22 -0
- warp/fem/space/basis_space.py +24 -0
- warp/fem/space/shape/__init__.py +68 -0
- warp/fem/space/topology.py +24 -0
- warp/fem/types.py +24 -0
- warp/fem/utils.py +32 -0
- warp/jax.py +29 -0
- warp/jax_experimental/__init__.py +29 -0
- warp/jax_experimental/custom_call.py +29 -0
- warp/jax_experimental/ffi.py +39 -0
- warp/jax_experimental/xla_ffi.py +24 -0
- warp/marching_cubes.py +24 -0
- warp/math.py +37 -0
- warp/native/array.h +1687 -0
- warp/native/builtin.h +2327 -0
- warp/native/bvh.cpp +562 -0
- warp/native/bvh.cu +826 -0
- warp/native/bvh.h +555 -0
- warp/native/clang/clang.cpp +541 -0
- warp/native/coloring.cpp +622 -0
- warp/native/crt.cpp +51 -0
- warp/native/crt.h +568 -0
- warp/native/cuda_crt.h +1058 -0
- warp/native/cuda_util.cpp +677 -0
- warp/native/cuda_util.h +313 -0
- warp/native/error.cpp +77 -0
- warp/native/error.h +36 -0
- warp/native/exports.h +2023 -0
- warp/native/fabric.h +246 -0
- warp/native/hashgrid.cpp +311 -0
- warp/native/hashgrid.cu +89 -0
- warp/native/hashgrid.h +240 -0
- warp/native/initializer_array.h +41 -0
- warp/native/intersect.h +1253 -0
- warp/native/intersect_adj.h +375 -0
- warp/native/intersect_tri.h +348 -0
- warp/native/mat.h +5189 -0
- warp/native/mathdx.cpp +93 -0
- warp/native/matnn.h +221 -0
- warp/native/mesh.cpp +266 -0
- warp/native/mesh.cu +406 -0
- warp/native/mesh.h +2097 -0
- warp/native/nanovdb/GridHandle.h +533 -0
- warp/native/nanovdb/HostBuffer.h +591 -0
- warp/native/nanovdb/NanoVDB.h +6246 -0
- warp/native/nanovdb/NodeManager.h +323 -0
- warp/native/nanovdb/PNanoVDB.h +3390 -0
- warp/native/noise.h +859 -0
- warp/native/quat.h +1664 -0
- warp/native/rand.h +342 -0
- warp/native/range.h +145 -0
- warp/native/reduce.cpp +174 -0
- warp/native/reduce.cu +363 -0
- warp/native/runlength_encode.cpp +79 -0
- warp/native/runlength_encode.cu +61 -0
- warp/native/scan.cpp +47 -0
- warp/native/scan.cu +55 -0
- warp/native/scan.h +23 -0
- warp/native/solid_angle.h +466 -0
- warp/native/sort.cpp +251 -0
- warp/native/sort.cu +286 -0
- warp/native/sort.h +35 -0
- warp/native/sparse.cpp +241 -0
- warp/native/sparse.cu +435 -0
- warp/native/spatial.h +1306 -0
- warp/native/svd.h +727 -0
- warp/native/temp_buffer.h +46 -0
- warp/native/tile.h +4124 -0
- warp/native/tile_radix_sort.h +1112 -0
- warp/native/tile_reduce.h +838 -0
- warp/native/tile_scan.h +240 -0
- warp/native/tuple.h +189 -0
- warp/native/vec.h +2199 -0
- warp/native/version.h +23 -0
- warp/native/volume.cpp +501 -0
- warp/native/volume.cu +68 -0
- warp/native/volume.h +970 -0
- warp/native/volume_builder.cu +483 -0
- warp/native/volume_builder.h +52 -0
- warp/native/volume_impl.h +70 -0
- warp/native/warp.cpp +1143 -0
- warp/native/warp.cu +4604 -0
- warp/native/warp.h +358 -0
- warp/optim/__init__.py +20 -0
- warp/optim/adam.py +24 -0
- warp/optim/linear.py +35 -0
- warp/optim/sgd.py +24 -0
- warp/paddle.py +24 -0
- warp/py.typed +0 -0
- warp/render/__init__.py +22 -0
- warp/render/imgui_manager.py +29 -0
- warp/render/render_opengl.py +24 -0
- warp/render/render_usd.py +24 -0
- warp/render/utils.py +24 -0
- warp/sparse.py +51 -0
- warp/tape.py +24 -0
- warp/tests/__init__.py +1 -0
- warp/tests/__main__.py +4 -0
- warp/tests/assets/curlnoise_golden.npy +0 -0
- warp/tests/assets/mlp_golden.npy +0 -0
- warp/tests/assets/pixel.npy +0 -0
- warp/tests/assets/pnoise_golden.npy +0 -0
- warp/tests/assets/spiky.usd +0 -0
- warp/tests/assets/test_grid.nvdb +0 -0
- warp/tests/assets/test_index_grid.nvdb +0 -0
- warp/tests/assets/test_int32_grid.nvdb +0 -0
- warp/tests/assets/test_vec_grid.nvdb +0 -0
- warp/tests/assets/torus.nvdb +0 -0
- warp/tests/assets/torus.usda +105 -0
- warp/tests/aux_test_class_kernel.py +34 -0
- warp/tests/aux_test_compile_consts_dummy.py +18 -0
- warp/tests/aux_test_conditional_unequal_types_kernels.py +29 -0
- warp/tests/aux_test_dependent.py +29 -0
- warp/tests/aux_test_grad_customs.py +29 -0
- warp/tests/aux_test_instancing_gc.py +26 -0
- warp/tests/aux_test_module_aot.py +7 -0
- warp/tests/aux_test_module_unload.py +23 -0
- warp/tests/aux_test_name_clash1.py +40 -0
- warp/tests/aux_test_name_clash2.py +40 -0
- warp/tests/aux_test_reference.py +9 -0
- warp/tests/aux_test_reference_reference.py +8 -0
- warp/tests/aux_test_square.py +16 -0
- warp/tests/aux_test_unresolved_func.py +22 -0
- warp/tests/aux_test_unresolved_symbol.py +22 -0
- warp/tests/cuda/__init__.py +0 -0
- warp/tests/cuda/test_async.py +676 -0
- warp/tests/cuda/test_conditional_captures.py +1147 -0
- warp/tests/cuda/test_ipc.py +124 -0
- warp/tests/cuda/test_mempool.py +233 -0
- warp/tests/cuda/test_multigpu.py +169 -0
- warp/tests/cuda/test_peer.py +139 -0
- warp/tests/cuda/test_pinned.py +84 -0
- warp/tests/cuda/test_streams.py +691 -0
- warp/tests/geometry/__init__.py +0 -0
- warp/tests/geometry/test_bvh.py +335 -0
- warp/tests/geometry/test_hash_grid.py +259 -0
- warp/tests/geometry/test_marching_cubes.py +294 -0
- warp/tests/geometry/test_mesh.py +318 -0
- warp/tests/geometry/test_mesh_query_aabb.py +392 -0
- warp/tests/geometry/test_mesh_query_point.py +935 -0
- warp/tests/geometry/test_mesh_query_ray.py +323 -0
- warp/tests/geometry/test_volume.py +1103 -0
- warp/tests/geometry/test_volume_write.py +346 -0
- warp/tests/interop/__init__.py +0 -0
- warp/tests/interop/test_dlpack.py +730 -0
- warp/tests/interop/test_jax.py +1673 -0
- warp/tests/interop/test_paddle.py +800 -0
- warp/tests/interop/test_torch.py +1001 -0
- warp/tests/run_coverage_serial.py +39 -0
- warp/tests/test_adam.py +162 -0
- warp/tests/test_arithmetic.py +1096 -0
- warp/tests/test_array.py +3756 -0
- warp/tests/test_array_reduce.py +156 -0
- warp/tests/test_assert.py +303 -0
- warp/tests/test_atomic.py +336 -0
- warp/tests/test_atomic_bitwise.py +209 -0
- warp/tests/test_atomic_cas.py +312 -0
- warp/tests/test_bool.py +220 -0
- warp/tests/test_builtins_resolution.py +732 -0
- warp/tests/test_closest_point_edge_edge.py +327 -0
- warp/tests/test_codegen.py +974 -0
- warp/tests/test_codegen_instancing.py +1495 -0
- warp/tests/test_compile_consts.py +215 -0
- warp/tests/test_conditional.py +298 -0
- warp/tests/test_context.py +35 -0
- warp/tests/test_copy.py +319 -0
- warp/tests/test_ctypes.py +618 -0
- warp/tests/test_dense.py +73 -0
- warp/tests/test_devices.py +127 -0
- warp/tests/test_enum.py +136 -0
- warp/tests/test_examples.py +424 -0
- warp/tests/test_fabricarray.py +998 -0
- warp/tests/test_fast_math.py +72 -0
- warp/tests/test_fem.py +2204 -0
- warp/tests/test_fixedarray.py +229 -0
- warp/tests/test_fp16.py +136 -0
- warp/tests/test_func.py +501 -0
- warp/tests/test_future_annotations.py +100 -0
- warp/tests/test_generics.py +656 -0
- warp/tests/test_grad.py +893 -0
- warp/tests/test_grad_customs.py +339 -0
- warp/tests/test_grad_debug.py +341 -0
- warp/tests/test_implicit_init.py +411 -0
- warp/tests/test_import.py +45 -0
- warp/tests/test_indexedarray.py +1140 -0
- warp/tests/test_intersect.py +103 -0
- warp/tests/test_iter.py +76 -0
- warp/tests/test_large.py +177 -0
- warp/tests/test_launch.py +411 -0
- warp/tests/test_lerp.py +151 -0
- warp/tests/test_linear_solvers.py +223 -0
- warp/tests/test_lvalue.py +427 -0
- warp/tests/test_map.py +526 -0
- warp/tests/test_mat.py +3515 -0
- warp/tests/test_mat_assign_copy.py +178 -0
- warp/tests/test_mat_constructors.py +573 -0
- warp/tests/test_mat_lite.py +122 -0
- warp/tests/test_mat_scalar_ops.py +2913 -0
- warp/tests/test_math.py +212 -0
- warp/tests/test_module_aot.py +287 -0
- warp/tests/test_module_hashing.py +258 -0
- warp/tests/test_modules_lite.py +70 -0
- warp/tests/test_noise.py +252 -0
- warp/tests/test_operators.py +299 -0
- warp/tests/test_options.py +129 -0
- warp/tests/test_overwrite.py +551 -0
- warp/tests/test_print.py +408 -0
- warp/tests/test_quat.py +2653 -0
- warp/tests/test_quat_assign_copy.py +145 -0
- warp/tests/test_rand.py +339 -0
- warp/tests/test_reload.py +303 -0
- warp/tests/test_rounding.py +157 -0
- warp/tests/test_runlength_encode.py +196 -0
- warp/tests/test_scalar_ops.py +133 -0
- warp/tests/test_smoothstep.py +108 -0
- warp/tests/test_snippet.py +318 -0
- warp/tests/test_sparse.py +845 -0
- warp/tests/test_spatial.py +2859 -0
- warp/tests/test_spatial_assign_copy.py +160 -0
- warp/tests/test_special_values.py +361 -0
- warp/tests/test_static.py +640 -0
- warp/tests/test_struct.py +901 -0
- warp/tests/test_tape.py +242 -0
- warp/tests/test_transient_module.py +93 -0
- warp/tests/test_triangle_closest_point.py +192 -0
- warp/tests/test_tuple.py +361 -0
- warp/tests/test_types.py +615 -0
- warp/tests/test_utils.py +594 -0
- warp/tests/test_vec.py +1408 -0
- warp/tests/test_vec_assign_copy.py +143 -0
- warp/tests/test_vec_constructors.py +325 -0
- warp/tests/test_vec_lite.py +80 -0
- warp/tests/test_vec_scalar_ops.py +2327 -0
- warp/tests/test_verify_fp.py +100 -0
- warp/tests/test_version.py +75 -0
- warp/tests/tile/__init__.py +0 -0
- warp/tests/tile/test_tile.py +1519 -0
- warp/tests/tile/test_tile_atomic_bitwise.py +403 -0
- warp/tests/tile/test_tile_cholesky.py +608 -0
- warp/tests/tile/test_tile_load.py +724 -0
- warp/tests/tile/test_tile_mathdx.py +156 -0
- warp/tests/tile/test_tile_matmul.py +179 -0
- warp/tests/tile/test_tile_mlp.py +400 -0
- warp/tests/tile/test_tile_reduce.py +950 -0
- warp/tests/tile/test_tile_shared_memory.py +376 -0
- warp/tests/tile/test_tile_sort.py +121 -0
- warp/tests/tile/test_tile_view.py +173 -0
- warp/tests/unittest_serial.py +47 -0
- warp/tests/unittest_suites.py +430 -0
- warp/tests/unittest_utils.py +469 -0
- warp/tests/walkthrough_debug.py +95 -0
- warp/torch.py +24 -0
- warp/types.py +51 -0
- warp/utils.py +31 -0
- warp_lang-1.10.0.dist-info/METADATA +459 -0
- warp_lang-1.10.0.dist-info/RECORD +468 -0
- warp_lang-1.10.0.dist-info/WHEEL +5 -0
- warp_lang-1.10.0.dist-info/licenses/LICENSE.md +176 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/Gaia-LICENSE.txt +6 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/appdirs-LICENSE.txt +22 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/asset_pixel_jpg-LICENSE.txt +3 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/cuda-LICENSE.txt +1582 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/dlpack-LICENSE.txt +201 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/fp16-LICENSE.txt +28 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/libmathdx-LICENSE.txt +220 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/llvm-LICENSE.txt +279 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/moller-LICENSE.txt +16 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/nanovdb-LICENSE.txt +2 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/nvrtc-LICENSE.txt +1592 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/svd-LICENSE.txt +23 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/unittest_parallel-LICENSE.txt +21 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/usd-LICENSE.txt +213 -0
- warp_lang-1.10.0.dist-info/licenses/licenses/windingnumber-LICENSE.txt +21 -0
- warp_lang-1.10.0.dist-info/top_level.txt +1 -0
warp/native/bvh.h
ADDED
@@ -0,0 +1,555 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "builtin.h"
#include "intersect.h"

#ifdef __CUDA_ARCH__
#define BVH_SHARED_STACK 1
#else
#define BVH_SHARED_STACK 0
#endif

#define SAH_NUM_BUCKETS (16)
#define USE_LOAD4
#define BVH_QUERY_STACK_SIZE (32)

#define BVH_CONSTRUCTOR_SAH (0)
#define BVH_CONSTRUCTOR_MEDIAN (1)
#define BVH_CONSTRUCTOR_LBVH (2)

namespace wp
{

struct bounds3
{
    CUDA_CALLABLE inline bounds3() : lower( FLT_MAX)
                                   , upper(-FLT_MAX) {}

    CUDA_CALLABLE inline bounds3(const vec3& lower, const vec3& upper) : lower(lower), upper(upper) {}

    CUDA_CALLABLE inline vec3 center() const { return 0.5f*(lower+upper); }
    CUDA_CALLABLE inline vec3 edges() const { return upper-lower; }

    CUDA_CALLABLE inline void expand(float r)
    {
        lower -= vec3(r);
        upper += vec3(r);
    }

    CUDA_CALLABLE inline void expand(const vec3& r)
    {
        lower -= r;
        upper += r;
    }

    CUDA_CALLABLE inline bool empty() const { return lower[0] >= upper[0] || lower[1] >= upper[1] || lower[2] >= upper[2]; }

    CUDA_CALLABLE inline bool overlaps(const vec3& p) const
    {
        if (p[0] < lower[0] ||
            p[1] < lower[1] ||
            p[2] < lower[2] ||
            p[0] > upper[0] ||
            p[1] > upper[1] ||
            p[2] > upper[2])
        {
            return false;
        }
        else
        {
            return true;
        }
    }

    CUDA_CALLABLE inline bool overlaps(const bounds3& b) const
    {
        if (lower[0] > b.upper[0] ||
            lower[1] > b.upper[1] ||
            lower[2] > b.upper[2] ||
            upper[0] < b.lower[0] ||
            upper[1] < b.lower[1] ||
            upper[2] < b.lower[2])
        {
            return false;
        }
        else
        {
            return true;
        }
    }

    CUDA_CALLABLE inline bool overlaps(const vec3& b_lower, const vec3& b_upper) const
    {
        if (lower[0] > b_upper[0] ||
            lower[1] > b_upper[1] ||
            lower[2] > b_upper[2] ||
            upper[0] < b_lower[0] ||
            upper[1] < b_lower[1] ||
            upper[2] < b_lower[2])
        {
            return false;
        }
        else
        {
            return true;
        }
    }

    CUDA_CALLABLE inline void add_point(const vec3& p)
    {
        lower = min(lower, p);
        upper = max(upper, p);
    }

    CUDA_CALLABLE inline void add_bounds(const vec3& lower_other, const vec3& upper_other)
    {
        // lower_other will only impact the lower of the new bounds
        // upper_other will only impact the upper of the new bounds
        // this costs only half of the computation of adding lower_other and upper_other separately
        lower = min(lower, lower_other);
        upper = max(upper, upper_other);
    }

    CUDA_CALLABLE inline float area() const
    {
        vec3 e = upper-lower;
        return 2.0f*(e[0]*e[1] + e[0]*e[2] + e[1]*e[2]);
    }

    vec3 lower;
    vec3 upper;
};

CUDA_CALLABLE inline bounds3 bounds_union(const bounds3& a, const vec3& b)
{
    return bounds3(min(a.lower, b), max(a.upper, b));
}

CUDA_CALLABLE inline bounds3 bounds_union(const bounds3& a, const bounds3& b)
{
    return bounds3(min(a.lower, b.lower), max(a.upper, b.upper));
}

CUDA_CALLABLE inline bounds3 bounds_intersection(const bounds3& a, const bounds3& b)
{
    return bounds3(max(a.lower, b.lower), min(a.upper, b.upper));
}

struct BVHPackedNodeHalf
{
    float x;
    float y;
    float z;
    // For non-leaf nodes:
    // - 'lower.i' represents the index of the left child node.
    // - 'upper.i' represents the index of the right child node.
    //
    // For leaf nodes:
    // - 'lower.i' indicates the start index of the primitives in 'primitive_indices'.
    // - 'upper.i' indicates the index just after the last primitive in 'primitive_indices'
    unsigned int i : 31;
    unsigned int b : 1;
};

struct BVH
{
    BVHPackedNodeHalf* node_lowers;
    BVHPackedNodeHalf* node_uppers;

    // used for fast refits
    int* node_parents;
    int* node_counts;
    // reordered primitive indices corresponds to the ordering of leaf nodes
    int* primitive_indices;

    int max_depth;
    int max_nodes;
    int num_nodes;
    // since we use packed leaf nodes, the number of them is no longer the number of items, but variable
    int num_leaf_nodes;

    // pointer (CPU or GPU) to a single integer index in node_lowers, node_uppers
    // representing the root of the tree, this is not always the first node
    // for bottom-up builders
    int* root;

    // item bounds are not owned by the BVH but by the caller
    vec3* item_lowers;
    vec3* item_uppers;
    int num_items;

    int leaf_size;

    // cuda context
    void* context;
};

CUDA_CALLABLE inline BVHPackedNodeHalf make_node(const vec3& bound, int child, bool leaf)
{
    BVHPackedNodeHalf n;
    n.x = bound[0];
    n.y = bound[1];
    n.z = bound[2];
    n.i = (unsigned int)child;
    n.b = (unsigned int)(leaf?1:0);

    return n;
}

// variation of make_node through volatile pointers used in build_hierarchy
CUDA_CALLABLE inline void make_node(volatile BVHPackedNodeHalf* n, const vec3& bound, int child, bool leaf)
{
    n->x = bound[0];
    n->y = bound[1];
    n->z = bound[2];
    n->i = (unsigned int)child;
    n->b = (unsigned int)(leaf?1:0);
}

#ifdef __CUDA_ARCH__
__device__ inline wp::BVHPackedNodeHalf bvh_load_node(const wp::BVHPackedNodeHalf* nodes, int index)
{
#ifdef USE_LOAD4
    float4 f4 = __ldg((const float4*)(nodes)+index);
    return (const wp::BVHPackedNodeHalf&)f4;
    //return (const wp::BVHPackedNodeHalf&)(*((const float4*)(nodes)+index));
#else
    return nodes[index];
#endif // USE_LOAD4
}
#else
inline wp::BVHPackedNodeHalf bvh_load_node(const wp::BVHPackedNodeHalf* nodes, int index)
{
    return nodes[index];
}
#endif // __CUDACC__

CUDA_CALLABLE inline int clz(int x)
{
    int n;
    if (x == 0) return 32;
    for (n = 0; ((x & 0x80000000) == 0); n++, x <<= 1);
    return n;
}

CUDA_CALLABLE inline uint32_t part1by2(uint32_t n)
{
    n = (n ^ (n << 16)) & 0xff0000ff;
    n = (n ^ (n << 8)) & 0x0300f00f;
    n = (n ^ (n << 4)) & 0x030c30c3;
    n = (n ^ (n << 2)) & 0x09249249;

    return n;
}

// Takes values in the range [0, 1] and assigns an index based Morton codes of length 3*lwp2(dim) bits
template <int dim>
CUDA_CALLABLE inline uint32_t morton3(float x, float y, float z)
{
    uint32_t ux = clamp(int(x*dim), 0, dim-1);
    uint32_t uy = clamp(int(y*dim), 0, dim-1);
    uint32_t uz = clamp(int(z*dim), 0, dim-1);

    return (part1by2(uz) << 2) | (part1by2(uy) << 1) | part1by2(ux);
}

// making the class accessible from python

CUDA_CALLABLE inline BVH bvh_get(uint64_t id)
{
    return *(BVH*)(id);
}

CUDA_CALLABLE inline int bvh_get_num_bounds(uint64_t id)
{
    BVH bvh = bvh_get(id);
    return bvh.num_items;
}

// represents a strided stack in shared memory
// so each level of the stack is stored contiguously
// across the block
struct bvh_stack_t
{
    inline int operator[](int depth) const { return ptr[depth*WP_TILE_BLOCK_DIM]; }
    inline int& operator[](int depth) { return ptr[depth*WP_TILE_BLOCK_DIM]; }

    int* ptr;
};


// stores state required to traverse the BVH nodes that
// overlap with a query AABB.
struct bvh_query_t
{
    CUDA_CALLABLE bvh_query_t()
        : bvh(),
          stack(),
          count(0),
          is_ray(false),
          input_lower(),
          input_upper(),
          bounds_nr(0),
          primitive_counter(-1)
    {}

    // Required for adjoint computations.
    CUDA_CALLABLE inline bvh_query_t& operator+=(const bvh_query_t& other)
    {
        return *this;
    }

    BVH bvh;

    // BVH traversal stack:
#if BVH_SHARED_STACK
    bvh_stack_t stack;
#else
    int stack[BVH_QUERY_STACK_SIZE];
#endif

    int count;

    // >= 0 if currently in a packed leaf node
    int primitive_counter;

    // inputs
    wp::vec3 input_lower;   // start for ray
    wp::vec3 input_upper;   // dir for ray

    int bounds_nr;
    bool is_ray;
};

CUDA_CALLABLE inline bool bvh_query_intersection_test(const bvh_query_t& query, const vec3& node_lower, const vec3& node_upper)
{
    if (query.is_ray)
    {
        float t = 0.0f;
        return intersect_ray_aabb(query.input_lower, query.input_upper, node_lower, node_upper, t);
    }
    else
    {
        return intersect_aabb_aabb(query.input_lower, query.input_upper, node_lower, node_upper);
    }
}

CUDA_CALLABLE inline bvh_query_t bvh_query(
    uint64_t id, bool is_ray, const vec3& lower, const vec3& upper)
{
    // This routine traverses the BVH tree until it finds
    // the first overlapping bound.

    // initialize empty
    bvh_query_t query;

#if BVH_SHARED_STACK
    __shared__ int stack[BVH_QUERY_STACK_SIZE*WP_TILE_BLOCK_DIM];
    query.stack.ptr = &stack[threadIdx.x];
#endif

    query.bounds_nr = -1;

    BVH bvh = bvh_get(id);

    query.bvh = bvh;
    query.is_ray = is_ray;

    // optimization: make the latest
    query.stack[0] = *bvh.root;
    query.count = 1;
    query.input_lower = lower;
    query.input_upper = upper;

    // Navigate through the bvh, find the first overlapping leaf node.
    while (query.count)
    {
        const int node_index = query.stack[--query.count];
        BVHPackedNodeHalf node_lower = bvh_load_node(bvh.node_lowers, node_index);
        BVHPackedNodeHalf node_upper = bvh_load_node(bvh.node_uppers, node_index);

        if (!bvh_query_intersection_test(query, reinterpret_cast<vec3&>(node_lower), reinterpret_cast<vec3&>(node_upper)))
        {
            continue;
        }

        const int left_index = node_lower.i;
        const int right_index = node_upper.i;
        // Make bounds from this AABB
        if (node_lower.b)
        {
            // Reached a leaf node, point to its first primitive
            // Back up one level and return
            query.primitive_counter = 0;
            query.stack[query.count++] = node_index;
            return query;
        }
        else
        {
            query.stack[query.count++] = left_index;
            query.stack[query.count++] = right_index;
        }
    }

    return query;
}

CUDA_CALLABLE inline bvh_query_t bvh_query_aabb(
    uint64_t id, const vec3& lower, const vec3& upper)
{
    return bvh_query(id, false, lower, upper);
}

CUDA_CALLABLE inline bvh_query_t bvh_query_ray(uint64_t id, const vec3& start, const vec3& dir)
{
    return bvh_query(id, true, start, 1.0f / dir);
}

//Stub
CUDA_CALLABLE inline void adj_bvh_query_aabb(uint64_t id, const vec3& lower, const vec3& upper,
                                             uint64_t, vec3&, vec3&, bvh_query_t&)
{
}


CUDA_CALLABLE inline void adj_bvh_query_ray(uint64_t id, const vec3& start, const vec3& dir,
                                            uint64_t, vec3&, vec3&, bvh_query_t&)
{
}


CUDA_CALLABLE inline bool bvh_query_next(bvh_query_t& query, int& index)
{
    BVH bvh = query.bvh;

    // Navigate through the bvh, find the first overlapping leaf node.
    while (query.count)
    {
        const int node_index = query.stack[--query.count];

        BVHPackedNodeHalf node_lower = bvh_load_node(bvh.node_lowers, node_index);
        BVHPackedNodeHalf node_upper = bvh_load_node(bvh.node_uppers, node_index);

        if (query.primitive_counter == 0) {
            if (!bvh_query_intersection_test(query, reinterpret_cast<vec3&>(node_lower), reinterpret_cast<vec3&>(node_upper)))
            {
                continue;
            }
        }

        const int left_index = node_lower.i;
        const int right_index = node_upper.i;

        if (node_lower.b)
        {
            // found leaf, loop through its content primitives
            const int start = left_index;
            const int end = right_index;

            if (end - start <= 1)
            {
                int primitive_index = bvh.primitive_indices[start];
                index = primitive_index;
                query.bounds_nr = primitive_index;
                return true;
            }
            else
            {
                int primitive_index = bvh.primitive_indices[start + (query.primitive_counter++)];

                // if already visited the last primitive in the leaf node
                // move to the next node and reset the primitive counter to 0
                if (start + query.primitive_counter == end)
                {
                    query.primitive_counter = 0;
                }
                // otherwise we need to keep this leaf node in stack for a future visit
                else
                {
                    query.stack[query.count++] = node_index;
                }
                // return true;
                if (bvh_query_intersection_test(query, bvh.item_lowers[primitive_index], bvh.item_uppers[primitive_index]))
                {
                    index = primitive_index;
                    query.bounds_nr = primitive_index;

                    return true;
                }
            }
        }
        else
        {
            // if it's not a leaf node we treat it as if we have visited the last primitive
            query.primitive_counter = 0;
            query.stack[query.count++] = left_index;
            query.stack[query.count++] = right_index;
        }
    }
    return false;
}

CUDA_CALLABLE inline int iter_next(bvh_query_t& query)
{
    return query.bounds_nr;
}

CUDA_CALLABLE inline bool iter_cmp(bvh_query_t& query)
{
    bool finished = bvh_query_next(query, query.bounds_nr);
    return finished;
}

CUDA_CALLABLE inline bvh_query_t iter_reverse(const bvh_query_t& query)
{
    // can't reverse BVH queries, users should not rely on traversal ordering
    return query;
}

CUDA_CALLABLE inline void adj_iter_reverse(const bvh_query_t& query, bvh_query_t& adj_query, bvh_query_t& adj_ret)
{
}


// stub
CUDA_CALLABLE inline void adj_bvh_query_next(bvh_query_t& query, int& index, bvh_query_t&, int&, bool&)
{
}

CUDA_CALLABLE bool bvh_get_descriptor(uint64_t id, BVH& bvh);
CUDA_CALLABLE void bvh_add_descriptor(uint64_t id, const BVH& bvh);
CUDA_CALLABLE void bvh_rem_descriptor(uint64_t id);

void bvh_create_host(vec3* lowers, vec3* uppers, int num_items, int constructor_type, BVH& bvh, int leaf_size);
void bvh_destroy_host(wp::BVH& bvh);
void bvh_refit_host(wp::BVH& bvh);

#if WP_ENABLE_CUDA

void bvh_create_device(void* context, vec3* lowers, vec3* uppers, int num_items, int constructor_type, BVH& bvh_device_on_host, int leaf_size);
void bvh_destroy_device(BVH& bvh);
void bvh_refit_device(BVH& bvh);

#endif // WP_ENABLE_CUDA

} // namespace wp