warp-lang 1.0.2__py3-none-win_amd64.whl → 1.2.0__py3-none-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of warp-lang might be problematic. Click here for more details.
- warp/__init__.py +108 -97
- warp/__init__.pyi +1 -1
- warp/bin/warp-clang.dll +0 -0
- warp/bin/warp.dll +0 -0
- warp/build.py +88 -113
- warp/build_dll.py +383 -375
- warp/builtins.py +3693 -3354
- warp/codegen.py +2925 -2792
- warp/config.py +40 -36
- warp/constants.py +49 -45
- warp/context.py +5409 -5102
- warp/dlpack.py +442 -442
- warp/examples/__init__.py +16 -16
- warp/examples/assets/bear.usd +0 -0
- warp/examples/assets/bunny.usd +0 -0
- warp/examples/assets/cartpole.urdf +110 -110
- warp/examples/assets/crazyflie.usd +0 -0
- warp/examples/assets/cube.usd +0 -0
- warp/examples/assets/nv_ant.xml +92 -92
- warp/examples/assets/nv_humanoid.xml +183 -183
- warp/examples/assets/quadruped.urdf +267 -267
- warp/examples/assets/rocks.nvdb +0 -0
- warp/examples/assets/rocks.usd +0 -0
- warp/examples/assets/sphere.usd +0 -0
- warp/examples/benchmarks/benchmark_api.py +381 -383
- warp/examples/benchmarks/benchmark_cloth.py +278 -277
- warp/examples/benchmarks/benchmark_cloth_cupy.py +88 -88
- warp/examples/benchmarks/benchmark_cloth_jax.py +97 -100
- warp/examples/benchmarks/benchmark_cloth_numba.py +146 -142
- warp/examples/benchmarks/benchmark_cloth_numpy.py +77 -77
- warp/examples/benchmarks/benchmark_cloth_pytorch.py +86 -86
- warp/examples/benchmarks/benchmark_cloth_taichi.py +112 -112
- warp/examples/benchmarks/benchmark_cloth_warp.py +145 -146
- warp/examples/benchmarks/benchmark_launches.py +293 -295
- warp/examples/browse.py +29 -29
- warp/examples/core/example_dem.py +232 -219
- warp/examples/core/example_fluid.py +291 -267
- warp/examples/core/example_graph_capture.py +142 -126
- warp/examples/core/example_marching_cubes.py +186 -174
- warp/examples/core/example_mesh.py +172 -155
- warp/examples/core/example_mesh_intersect.py +203 -193
- warp/examples/core/example_nvdb.py +174 -170
- warp/examples/core/example_raycast.py +103 -90
- warp/examples/core/example_raymarch.py +197 -178
- warp/examples/core/example_render_opengl.py +183 -141
- warp/examples/core/example_sph.py +403 -387
- warp/examples/core/example_torch.py +219 -181
- warp/examples/core/example_wave.py +261 -248
- warp/examples/fem/bsr_utils.py +378 -380
- warp/examples/fem/example_apic_fluid.py +432 -389
- warp/examples/fem/example_burgers.py +262 -0
- warp/examples/fem/example_convection_diffusion.py +180 -168
- warp/examples/fem/example_convection_diffusion_dg.py +217 -209
- warp/examples/fem/example_deformed_geometry.py +175 -159
- warp/examples/fem/example_diffusion.py +199 -173
- warp/examples/fem/example_diffusion_3d.py +178 -152
- warp/examples/fem/example_diffusion_mgpu.py +219 -214
- warp/examples/fem/example_mixed_elasticity.py +242 -222
- warp/examples/fem/example_navier_stokes.py +257 -243
- warp/examples/fem/example_stokes.py +218 -192
- warp/examples/fem/example_stokes_transfer.py +263 -249
- warp/examples/fem/mesh_utils.py +133 -109
- warp/examples/fem/plot_utils.py +292 -287
- warp/examples/optim/example_bounce.py +258 -246
- warp/examples/optim/example_cloth_throw.py +220 -209
- warp/examples/optim/example_diffray.py +564 -536
- warp/examples/optim/example_drone.py +862 -835
- warp/examples/optim/example_inverse_kinematics.py +174 -168
- warp/examples/optim/example_inverse_kinematics_torch.py +183 -169
- warp/examples/optim/example_spring_cage.py +237 -231
- warp/examples/optim/example_trajectory.py +221 -199
- warp/examples/optim/example_walker.py +304 -293
- warp/examples/sim/example_cartpole.py +137 -129
- warp/examples/sim/example_cloth.py +194 -186
- warp/examples/sim/example_granular.py +122 -111
- warp/examples/sim/example_granular_collision_sdf.py +195 -186
- warp/examples/sim/example_jacobian_ik.py +234 -214
- warp/examples/sim/example_particle_chain.py +116 -105
- warp/examples/sim/example_quadruped.py +191 -180
- warp/examples/sim/example_rigid_chain.py +195 -187
- warp/examples/sim/example_rigid_contact.py +187 -177
- warp/examples/sim/example_rigid_force.py +125 -125
- warp/examples/sim/example_rigid_gyroscopic.py +107 -95
- warp/examples/sim/example_rigid_soft_contact.py +132 -122
- warp/examples/sim/example_soft_body.py +188 -177
- warp/fabric.py +337 -335
- warp/fem/__init__.py +61 -27
- warp/fem/cache.py +403 -388
- warp/fem/dirichlet.py +178 -179
- warp/fem/domain.py +262 -263
- warp/fem/field/__init__.py +100 -101
- warp/fem/field/field.py +148 -149
- warp/fem/field/nodal_field.py +298 -299
- warp/fem/field/restriction.py +22 -21
- warp/fem/field/test.py +180 -181
- warp/fem/field/trial.py +183 -183
- warp/fem/geometry/__init__.py +16 -19
- warp/fem/geometry/closest_point.py +69 -70
- warp/fem/geometry/deformed_geometry.py +270 -271
- warp/fem/geometry/element.py +748 -744
- warp/fem/geometry/geometry.py +184 -186
- warp/fem/geometry/grid_2d.py +380 -373
- warp/fem/geometry/grid_3d.py +437 -435
- warp/fem/geometry/hexmesh.py +953 -953
- warp/fem/geometry/nanogrid.py +455 -0
- warp/fem/geometry/partition.py +374 -376
- warp/fem/geometry/quadmesh_2d.py +532 -532
- warp/fem/geometry/tetmesh.py +840 -840
- warp/fem/geometry/trimesh_2d.py +577 -577
- warp/fem/integrate.py +1684 -1615
- warp/fem/operator.py +190 -191
- warp/fem/polynomial.py +214 -213
- warp/fem/quadrature/__init__.py +2 -2
- warp/fem/quadrature/pic_quadrature.py +243 -245
- warp/fem/quadrature/quadrature.py +295 -294
- warp/fem/space/__init__.py +179 -292
- warp/fem/space/basis_space.py +522 -489
- warp/fem/space/collocated_function_space.py +100 -105
- warp/fem/space/dof_mapper.py +236 -236
- warp/fem/space/function_space.py +148 -145
- warp/fem/space/grid_2d_function_space.py +148 -267
- warp/fem/space/grid_3d_function_space.py +167 -306
- warp/fem/space/hexmesh_function_space.py +253 -352
- warp/fem/space/nanogrid_function_space.py +202 -0
- warp/fem/space/partition.py +350 -350
- warp/fem/space/quadmesh_2d_function_space.py +261 -369
- warp/fem/space/restriction.py +161 -160
- warp/fem/space/shape/__init__.py +90 -15
- warp/fem/space/shape/cube_shape_function.py +728 -738
- warp/fem/space/shape/shape_function.py +102 -103
- warp/fem/space/shape/square_shape_function.py +611 -611
- warp/fem/space/shape/tet_shape_function.py +565 -567
- warp/fem/space/shape/triangle_shape_function.py +429 -429
- warp/fem/space/tetmesh_function_space.py +224 -292
- warp/fem/space/topology.py +297 -295
- warp/fem/space/trimesh_2d_function_space.py +153 -221
- warp/fem/types.py +77 -77
- warp/fem/utils.py +495 -495
- warp/jax.py +166 -141
- warp/jax_experimental.py +341 -339
- warp/native/array.h +1081 -1025
- warp/native/builtin.h +1603 -1560
- warp/native/bvh.cpp +402 -398
- warp/native/bvh.cu +533 -525
- warp/native/bvh.h +430 -429
- warp/native/clang/clang.cpp +496 -464
- warp/native/crt.cpp +42 -32
- warp/native/crt.h +352 -335
- warp/native/cuda_crt.h +1049 -1049
- warp/native/cuda_util.cpp +549 -540
- warp/native/cuda_util.h +288 -203
- warp/native/cutlass_gemm.cpp +34 -34
- warp/native/cutlass_gemm.cu +372 -372
- warp/native/error.cpp +66 -66
- warp/native/error.h +27 -27
- warp/native/exports.h +187 -0
- warp/native/fabric.h +228 -228
- warp/native/hashgrid.cpp +301 -278
- warp/native/hashgrid.cu +78 -77
- warp/native/hashgrid.h +227 -227
- warp/native/initializer_array.h +32 -32
- warp/native/intersect.h +1204 -1204
- warp/native/intersect_adj.h +365 -365
- warp/native/intersect_tri.h +322 -322
- warp/native/marching.cpp +2 -2
- warp/native/marching.cu +497 -497
- warp/native/marching.h +2 -2
- warp/native/mat.h +1545 -1498
- warp/native/matnn.h +333 -333
- warp/native/mesh.cpp +203 -203
- warp/native/mesh.cu +292 -293
- warp/native/mesh.h +1887 -1887
- warp/native/nanovdb/GridHandle.h +366 -0
- warp/native/nanovdb/HostBuffer.h +590 -0
- warp/native/nanovdb/NanoVDB.h +6624 -4782
- warp/native/nanovdb/PNanoVDB.h +3390 -2553
- warp/native/noise.h +850 -850
- warp/native/quat.h +1112 -1085
- warp/native/rand.h +303 -299
- warp/native/range.h +108 -108
- warp/native/reduce.cpp +156 -156
- warp/native/reduce.cu +348 -348
- warp/native/runlength_encode.cpp +61 -61
- warp/native/runlength_encode.cu +46 -46
- warp/native/scan.cpp +30 -30
- warp/native/scan.cu +36 -36
- warp/native/scan.h +7 -7
- warp/native/solid_angle.h +442 -442
- warp/native/sort.cpp +94 -94
- warp/native/sort.cu +97 -97
- warp/native/sort.h +14 -14
- warp/native/sparse.cpp +337 -337
- warp/native/sparse.cu +544 -544
- warp/native/spatial.h +630 -630
- warp/native/svd.h +562 -562
- warp/native/temp_buffer.h +30 -30
- warp/native/vec.h +1177 -1133
- warp/native/volume.cpp +529 -297
- warp/native/volume.cu +58 -32
- warp/native/volume.h +960 -538
- warp/native/volume_builder.cu +446 -425
- warp/native/volume_builder.h +34 -19
- warp/native/volume_impl.h +61 -0
- warp/native/warp.cpp +1057 -1052
- warp/native/warp.cu +2949 -2828
- warp/native/warp.h +321 -305
- warp/optim/__init__.py +9 -9
- warp/optim/adam.py +120 -120
- warp/optim/linear.py +1104 -939
- warp/optim/sgd.py +104 -92
- warp/render/__init__.py +10 -10
- warp/render/render_opengl.py +3356 -3204
- warp/render/render_usd.py +768 -749
- warp/render/utils.py +152 -150
- warp/sim/__init__.py +52 -59
- warp/sim/articulation.py +685 -685
- warp/sim/collide.py +1594 -1590
- warp/sim/import_mjcf.py +489 -481
- warp/sim/import_snu.py +220 -221
- warp/sim/import_urdf.py +536 -516
- warp/sim/import_usd.py +887 -881
- warp/sim/inertia.py +316 -317
- warp/sim/integrator.py +234 -233
- warp/sim/integrator_euler.py +1956 -1956
- warp/sim/integrator_featherstone.py +1917 -1991
- warp/sim/integrator_xpbd.py +3288 -3312
- warp/sim/model.py +4473 -4314
- warp/sim/particles.py +113 -112
- warp/sim/render.py +417 -403
- warp/sim/utils.py +413 -410
- warp/sparse.py +1289 -1227
- warp/stubs.py +2192 -2469
- warp/tape.py +1162 -225
- warp/tests/__init__.py +1 -1
- warp/tests/__main__.py +4 -4
- warp/tests/assets/test_index_grid.nvdb +0 -0
- warp/tests/assets/torus.usda +105 -105
- warp/tests/aux_test_class_kernel.py +26 -26
- warp/tests/aux_test_compile_consts_dummy.py +10 -10
- warp/tests/aux_test_conditional_unequal_types_kernels.py +21 -21
- warp/tests/aux_test_dependent.py +20 -22
- warp/tests/aux_test_grad_customs.py +21 -23
- warp/tests/aux_test_reference.py +9 -11
- warp/tests/aux_test_reference_reference.py +8 -10
- warp/tests/aux_test_square.py +15 -17
- warp/tests/aux_test_unresolved_func.py +14 -14
- warp/tests/aux_test_unresolved_symbol.py +14 -14
- warp/tests/disabled_kinematics.py +237 -239
- warp/tests/run_coverage_serial.py +31 -31
- warp/tests/test_adam.py +155 -157
- warp/tests/test_arithmetic.py +1088 -1124
- warp/tests/test_array.py +2415 -2326
- warp/tests/test_array_reduce.py +148 -150
- warp/tests/test_async.py +666 -656
- warp/tests/test_atomic.py +139 -141
- warp/tests/test_bool.py +212 -149
- warp/tests/test_builtins_resolution.py +1290 -1292
- warp/tests/test_bvh.py +162 -171
- warp/tests/test_closest_point_edge_edge.py +227 -228
- warp/tests/test_codegen.py +562 -553
- warp/tests/test_compile_consts.py +217 -101
- warp/tests/test_conditional.py +244 -246
- warp/tests/test_copy.py +230 -215
- warp/tests/test_ctypes.py +630 -632
- warp/tests/test_dense.py +65 -67
- warp/tests/test_devices.py +89 -98
- warp/tests/test_dlpack.py +528 -529
- warp/tests/test_examples.py +403 -378
- warp/tests/test_fabricarray.py +952 -955
- warp/tests/test_fast_math.py +60 -54
- warp/tests/test_fem.py +1298 -1278
- warp/tests/test_fp16.py +128 -130
- warp/tests/test_func.py +336 -337
- warp/tests/test_generics.py +596 -571
- warp/tests/test_grad.py +885 -640
- warp/tests/test_grad_customs.py +331 -336
- warp/tests/test_hash_grid.py +208 -164
- warp/tests/test_import.py +37 -39
- warp/tests/test_indexedarray.py +1132 -1134
- warp/tests/test_intersect.py +65 -67
- warp/tests/test_jax.py +305 -307
- warp/tests/test_large.py +169 -164
- warp/tests/test_launch.py +352 -354
- warp/tests/test_lerp.py +217 -261
- warp/tests/test_linear_solvers.py +189 -171
- warp/tests/test_lvalue.py +419 -493
- warp/tests/test_marching_cubes.py +63 -65
- warp/tests/test_mat.py +1799 -1827
- warp/tests/test_mat_lite.py +113 -115
- warp/tests/test_mat_scalar_ops.py +2905 -2889
- warp/tests/test_math.py +124 -193
- warp/tests/test_matmul.py +498 -499
- warp/tests/test_matmul_lite.py +408 -410
- warp/tests/test_mempool.py +186 -190
- warp/tests/test_mesh.py +281 -324
- warp/tests/test_mesh_query_aabb.py +226 -241
- warp/tests/test_mesh_query_point.py +690 -702
- warp/tests/test_mesh_query_ray.py +290 -303
- warp/tests/test_mlp.py +274 -276
- warp/tests/test_model.py +108 -110
- warp/tests/test_module_hashing.py +111 -0
- warp/tests/test_modules_lite.py +36 -39
- warp/tests/test_multigpu.py +161 -163
- warp/tests/test_noise.py +244 -248
- warp/tests/test_operators.py +248 -250
- warp/tests/test_options.py +121 -125
- warp/tests/test_peer.py +131 -137
- warp/tests/test_pinned.py +76 -78
- warp/tests/test_print.py +52 -54
- warp/tests/test_quat.py +2084 -2086
- warp/tests/test_rand.py +324 -288
- warp/tests/test_reload.py +207 -217
- warp/tests/test_rounding.py +177 -179
- warp/tests/test_runlength_encode.py +188 -190
- warp/tests/test_sim_grad.py +241 -0
- warp/tests/test_sim_kinematics.py +89 -97
- warp/tests/test_smoothstep.py +166 -168
- warp/tests/test_snippet.py +303 -266
- warp/tests/test_sparse.py +466 -460
- warp/tests/test_spatial.py +2146 -2148
- warp/tests/test_special_values.py +362 -0
- warp/tests/test_streams.py +484 -473
- warp/tests/test_struct.py +708 -675
- warp/tests/test_tape.py +171 -148
- warp/tests/test_torch.py +741 -743
- warp/tests/test_transient_module.py +85 -87
- warp/tests/test_types.py +554 -659
- warp/tests/test_utils.py +488 -499
- warp/tests/test_vec.py +1262 -1268
- warp/tests/test_vec_lite.py +71 -73
- warp/tests/test_vec_scalar_ops.py +2097 -2099
- warp/tests/test_verify_fp.py +92 -94
- warp/tests/test_volume.py +961 -736
- warp/tests/test_volume_write.py +338 -265
- warp/tests/unittest_serial.py +38 -37
- warp/tests/unittest_suites.py +367 -359
- warp/tests/unittest_utils.py +434 -578
- warp/tests/unused_test_misc.py +69 -71
- warp/tests/walkthrough_debug.py +85 -85
- warp/thirdparty/appdirs.py +598 -598
- warp/thirdparty/dlpack.py +143 -143
- warp/thirdparty/unittest_parallel.py +563 -561
- warp/torch.py +321 -295
- warp/types.py +4941 -4450
- warp/utils.py +1008 -821
- {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/LICENSE.md +126 -126
- {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/METADATA +365 -400
- warp_lang-1.2.0.dist-info/RECORD +359 -0
- warp/examples/assets/cube.usda +0 -42
- warp/examples/assets/sphere.usda +0 -56
- warp/examples/assets/torus.usda +0 -105
- warp/examples/fem/example_convection_diffusion_dg0.py +0 -194
- warp/native/nanovdb/PNanoVDBWrite.h +0 -295
- warp_lang-1.0.2.dist-info/RECORD +0 -352
- {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/WHEEL +0 -0
- {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/top_level.txt +0 -0
warp/native/bvh.h
CHANGED
|
@@ -1,429 +1,430 @@
|
|
|
1
|
-
/** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
|
|
2
|
-
* NVIDIA CORPORATION and its licensors retain all intellectual property
|
|
3
|
-
* and proprietary rights in and to this software, related documentation
|
|
4
|
-
* and any modifications thereto. Any use, reproduction, disclosure or
|
|
5
|
-
* distribution of this software and related documentation without an express
|
|
6
|
-
* license agreement from NVIDIA CORPORATION is strictly prohibited.
|
|
7
|
-
*/
|
|
8
|
-
|
|
9
|
-
#pragma once
|
|
10
|
-
|
|
11
|
-
#include "builtin.h"
|
|
12
|
-
#include "intersect.h"
|
|
13
|
-
|
|
14
|
-
namespace wp
|
|
15
|
-
{
|
|
16
|
-
|
|
17
|
-
struct bounds3
|
|
18
|
-
{
|
|
19
|
-
CUDA_CALLABLE inline bounds3() : lower( FLT_MAX)
|
|
20
|
-
, upper(-FLT_MAX) {}
|
|
21
|
-
|
|
22
|
-
CUDA_CALLABLE inline bounds3(const vec3& lower, const vec3& upper) : lower(lower), upper(upper) {}
|
|
23
|
-
|
|
24
|
-
CUDA_CALLABLE inline vec3 center() const { return 0.5f*(lower+upper); }
|
|
25
|
-
CUDA_CALLABLE inline vec3 edges() const { return upper-lower; }
|
|
26
|
-
|
|
27
|
-
CUDA_CALLABLE inline void expand(float r)
|
|
28
|
-
{
|
|
29
|
-
lower -= vec3(r);
|
|
30
|
-
upper += vec3(r);
|
|
31
|
-
}
|
|
32
|
-
|
|
33
|
-
CUDA_CALLABLE inline void expand(const vec3& r)
|
|
34
|
-
{
|
|
35
|
-
lower -= r;
|
|
36
|
-
upper += r;
|
|
37
|
-
}
|
|
38
|
-
|
|
39
|
-
CUDA_CALLABLE inline bool empty() const { return lower[0] >= upper[0] || lower[1] >= upper[1] || lower[2] >= upper[2]; }
|
|
40
|
-
|
|
41
|
-
CUDA_CALLABLE inline bool overlaps(const vec3& p) const
|
|
42
|
-
{
|
|
43
|
-
if (p[0] < lower[0] ||
|
|
44
|
-
p[1] < lower[1] ||
|
|
45
|
-
p[2] < lower[2] ||
|
|
46
|
-
p[0] > upper[0] ||
|
|
47
|
-
p[1] > upper[1] ||
|
|
48
|
-
p[2] > upper[2])
|
|
49
|
-
{
|
|
50
|
-
return false;
|
|
51
|
-
}
|
|
52
|
-
else
|
|
53
|
-
{
|
|
54
|
-
return true;
|
|
55
|
-
}
|
|
56
|
-
}
|
|
57
|
-
|
|
58
|
-
CUDA_CALLABLE inline bool overlaps(const bounds3& b) const
|
|
59
|
-
{
|
|
60
|
-
if (lower[0] > b.upper[0] ||
|
|
61
|
-
lower[1] > b.upper[1] ||
|
|
62
|
-
lower[2] > b.upper[2] ||
|
|
63
|
-
upper[0] < b.lower[0] ||
|
|
64
|
-
upper[1] < b.lower[1] ||
|
|
65
|
-
upper[2] < b.lower[2])
|
|
66
|
-
{
|
|
67
|
-
return false;
|
|
68
|
-
}
|
|
69
|
-
else
|
|
70
|
-
{
|
|
71
|
-
return true;
|
|
72
|
-
}
|
|
73
|
-
}
|
|
74
|
-
|
|
75
|
-
CUDA_CALLABLE inline void add_point(const vec3& p)
|
|
76
|
-
{
|
|
77
|
-
lower = min(lower, p);
|
|
78
|
-
upper = max(upper, p);
|
|
79
|
-
}
|
|
80
|
-
|
|
81
|
-
CUDA_CALLABLE inline float area() const
|
|
82
|
-
{
|
|
83
|
-
vec3 e = upper-lower;
|
|
84
|
-
return 2.0f*(e[0]*e[1] + e[0]*e[2] + e[1]*e[2]);
|
|
85
|
-
}
|
|
86
|
-
|
|
87
|
-
vec3 lower;
|
|
88
|
-
vec3 upper;
|
|
89
|
-
};
|
|
90
|
-
|
|
91
|
-
CUDA_CALLABLE inline bounds3 bounds_union(const bounds3& a, const vec3& b)
|
|
92
|
-
{
|
|
93
|
-
return bounds3(min(a.lower, b), max(a.upper, b));
|
|
94
|
-
}
|
|
95
|
-
|
|
96
|
-
CUDA_CALLABLE inline bounds3 bounds_union(const bounds3& a, const bounds3& b)
|
|
97
|
-
{
|
|
98
|
-
return bounds3(min(a.lower, b.lower), max(a.upper, b.upper));
|
|
99
|
-
}
|
|
100
|
-
|
|
101
|
-
CUDA_CALLABLE inline bounds3 bounds_intersection(const bounds3& a, const bounds3& b)
|
|
102
|
-
{
|
|
103
|
-
return bounds3(max(a.lower, b.lower), min(a.upper, b.upper));
|
|
104
|
-
}
|
|
105
|
-
|
|
106
|
-
struct BVHPackedNodeHalf
|
|
107
|
-
{
|
|
108
|
-
float x;
|
|
109
|
-
float y;
|
|
110
|
-
float z;
|
|
111
|
-
unsigned int i : 31;
|
|
112
|
-
unsigned int b : 1;
|
|
113
|
-
};
|
|
114
|
-
|
|
115
|
-
struct BVH
|
|
116
|
-
{
|
|
117
|
-
BVHPackedNodeHalf* node_lowers;
|
|
118
|
-
BVHPackedNodeHalf* node_uppers;
|
|
119
|
-
|
|
120
|
-
// used for fast refits
|
|
121
|
-
int* node_parents;
|
|
122
|
-
int* node_counts;
|
|
123
|
-
|
|
124
|
-
int max_depth;
|
|
125
|
-
int max_nodes;
|
|
126
|
-
int num_nodes;
|
|
127
|
-
|
|
128
|
-
// pointer (CPU or GPU) to a single integer index in node_lowers, node_uppers
|
|
129
|
-
// representing the root of the tree, this is not always the first node
|
|
130
|
-
// for bottom-up builders
|
|
131
|
-
int* root;
|
|
132
|
-
|
|
133
|
-
// item bounds are not owned by the BVH but by the caller
|
|
134
|
-
vec3* item_lowers;
|
|
135
|
-
vec3* item_uppers;
|
|
136
|
-
int num_items;
|
|
137
|
-
|
|
138
|
-
// cuda context
|
|
139
|
-
void* context;
|
|
140
|
-
};
|
|
141
|
-
|
|
142
|
-
CUDA_CALLABLE inline BVHPackedNodeHalf make_node(const vec3& bound, int child, bool leaf)
|
|
143
|
-
{
|
|
144
|
-
BVHPackedNodeHalf n;
|
|
145
|
-
n.x = bound[0];
|
|
146
|
-
n.y = bound[1];
|
|
147
|
-
n.z = bound[2];
|
|
148
|
-
n.i = (unsigned int)child;
|
|
149
|
-
n.b = (unsigned int)(leaf?1:0);
|
|
150
|
-
|
|
151
|
-
return n;
|
|
152
|
-
}
|
|
153
|
-
|
|
154
|
-
// variation of make_node through volatile pointers used in build_hierarchy
|
|
155
|
-
CUDA_CALLABLE inline void make_node(volatile BVHPackedNodeHalf* n, const vec3& bound, int child, bool leaf)
|
|
156
|
-
{
|
|
157
|
-
n->x = bound[0];
|
|
158
|
-
n->y = bound[1];
|
|
159
|
-
n->z = bound[2];
|
|
160
|
-
n->i = (unsigned int)child;
|
|
161
|
-
n->b = (unsigned int)(leaf?1:0);
|
|
162
|
-
}
|
|
163
|
-
|
|
164
|
-
CUDA_CALLABLE inline int clz(int x)
|
|
165
|
-
{
|
|
166
|
-
int n;
|
|
167
|
-
if (x == 0) return 32;
|
|
168
|
-
for (n = 0; ((x & 0x80000000) == 0); n++, x <<= 1);
|
|
169
|
-
return n;
|
|
170
|
-
}
|
|
171
|
-
|
|
172
|
-
CUDA_CALLABLE inline uint32_t part1by2(uint32_t n)
|
|
173
|
-
{
|
|
174
|
-
n = (n ^ (n << 16)) & 0xff0000ff;
|
|
175
|
-
n = (n ^ (n << 8)) & 0x0300f00f;
|
|
176
|
-
n = (n ^ (n << 4)) & 0x030c30c3;
|
|
177
|
-
n = (n ^ (n << 2)) & 0x09249249;
|
|
178
|
-
|
|
179
|
-
return n;
|
|
180
|
-
}
|
|
181
|
-
|
|
182
|
-
// Takes values in the range [0, 1] and assigns an index based Morton codes of length 3*lwp2(dim) bits
|
|
183
|
-
template <int dim>
|
|
184
|
-
CUDA_CALLABLE inline uint32_t morton3(float x, float y, float z)
|
|
185
|
-
{
|
|
186
|
-
uint32_t ux = clamp(int(x*dim), 0, dim-1);
|
|
187
|
-
uint32_t uy = clamp(int(y*dim), 0, dim-1);
|
|
188
|
-
uint32_t uz = clamp(int(z*dim), 0, dim-1);
|
|
189
|
-
|
|
190
|
-
return (part1by2(uz) << 2) | (part1by2(uy) << 1) | part1by2(ux);
|
|
191
|
-
}
|
|
192
|
-
|
|
193
|
-
// making the class accessible from python
|
|
194
|
-
|
|
195
|
-
CUDA_CALLABLE inline BVH bvh_get(uint64_t id)
|
|
196
|
-
{
|
|
197
|
-
return *(BVH*)(id);
|
|
198
|
-
}
|
|
199
|
-
|
|
200
|
-
CUDA_CALLABLE inline int bvh_get_num_bounds(uint64_t id)
|
|
201
|
-
{
|
|
202
|
-
BVH bvh = bvh_get(id);
|
|
203
|
-
return bvh.num_items;
|
|
204
|
-
}
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
// stores state required to traverse the BVH nodes that
|
|
208
|
-
// overlap with a query AABB.
|
|
209
|
-
struct bvh_query_t
|
|
210
|
-
{
|
|
211
|
-
CUDA_CALLABLE bvh_query_t()
|
|
212
|
-
: bvh(),
|
|
213
|
-
stack(),
|
|
214
|
-
count(0),
|
|
215
|
-
is_ray(false),
|
|
216
|
-
input_lower(),
|
|
217
|
-
input_upper(),
|
|
218
|
-
bounds_nr(0)
|
|
219
|
-
{}
|
|
220
|
-
|
|
221
|
-
// Required for adjoint computations.
|
|
222
|
-
CUDA_CALLABLE inline bvh_query_t& operator+=(const bvh_query_t& other)
|
|
223
|
-
{
|
|
224
|
-
return *this;
|
|
225
|
-
}
|
|
226
|
-
|
|
227
|
-
BVH bvh;
|
|
228
|
-
|
|
229
|
-
// BVH traversal stack:
|
|
230
|
-
int stack[32];
|
|
231
|
-
int count;
|
|
232
|
-
|
|
233
|
-
// inputs
|
|
234
|
-
bool is_ray;
|
|
235
|
-
wp::vec3 input_lower; // start for ray
|
|
236
|
-
wp::vec3 input_upper; // dir for ray
|
|
237
|
-
|
|
238
|
-
int bounds_nr;
|
|
239
|
-
};
|
|
240
|
-
|
|
241
|
-
|
|
242
|
-
CUDA_CALLABLE inline bvh_query_t bvh_query(
|
|
243
|
-
uint64_t id, bool is_ray, const vec3& lower, const vec3& upper)
|
|
244
|
-
{
|
|
245
|
-
// This routine traverses the BVH tree until it finds
|
|
246
|
-
// the first overlapping bound.
|
|
247
|
-
|
|
248
|
-
// initialize empty
|
|
249
|
-
bvh_query_t query;
|
|
250
|
-
|
|
251
|
-
query.bounds_nr = -1;
|
|
252
|
-
|
|
253
|
-
BVH bvh = bvh_get(id);
|
|
254
|
-
|
|
255
|
-
query.bvh = bvh;
|
|
256
|
-
query.is_ray = is_ray;
|
|
257
|
-
|
|
258
|
-
// optimization: make the latest
|
|
259
|
-
query.stack[0] = *bvh.root;
|
|
260
|
-
query.count = 1;
|
|
261
|
-
query.input_lower = lower;
|
|
262
|
-
query.input_upper = upper;
|
|
263
|
-
|
|
264
|
-
wp::bounds3 input_bounds(query.input_lower, query.input_upper);
|
|
265
|
-
|
|
266
|
-
// Navigate through the bvh, find the first overlapping leaf node.
|
|
267
|
-
while (query.count)
|
|
268
|
-
{
|
|
269
|
-
const int node_index = query.stack[--query.count];
|
|
270
|
-
|
|
271
|
-
BVHPackedNodeHalf node_lower = bvh.node_lowers[node_index];
|
|
272
|
-
BVHPackedNodeHalf node_upper = bvh.node_uppers[node_index];
|
|
273
|
-
|
|
274
|
-
wp::vec3 lower_pos(node_lower.x, node_lower.y, node_lower.z);
|
|
275
|
-
wp::vec3 upper_pos(node_upper.x, node_upper.y, node_upper.z);
|
|
276
|
-
wp::bounds3 current_bounds(lower_pos, upper_pos);
|
|
277
|
-
|
|
278
|
-
if (query.is_ray)
|
|
279
|
-
{
|
|
280
|
-
float t = 0.0f;
|
|
281
|
-
if (!intersect_ray_aabb(query.input_lower, query.input_upper, current_bounds.lower, current_bounds.upper, t))
|
|
282
|
-
// Skip this box, it doesn't overlap with our ray.
|
|
283
|
-
continue;
|
|
284
|
-
}
|
|
285
|
-
else
|
|
286
|
-
{
|
|
287
|
-
if (!input_bounds.overlaps(current_bounds))
|
|
288
|
-
// Skip this box, it doesn't overlap with our target box.
|
|
289
|
-
continue;
|
|
290
|
-
}
|
|
291
|
-
|
|
292
|
-
const int left_index = node_lower.i;
|
|
293
|
-
const int right_index = node_upper.i;
|
|
294
|
-
|
|
295
|
-
// Make bounds from this AABB
|
|
296
|
-
if (node_lower.b)
|
|
297
|
-
{
|
|
298
|
-
// found very first leaf index.
|
|
299
|
-
// Back up one level and return
|
|
300
|
-
query.stack[query.count++] = node_index;
|
|
301
|
-
return query;
|
|
302
|
-
}
|
|
303
|
-
else
|
|
304
|
-
{
|
|
305
|
-
query.stack[query.count++] = left_index;
|
|
306
|
-
query.stack[query.count++] = right_index;
|
|
307
|
-
}
|
|
308
|
-
}
|
|
309
|
-
|
|
310
|
-
return query;
|
|
311
|
-
}
|
|
312
|
-
|
|
313
|
-
CUDA_CALLABLE inline bvh_query_t bvh_query_aabb(
|
|
314
|
-
uint64_t id, const vec3& lower, const vec3& upper)
|
|
315
|
-
{
|
|
316
|
-
return bvh_query(id, false, lower, upper);
|
|
317
|
-
}
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
CUDA_CALLABLE inline bvh_query_t bvh_query_ray(
|
|
321
|
-
uint64_t id, const vec3& start, const vec3& dir)
|
|
322
|
-
{
|
|
323
|
-
return bvh_query(id, true, start, dir);
|
|
324
|
-
}
|
|
325
|
-
|
|
326
|
-
//Stub
|
|
327
|
-
CUDA_CALLABLE inline void adj_bvh_query_aabb(uint64_t id, const vec3& lower, const vec3& upper,
|
|
328
|
-
uint64_t, vec3&, vec3&, bvh_query_t&)
|
|
329
|
-
{
|
|
330
|
-
}
|
|
331
|
-
|
|
332
|
-
|
|
333
|
-
CUDA_CALLABLE inline void adj_bvh_query_ray(uint64_t id, const vec3& start, const vec3& dir,
|
|
334
|
-
uint64_t, vec3&, vec3&, bvh_query_t&)
|
|
335
|
-
{
|
|
336
|
-
}
|
|
337
|
-
|
|
338
|
-
|
|
339
|
-
CUDA_CALLABLE inline bool bvh_query_next(bvh_query_t& query, int& index)
|
|
340
|
-
{
|
|
341
|
-
BVH bvh = query.bvh;
|
|
342
|
-
|
|
343
|
-
wp::bounds3 input_bounds(query.input_lower, query.input_upper);
|
|
344
|
-
|
|
345
|
-
// Navigate through the bvh, find the first overlapping leaf node.
|
|
346
|
-
while (query.count)
|
|
347
|
-
{
|
|
348
|
-
const int node_index = query.stack[--query.count];
|
|
349
|
-
BVHPackedNodeHalf node_lower = bvh.node_lowers[node_index];
|
|
350
|
-
BVHPackedNodeHalf node_upper = bvh.node_uppers[node_index];
|
|
351
|
-
|
|
352
|
-
wp::vec3 lower_pos(node_lower.x, node_lower.y, node_lower.z);
|
|
353
|
-
wp::vec3 upper_pos(node_upper.x, node_upper.y, node_upper.z);
|
|
354
|
-
wp::bounds3 current_bounds(lower_pos, upper_pos);
|
|
355
|
-
|
|
356
|
-
if (query.is_ray)
|
|
357
|
-
{
|
|
358
|
-
float t = 0.0f;
|
|
359
|
-
if (!intersect_ray_aabb(query.input_lower, query.input_upper, current_bounds.lower, current_bounds.upper, t))
|
|
360
|
-
// Skip this box, it doesn't overlap with our ray.
|
|
361
|
-
continue;
|
|
362
|
-
}
|
|
363
|
-
else {
|
|
364
|
-
if (!input_bounds.overlaps(current_bounds))
|
|
365
|
-
// Skip this box, it doesn't overlap with our target box.
|
|
366
|
-
continue;
|
|
367
|
-
}
|
|
368
|
-
|
|
369
|
-
const int left_index = node_lower.i;
|
|
370
|
-
const int right_index = node_upper.i;
|
|
371
|
-
|
|
372
|
-
if (node_lower.b)
|
|
373
|
-
{
|
|
374
|
-
// found leaf
|
|
375
|
-
query.bounds_nr = left_index;
|
|
376
|
-
index = left_index;
|
|
377
|
-
return true;
|
|
378
|
-
}
|
|
379
|
-
else
|
|
380
|
-
{
|
|
381
|
-
|
|
382
|
-
query.stack[query.count++] = left_index;
|
|
383
|
-
query.stack[query.count++] = right_index;
|
|
384
|
-
}
|
|
385
|
-
}
|
|
386
|
-
return false;
|
|
387
|
-
}
|
|
388
|
-
|
|
389
|
-
|
|
390
|
-
CUDA_CALLABLE inline int iter_next(bvh_query_t& query)
|
|
391
|
-
{
|
|
392
|
-
return query.bounds_nr;
|
|
393
|
-
}
|
|
394
|
-
|
|
395
|
-
CUDA_CALLABLE inline bool iter_cmp(bvh_query_t& query)
|
|
396
|
-
{
|
|
397
|
-
bool finished = bvh_query_next(query, query.bounds_nr);
|
|
398
|
-
return finished;
|
|
399
|
-
}
|
|
400
|
-
|
|
401
|
-
CUDA_CALLABLE inline bvh_query_t iter_reverse(const bvh_query_t& query)
|
|
402
|
-
{
|
|
403
|
-
// can't reverse BVH queries, users should not rely on traversal ordering
|
|
404
|
-
return query;
|
|
405
|
-
}
|
|
406
|
-
|
|
407
|
-
|
|
408
|
-
// stub
|
|
409
|
-
CUDA_CALLABLE inline void adj_bvh_query_next(bvh_query_t& query, int& index, bvh_query_t&, int&, bool&)
|
|
410
|
-
{
|
|
411
|
-
|
|
412
|
-
}
|
|
413
|
-
|
|
414
|
-
CUDA_CALLABLE bool bvh_get_descriptor(uint64_t id, BVH& bvh);
|
|
415
|
-
CUDA_CALLABLE void bvh_add_descriptor(uint64_t id, const BVH& bvh);
|
|
416
|
-
CUDA_CALLABLE void bvh_rem_descriptor(uint64_t id);
|
|
417
|
-
|
|
418
|
-
#if !__CUDA_ARCH__
|
|
419
|
-
|
|
420
|
-
void
|
|
421
|
-
void
|
|
422
|
-
|
|
423
|
-
|
|
424
|
-
void
|
|
425
|
-
|
|
426
|
-
|
|
427
|
-
|
|
428
|
-
|
|
429
|
-
|
|
1
|
+
/** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
|
|
2
|
+
* NVIDIA CORPORATION and its licensors retain all intellectual property
|
|
3
|
+
* and proprietary rights in and to this software, related documentation
|
|
4
|
+
* and any modifications thereto. Any use, reproduction, disclosure or
|
|
5
|
+
* distribution of this software and related documentation without an express
|
|
6
|
+
* license agreement from NVIDIA CORPORATION is strictly prohibited.
|
|
7
|
+
*/
|
|
8
|
+
|
|
9
|
+
#pragma once
|
|
10
|
+
|
|
11
|
+
#include "builtin.h"
|
|
12
|
+
#include "intersect.h"
|
|
13
|
+
|
|
14
|
+
namespace wp
|
|
15
|
+
{
|
|
16
|
+
|
|
17
|
+
struct bounds3
|
|
18
|
+
{
|
|
19
|
+
CUDA_CALLABLE inline bounds3() : lower( FLT_MAX)
|
|
20
|
+
, upper(-FLT_MAX) {}
|
|
21
|
+
|
|
22
|
+
CUDA_CALLABLE inline bounds3(const vec3& lower, const vec3& upper) : lower(lower), upper(upper) {}
|
|
23
|
+
|
|
24
|
+
CUDA_CALLABLE inline vec3 center() const { return 0.5f*(lower+upper); }
|
|
25
|
+
CUDA_CALLABLE inline vec3 edges() const { return upper-lower; }
|
|
26
|
+
|
|
27
|
+
CUDA_CALLABLE inline void expand(float r)
|
|
28
|
+
{
|
|
29
|
+
lower -= vec3(r);
|
|
30
|
+
upper += vec3(r);
|
|
31
|
+
}
|
|
32
|
+
|
|
33
|
+
CUDA_CALLABLE inline void expand(const vec3& r)
|
|
34
|
+
{
|
|
35
|
+
lower -= r;
|
|
36
|
+
upper += r;
|
|
37
|
+
}
|
|
38
|
+
|
|
39
|
+
CUDA_CALLABLE inline bool empty() const { return lower[0] >= upper[0] || lower[1] >= upper[1] || lower[2] >= upper[2]; }
|
|
40
|
+
|
|
41
|
+
CUDA_CALLABLE inline bool overlaps(const vec3& p) const
|
|
42
|
+
{
|
|
43
|
+
if (p[0] < lower[0] ||
|
|
44
|
+
p[1] < lower[1] ||
|
|
45
|
+
p[2] < lower[2] ||
|
|
46
|
+
p[0] > upper[0] ||
|
|
47
|
+
p[1] > upper[1] ||
|
|
48
|
+
p[2] > upper[2])
|
|
49
|
+
{
|
|
50
|
+
return false;
|
|
51
|
+
}
|
|
52
|
+
else
|
|
53
|
+
{
|
|
54
|
+
return true;
|
|
55
|
+
}
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
CUDA_CALLABLE inline bool overlaps(const bounds3& b) const
|
|
59
|
+
{
|
|
60
|
+
if (lower[0] > b.upper[0] ||
|
|
61
|
+
lower[1] > b.upper[1] ||
|
|
62
|
+
lower[2] > b.upper[2] ||
|
|
63
|
+
upper[0] < b.lower[0] ||
|
|
64
|
+
upper[1] < b.lower[1] ||
|
|
65
|
+
upper[2] < b.lower[2])
|
|
66
|
+
{
|
|
67
|
+
return false;
|
|
68
|
+
}
|
|
69
|
+
else
|
|
70
|
+
{
|
|
71
|
+
return true;
|
|
72
|
+
}
|
|
73
|
+
}
|
|
74
|
+
|
|
75
|
+
CUDA_CALLABLE inline void add_point(const vec3& p)
|
|
76
|
+
{
|
|
77
|
+
lower = min(lower, p);
|
|
78
|
+
upper = max(upper, p);
|
|
79
|
+
}
|
|
80
|
+
|
|
81
|
+
CUDA_CALLABLE inline float area() const
|
|
82
|
+
{
|
|
83
|
+
vec3 e = upper-lower;
|
|
84
|
+
return 2.0f*(e[0]*e[1] + e[0]*e[2] + e[1]*e[2]);
|
|
85
|
+
}
|
|
86
|
+
|
|
87
|
+
vec3 lower;
|
|
88
|
+
vec3 upper;
|
|
89
|
+
};
|
|
90
|
+
|
|
91
|
+
CUDA_CALLABLE inline bounds3 bounds_union(const bounds3& a, const vec3& b)
|
|
92
|
+
{
|
|
93
|
+
return bounds3(min(a.lower, b), max(a.upper, b));
|
|
94
|
+
}
|
|
95
|
+
|
|
96
|
+
CUDA_CALLABLE inline bounds3 bounds_union(const bounds3& a, const bounds3& b)
|
|
97
|
+
{
|
|
98
|
+
return bounds3(min(a.lower, b.lower), max(a.upper, b.upper));
|
|
99
|
+
}
|
|
100
|
+
|
|
101
|
+
CUDA_CALLABLE inline bounds3 bounds_intersection(const bounds3& a, const bounds3& b)
|
|
102
|
+
{
|
|
103
|
+
return bounds3(max(a.lower, b.lower), min(a.upper, b.upper));
|
|
104
|
+
}
|
|
105
|
+
|
|
106
|
+
// One half of a packed BVH node: one corner of the node's AABB plus a
// 31-bit child/item index and a 1-bit leaf flag packed into a single word
// (see make_node for how the fields are filled).
struct BVHPackedNodeHalf
{
    // AABB corner coordinates
    float x;
    float y;
    float z;
    // child node index (interior) or item index (leaf)
    unsigned int i : 31;
    // 1 if this node is a leaf, 0 for an interior node
    unsigned int b : 1;
};
|
|
114
|
+
|
|
115
|
+
// Descriptor for a bounding volume hierarchy; node and item arrays live
// in either CPU or GPU memory depending on the owning context.
struct BVH
{
    // per-node packed lower/upper AABB halves (see BVHPackedNodeHalf)
    BVHPackedNodeHalf* node_lowers;
    BVHPackedNodeHalf* node_uppers;

    // used for fast refits
    int* node_parents;
    int* node_counts;

    int max_depth;
    int max_nodes;
    int num_nodes;

    // pointer (CPU or GPU) to a single integer index in node_lowers, node_uppers
    // representing the root of the tree, this is not always the first node
    // for bottom-up builders
    int* root;

    // item bounds are not owned by the BVH but by the caller
    vec3* item_lowers;
    vec3* item_uppers;
    int num_items;

    // cuda context
    void* context;
};
|
|
141
|
+
|
|
142
|
+
// Build one packed node half from an AABB corner, a child/item index and
// a leaf flag.
CUDA_CALLABLE inline BVHPackedNodeHalf make_node(const vec3& bound, int child, bool leaf)
{
    BVHPackedNodeHalf node;
    node.x = bound[0];
    node.y = bound[1];
    node.z = bound[2];
    node.i = (unsigned int)child;
    node.b = leaf ? 1u : 0u;
    return node;
}
|
|
153
|
+
|
|
154
|
+
// variation of make_node through volatile pointers used in build_hierarchy
|
|
155
|
+
CUDA_CALLABLE inline void make_node(volatile BVHPackedNodeHalf* n, const vec3& bound, int child, bool leaf)
|
|
156
|
+
{
|
|
157
|
+
n->x = bound[0];
|
|
158
|
+
n->y = bound[1];
|
|
159
|
+
n->z = bound[2];
|
|
160
|
+
n->i = (unsigned int)child;
|
|
161
|
+
n->b = (unsigned int)(leaf?1:0);
|
|
162
|
+
}
|
|
163
|
+
|
|
164
|
+
// Count leading zero bits of the 32-bit value x; returns 32 for x == 0.
CUDA_CALLABLE inline int clz(int x)
{
    if (x == 0)
        return 32;

    // shift on an unsigned copy: left-shifting a signed int once the
    // sign bit becomes involved is undefined behavior in C++
    unsigned int u = (unsigned int)x;
    int n = 0;
    while ((u & 0x80000000u) == 0)
    {
        n++;
        u <<= 1;
    }
    return n;
}
|
|
171
|
+
|
|
172
|
+
// Spread the low 10 bits of n so each occupies every third bit position
// (final mask 0x09249249 keeps bits 0,3,6,...,27,30) — the per-axis step
// of 3D Morton encoding.
CUDA_CALLABLE inline uint32_t part1by2(uint32_t n)
{
    n = (n ^ (n << 16)) & 0xff0000ff;
    n = (n ^ (n << 8)) & 0x0300f00f;
    n = (n ^ (n << 4)) & 0x030c30c3;
    n = (n ^ (n << 2)) & 0x09249249;

    return n;
}
|
|
181
|
+
|
|
182
|
+
// Takes values in the range [0, 1] and assigns an index based Morton codes of length 3*lwp2(dim) bits
|
|
183
|
+
template <int dim>
|
|
184
|
+
CUDA_CALLABLE inline uint32_t morton3(float x, float y, float z)
|
|
185
|
+
{
|
|
186
|
+
uint32_t ux = clamp(int(x*dim), 0, dim-1);
|
|
187
|
+
uint32_t uy = clamp(int(y*dim), 0, dim-1);
|
|
188
|
+
uint32_t uz = clamp(int(z*dim), 0, dim-1);
|
|
189
|
+
|
|
190
|
+
return (part1by2(uz) << 2) | (part1by2(uy) << 1) | part1by2(ux);
|
|
191
|
+
}
|
|
192
|
+
|
|
193
|
+
// making the class accessible from python
|
|
194
|
+
|
|
195
|
+
// Dereference a BVH handle; id is an opaque pointer to a BVH descriptor.
CUDA_CALLABLE inline BVH bvh_get(uint64_t id)
{
    BVH* ptr = (BVH*)(id);
    return *ptr;
}
|
|
199
|
+
|
|
200
|
+
// Number of items (leaf bounds) stored in the BVH identified by id.
CUDA_CALLABLE inline int bvh_get_num_bounds(uint64_t id)
{
    return bvh_get(id).num_items;
}
|
|
205
|
+
|
|
206
|
+
|
|
207
|
+
// stores state required to traverse the BVH nodes that
// overlap with a query AABB.
struct bvh_query_t
{
    CUDA_CALLABLE bvh_query_t()
        : bvh(),
          stack(),
          count(0),
          is_ray(false),
          input_lower(),
          input_upper(),
          bounds_nr(0)
    {}

    // Required for adjoint computations.
    CUDA_CALLABLE inline bvh_query_t& operator+=(const bvh_query_t& other)
    {
        // no-op: queries carry no differentiable state
        return *this;
    }

    // descriptor of the tree being traversed (copied by value)
    BVH bvh;

    // BVH traversal stack:
    int stack[32];
    int count;

    // inputs
    bool is_ray;
    wp::vec3 input_lower; // start for ray
    wp::vec3 input_upper; // dir for ray

    // index of the most recently returned leaf item (see bvh_query_next)
    int bounds_nr;
};
|
|
240
|
+
|
|
241
|
+
|
|
242
|
+
// Begin a BVH traversal for either an AABB query (is_ray == false, with
// lower/upper the box corners) or a ray query (is_ray == true, with
// lower the ray start and upper the ray direction). Descends until the
// first overlapping leaf is found and leaves it on the stack so that the
// first bvh_query_next call returns it.
CUDA_CALLABLE inline bvh_query_t bvh_query(
    uint64_t id, bool is_ray, const vec3& lower, const vec3& upper)
{
    // This routine traverses the BVH tree until it finds
    // the first overlapping bound.

    // initialize empty
    bvh_query_t query;

    query.bounds_nr = -1;

    BVH bvh = bvh_get(id);

    query.bvh = bvh;
    query.is_ray = is_ray;

    // optimization: make the latest
    // begin at the root (not necessarily node 0 for bottom-up builders)
    query.stack[0] = *bvh.root;
    query.count = 1;
    query.input_lower = lower;
    query.input_upper = upper;

    wp::bounds3 input_bounds(query.input_lower, query.input_upper);

    // Navigate through the bvh, find the first overlapping leaf node.
    while (query.count)
    {
        const int node_index = query.stack[--query.count];

        BVHPackedNodeHalf node_lower = bvh.node_lowers[node_index];
        BVHPackedNodeHalf node_upper = bvh.node_uppers[node_index];

        wp::vec3 lower_pos(node_lower.x, node_lower.y, node_lower.z);
        wp::vec3 upper_pos(node_upper.x, node_upper.y, node_upper.z);
        wp::bounds3 current_bounds(lower_pos, upper_pos);

        if (query.is_ray)
        {
            float t = 0.0f;
            if (!intersect_ray_aabb(query.input_lower, query.input_upper, current_bounds.lower, current_bounds.upper, t))
                // Skip this box, it doesn't overlap with our ray.
                continue;
        }
        else
        {
            if (!input_bounds.overlaps(current_bounds))
                // Skip this box, it doesn't overlap with our target box.
                continue;
        }

        // child indices (interior node) — unused in the leaf branch
        const int left_index = node_lower.i;
        const int right_index = node_upper.i;

        // Make bounds from this AABB
        if (node_lower.b)
        {
            // found very first leaf index.
            // Back up one level and return
            query.stack[query.count++] = node_index;
            return query;
        }
        else
        {
            query.stack[query.count++] = left_index;
            query.stack[query.count++] = right_index;
        }
    }

    return query;
}
|
|
312
|
+
|
|
313
|
+
// Begin an AABB-overlap query against the BVH identified by id.
CUDA_CALLABLE inline bvh_query_t bvh_query_aabb(
    uint64_t id, const vec3& lower, const vec3& upper)
{
    const bool is_ray = false;
    return bvh_query(id, is_ray, lower, upper);
}
|
|
318
|
+
|
|
319
|
+
|
|
320
|
+
// Begin a ray query against the BVH identified by id; start/dir are
// forwarded as the query's lower/upper inputs.
CUDA_CALLABLE inline bvh_query_t bvh_query_ray(
    uint64_t id, const vec3& start, const vec3& dir)
{
    const bool is_ray = true;
    return bvh_query(id, is_ray, start, dir);
}
|
|
325
|
+
|
|
326
|
+
// Stub adjoint: bvh_query_aabb has no differentiable outputs, so the
// backward pass is a no-op.
CUDA_CALLABLE inline void adj_bvh_query_aabb(uint64_t id, const vec3& lower, const vec3& upper,
                                             uint64_t, vec3&, vec3&, bvh_query_t&)
{
}
|
|
331
|
+
|
|
332
|
+
|
|
333
|
+
// Stub adjoint: bvh_query_ray has no differentiable outputs, so the
// backward pass is a no-op.
CUDA_CALLABLE inline void adj_bvh_query_ray(uint64_t id, const vec3& start, const vec3& dir,
                                            uint64_t, vec3&, vec3&, bvh_query_t&)
{
}
|
|
337
|
+
|
|
338
|
+
|
|
339
|
+
// Advance the traversal to the next overlapping leaf. On success writes
// the leaf's item index to `index` (and query.bounds_nr) and returns
// true; returns false when the traversal is exhausted.
CUDA_CALLABLE inline bool bvh_query_next(bvh_query_t& query, int& index)
{
    BVH bvh = query.bvh;

    wp::bounds3 query_bounds(query.input_lower, query.input_upper);

    // pop nodes until an overlapping leaf is found or the stack empties
    while (query.count)
    {
        const int node = query.stack[--query.count];

        BVHPackedNodeHalf half_lower = bvh.node_lowers[node];
        BVHPackedNodeHalf half_upper = bvh.node_uppers[node];

        wp::vec3 box_min(half_lower.x, half_lower.y, half_lower.z);
        wp::vec3 box_max(half_upper.x, half_upper.y, half_upper.z);
        wp::bounds3 node_bounds(box_min, box_max);

        // does the query primitive touch this node's AABB?
        bool hit;
        if (query.is_ray)
        {
            float t = 0.0f;
            hit = intersect_ray_aabb(query.input_lower, query.input_upper, node_bounds.lower, node_bounds.upper, t);
        }
        else
        {
            hit = query_bounds.overlaps(node_bounds);
        }

        if (!hit)
            continue;

        const int child_left = half_lower.i;
        const int child_right = half_upper.i;

        if (half_lower.b)
        {
            // leaf: the lower half's index field holds the item index
            query.bounds_nr = child_left;
            index = child_left;
            return true;
        }

        // interior node: descend into both children
        query.stack[query.count++] = child_left;
        query.stack[query.count++] = child_right;
    }
    return false;
}
|
|
388
|
+
|
|
389
|
+
|
|
390
|
+
// Iterator protocol: value of the current element (the last leaf index
// produced by bvh_query_next).
CUDA_CALLABLE inline int iter_next(bvh_query_t& query)
{
    return query.bounds_nr;
}
|
|
394
|
+
|
|
395
|
+
// Iterator protocol: advance to the next overlapping leaf; returns true
// while one exists (bvh_query_next also stores it in query.bounds_nr).
CUDA_CALLABLE inline bool iter_cmp(bvh_query_t& query)
{
    return bvh_query_next(query, query.bounds_nr);
}
|
|
400
|
+
|
|
401
|
+
// Iterator protocol: returns the query unchanged.
CUDA_CALLABLE inline bvh_query_t iter_reverse(const bvh_query_t& query)
{
    // can't reverse BVH queries, users should not rely on traversal ordering
    return query;
}
|
|
406
|
+
|
|
407
|
+
|
|
408
|
+
// stub adjoint: bvh_query_next has no differentiable outputs, so the
// backward pass is a no-op
CUDA_CALLABLE inline void adj_bvh_query_next(bvh_query_t& query, int& index, bvh_query_t&, int&, bool&)
{

}
|
|
413
|
+
|
|
414
|
+
CUDA_CALLABLE bool bvh_get_descriptor(uint64_t id, BVH& bvh);
|
|
415
|
+
CUDA_CALLABLE void bvh_add_descriptor(uint64_t id, const BVH& bvh);
|
|
416
|
+
CUDA_CALLABLE void bvh_rem_descriptor(uint64_t id);
|
|
417
|
+
|
|
418
|
+
#if !__CUDA_ARCH__
|
|
419
|
+
|
|
420
|
+
void bvh_create_host(vec3* lowers, vec3* uppers, int num_items, BVH& bvh);
|
|
421
|
+
void bvh_destroy_host(wp::BVH& bvh);
|
|
422
|
+
void bvh_refit_host(wp::BVH& bvh);
|
|
423
|
+
|
|
424
|
+
void bvh_destroy_device(wp::BVH& bvh);
|
|
425
|
+
void bvh_refit_device(uint64_t id);
|
|
426
|
+
|
|
427
|
+
#endif
|
|
428
|
+
|
|
429
|
+
} // namespace wp
|
|
430
|
+
|