warp-lang 1.0.2__py3-none-manylinux2014_x86_64.whl → 1.1.0__py3-none-manylinux2014_x86_64.whl
This diff shows the content of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of warp-lang might be problematic.
- warp/__init__.py +108 -97
- warp/__init__.pyi +1 -1
- warp/bin/warp-clang.so +0 -0
- warp/bin/warp.so +0 -0
- warp/build.py +115 -113
- warp/build_dll.py +383 -375
- warp/builtins.py +3425 -3354
- warp/codegen.py +2878 -2792
- warp/config.py +40 -36
- warp/constants.py +45 -45
- warp/context.py +5194 -5102
- warp/dlpack.py +442 -442
- warp/examples/__init__.py +16 -16
- warp/examples/assets/bear.usd +0 -0
- warp/examples/assets/bunny.usd +0 -0
- warp/examples/assets/cartpole.urdf +110 -110
- warp/examples/assets/crazyflie.usd +0 -0
- warp/examples/assets/cube.usd +0 -0
- warp/examples/assets/nv_ant.xml +92 -92
- warp/examples/assets/nv_humanoid.xml +183 -183
- warp/examples/assets/quadruped.urdf +267 -267
- warp/examples/assets/rocks.nvdb +0 -0
- warp/examples/assets/rocks.usd +0 -0
- warp/examples/assets/sphere.usd +0 -0
- warp/examples/benchmarks/benchmark_api.py +383 -383
- warp/examples/benchmarks/benchmark_cloth.py +278 -277
- warp/examples/benchmarks/benchmark_cloth_cupy.py +88 -88
- warp/examples/benchmarks/benchmark_cloth_jax.py +97 -100
- warp/examples/benchmarks/benchmark_cloth_numba.py +146 -142
- warp/examples/benchmarks/benchmark_cloth_numpy.py +77 -77
- warp/examples/benchmarks/benchmark_cloth_pytorch.py +86 -86
- warp/examples/benchmarks/benchmark_cloth_taichi.py +112 -112
- warp/examples/benchmarks/benchmark_cloth_warp.py +146 -146
- warp/examples/benchmarks/benchmark_launches.py +295 -295
- warp/examples/browse.py +29 -29
- warp/examples/core/example_dem.py +234 -219
- warp/examples/core/example_fluid.py +293 -267
- warp/examples/core/example_graph_capture.py +144 -126
- warp/examples/core/example_marching_cubes.py +188 -174
- warp/examples/core/example_mesh.py +174 -155
- warp/examples/core/example_mesh_intersect.py +205 -193
- warp/examples/core/example_nvdb.py +176 -170
- warp/examples/core/example_raycast.py +105 -90
- warp/examples/core/example_raymarch.py +199 -178
- warp/examples/core/example_render_opengl.py +185 -141
- warp/examples/core/example_sph.py +405 -387
- warp/examples/core/example_torch.py +222 -181
- warp/examples/core/example_wave.py +263 -248
- warp/examples/fem/bsr_utils.py +378 -380
- warp/examples/fem/example_apic_fluid.py +407 -389
- warp/examples/fem/example_convection_diffusion.py +182 -168
- warp/examples/fem/example_convection_diffusion_dg.py +219 -209
- warp/examples/fem/example_convection_diffusion_dg0.py +204 -194
- warp/examples/fem/example_deformed_geometry.py +177 -159
- warp/examples/fem/example_diffusion.py +201 -173
- warp/examples/fem/example_diffusion_3d.py +177 -152
- warp/examples/fem/example_diffusion_mgpu.py +221 -214
- warp/examples/fem/example_mixed_elasticity.py +244 -222
- warp/examples/fem/example_navier_stokes.py +259 -243
- warp/examples/fem/example_stokes.py +220 -192
- warp/examples/fem/example_stokes_transfer.py +265 -249
- warp/examples/fem/mesh_utils.py +133 -109
- warp/examples/fem/plot_utils.py +292 -287
- warp/examples/optim/example_bounce.py +260 -246
- warp/examples/optim/example_cloth_throw.py +222 -209
- warp/examples/optim/example_diffray.py +566 -536
- warp/examples/optim/example_drone.py +864 -835
- warp/examples/optim/example_inverse_kinematics.py +176 -168
- warp/examples/optim/example_inverse_kinematics_torch.py +185 -169
- warp/examples/optim/example_spring_cage.py +239 -231
- warp/examples/optim/example_trajectory.py +223 -199
- warp/examples/optim/example_walker.py +306 -293
- warp/examples/sim/example_cartpole.py +139 -129
- warp/examples/sim/example_cloth.py +196 -186
- warp/examples/sim/example_granular.py +124 -111
- warp/examples/sim/example_granular_collision_sdf.py +197 -186
- warp/examples/sim/example_jacobian_ik.py +236 -214
- warp/examples/sim/example_particle_chain.py +118 -105
- warp/examples/sim/example_quadruped.py +193 -180
- warp/examples/sim/example_rigid_chain.py +197 -187
- warp/examples/sim/example_rigid_contact.py +189 -177
- warp/examples/sim/example_rigid_force.py +127 -125
- warp/examples/sim/example_rigid_gyroscopic.py +109 -95
- warp/examples/sim/example_rigid_soft_contact.py +134 -122
- warp/examples/sim/example_soft_body.py +190 -177
- warp/fabric.py +337 -335
- warp/fem/__init__.py +60 -27
- warp/fem/cache.py +401 -388
- warp/fem/dirichlet.py +178 -179
- warp/fem/domain.py +262 -263
- warp/fem/field/__init__.py +100 -101
- warp/fem/field/field.py +148 -149
- warp/fem/field/nodal_field.py +298 -299
- warp/fem/field/restriction.py +22 -21
- warp/fem/field/test.py +180 -181
- warp/fem/field/trial.py +183 -183
- warp/fem/geometry/__init__.py +15 -19
- warp/fem/geometry/closest_point.py +69 -70
- warp/fem/geometry/deformed_geometry.py +270 -271
- warp/fem/geometry/element.py +744 -744
- warp/fem/geometry/geometry.py +184 -186
- warp/fem/geometry/grid_2d.py +380 -373
- warp/fem/geometry/grid_3d.py +441 -435
- warp/fem/geometry/hexmesh.py +953 -953
- warp/fem/geometry/partition.py +374 -376
- warp/fem/geometry/quadmesh_2d.py +532 -532
- warp/fem/geometry/tetmesh.py +840 -840
- warp/fem/geometry/trimesh_2d.py +577 -577
- warp/fem/integrate.py +1630 -1615
- warp/fem/operator.py +190 -191
- warp/fem/polynomial.py +214 -213
- warp/fem/quadrature/__init__.py +2 -2
- warp/fem/quadrature/pic_quadrature.py +243 -245
- warp/fem/quadrature/quadrature.py +295 -294
- warp/fem/space/__init__.py +294 -292
- warp/fem/space/basis_space.py +488 -489
- warp/fem/space/collocated_function_space.py +100 -105
- warp/fem/space/dof_mapper.py +236 -236
- warp/fem/space/function_space.py +148 -145
- warp/fem/space/grid_2d_function_space.py +267 -267
- warp/fem/space/grid_3d_function_space.py +305 -306
- warp/fem/space/hexmesh_function_space.py +350 -352
- warp/fem/space/partition.py +350 -350
- warp/fem/space/quadmesh_2d_function_space.py +368 -369
- warp/fem/space/restriction.py +158 -160
- warp/fem/space/shape/__init__.py +13 -15
- warp/fem/space/shape/cube_shape_function.py +738 -738
- warp/fem/space/shape/shape_function.py +102 -103
- warp/fem/space/shape/square_shape_function.py +611 -611
- warp/fem/space/shape/tet_shape_function.py +565 -567
- warp/fem/space/shape/triangle_shape_function.py +429 -429
- warp/fem/space/tetmesh_function_space.py +294 -292
- warp/fem/space/topology.py +297 -295
- warp/fem/space/trimesh_2d_function_space.py +223 -221
- warp/fem/types.py +77 -77
- warp/fem/utils.py +495 -495
- warp/jax.py +166 -141
- warp/jax_experimental.py +341 -339
- warp/native/array.h +1072 -1025
- warp/native/builtin.h +1560 -1560
- warp/native/bvh.cpp +398 -398
- warp/native/bvh.cu +525 -525
- warp/native/bvh.h +429 -429
- warp/native/clang/clang.cpp +495 -464
- warp/native/crt.cpp +31 -31
- warp/native/crt.h +334 -334
- warp/native/cuda_crt.h +1049 -1049
- warp/native/cuda_util.cpp +549 -540
- warp/native/cuda_util.h +288 -203
- warp/native/cutlass_gemm.cpp +34 -34
- warp/native/cutlass_gemm.cu +372 -372
- warp/native/error.cpp +66 -66
- warp/native/error.h +27 -27
- warp/native/fabric.h +228 -228
- warp/native/hashgrid.cpp +301 -278
- warp/native/hashgrid.cu +78 -77
- warp/native/hashgrid.h +227 -227
- warp/native/initializer_array.h +32 -32
- warp/native/intersect.h +1204 -1204
- warp/native/intersect_adj.h +365 -365
- warp/native/intersect_tri.h +322 -322
- warp/native/marching.cpp +2 -2
- warp/native/marching.cu +497 -497
- warp/native/marching.h +2 -2
- warp/native/mat.h +1498 -1498
- warp/native/matnn.h +333 -333
- warp/native/mesh.cpp +203 -203
- warp/native/mesh.cu +293 -293
- warp/native/mesh.h +1887 -1887
- warp/native/nanovdb/NanoVDB.h +4782 -4782
- warp/native/nanovdb/PNanoVDB.h +2553 -2553
- warp/native/nanovdb/PNanoVDBWrite.h +294 -294
- warp/native/noise.h +850 -850
- warp/native/quat.h +1084 -1084
- warp/native/rand.h +299 -299
- warp/native/range.h +108 -108
- warp/native/reduce.cpp +156 -156
- warp/native/reduce.cu +348 -348
- warp/native/runlength_encode.cpp +61 -61
- warp/native/runlength_encode.cu +46 -46
- warp/native/scan.cpp +30 -30
- warp/native/scan.cu +36 -36
- warp/native/scan.h +7 -7
- warp/native/solid_angle.h +442 -442
- warp/native/sort.cpp +94 -94
- warp/native/sort.cu +97 -97
- warp/native/sort.h +14 -14
- warp/native/sparse.cpp +337 -337
- warp/native/sparse.cu +544 -544
- warp/native/spatial.h +630 -630
- warp/native/svd.h +562 -562
- warp/native/temp_buffer.h +30 -30
- warp/native/vec.h +1132 -1132
- warp/native/volume.cpp +297 -297
- warp/native/volume.cu +32 -32
- warp/native/volume.h +538 -538
- warp/native/volume_builder.cu +425 -425
- warp/native/volume_builder.h +19 -19
- warp/native/warp.cpp +1057 -1052
- warp/native/warp.cu +2943 -2828
- warp/native/warp.h +313 -305
- warp/optim/__init__.py +9 -9
- warp/optim/adam.py +120 -120
- warp/optim/linear.py +1104 -939
- warp/optim/sgd.py +104 -92
- warp/render/__init__.py +10 -10
- warp/render/render_opengl.py +3217 -3204
- warp/render/render_usd.py +768 -749
- warp/render/utils.py +152 -150
- warp/sim/__init__.py +52 -59
- warp/sim/articulation.py +685 -685
- warp/sim/collide.py +1594 -1590
- warp/sim/import_mjcf.py +489 -481
- warp/sim/import_snu.py +220 -221
- warp/sim/import_urdf.py +536 -516
- warp/sim/import_usd.py +887 -881
- warp/sim/inertia.py +316 -317
- warp/sim/integrator.py +234 -233
- warp/sim/integrator_euler.py +1956 -1956
- warp/sim/integrator_featherstone.py +1910 -1991
- warp/sim/integrator_xpbd.py +3294 -3312
- warp/sim/model.py +4473 -4314
- warp/sim/particles.py +113 -112
- warp/sim/render.py +417 -403
- warp/sim/utils.py +413 -410
- warp/sparse.py +1227 -1227
- warp/stubs.py +2109 -2469
- warp/tape.py +1162 -225
- warp/tests/__init__.py +1 -1
- warp/tests/__main__.py +4 -4
- warp/tests/assets/torus.usda +105 -105
- warp/tests/aux_test_class_kernel.py +26 -26
- warp/tests/aux_test_compile_consts_dummy.py +10 -10
- warp/tests/aux_test_conditional_unequal_types_kernels.py +21 -21
- warp/tests/aux_test_dependent.py +22 -22
- warp/tests/aux_test_grad_customs.py +23 -23
- warp/tests/aux_test_reference.py +11 -11
- warp/tests/aux_test_reference_reference.py +10 -10
- warp/tests/aux_test_square.py +17 -17
- warp/tests/aux_test_unresolved_func.py +14 -14
- warp/tests/aux_test_unresolved_symbol.py +14 -14
- warp/tests/disabled_kinematics.py +239 -239
- warp/tests/run_coverage_serial.py +31 -31
- warp/tests/test_adam.py +157 -157
- warp/tests/test_arithmetic.py +1124 -1124
- warp/tests/test_array.py +2417 -2326
- warp/tests/test_array_reduce.py +150 -150
- warp/tests/test_async.py +668 -656
- warp/tests/test_atomic.py +141 -141
- warp/tests/test_bool.py +204 -149
- warp/tests/test_builtins_resolution.py +1292 -1292
- warp/tests/test_bvh.py +164 -171
- warp/tests/test_closest_point_edge_edge.py +228 -228
- warp/tests/test_codegen.py +566 -553
- warp/tests/test_compile_consts.py +97 -101
- warp/tests/test_conditional.py +246 -246
- warp/tests/test_copy.py +232 -215
- warp/tests/test_ctypes.py +632 -632
- warp/tests/test_dense.py +67 -67
- warp/tests/test_devices.py +91 -98
- warp/tests/test_dlpack.py +530 -529
- warp/tests/test_examples.py +400 -378
- warp/tests/test_fabricarray.py +955 -955
- warp/tests/test_fast_math.py +62 -54
- warp/tests/test_fem.py +1277 -1278
- warp/tests/test_fp16.py +130 -130
- warp/tests/test_func.py +338 -337
- warp/tests/test_generics.py +571 -571
- warp/tests/test_grad.py +746 -640
- warp/tests/test_grad_customs.py +333 -336
- warp/tests/test_hash_grid.py +210 -164
- warp/tests/test_import.py +39 -39
- warp/tests/test_indexedarray.py +1134 -1134
- warp/tests/test_intersect.py +67 -67
- warp/tests/test_jax.py +307 -307
- warp/tests/test_large.py +167 -164
- warp/tests/test_launch.py +354 -354
- warp/tests/test_lerp.py +261 -261
- warp/tests/test_linear_solvers.py +191 -171
- warp/tests/test_lvalue.py +421 -493
- warp/tests/test_marching_cubes.py +65 -65
- warp/tests/test_mat.py +1801 -1827
- warp/tests/test_mat_lite.py +115 -115
- warp/tests/test_mat_scalar_ops.py +2907 -2889
- warp/tests/test_math.py +126 -193
- warp/tests/test_matmul.py +500 -499
- warp/tests/test_matmul_lite.py +410 -410
- warp/tests/test_mempool.py +188 -190
- warp/tests/test_mesh.py +284 -324
- warp/tests/test_mesh_query_aabb.py +228 -241
- warp/tests/test_mesh_query_point.py +692 -702
- warp/tests/test_mesh_query_ray.py +292 -303
- warp/tests/test_mlp.py +276 -276
- warp/tests/test_model.py +110 -110
- warp/tests/test_modules_lite.py +39 -39
- warp/tests/test_multigpu.py +163 -163
- warp/tests/test_noise.py +248 -248
- warp/tests/test_operators.py +250 -250
- warp/tests/test_options.py +123 -125
- warp/tests/test_peer.py +133 -137
- warp/tests/test_pinned.py +78 -78
- warp/tests/test_print.py +54 -54
- warp/tests/test_quat.py +2086 -2086
- warp/tests/test_rand.py +288 -288
- warp/tests/test_reload.py +217 -217
- warp/tests/test_rounding.py +179 -179
- warp/tests/test_runlength_encode.py +190 -190
- warp/tests/test_sim_grad.py +243 -0
- warp/tests/test_sim_kinematics.py +91 -97
- warp/tests/test_smoothstep.py +168 -168
- warp/tests/test_snippet.py +305 -266
- warp/tests/test_sparse.py +468 -460
- warp/tests/test_spatial.py +2148 -2148
- warp/tests/test_streams.py +486 -473
- warp/tests/test_struct.py +710 -675
- warp/tests/test_tape.py +173 -148
- warp/tests/test_torch.py +743 -743
- warp/tests/test_transient_module.py +87 -87
- warp/tests/test_types.py +556 -659
- warp/tests/test_utils.py +490 -499
- warp/tests/test_vec.py +1264 -1268
- warp/tests/test_vec_lite.py +73 -73
- warp/tests/test_vec_scalar_ops.py +2099 -2099
- warp/tests/test_verify_fp.py +94 -94
- warp/tests/test_volume.py +737 -736
- warp/tests/test_volume_write.py +255 -265
- warp/tests/unittest_serial.py +37 -37
- warp/tests/unittest_suites.py +363 -359
- warp/tests/unittest_utils.py +603 -578
- warp/tests/unused_test_misc.py +71 -71
- warp/tests/walkthrough_debug.py +85 -85
- warp/thirdparty/appdirs.py +598 -598
- warp/thirdparty/dlpack.py +143 -143
- warp/thirdparty/unittest_parallel.py +566 -561
- warp/torch.py +321 -295
- warp/types.py +4504 -4450
- warp/utils.py +1008 -821
- {warp_lang-1.0.2.dist-info → warp_lang-1.1.0.dist-info}/LICENSE.md +126 -126
- {warp_lang-1.0.2.dist-info → warp_lang-1.1.0.dist-info}/METADATA +338 -400
- warp_lang-1.1.0.dist-info/RECORD +352 -0
- warp/examples/assets/cube.usda +0 -42
- warp/examples/assets/sphere.usda +0 -56
- warp/examples/assets/torus.usda +0 -105
- warp_lang-1.0.2.dist-info/RECORD +0 -352
- {warp_lang-1.0.2.dist-info → warp_lang-1.1.0.dist-info}/WHEEL +0 -0
- {warp_lang-1.0.2.dist-info → warp_lang-1.1.0.dist-info}/top_level.txt +0 -0
warp/native/hashgrid.cu
CHANGED
@@ -1,77 +1,78 @@
+/** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto. Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ */
+
+#include "warp.h"
+#include "cuda_util.h"
+#include "hashgrid.h"
+#include "sort.h"
+
+namespace wp
+{
+
+__global__ void compute_cell_indices(HashGrid grid, wp::array_t<wp::vec3> points)
+{
+    const int tid = blockIdx.x*blockDim.x + threadIdx.x;
+
+    if (tid < points.shape[0])
+    {
+        const vec3& point = wp::index(points, tid);
+        grid.point_cells[tid] = hash_grid_index(grid, point);
+        grid.point_ids[tid] = tid;
+    }
+}
+
+__global__ void compute_cell_offsets(int* cell_starts, int* cell_ends, const int* point_cells, int num_points)
+{
+    const int tid = blockIdx.x*blockDim.x + threadIdx.x;
+
+    // compute cell start / end
+    if (tid < num_points)
+    {
+        // scan the particle-cell array to find the start and end
+        const int c = point_cells[tid];
+
+        if (tid == 0)
+            cell_starts[c] = 0;
+        else
+        {
+            const int p = point_cells[tid-1];
+
+            if (c != p)
+            {
+                cell_starts[c] = tid;
+                cell_ends[p] = tid;
+            }
+        }
+
+        if (tid == num_points - 1)
+        {
+            cell_ends[c] = tid + 1;
+        }
+    }
+}
+
+void hash_grid_rebuild_device(const wp::HashGrid& grid, const wp::array_t<wp::vec3>& points)
+{
+    ContextGuard guard(grid.context);
+
+    int num_points = points.shape[0];
+
+    wp_launch_device(WP_CURRENT_CONTEXT, wp::compute_cell_indices, num_points, (grid, points));
+
+    radix_sort_pairs_device(WP_CURRENT_CONTEXT, grid.point_cells, grid.point_ids, num_points);
+
+    const int num_cells = grid.dim_x * grid.dim_y * grid.dim_z;
+
+    memset_device(WP_CURRENT_CONTEXT, grid.cell_starts, 0, sizeof(int) * num_cells);
+    memset_device(WP_CURRENT_CONTEXT, grid.cell_ends, 0, sizeof(int) * num_cells);
+
+    wp_launch_device(WP_CURRENT_CONTEXT, wp::compute_cell_offsets, num_points, (grid.cell_starts, grid.cell_ends, grid.point_cells, num_points));
+}
+
+
+} // namespace wp
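The rebuild path above is a three-step pipeline: compute_cell_indices assigns each point a cell id, radix_sort_pairs_device sorts the point ids by cell, and compute_cell_offsets turns the sorted cell array into per-cell [start, end) ranges. The following is a minimal host-side sketch of that last bookkeeping step, run serially on a made-up, already-sorted point_cells array; it mirrors the kernel's per-thread logic for illustration and is not part of the package.

#include <cstdio>
#include <vector>

int main()
{
    // hypothetical, already-sorted cell id per point (radix_sort_pairs_device
    // provides this ordering on the device)
    const std::vector<int> point_cells = {2, 2, 5, 5, 5, 7};
    const int num_points = (int)point_cells.size();
    const int num_cells = 8;   // dim_x*dim_y*dim_z for a toy grid

    // zero-initialized, as memset_device does before the kernel runs
    std::vector<int> cell_starts(num_cells, 0);
    std::vector<int> cell_ends(num_cells, 0);

    // same per-element logic as wp::compute_cell_offsets, run serially
    for (int tid = 0; tid < num_points; ++tid)
    {
        const int c = point_cells[tid];

        if (tid == 0)
            cell_starts[c] = 0;
        else
        {
            const int p = point_cells[tid - 1];
            if (c != p)
            {
                cell_starts[c] = tid;
                cell_ends[p] = tid;
            }
        }

        if (tid == num_points - 1)
            cell_ends[c] = tid + 1;
    }

    for (int c = 0; c < num_cells; ++c)
        printf("cell %d: [%d, %d)\n", c, cell_starts[c], cell_ends[c]);

    return 0;
}

For the toy input {2, 2, 5, 5, 5, 7} this prints cell 2: [0, 2), cell 5: [2, 5), cell 7: [5, 6), with empty cells left at [0, 0), which matches what the memset plus kernel leave behind on the device.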
warp/native/hashgrid.h
CHANGED
@@ -1,227 +1,227 @@
 /** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
  * NVIDIA CORPORATION and its licensors retain all intellectual property
  * and proprietary rights in and to this software, related documentation
  * and any modifications thereto. Any use, reproduction, disclosure or
  * distribution of this software and related documentation without an express
  * license agreement from NVIDIA CORPORATION is strictly prohibited.
  */
 
 #pragma once
 
 namespace wp
 {
 
 struct HashGrid
 {
     float cell_width;
     float cell_width_inv;
 
     int* point_cells{nullptr};   // cell id of a point
     int* point_ids{nullptr};     // index to original point
 
     int* cell_starts{nullptr};   // start index of a range of indices belonging to a cell, dim_x*dim_y*dim_z in length
     int* cell_ends{nullptr};     // end index of a range of indices belonging to a cell, dim_x*dim_y*dim_z in length
 
     int dim_x;
     int dim_y;
     int dim_z;
 
     int num_points;
     int max_points;
 
-    void* context;
+    void* context{nullptr};
 };
 
 // convert a virtual (world) cell coordinate to a physical one
 CUDA_CALLABLE inline int hash_grid_index(const HashGrid& grid, int x, int y, int z)
 {
     // offset to ensure positive coordinates (means grid dim should be less than 4096^3)
     const int origin = 1<<20;
 
     x += origin;
     y += origin;
     z += origin;
 
     assert(0 < x);
     assert(0 < y);
     assert(0 < z);
 
     // clamp in case any particles fall outside the guard region (-10^20 cell index)
     x = max(0, x);
     y = max(0, y);
     z = max(0, z);
 
     // compute physical cell (assume pow2 grid dims)
     // int cx = x & (grid.dim_x-1);
     // int cy = y & (grid.dim_y-1);
     // int cz = z & (grid.dim_z-1);
 
     // compute physical cell (arbitrary grid dims)
     int cx = x%grid.dim_x;
     int cy = y%grid.dim_y;
     int cz = z%grid.dim_z;
 
     return cz*(grid.dim_x*grid.dim_y) + cy*grid.dim_x + cx;
 }
 
 CUDA_CALLABLE inline int hash_grid_index(const HashGrid& grid, const vec3& p)
 {
     return hash_grid_index(grid,
         int(p[0]*grid.cell_width_inv),
         int(p[1]*grid.cell_width_inv),
         int(p[2]*grid.cell_width_inv));
 }
 
 // stores state required to traverse neighboring cells of a point
 struct hash_grid_query_t
 {
     CUDA_CALLABLE hash_grid_query_t()
         : x_start(0),
           y_start(0),
           z_start(0),
           x_end(0),
           y_end(0),
           z_end(0),
           x(0),
           y(0),
           z(0),
           cell(0),
           cell_index(0),
           cell_end(0),
           current(0),
           grid()
     {}
 
     // Required for adjoint computations.
     CUDA_CALLABLE inline hash_grid_query_t& operator+=(const hash_grid_query_t& other)
     {
         return *this;
     }
 
     int x_start;
     int y_start;
     int z_start;
 
     int x_end;
     int y_end;
     int z_end;
 
     int x;
     int y;
     int z;
 
     int cell;
     int cell_index;   // offset in the current cell (index into cell_indices)
     int cell_end;     // index following the end of this cell
 
     int current;      // index of the current iterator value
 
     HashGrid grid;
 };
 
 
 CUDA_CALLABLE inline hash_grid_query_t hash_grid_query(uint64_t id, wp::vec3 pos, float radius)
 {
     hash_grid_query_t query;
 
     query.grid = *(const HashGrid*)(id);
 
     // convert coordinate to grid
     query.x_start = int((pos[0]-radius)*query.grid.cell_width_inv);
     query.y_start = int((pos[1]-radius)*query.grid.cell_width_inv);
     query.z_start = int((pos[2]-radius)*query.grid.cell_width_inv);
 
     // do not want to visit any cells more than once, so limit large radius offset to one pass over each dimension
     query.x_end = min(int((pos[0]+radius)*query.grid.cell_width_inv), query.x_start + query.grid.dim_x-1);
     query.y_end = min(int((pos[1]+radius)*query.grid.cell_width_inv), query.y_start + query.grid.dim_y-1);
     query.z_end = min(int((pos[2]+radius)*query.grid.cell_width_inv), query.z_start + query.grid.dim_z-1);
 
     query.x = query.x_start;
     query.y = query.y_start;
     query.z = query.z_start;
 
     const int cell = hash_grid_index(query.grid, query.x, query.y, query.z);
     query.cell_index = query.grid.cell_starts[cell];
     query.cell_end = query.grid.cell_ends[cell];
 
     return query;
 }
 
 
 CUDA_CALLABLE inline bool hash_grid_query_next(hash_grid_query_t& query, int& index)
 {
     const HashGrid& grid = query.grid;
     if (!grid.point_cells)
         return false;
 
     while (1)
     {
         if (query.cell_index < query.cell_end)
         {
             // write output index
             index = grid.point_ids[query.cell_index++];
             return true;
         }
         else
         {
             query.x++;
             if (query.x > query.x_end)
             {
                 query.x = query.x_start;
                 query.y++;
             }
 
             if (query.y > query.y_end)
             {
                 query.y = query.y_start;
                 query.z++;
             }
 
             if (query.z > query.z_end)
             {
                 // finished lookup grid
                 return false;
             }
 
             // update cell pointers
             const int cell = hash_grid_index(grid, query.x, query.y, query.z);
 
             query.cell_index = grid.cell_starts[cell];
             query.cell_end = grid.cell_ends[cell];
         }
     }
 }
 
 CUDA_CALLABLE inline int iter_next(hash_grid_query_t& query)
 {
     return query.current;
 }
 
 CUDA_CALLABLE inline bool iter_cmp(hash_grid_query_t& query)
 {
     bool finished = hash_grid_query_next(query, query.current);
     return finished;
 }
 
 CUDA_CALLABLE inline hash_grid_query_t iter_reverse(const hash_grid_query_t& query)
 {
     // can't reverse grid queries, users should not rely on neighbor ordering
     return query;
 }
 
 
 
 CUDA_CALLABLE inline int hash_grid_point_id(uint64_t id, int& index)
 {
     const HashGrid* grid = (const HashGrid*)(id);
     if (grid->point_ids == nullptr)
         return -1;
     return grid->point_ids[index];
 }
 
 CUDA_CALLABLE inline void adj_hash_grid_query(uint64_t id, wp::vec3 pos, float radius, uint64_t& adj_id, wp::vec3& adj_pos, float& adj_radius, hash_grid_query_t& adj_res) {}
 CUDA_CALLABLE inline void adj_hash_grid_query_next(hash_grid_query_t& query, int& index, hash_grid_query_t& adj_query, int& adj_index, bool& adj_res) {}
 CUDA_CALLABLE inline void adj_hash_grid_point_id(uint64_t id, int& index, uint64_t & adj_id, int& adj_index, int& adj_res) {}
 
 
 } // namespace wp
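hash_grid_query and hash_grid_query_next in this header define a forward-only iterator: the query records the range of cells overlapped by the search sphere's bounding box, and each call to hash_grid_query_next drains the current cell's [start, end) range before stepping x, then y, then z to the next cell. The standalone host sketch below re-implements that traversal with simplified, hypothetical types (MiniGrid, MiniQuery) and hand-built cell tables so the control flow can be run and inspected; it is an illustration of the loop structure, not the Warp API.

#include <cstdio>
#include <vector>

struct MiniGrid
{
    int dim_x, dim_y, dim_z;
    std::vector<int> cell_starts;   // per-cell start into point_ids
    std::vector<int> cell_ends;     // per-cell end (one past the last entry)
    std::vector<int> point_ids;     // point indices sorted by cell
};

struct MiniQuery
{
    int x_start, y_start, z_start;
    int x_end, y_end, z_end;
    int x, y, z;
    int cell_index, cell_end;
    const MiniGrid* grid;
};

// same cell linearization as hash_grid_index once coordinates are in range
static int cell_of(const MiniGrid& g, int x, int y, int z)
{
    return z * (g.dim_x * g.dim_y) + y * g.dim_x + x;
}

static MiniQuery make_query(const MiniGrid& g, int x0, int y0, int z0, int x1, int y1, int z1)
{
    MiniQuery q{x0, y0, z0, x1, y1, z1, x0, y0, z0, 0, 0, &g};
    const int c = cell_of(g, x0, y0, z0);
    q.cell_index = g.cell_starts[c];
    q.cell_end = g.cell_ends[c];
    return q;
}

// same control flow as hash_grid_query_next: drain the current cell, then
// advance x, then y, then z until the cell range is exhausted
static bool query_next(MiniQuery& q, int& index)
{
    while (true)
    {
        if (q.cell_index < q.cell_end)
        {
            index = q.grid->point_ids[q.cell_index++];
            return true;
        }

        if (++q.x > q.x_end) { q.x = q.x_start; q.y++; }
        if (q.y > q.y_end)   { q.y = q.y_start; q.z++; }
        if (q.z > q.z_end)
            return false;

        const int c = cell_of(*q.grid, q.x, q.y, q.z);
        q.cell_index = q.grid->cell_starts[c];
        q.cell_end = q.grid->cell_ends[c];
    }
}

int main()
{
    // 2x2x1 grid: cell (0,0,0) holds points {0, 1}, cell (1,0,0) holds {2}
    const MiniGrid g{2, 2, 1, {0, 2, 0, 0}, {2, 3, 0, 0}, {0, 1, 2}};

    MiniQuery q = make_query(g, 0, 0, 0, 1, 0, 0);   // visit cells (0,0,0) and (1,0,0)

    int index;
    while (query_next(q, index))
        printf("neighbor point id: %d\n", index);

    return 0;
}

Running this prints point ids 0, 1, 2: the two points of the first cell are drained before the iterator advances to the neighboring cell, mirroring the order in which the device-side iterator yields neighbors.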
warp/native/initializer_array.h
CHANGED
@@ -1,32 +1,32 @@
 /** Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
  * NVIDIA CORPORATION and its licensors retain all intellectual property
  * and proprietary rights in and to this software, related documentation
  * and any modifications thereto. Any use, reproduction, disclosure or
  * distribution of this software and related documentation without an express
  * license agreement from NVIDIA CORPORATION is strictly prohibited.
  */
 
 #pragma once
 
 namespace wp {
 
 // wp::initializer_array<> is a simple substitute for std::initializer_list<>
 // which doesn't depend on compiler implementation-specific support. It copies
 // elements by value and only supports array-style indexing.
 template<unsigned Length, typename Type>
 struct initializer_array
 {
     const Type storage[Length];
 
     CUDA_CALLABLE const Type operator[](unsigned i)
     {
         return storage[i];
     }
 
     CUDA_CALLABLE const Type operator[](unsigned i) const
     {
         return storage[i];
     }
 };
 
 } // namespace wp
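wp::initializer_array is a plain aggregate wrapping a fixed-length const array, so it can be brace-initialized where std::initializer_list support is unavailable and indexed by value. A minimal host-only sketch follows, assuming CUDA_CALLABLE is defined empty for the purpose of compiling outside the Warp build; the struct body is copied from the header above and the example values are arbitrary.

#include <cstdio>

// in Warp this macro comes from the native headers; defined empty here so the
// sketch compiles as a standalone host program
#define CUDA_CALLABLE

template<unsigned Length, typename Type>
struct initializer_array
{
    const Type storage[Length];

    CUDA_CALLABLE const Type operator[](unsigned i) { return storage[i]; }
    CUDA_CALLABLE const Type operator[](unsigned i) const { return storage[i]; }
};

int main()
{
    // aggregate initialization copies the elements by value
    const initializer_array<3, float> extents = {1.0f, 2.0f, 4.0f};

    for (unsigned i = 0; i < 3; ++i)
        printf("extents[%u] = %f\n", i, extents[i]);

    return 0;
}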