warp-lang 1.8.0__py3-none-win_amd64.whl → 1.9.0__py3-none-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only, and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of warp-lang might be problematic. Click here for more details.

Files changed (153)
  1. warp/__init__.py +282 -103
  2. warp/__init__.pyi +482 -110
  3. warp/bin/warp-clang.dll +0 -0
  4. warp/bin/warp.dll +0 -0
  5. warp/build.py +93 -30
  6. warp/build_dll.py +48 -63
  7. warp/builtins.py +955 -137
  8. warp/codegen.py +327 -209
  9. warp/config.py +1 -1
  10. warp/context.py +1363 -800
  11. warp/examples/core/example_marching_cubes.py +1 -0
  12. warp/examples/core/example_render_opengl.py +100 -3
  13. warp/examples/fem/example_apic_fluid.py +98 -52
  14. warp/examples/fem/example_convection_diffusion_dg.py +25 -4
  15. warp/examples/fem/example_diffusion_mgpu.py +8 -3
  16. warp/examples/fem/utils.py +68 -22
  17. warp/examples/interop/example_jax_callable.py +34 -4
  18. warp/examples/interop/example_jax_kernel.py +27 -1
  19. warp/fabric.py +1 -1
  20. warp/fem/cache.py +27 -19
  21. warp/fem/domain.py +2 -2
  22. warp/fem/field/nodal_field.py +2 -2
  23. warp/fem/field/virtual.py +266 -166
  24. warp/fem/geometry/geometry.py +5 -5
  25. warp/fem/integrate.py +200 -91
  26. warp/fem/space/restriction.py +4 -0
  27. warp/fem/space/shape/tet_shape_function.py +3 -10
  28. warp/jax_experimental/custom_call.py +1 -1
  29. warp/jax_experimental/ffi.py +203 -54
  30. warp/marching_cubes.py +708 -0
  31. warp/native/array.h +103 -8
  32. warp/native/builtin.h +90 -9
  33. warp/native/bvh.cpp +64 -28
  34. warp/native/bvh.cu +58 -58
  35. warp/native/bvh.h +2 -2
  36. warp/native/clang/clang.cpp +7 -7
  37. warp/native/coloring.cpp +13 -3
  38. warp/native/crt.cpp +2 -2
  39. warp/native/crt.h +3 -5
  40. warp/native/cuda_util.cpp +42 -11
  41. warp/native/cuda_util.h +10 -4
  42. warp/native/exports.h +1842 -1908
  43. warp/native/fabric.h +2 -1
  44. warp/native/hashgrid.cpp +37 -37
  45. warp/native/hashgrid.cu +2 -2
  46. warp/native/initializer_array.h +1 -1
  47. warp/native/intersect.h +4 -4
  48. warp/native/mat.h +1913 -119
  49. warp/native/mathdx.cpp +43 -43
  50. warp/native/mesh.cpp +24 -24
  51. warp/native/mesh.cu +26 -26
  52. warp/native/mesh.h +5 -3
  53. warp/native/nanovdb/GridHandle.h +179 -12
  54. warp/native/nanovdb/HostBuffer.h +8 -7
  55. warp/native/nanovdb/NanoVDB.h +517 -895
  56. warp/native/nanovdb/NodeManager.h +323 -0
  57. warp/native/nanovdb/PNanoVDB.h +2 -2
  58. warp/native/quat.h +337 -16
  59. warp/native/rand.h +7 -7
  60. warp/native/range.h +7 -1
  61. warp/native/reduce.cpp +10 -10
  62. warp/native/reduce.cu +13 -14
  63. warp/native/runlength_encode.cpp +2 -2
  64. warp/native/runlength_encode.cu +5 -5
  65. warp/native/scan.cpp +3 -3
  66. warp/native/scan.cu +4 -4
  67. warp/native/sort.cpp +10 -10
  68. warp/native/sort.cu +22 -22
  69. warp/native/sparse.cpp +8 -8
  70. warp/native/sparse.cu +14 -14
  71. warp/native/spatial.h +366 -17
  72. warp/native/svd.h +23 -8
  73. warp/native/temp_buffer.h +2 -2
  74. warp/native/tile.h +303 -70
  75. warp/native/tile_radix_sort.h +5 -1
  76. warp/native/tile_reduce.h +16 -25
  77. warp/native/tuple.h +2 -2
  78. warp/native/vec.h +385 -18
  79. warp/native/volume.cpp +54 -54
  80. warp/native/volume.cu +1 -1
  81. warp/native/volume.h +2 -1
  82. warp/native/volume_builder.cu +30 -37
  83. warp/native/warp.cpp +150 -149
  84. warp/native/warp.cu +337 -193
  85. warp/native/warp.h +227 -226
  86. warp/optim/linear.py +736 -271
  87. warp/render/imgui_manager.py +289 -0
  88. warp/render/render_opengl.py +137 -57
  89. warp/render/render_usd.py +0 -1
  90. warp/sim/collide.py +1 -2
  91. warp/sim/graph_coloring.py +2 -2
  92. warp/sim/integrator_vbd.py +10 -2
  93. warp/sparse.py +559 -176
  94. warp/tape.py +2 -0
  95. warp/tests/aux_test_module_aot.py +7 -0
  96. warp/tests/cuda/test_async.py +3 -3
  97. warp/tests/cuda/test_conditional_captures.py +101 -0
  98. warp/tests/geometry/test_marching_cubes.py +233 -12
  99. warp/tests/sim/test_cloth.py +89 -6
  100. warp/tests/sim/test_coloring.py +82 -7
  101. warp/tests/test_array.py +56 -5
  102. warp/tests/test_assert.py +53 -0
  103. warp/tests/test_atomic_cas.py +127 -114
  104. warp/tests/test_codegen.py +3 -2
  105. warp/tests/test_context.py +8 -15
  106. warp/tests/test_enum.py +136 -0
  107. warp/tests/test_examples.py +2 -2
  108. warp/tests/test_fem.py +45 -2
  109. warp/tests/test_fixedarray.py +229 -0
  110. warp/tests/test_func.py +18 -15
  111. warp/tests/test_future_annotations.py +7 -5
  112. warp/tests/test_linear_solvers.py +30 -0
  113. warp/tests/test_map.py +1 -1
  114. warp/tests/test_mat.py +1540 -378
  115. warp/tests/test_mat_assign_copy.py +178 -0
  116. warp/tests/test_mat_constructors.py +574 -0
  117. warp/tests/test_module_aot.py +287 -0
  118. warp/tests/test_print.py +69 -0
  119. warp/tests/test_quat.py +162 -34
  120. warp/tests/test_quat_assign_copy.py +145 -0
  121. warp/tests/test_reload.py +2 -1
  122. warp/tests/test_sparse.py +103 -0
  123. warp/tests/test_spatial.py +140 -34
  124. warp/tests/test_spatial_assign_copy.py +160 -0
  125. warp/tests/test_static.py +48 -0
  126. warp/tests/test_struct.py +43 -3
  127. warp/tests/test_tape.py +38 -0
  128. warp/tests/test_types.py +0 -20
  129. warp/tests/test_vec.py +216 -441
  130. warp/tests/test_vec_assign_copy.py +143 -0
  131. warp/tests/test_vec_constructors.py +325 -0
  132. warp/tests/tile/test_tile.py +206 -152
  133. warp/tests/tile/test_tile_cholesky.py +605 -0
  134. warp/tests/tile/test_tile_load.py +169 -0
  135. warp/tests/tile/test_tile_mathdx.py +2 -558
  136. warp/tests/tile/test_tile_matmul.py +179 -0
  137. warp/tests/tile/test_tile_mlp.py +1 -1
  138. warp/tests/tile/test_tile_reduce.py +100 -11
  139. warp/tests/tile/test_tile_shared_memory.py +16 -16
  140. warp/tests/tile/test_tile_sort.py +59 -55
  141. warp/tests/unittest_suites.py +16 -0
  142. warp/tests/walkthrough_debug.py +1 -1
  143. warp/thirdparty/unittest_parallel.py +108 -9
  144. warp/types.py +554 -264
  145. warp/utils.py +68 -86
  146. {warp_lang-1.8.0.dist-info → warp_lang-1.9.0.dist-info}/METADATA +28 -65
  147. {warp_lang-1.8.0.dist-info → warp_lang-1.9.0.dist-info}/RECORD +150 -138
  148. warp/native/marching.cpp +0 -19
  149. warp/native/marching.cu +0 -514
  150. warp/native/marching.h +0 -19
  151. {warp_lang-1.8.0.dist-info → warp_lang-1.9.0.dist-info}/WHEEL +0 -0
  152. {warp_lang-1.8.0.dist-info → warp_lang-1.9.0.dist-info}/licenses/LICENSE.md +0 -0
  153. {warp_lang-1.8.0.dist-info → warp_lang-1.9.0.dist-info}/top_level.txt +0 -0
@@ -1,5 +1,5 @@
1
1
  // Copyright Contributors to the OpenVDB Project
2
- // SPDX-License-Identifier: MPL-2.0
2
+ // SPDX-License-Identifier: Apache-2.0
3
3
 
4
4
  /*!
5
5
  \file nanovdb/NanoVDB.h
@@ -99,12 +99,19 @@
99
99
 
100
100
  Array of: LeafNodes of size 8^3: bbox, bit masks, 512 voxel values, and min/max/avg/standard deviation values
101
101
 
102
+ ... optional padding ...
103
+
104
+ Array of: GridBlindMetaData (288 bytes). The offset and count are defined in GridData::mBlindMetadataOffset and GridData::mBlindMetadataCount
105
+
106
+ ... optional padding ...
107
+
108
+ Array of: blind data
102
109
 
103
110
  Notation: "]---[" implies it has optional padding, and "][" implies zero padding
104
111
 
105
112
  [GridData(672B)][TreeData(64B)]---[RootData][N x Root::Tile]---[InternalData<5>]---[InternalData<4>]---[LeafData<3>]---[BLINDMETA...]---[BLIND0]---[BLIND1]---etc.
106
- ^ ^ ^ ^ ^ ^
107
- | | | | | |
113
+ ^ ^ ^ ^ ^ ^ ^
114
+ | | | | | | GridBlindMetaData*
108
115
  +-- Start of 32B aligned buffer | | | | +-- Node0::DataType* leafData
109
116
  GridType::DataType* gridData | | | |
110
117
  | | | +-- Node1::DataType* lowerData
@@ -125,24 +132,19 @@
125
132
  // Do not change this value! 32 byte alignment is fixed in NanoVDB
126
133
  #define NANOVDB_DATA_ALIGNMENT 32
127
134
 
128
- // NANOVDB_MAGIC_NUMB is currently used for both grids and files (starting with v32.6.0)
129
- // NANOVDB_MAGIC_GRID will soon be used exclusively for grids (serialized to a single buffer)
130
- // NANOVDB_MAGIC_FILE will soon be used exclusively for files
131
- // NANOVDB_MAGIC_NODE will soon be used exclusively for NodeManager
132
- // NANOVDB_MAGIC_FRAG will soon be used exclusively for a fragmented grid, i.e. a grid that is not serialized
133
- // | : 0 in 30 corresponds to 0 in NanoVDB0
135
+ // NANOVDB_MAGIC_NUMB previously used for both grids and files (starting with v32.6.0)
136
+ // NANOVDB_MAGIC_GRID currently used exclusively for grids (serialized to a single buffer)
137
+ // NANOVDB_MAGIC_FILE currently used exclusively for files
138
+ // | : 0 in 30 corresponds to 0 in NanoVDB0
134
139
  #define NANOVDB_MAGIC_NUMB 0x304244566f6e614eUL // "NanoVDB0" in hex - little endian (uint64_t)
135
140
  #define NANOVDB_MAGIC_GRID 0x314244566f6e614eUL // "NanoVDB1" in hex - little endian (uint64_t)
136
141
  #define NANOVDB_MAGIC_FILE 0x324244566f6e614eUL // "NanoVDB2" in hex - little endian (uint64_t)
137
- #define NANOVDB_MAGIC_NODE 0x334244566f6e614eUL // "NanoVDB3" in hex - little endian (uint64_t)
138
- #define NANOVDB_MAGIC_FRAG 0x344244566f6e614eUL // "NanoVDB4" in hex - little endian (uint64_t)
139
142
  #define NANOVDB_MAGIC_MASK 0x00FFFFFFFFFFFFFFUL // use this mask to remove the number
140
143
 
141
- //#define NANOVDB_MAGIC_NUMBER 0x304244566f6e614eUL
142
- //#define NANOVDB_USE_NEW_MAGIC_NUMBERS// used to enable use of the new magic numbers described above
144
+ #define NANOVDB_USE_NEW_MAGIC_NUMBERS// enables use of the new magic numbers described above
143
145
 
144
146
  #define NANOVDB_MAJOR_VERSION_NUMBER 32 // reflects changes to the ABI and hence also the file format
145
- #define NANOVDB_MINOR_VERSION_NUMBER 7 // reflects changes to the API but not ABI
147
+ #define NANOVDB_MINOR_VERSION_NUMBER 8 // reflects changes to the API but not ABI
146
148
  #define NANOVDB_PATCH_VERSION_NUMBER 0 // reflects changes that does not affect the ABI or API
147
149
 
148
150
  #define TBB_SUPPRESS_DEPRECATED_MESSAGES 1
@@ -156,9 +158,6 @@
156
158
  // Use this to switch between std::ofstream or FILE implementations
157
159
  //#define NANOVDB_USE_IOSTREAMS
158
160
 
159
- // Use this to switch between old and new accessor methods
160
- #define NANOVDB_NEW_ACCESSOR_METHODS
161
-
162
161
  #define NANOVDB_FPN_BRANCHLESS
163
162
 
164
163
  #if !defined(NANOVDB_ALIGN)
@@ -205,6 +204,8 @@ class Point{};
205
204
  // --------------------------> GridType <------------------------------------
206
205
 
207
206
  /// @brief return the number of characters (including null termination) required to convert enum type to a string
207
+ ///
208
+ /// @note This curious implementation, which subtracts End from StrLen, avoids duplicate values in the enum!
208
209
  template <class EnumT>
209
210
  __hostdev__ inline constexpr uint32_t strlen(){return (uint32_t)EnumT::StrLen - (uint32_t)EnumT::End;}
210
211
 
@@ -275,7 +276,7 @@ __hostdev__ inline char* toStr(char *dst, GridType gridType)
275
276
  case GridType::Index: return util::strcpy(dst, "Index");
276
277
  case GridType::OnIndex: return util::strcpy(dst, "OnIndex");
277
278
  case GridType::IndexMask: return util::strcpy(dst, "IndexMask");
278
- case GridType::OnIndexMask: return util::strcpy(dst, "OnIndexMask");
279
+ case GridType::OnIndexMask: return util::strcpy(dst, "OnIndexMask");// StrLen = 11 + 1 + End
279
280
  case GridType::PointIndex: return util::strcpy(dst, "PointIndex");
280
281
  case GridType::Vec3u8: return util::strcpy(dst, "Vec3u8");
281
282
  case GridType::Vec3u16: return util::strcpy(dst, "Vec3u16");
@@ -311,7 +312,7 @@ __hostdev__ inline char* toStr(char *dst, GridClass gridClass)
311
312
  case GridClass::LevelSet: return util::strcpy(dst, "SDF");
312
313
  case GridClass::FogVolume: return util::strcpy(dst, "FOG");
313
314
  case GridClass::Staggered: return util::strcpy(dst, "MAC");
314
- case GridClass::PointIndex: return util::strcpy(dst, "PNTIDX");
315
+ case GridClass::PointIndex: return util::strcpy(dst, "PNTIDX");// StrLen = 6 + 1 + End
315
316
  case GridClass::PointData: return util::strcpy(dst, "PNTDAT");
316
317
  case GridClass::Topology: return util::strcpy(dst, "TOPO");
317
318
  case GridClass::VoxelVolume: return util::strcpy(dst, "VOX");
@@ -345,7 +346,7 @@ __hostdev__ inline const char* toStr(char *dst, GridFlags gridFlags)
345
346
  case GridFlags::HasBBox: return util::strcpy(dst, "has bbox");
346
347
  case GridFlags::HasMinMax: return util::strcpy(dst, "has min/max");
347
348
  case GridFlags::HasAverage: return util::strcpy(dst, "has average");
348
- case GridFlags::HasStdDeviation: return util::strcpy(dst, "has standard deviation");
349
+ case GridFlags::HasStdDeviation: return util::strcpy(dst, "has standard deviation");// StrLen = 22 + 1 + End
349
350
  case GridFlags::IsBreadthFirst: return util::strcpy(dst, "is breadth-first");
350
351
  default: return util::strcpy(dst, "end");
351
352
  }
@@ -359,10 +360,8 @@ enum class MagicType : uint32_t { Unknown = 0,// first 64 bits are neither of t
359
360
  NanoVDB = 2,// first 64 bits = NANOVDB_MAGIC_NUMB
360
361
  NanoGrid = 3,// first 64 bits = NANOVDB_MAGIC_GRID
361
362
  NanoFile = 4,// first 64 bits = NANOVDB_MAGIC_FILE
362
- NanoNode = 5,// first 64 bits = NANOVDB_MAGIC_NODE
363
- NanoFrag = 6,// first 64 bits = NANOVDB_MAGIC_FRAG
364
- End = 7,
365
- StrLen = End + 25};// this entry is used to determine the minimum size of c-string
363
+ End = 5,
364
+ StrLen = End + 14};// this entry is used to determine the minimum size of c-string
366
365
 
367
366
  /// @brief maps 64 bits of magic number to enum
368
367
  __hostdev__ inline MagicType toMagic(uint64_t magic)
@@ -371,8 +370,6 @@ __hostdev__ inline MagicType toMagic(uint64_t magic)
371
370
  case NANOVDB_MAGIC_NUMB: return MagicType::NanoVDB;
372
371
  case NANOVDB_MAGIC_GRID: return MagicType::NanoGrid;
373
372
  case NANOVDB_MAGIC_FILE: return MagicType::NanoFile;
374
- case NANOVDB_MAGIC_NODE: return MagicType::NanoNode;
375
- case NANOVDB_MAGIC_FRAG: return MagicType::NanoFrag;
376
373
  default: return (magic & ~uint32_t(0)) == 0x56444220UL ? MagicType::OpenVDB : MagicType::Unknown;
377
374
  }
378
375
  }
@@ -386,10 +383,8 @@ __hostdev__ inline char* toStr(char *dst, MagicType magic)
386
383
  switch (magic){
387
384
  case MagicType::Unknown: return util::strcpy(dst, "unknown");
388
385
  case MagicType::NanoVDB: return util::strcpy(dst, "nanovdb");
389
- case MagicType::NanoGrid: return util::strcpy(dst, "nanovdb::Grid");
386
+ case MagicType::NanoGrid: return util::strcpy(dst, "nanovdb::Grid");// StrLen = 13 + 1 + End
390
387
  case MagicType::NanoFile: return util::strcpy(dst, "nanovdb::File");
391
- case MagicType::NanoNode: return util::strcpy(dst, "nanovdb::NodeManager");
392
- case MagicType::NanoFrag: return util::strcpy(dst, "fragmented nanovdb::Grid");
393
388
  case MagicType::OpenVDB: return util::strcpy(dst, "openvdb");
394
389
  default: return util::strcpy(dst, "end");
395
390
  }
@@ -811,57 +806,57 @@ struct FloatTraits<Point, 1> // size of empty class in C++ is 1 byte and not 0 b
811
806
  template<typename BuildT>
812
807
  __hostdev__ inline GridType toGridType()
813
808
  {
814
- if (util::is_same<BuildT, float>::value) { // resolved at compile-time
809
+ if constexpr(util::is_same<BuildT, float>::value) { // resolved at compile-time
815
810
  return GridType::Float;
816
- } else if (util::is_same<BuildT, double>::value) {
811
+ } else if constexpr(util::is_same<BuildT, double>::value) {
817
812
  return GridType::Double;
818
- } else if (util::is_same<BuildT, int16_t>::value) {
813
+ } else if constexpr(util::is_same<BuildT, int16_t>::value) {
819
814
  return GridType::Int16;
820
- } else if (util::is_same<BuildT, int32_t>::value) {
815
+ } else if constexpr(util::is_same<BuildT, int32_t>::value) {
821
816
  return GridType::Int32;
822
- } else if (util::is_same<BuildT, int64_t>::value) {
817
+ } else if constexpr(util::is_same<BuildT, int64_t>::value) {
823
818
  return GridType::Int64;
824
- } else if (util::is_same<BuildT, Vec3f>::value) {
819
+ } else if constexpr(util::is_same<BuildT, Vec3f>::value) {
825
820
  return GridType::Vec3f;
826
- } else if (util::is_same<BuildT, Vec3d>::value) {
821
+ } else if constexpr(util::is_same<BuildT, Vec3d>::value) {
827
822
  return GridType::Vec3d;
828
- } else if (util::is_same<BuildT, uint32_t>::value) {
823
+ } else if constexpr(util::is_same<BuildT, uint32_t>::value) {
829
824
  return GridType::UInt32;
830
- } else if (util::is_same<BuildT, ValueMask>::value) {
825
+ } else if constexpr(util::is_same<BuildT, ValueMask>::value) {
831
826
  return GridType::Mask;
832
- } else if (util::is_same<BuildT, Half>::value) {
827
+ } else if constexpr(util::is_same<BuildT, Half>::value) {
833
828
  return GridType::Half;
834
- } else if (util::is_same<BuildT, ValueIndex>::value) {
829
+ } else if constexpr(util::is_same<BuildT, ValueIndex>::value) {
835
830
  return GridType::Index;
836
- } else if (util::is_same<BuildT, ValueOnIndex>::value) {
831
+ } else if constexpr(util::is_same<BuildT, ValueOnIndex>::value) {
837
832
  return GridType::OnIndex;
838
- } else if (util::is_same<BuildT, ValueIndexMask>::value) {
833
+ } else if constexpr(util::is_same<BuildT, ValueIndexMask>::value) {
839
834
  return GridType::IndexMask;
840
- } else if (util::is_same<BuildT, ValueOnIndexMask>::value) {
835
+ } else if constexpr(util::is_same<BuildT, ValueOnIndexMask>::value) {
841
836
  return GridType::OnIndexMask;
842
- } else if (util::is_same<BuildT, bool>::value) {
837
+ } else if constexpr(util::is_same<BuildT, bool>::value) {
843
838
  return GridType::Boolean;
844
- } else if (util::is_same<BuildT, math::Rgba8>::value) {
839
+ } else if constexpr(util::is_same<BuildT, math::Rgba8>::value) {
845
840
  return GridType::RGBA8;
846
- } else if (util::is_same<BuildT, Fp4>::value) {
841
+ } else if constexpr(util::is_same<BuildT, Fp4>::value) {
847
842
  return GridType::Fp4;
848
- } else if (util::is_same<BuildT, Fp8>::value) {
843
+ } else if constexpr(util::is_same<BuildT, Fp8>::value) {
849
844
  return GridType::Fp8;
850
- } else if (util::is_same<BuildT, Fp16>::value) {
845
+ } else if constexpr(util::is_same<BuildT, Fp16>::value) {
851
846
  return GridType::Fp16;
852
- } else if (util::is_same<BuildT, FpN>::value) {
847
+ } else if constexpr(util::is_same<BuildT, FpN>::value) {
853
848
  return GridType::FpN;
854
- } else if (util::is_same<BuildT, Vec4f>::value) {
849
+ } else if constexpr(util::is_same<BuildT, Vec4f>::value) {
855
850
  return GridType::Vec4f;
856
- } else if (util::is_same<BuildT, Vec4d>::value) {
851
+ } else if constexpr(util::is_same<BuildT, Vec4d>::value) {
857
852
  return GridType::Vec4d;
858
- } else if (util::is_same<BuildT, Point>::value) {
853
+ } else if constexpr(util::is_same<BuildT, Point>::value) {
859
854
  return GridType::PointIndex;
860
- } else if (util::is_same<BuildT, Vec3u8>::value) {
855
+ } else if constexpr(util::is_same<BuildT, Vec3u8>::value) {
861
856
  return GridType::Vec3u8;
862
- } else if (util::is_same<BuildT, Vec3u16>::value) {
857
+ } else if constexpr(util::is_same<BuildT, Vec3u16>::value) {
863
858
  return GridType::Vec3u16;
864
- } else if (util::is_same<BuildT, uint8_t>::value) {
859
+ } else if constexpr(util::is_same<BuildT, uint8_t>::value) {
865
860
  return GridType::UInt8;
866
861
  }
867
862
  return GridType::Unknown;
@@ -877,13 +872,13 @@ __hostdev__ inline GridType mapToGridType(){return toGridType<BuildT>();}
877
872
  template<typename BuildT>
878
873
  __hostdev__ inline GridClass toGridClass(GridClass defaultClass = GridClass::Unknown)
879
874
  {
880
- if (util::is_same<BuildT, ValueMask>::value) {
875
+ if constexpr(util::is_same<BuildT, ValueMask>::value) {
881
876
  return GridClass::Topology;
882
- } else if (BuildTraits<BuildT>::is_index) {
877
+ } else if constexpr(BuildTraits<BuildT>::is_index) {
883
878
  return GridClass::IndexGrid;
884
- } else if (util::is_same<BuildT, math::Rgba8>::value) {
879
+ } else if constexpr(util::is_same<BuildT, math::Rgba8>::value) {
885
880
  return GridClass::VoxelVolume;
886
- } else if (util::is_same<BuildT, Point>::value) {
881
+ } else if constexpr(util::is_same<BuildT, Point>::value) {
887
882
  return GridClass::PointIndex;
888
883
  }
889
884
  return defaultClass;
@@ -953,8 +948,6 @@ public:
953
948
  mFlags = 0u;
954
949
  for (auto mask : list) mFlags |= static_cast<Type>(mask);
955
950
  }
956
- //__hostdev__ Type& data() { return mFlags; }
957
- //__hostdev__ Type data() const { return mFlags; }
958
951
  __hostdev__ Type getFlags() const { return mFlags & (static_cast<Type>(GridFlags::End) - 1u); } // mask out everything except relevant bits
959
952
 
960
953
  __hostdev__ void setOn() { mFlags = ~Type(0u); }
@@ -1159,6 +1152,21 @@ public:
1159
1152
  __hostdev__ uint64_t* words() { return mWords; }
1160
1153
  __hostdev__ const uint64_t* words() const { return mWords; }
1161
1154
 
1155
+ template<typename WordT>
1156
+ __hostdev__ WordT getWord(uint32_t n) const
1157
+ {
1158
+ static_assert(util::is_same<WordT, uint8_t, uint16_t, uint32_t, uint64_t>::value);
1159
+ NANOVDB_ASSERT(n*8*sizeof(WordT) < WORD_COUNT);
1160
+ return reinterpret_cast<WordT*>(mWords)[n];
1161
+ }
1162
+ template<typename WordT>
1163
+ __hostdev__ void setWord(WordT w, uint32_t n)
1164
+ {
1165
+ static_assert(util::is_same<WordT, uint8_t, uint16_t, uint32_t, uint64_t>::value);
1166
+ NANOVDB_ASSERT(n*8*sizeof(WordT) < WORD_COUNT);
1167
+ reinterpret_cast<WordT*>(mWords)[n] = w;
1168
+ }
1169
+
1162
1170
  /// @brief Assignment operator that works with openvdb::util::NodeMask
1163
1171
  template<typename MaskT = Mask>
1164
1172
  __hostdev__ typename util::enable_if<!util::is_same<MaskT, Mask>::value, Mask&>::type operator=(const MaskT& other)
@@ -1228,6 +1236,25 @@ public:
1228
1236
  {
1229
1237
  on ? this->setOnAtomic(n) : this->setOffAtomic(n);
1230
1238
  }
1239
+ /*
1240
+ template<typename WordT>
1241
+ __device__ inline void setWordAtomic(WordT w, uint32_t n)
1242
+ {
1243
+ static_assert(util::is_same<WordT, uint8_t, uint16_t, uint32_t, uint64_t>::value);
1244
+ NANOVDB_ASSERT(n*8*sizeof(WordT) < WORD_COUNT);
1245
+ if constexpr(util::is_same<WordT,uint8_t>::value) {
1246
+ mask <<= x;
1247
+ } else if constexpr(util::is_same<WordT,uint16_t>::value) {
1248
+ unsigned int mask = w;
1249
+ if (n >> 1) mask <<= 16;
1250
+ atomicOr(reinterpret_cast<unsigned int*>(this) + n, mask);
1251
+ } else if constexpr(util::is_same<WordT,uint32_t>::value) {
1252
+ atomicOr(reinterpret_cast<unsigned int*>(this) + n, w);
1253
+ } else {
1254
+ atomicOr(reinterpret_cast<unsigned long long int*>(this) + n, w);
1255
+ }
1256
+ }
1257
+ */
1231
1258
  #endif
1232
1259
  /// @brief Set the specified bit on or off.
1233
1260
  __hostdev__ void set(uint32_t n, bool on)
@@ -1522,7 +1549,7 @@ inline void Map::set(double dx, const Vec3T& trans, double taper)
1522
1549
  struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) GridBlindMetaData
1523
1550
  { // 288 bytes
1524
1551
  static const int MaxNameSize = 256; // due to NULL termination the maximum length is one less!
1525
- int64_t mDataOffset; // byte offset to the blind data, relative to this GridBlindMetaData.
1552
+ int64_t mDataOffset; // byte offset to the blind data, relative to GridBlindMetaData::this.
1526
1553
  uint64_t mValueCount; // number of blind values, e.g. point count
1527
1554
  uint32_t mValueSize;// byte size of each value, e.g. 4 if mDataType=Float and 1 if mDataType=Unknown since that amounts to char
1528
1555
  GridBlindDataSemantic mSemantic; // semantic meaning of the data.
@@ -1531,16 +1558,73 @@ struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) GridBlindMetaData
1531
1558
  char mName[MaxNameSize]; // note this includes the NULL termination
1532
1559
  // no padding required for 32 byte alignment
1533
1560
 
1534
- // disallow copy-construction since methods like blindData and getBlindData uses the this pointer!
1535
- GridBlindMetaData(const GridBlindMetaData&) = delete;
1561
+ /// @brief Empty constructor
1562
+ GridBlindMetaData()
1563
+ : mDataOffset(0)
1564
+ , mValueCount(0)
1565
+ , mValueSize(0)
1566
+ , mSemantic(GridBlindDataSemantic::Unknown)
1567
+ , mDataClass(GridBlindDataClass::Unknown)
1568
+ , mDataType(GridType::Unknown)
1569
+ {
1570
+ util::memzero(mName, MaxNameSize);
1571
+ }
1572
+
1573
+ GridBlindMetaData(int64_t dataOffset, uint64_t valueCount, uint32_t valueSize, GridBlindDataSemantic semantic, GridBlindDataClass dataClass, GridType dataType)
1574
+ : mDataOffset(dataOffset)
1575
+ , mValueCount(valueCount)
1576
+ , mValueSize(valueSize)
1577
+ , mSemantic(semantic)
1578
+ , mDataClass(dataClass)
1579
+ , mDataType(dataType)
1580
+ {
1581
+ util::memzero(mName, MaxNameSize);
1582
+ }
1583
+
1584
+ /// @brief Copy constructor that resets mDataOffset and zeros out mName
1585
+ GridBlindMetaData(const GridBlindMetaData &other)
1586
+ : mDataOffset(util::PtrDiff(util::PtrAdd(&other, other.mDataOffset), this))
1587
+ , mValueCount(other.mValueCount)
1588
+ , mValueSize(other.mValueSize)
1589
+ , mSemantic(other.mSemantic)
1590
+ , mDataClass(other.mDataClass)
1591
+ , mDataType(other.mDataType)
1592
+ {
1593
+ util::strncpy(mName, other.mName, MaxNameSize);
1594
+ }
1595
+
1596
+ /// @brief Copy assignment operator that resets mDataOffset and copies mName
1597
+ /// @param rhs right-hand instance to copy
1598
+ /// @return reference to itself
1599
+ const GridBlindMetaData& operator=(const GridBlindMetaData& rhs)
1600
+ {
1601
+ mDataOffset = util::PtrDiff(util::PtrAdd(&rhs, rhs.mDataOffset), this);
1602
+ mValueCount = rhs.mValueCount;
1603
+ mValueSize = rhs. mValueSize;
1604
+ mSemantic = rhs.mSemantic;
1605
+ mDataClass = rhs.mDataClass;
1606
+ mDataType = rhs.mDataType;
1607
+ util::strncpy(mName, rhs.mName, MaxNameSize);
1608
+ return *this;
1609
+ }
1536
1610
 
1537
- // disallow copy-assignment since methods like blindData and getBlindData uses the this pointer!
1538
- const GridBlindMetaData& operator=(const GridBlindMetaData&) = delete;
1611
+ __hostdev__ void setBlindData(const void* blindData)
1612
+ {
1613
+ mDataOffset = util::PtrDiff(blindData, this);
1614
+ }
1539
1615
 
1540
- __hostdev__ void setBlindData(void* blindData) { mDataOffset = util::PtrDiff(blindData, this); }
1616
+ /// @brief Sets the name string
1617
+ /// @param name c-string source name
1618
+ /// @return returns false if @c name has too many characters
1619
+ __hostdev__ bool setName(const char* name){return util::strncpy(mName, name, MaxNameSize)[MaxNameSize-1] == '\0';}
1541
1620
 
1542
- // unsafe
1543
- __hostdev__ const void* blindData() const {return util::PtrAdd(this, mDataOffset);}
1621
+ /// @brief returns a const void point to the blind data
1622
+ /// @note assumes that setBlinddData was called
1623
+ __hostdev__ const void* blindData() const
1624
+ {
1625
+ NANOVDB_ASSERT(mDataOffset != 0);
1626
+ return util::PtrAdd(this, mDataOffset);
1627
+ }
1544
1628
 
1545
1629
  /// @brief Get a const pointer to the blind data represented by this meta data
1546
1630
  /// @tparam BlindDataT Expected value type of the blind data.
@@ -1549,11 +1633,11 @@ struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) GridBlindMetaData
1549
1633
  template<typename BlindDataT>
1550
1634
  __hostdev__ const BlindDataT* getBlindData() const
1551
1635
  {
1552
- //if (mDataType != toGridType<BlindDataT>()) printf("getBlindData mismatch\n");
1553
- return mDataType == toGridType<BlindDataT>() ? util::PtrAdd<BlindDataT>(this, mDataOffset) : nullptr;
1636
+ return mDataOffset && (mDataType == toGridType<BlindDataT>()) ? util::PtrAdd<BlindDataT>(this, mDataOffset) : nullptr;
1554
1637
  }
1555
1638
 
1556
1639
  /// @brief return true if this meta data has a valid combination of semantic, class and value tags
1640
+ /// @note this does not check if the mDataOffset has been set!
1557
1641
  __hostdev__ bool isValid() const
1558
1642
  {
1559
1643
  auto check = [&]()->bool{
@@ -1655,7 +1739,7 @@ struct NodeTrait<const GridOrTreeOrRootT, 3>
1655
1739
  using type = const typename GridOrTreeOrRootT::RootNodeType;
1656
1740
  };
1657
1741
 
1658
- // ----------------------------> Froward decelerations of random access methods <--------------------------------------
1742
+ // ------------> Froward decelerations of accelerated random access methods <---------------
1659
1743
 
1660
1744
  template<typename BuildT>
1661
1745
  struct GetValue;
@@ -1695,7 +1779,7 @@ __hostdev__ inline char* toStr(char *dst, CheckMode mode)
1695
1779
  switch (mode){
1696
1780
  case CheckMode::Half: return util::strcpy(dst, "half");
1697
1781
  case CheckMode::Full: return util::strcpy(dst, "full");
1698
- default: return util::strcpy(dst, "disabled");
1782
+ default: return util::strcpy(dst, "disabled");// StrLen = 8 + 1 + End
1699
1783
  }
1700
1784
  }
1701
1785
 
@@ -1858,7 +1942,11 @@ struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) GridData
1858
1942
  mBlindMetadataCount = 0u; // i.e. no blind data
1859
1943
  mData0 = 0u; // zero padding
1860
1944
  mData1 = 0u; // only used for index and point grids
1945
+ #ifdef NANOVDB_USE_NEW_MAGIC_NUMBERS
1946
+ mData2 = 0u;// unused
1947
+ #else
1861
1948
  mData2 = NANOVDB_MAGIC_GRID; // since version 32.6.0 (will change in the future)
1949
+ #endif
1862
1950
  }
1863
1951
  /// @brief return true if the magic number and the version are both valid
1864
1952
  __hostdev__ bool isValid() const {
@@ -2154,12 +2242,12 @@ public:
2154
2242
  __hostdev__ bool hasStdDeviation() const { return DataType::mFlags.isMaskOn(GridFlags::HasStdDeviation); }
2155
2243
  __hostdev__ bool isBreadthFirst() const { return DataType::mFlags.isMaskOn(GridFlags::IsBreadthFirst); }
2156
2244
 
2157
- /// @brief return true if the specified node type is layed out breadth-first in memory and has a fixed size.
2245
+ /// @brief return true if the specified node type is laid out breadth-first in memory and has a fixed size.
2158
2246
  /// This allows for sequential access to the nodes.
2159
2247
  template<typename NodeT>
2160
2248
  __hostdev__ bool isSequential() const { return NodeT::FIXED_SIZE && this->isBreadthFirst(); }
2161
2249
 
2162
- /// @brief return true if the specified node level is layed out breadth-first in memory and has a fixed size.
2250
+ /// @brief return true if the specified node level is laid out breadth-first in memory and has a fixed size.
2163
2251
  /// This allows for sequential access to the nodes.
2164
2252
  template<int LEVEL>
2165
2253
  __hostdev__ bool isSequential() const { return NodeTrait<TreeT, LEVEL>::type::FIXED_SIZE && this->isBreadthFirst(); }
@@ -2258,7 +2346,6 @@ struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) TreeData
2258
2346
  uint32_t mTileCount[3]; // 12B, total number of active tile values at the lower internal, upper internal and root node levels
2259
2347
  uint64_t mVoxelCount; // 8B, total number of active voxels in the root and all its child nodes.
2260
2348
  // No padding since it's always 32B aligned
2261
- //__hostdev__ TreeData& operator=(const TreeData& other){return *util::memcpy(this, &other);}
2262
2349
  TreeData& operator=(const TreeData&) = default;
2263
2350
  __hostdev__ void setRoot(const void* root) {
2264
2351
  NANOVDB_ASSERT(root);
@@ -2553,7 +2640,7 @@ struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) RootData
2553
2640
  ValueT value; // value of tile (i.e. no child node)
2554
2641
  }; // Tile
2555
2642
 
2556
- /// @brief Returns a non-const reference to the tile at the specified linear offset.
2643
+ /// @brief Returns a pointer to the tile at the specified linear offset.
2557
2644
  ///
2558
2645
  /// @warning The linear offset is assumed to be in the valid range
2559
2646
  __hostdev__ const Tile* tile(uint32_t n) const
@@ -2567,29 +2654,100 @@ struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) RootData
2567
2654
  return reinterpret_cast<Tile*>(this + 1) + n;
2568
2655
  }
2569
2656
 
2570
- __hostdev__ Tile* probeTile(const CoordT& ijk)
2657
+ template<typename DataT>
2658
+ class TileIter
2571
2659
  {
2572
- #if 1 // switch between linear and binary seach
2573
- const auto key = CoordToKey(ijk);
2574
- for (Tile *p = reinterpret_cast<Tile*>(this + 1), *q = p + mTableSize; p < q; ++p)
2575
- if (p->key == key)
2576
- return p;
2577
- return nullptr;
2578
- #else // do not enable binary search if tiles are not guaranteed to be sorted!!!!!!
2579
- int32_t low = 0, high = mTableSize; // low is inclusive and high is exclusive
2580
- while (low != high) {
2581
- int mid = low + ((high - low) >> 1);
2582
- const Tile* tile = &tiles[mid];
2583
- if (tile->key == key) {
2584
- return tile;
2585
- } else if (tile->key < key) {
2586
- low = mid + 1;
2587
- } else {
2588
- high = mid;
2589
- }
2660
+ protected:
2661
+ using TileT = typename util::match_const<Tile, DataT>::type;
2662
+ using NodeT = typename util::match_const<ChildT, DataT>::type;
2663
+ TileT *mBegin, *mPos, *mEnd;
2664
+
2665
+ public:
2666
+ __hostdev__ TileIter() : mBegin(nullptr), mPos(nullptr), mEnd(nullptr) {}
2667
+ __hostdev__ TileIter(DataT* data, uint32_t pos = 0)
2668
+ : mBegin(reinterpret_cast<TileT*>(data + 1))// tiles reside right after the RootData
2669
+ , mPos(mBegin + pos)
2670
+ , mEnd(mBegin + data->mTableSize)
2671
+ {
2672
+ NANOVDB_ASSERT(data);
2673
+ NANOVDB_ASSERT(mBegin <= mPos);// pos > mTableSize is allowed
2674
+ NANOVDB_ASSERT(mBegin <= mEnd);// mTableSize = 0 is possible
2590
2675
  }
2591
- return nullptr;
2592
- #endif
2676
+ __hostdev__ inline operator bool() const { return mPos < mEnd; }
2677
+ __hostdev__ inline auto pos() const {return mPos - mBegin; }
2678
+ __hostdev__ inline TileIter& operator++()
2679
+ {
2680
+ ++mPos;
2681
+ return *this;
2682
+ }
2683
+ __hostdev__ inline TileT& operator*() const
2684
+ {
2685
+ NANOVDB_ASSERT(mPos < mEnd);
2686
+ return *mPos;
2687
+ }
2688
+ __hostdev__ inline TileT* operator->() const
2689
+ {
2690
+ NANOVDB_ASSERT(mPos < mEnd);
2691
+ return mPos;
2692
+ }
2693
+ __hostdev__ inline DataT* data() const
2694
+ {
2695
+ NANOVDB_ASSERT(mBegin);
2696
+ return reinterpret_cast<DataT*>(mBegin) - 1;
2697
+ }
2698
+ __hostdev__ inline bool isChild() const
2699
+ {
2700
+ NANOVDB_ASSERT(mPos < mEnd);
2701
+ return mPos->child != 0;
2702
+ }
2703
+ __hostdev__ inline bool isValue() const
2704
+ {
2705
+ NANOVDB_ASSERT(mPos < mEnd);
2706
+ return mPos->child == 0;
2707
+ }
2708
+ __hostdev__ inline bool isValueOn() const
2709
+ {
2710
+ NANOVDB_ASSERT(mPos < mEnd);
2711
+ return mPos->child == 0 && mPos->state != 0;
2712
+ }
2713
+ __hostdev__ inline NodeT* child() const
2714
+ {
2715
+ NANOVDB_ASSERT(mPos < mEnd && mPos->child != 0);
2716
+ return util::PtrAdd<NodeT>(this->data(), mPos->child);// byte offset relative to RootData::this
2717
+ }
2718
+ __hostdev__ inline ValueT value() const
2719
+ {
2720
+ NANOVDB_ASSERT(mPos < mEnd && mPos->child == 0);
2721
+ return mPos->value;
2722
+ }
2723
+ };// TileIter
2724
+
2725
+ using TileIterator = TileIter<RootData>;
2726
+ using ConstTileIterator = TileIter<const RootData>;
2727
+
2728
+ __hostdev__ TileIterator beginTile() { return TileIterator(this); }
2729
+ __hostdev__ ConstTileIterator cbeginTile() const { return ConstTileIterator(this); }
2730
+
2731
+ __hostdev__ inline TileIterator probe(const CoordT& ijk)
2732
+ {
2733
+ const auto key = CoordToKey(ijk);
2734
+ TileIterator iter(this);
2735
+ for(; iter; ++iter) if (iter->key == key) break;
2736
+ return iter;
2737
+ }
2738
+
2739
+ __hostdev__ inline ConstTileIterator probe(const CoordT& ijk) const
2740
+ {
2741
+ const auto key = CoordToKey(ijk);
2742
+ ConstTileIterator iter(this);
2743
+ for(; iter; ++iter) if (iter->key == key) break;
2744
+ return iter;
2745
+ }
2746
+
2747
+ __hostdev__ inline Tile* probeTile(const CoordT& ijk)
2748
+ {
2749
+ auto iter = this->probe(ijk);
2750
+ return iter ? iter.operator->() : nullptr;
2593
2751
  }
2594
2752
 
2595
2753
  __hostdev__ inline const Tile* probeTile(const CoordT& ijk) const
@@ -2597,6 +2755,17 @@ struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) RootData
2597
2755
  return const_cast<RootData*>(this)->probeTile(ijk);
2598
2756
  }
2599
2757
 
2758
+ __hostdev__ inline ChildT* probeChild(const CoordT& ijk)
2759
+ {
2760
+ auto iter = this->probe(ijk);
2761
+ return iter && iter.isChild() ? iter.child() : nullptr;
2762
+ }
2763
+
2764
+ __hostdev__ inline const ChildT* probeChild(const CoordT& ijk) const
2765
+ {
2766
+ return const_cast<RootData*>(this)->probeChild(ijk);
2767
+ }
2768
+
2600
2769
  /// @brief Returns a const reference to the child node in the specified tile.
2601
2770
  ///
2602
2771
  /// @warning A child node is assumed to exist in the specified tile
@@ -2660,30 +2829,16 @@ public:
2660
2829
  protected:
2661
2830
  using DataT = typename util::match_const<DataType, RootT>::type;
2662
2831
  using TileT = typename util::match_const<Tile, RootT>::type;
2663
- DataT* mData;
2664
- uint32_t mPos, mSize;
2665
- __hostdev__ BaseIter(DataT* data = nullptr, uint32_t n = 0)
2666
- : mData(data)
2667
- , mPos(0)
2668
- , mSize(n)
2669
- {
2670
- }
2832
+ typename DataType::template TileIter<DataT> mTileIter;
2833
+ __hostdev__ BaseIter() : mTileIter() {}
2834
+ __hostdev__ BaseIter(DataT* data) : mTileIter(data){}
2671
2835
 
2672
2836
  public:
2673
- __hostdev__ operator bool() const { return mPos < mSize; }
2674
- __hostdev__ uint32_t pos() const { return mPos; }
2675
- __hostdev__ void next() { ++mPos; }
2676
- __hostdev__ TileT* tile() const { return mData->tile(mPos); }
2677
- __hostdev__ CoordType getOrigin() const
2678
- {
2679
- NANOVDB_ASSERT(*this);
2680
- return this->tile()->origin();
2681
- }
2682
- __hostdev__ CoordType getCoord() const
2683
- {
2684
- NANOVDB_ASSERT(*this);
2685
- return this->tile()->origin();
2686
- }
2837
+ __hostdev__ operator bool() const { return bool(mTileIter); }
2838
+ __hostdev__ uint32_t pos() const { return uint32_t(mTileIter.pos()); }
2839
+ __hostdev__ TileT* tile() const { return mTileIter.operator->(); }
2840
+ __hostdev__ CoordType getOrigin() const {return mTileIter->origin();}
2841
+ __hostdev__ CoordType getCoord() const {return this->getOrigin();}
2687
2842
  }; // Member class BaseIter
2688
2843
 
2689
2844
  template<typename RootT>
@@ -2692,41 +2847,26 @@ public:
2692
2847
  static_assert(util::is_same<typename util::remove_const<RootT>::type, RootNode>::value, "Invalid RootT");
2693
2848
  using BaseT = BaseIter<RootT>;
2694
2849
  using NodeT = typename util::match_const<ChildT, RootT>::type;
2850
+ using BaseT::mTileIter;
2695
2851
 
2696
2852
  public:
2697
- __hostdev__ ChildIter()
2698
- : BaseT()
2699
- {
2700
- }
2701
- __hostdev__ ChildIter(RootT* parent)
2702
- : BaseT(parent->data(), parent->tileCount())
2703
- {
2704
- NANOVDB_ASSERT(BaseT::mData);
2705
- while (*this && !this->tile()->isChild())
2706
- this->next();
2707
- }
2708
- __hostdev__ NodeT& operator*() const
2709
- {
2710
- NANOVDB_ASSERT(*this);
2711
- return *BaseT::mData->getChild(this->tile());
2712
- }
2713
- __hostdev__ NodeT* operator->() const
2853
+ __hostdev__ ChildIter() : BaseT() {}
2854
+ __hostdev__ ChildIter(RootT* parent) : BaseT(parent->data())
2714
2855
  {
2715
- NANOVDB_ASSERT(*this);
2716
- return BaseT::mData->getChild(this->tile());
2856
+ while (mTileIter && mTileIter.isValue()) ++mTileIter;
2717
2857
  }
2858
+ __hostdev__ NodeT& operator*() const {return *mTileIter.child();}
2859
+ __hostdev__ NodeT* operator->() const {return mTileIter.child();}
2718
2860
  __hostdev__ ChildIter& operator++()
2719
2861
  {
2720
- NANOVDB_ASSERT(BaseT::mData);
2721
- this->next();
2722
- while (*this && this->tile()->isValue())
2723
- this->next();
2862
+ ++mTileIter;
2863
+ while (mTileIter && mTileIter.isValue()) ++mTileIter;
2724
2864
  return *this;
2725
2865
  }
2726
2866
  __hostdev__ ChildIter operator++(int)
2727
2867
  {
2728
2868
  auto tmp = *this;
2729
- ++(*this);
2869
+ this->operator++();
2730
2870
  return tmp;
2731
2871
  }
2732
2872
  }; // Member class ChildIter
@@ -2734,48 +2874,33 @@ public:
2734
2874
  using ChildIterator = ChildIter<RootNode>;
2735
2875
  using ConstChildIterator = ChildIter<const RootNode>;
2736
2876
 
2737
- __hostdev__ ChildIterator beginChild() { return ChildIterator(this); }
2877
+ __hostdev__ ChildIterator beginChild() { return ChildIterator(this); }
2738
2878
  __hostdev__ ConstChildIterator cbeginChild() const { return ConstChildIterator(this); }
2739
2879
 
2740
2880
  template<typename RootT>
2741
2881
  class ValueIter : public BaseIter<RootT>
2742
2882
  {
2743
2883
  using BaseT = BaseIter<RootT>;
2884
+ using BaseT::mTileIter;
2744
2885
 
2745
2886
  public:
2746
- __hostdev__ ValueIter()
2747
- : BaseT()
2748
- {
2749
- }
2750
- __hostdev__ ValueIter(RootT* parent)
2751
- : BaseT(parent->data(), parent->tileCount())
2752
- {
2753
- NANOVDB_ASSERT(BaseT::mData);
2754
- while (*this && this->tile()->isChild())
2755
- this->next();
2756
- }
2757
- __hostdev__ ValueType operator*() const
2758
- {
2759
- NANOVDB_ASSERT(*this);
2760
- return this->tile()->value;
2761
- }
2762
- __hostdev__ bool isActive() const
2887
+ __hostdev__ ValueIter() : BaseT(){}
2888
+ __hostdev__ ValueIter(RootT* parent) : BaseT(parent->data())
2763
2889
  {
2764
- NANOVDB_ASSERT(*this);
2765
- return this->tile()->state;
2890
+ while (mTileIter && mTileIter.isChild()) ++mTileIter;
2766
2891
  }
2892
+ __hostdev__ ValueType operator*() const {return mTileIter.value();}
2893
+ __hostdev__ bool isActive() const {return mTileIter.isValueOn();}
2767
2894
  __hostdev__ ValueIter& operator++()
2768
2895
  {
2769
- NANOVDB_ASSERT(BaseT::mData);
2770
- this->next();
2771
- while (*this && this->tile()->isChild())
2772
- this->next();
2896
+ ++mTileIter;
2897
+ while (mTileIter && mTileIter.isChild()) ++mTileIter;
2773
2898
  return *this;
2774
2899
  }
2775
2900
  __hostdev__ ValueIter operator++(int)
2776
2901
  {
2777
2902
  auto tmp = *this;
2778
- ++(*this);
2903
+ this->operator++();
2779
2904
  return tmp;
2780
2905
  }
2781
2906
  }; // Member class ValueIter
@@ -2783,43 +2908,32 @@ public:
2783
2908
  using ValueIterator = ValueIter<RootNode>;
2784
2909
  using ConstValueIterator = ValueIter<const RootNode>;
2785
2910
 
2786
- __hostdev__ ValueIterator beginValue() { return ValueIterator(this); }
2911
+ __hostdev__ ValueIterator beginValue() { return ValueIterator(this); }
2787
2912
  __hostdev__ ConstValueIterator cbeginValueAll() const { return ConstValueIterator(this); }
2788
2913
 
2789
2914
  template<typename RootT>
2790
2915
  class ValueOnIter : public BaseIter<RootT>
2791
2916
  {
2792
2917
  using BaseT = BaseIter<RootT>;
2918
+ using BaseT::mTileIter;
2793
2919
 
2794
2920
  public:
2795
- __hostdev__ ValueOnIter()
2796
- : BaseT()
2797
- {
2798
- }
2799
- __hostdev__ ValueOnIter(RootT* parent)
2800
- : BaseT(parent->data(), parent->tileCount())
2921
+ __hostdev__ ValueOnIter() : BaseT(){}
2922
+ __hostdev__ ValueOnIter(RootT* parent) : BaseT(parent->data())
2801
2923
  {
2802
- NANOVDB_ASSERT(BaseT::mData);
2803
- while (*this && !this->tile()->isActive())
2804
- ++BaseT::mPos;
2805
- }
2806
- __hostdev__ ValueType operator*() const
2807
- {
2808
- NANOVDB_ASSERT(*this);
2809
- return this->tile()->value;
2924
+ while (mTileIter && !mTileIter.isValueOn()) ++mTileIter;
2810
2925
  }
2926
+ __hostdev__ ValueType operator*() const {return mTileIter.value();}
2811
2927
  __hostdev__ ValueOnIter& operator++()
2812
2928
  {
2813
- NANOVDB_ASSERT(BaseT::mData);
2814
- this->next();
2815
- while (*this && !this->tile()->isActive())
2816
- this->next();
2929
+ ++mTileIter;
2930
+ while (mTileIter && !mTileIter.isValueOn()) ++mTileIter;
2817
2931
  return *this;
2818
2932
  }
2819
2933
  __hostdev__ ValueOnIter operator++(int)
2820
2934
  {
2821
2935
  auto tmp = *this;
2822
- ++(*this);
2936
+ this->operator++();
2823
2937
  return tmp;
2824
2938
  }
2825
2939
  }; // Member class ValueOnIter
@@ -2827,7 +2941,7 @@ public:
2827
2941
  using ValueOnIterator = ValueOnIter<RootNode>;
2828
2942
  using ConstValueOnIterator = ValueOnIter<const RootNode>;
2829
2943
 
2830
- __hostdev__ ValueOnIterator beginValueOn() { return ValueOnIterator(this); }
2944
+ __hostdev__ ValueOnIterator beginValueOn() { return ValueOnIterator(this); }
2831
2945
  __hostdev__ ConstValueOnIterator cbeginValueOn() const { return ConstValueOnIterator(this); }
2832
2946
 
2833
2947
  template<typename RootT>
@@ -2835,53 +2949,36 @@ public:
2835
2949
  {
2836
2950
  using BaseT = BaseIter<RootT>;
2837
2951
  using NodeT = typename util::match_const<ChildT, RootT>::type;
2952
+ using BaseT::mTileIter;
2838
2953
 
2839
2954
  public:
2840
- __hostdev__ DenseIter()
2841
- : BaseT()
2842
- {
2843
- }
2844
- __hostdev__ DenseIter(RootT* parent)
2845
- : BaseT(parent->data(), parent->tileCount())
2846
- {
2847
- NANOVDB_ASSERT(BaseT::mData);
2848
- }
2955
+ __hostdev__ DenseIter() : BaseT(){}
2956
+ __hostdev__ DenseIter(RootT* parent) : BaseT(parent->data()){}
2849
2957
  __hostdev__ NodeT* probeChild(ValueType& value) const
2850
2958
  {
2851
- NANOVDB_ASSERT(*this);
2852
- NodeT* child = nullptr;
2853
- auto* t = this->tile();
2854
- if (t->isChild()) {
2855
- child = BaseT::mData->getChild(t);
2856
- } else {
2857
- value = t->value;
2858
- }
2859
- return child;
2860
- }
2861
- __hostdev__ bool isValueOn() const
2862
- {
2863
- NANOVDB_ASSERT(*this);
2864
- return this->tile()->state;
2959
+ if (mTileIter.isChild()) return mTileIter.child();
2960
+ value = mTileIter.value();
2961
+ return nullptr;
2865
2962
  }
2963
+ __hostdev__ bool isValueOn() const{return mTileIter.isValueOn();}
2866
2964
  __hostdev__ DenseIter& operator++()
2867
2965
  {
2868
- NANOVDB_ASSERT(BaseT::mData);
2869
- this->next();
2966
+ ++mTileIter;
2870
2967
  return *this;
2871
2968
  }
2872
2969
  __hostdev__ DenseIter operator++(int)
2873
2970
  {
2874
2971
  auto tmp = *this;
2875
- ++(*this);
2972
+ ++mTileIter;
2876
2973
  return tmp;
2877
2974
  }
2878
2975
  }; // Member class DenseIter
2879
2976
 
2880
- using DenseIterator = DenseIter<RootNode>;
2977
+ using DenseIterator = DenseIter<RootNode>;
2881
2978
  using ConstDenseIterator = DenseIter<const RootNode>;
2882
2979
 
2883
- __hostdev__ DenseIterator beginDense() { return DenseIterator(this); }
2884
- __hostdev__ ConstDenseIterator cbeginDense() const { return ConstDenseIterator(this); }
2980
+ __hostdev__ DenseIterator beginDense() { return DenseIterator(this); }
2981
+ __hostdev__ ConstDenseIterator cbeginDense() const { return ConstDenseIterator(this); }
2885
2982
  __hostdev__ ConstDenseIterator cbeginChildAll() const { return ConstDenseIterator(this); }
2886
2983
 
2887
2984
  /// @brief This class cannot be constructed or deleted
@@ -2933,7 +3030,6 @@ public:
2933
3030
  /// @brief Return true if this RootNode is empty, i.e. contains no values or nodes
2934
3031
  __hostdev__ bool isEmpty() const { return DataType::mTableSize == uint32_t(0); }
2935
3032
 
2936
- #ifdef NANOVDB_NEW_ACCESSOR_METHODS
2937
3033
  /// @brief Return the value of the given voxel
2938
3034
  __hostdev__ ValueType getValue(const CoordType& ijk) const { return this->template get<GetValue<BuildType>>(ijk); }
2939
3035
  __hostdev__ ValueType getValue(int i, int j, int k) const { return this->template get<GetValue<BuildType>>(CoordType(i, j, k)); }
@@ -2941,83 +3037,22 @@ public:
2941
3037
  /// @brief return the state and updates the value of the specified voxel
2942
3038
  __hostdev__ bool probeValue(const CoordType& ijk, ValueType& v) const { return this->template get<ProbeValue<BuildType>>(ijk, v); }
2943
3039
  __hostdev__ const LeafNodeType* probeLeaf(const CoordType& ijk) const { return this->template get<GetLeaf<BuildType>>(ijk); }
2944
- #else // NANOVDB_NEW_ACCESSOR_METHODS
2945
-
2946
- /// @brief Return the value of the given voxel
2947
- __hostdev__ ValueType getValue(const CoordType& ijk) const
2948
- {
2949
- if (const Tile* tile = DataType::probeTile(ijk)) {
2950
- return tile->isChild() ? this->getChild(tile)->getValue(ijk) : tile->value;
2951
- }
2952
- return DataType::mBackground;
2953
- }
2954
- __hostdev__ ValueType getValue(int i, int j, int k) const { return this->getValue(CoordType(i, j, k)); }
2955
-
2956
- __hostdev__ bool isActive(const CoordType& ijk) const
2957
- {
2958
- if (const Tile* tile = DataType::probeTile(ijk)) {
2959
- return tile->isChild() ? this->getChild(tile)->isActive(ijk) : tile->state;
2960
- }
2961
- return false;
2962
- }
2963
-
2964
- __hostdev__ bool probeValue(const CoordType& ijk, ValueType& v) const
2965
- {
2966
- if (const Tile* tile = DataType::probeTile(ijk)) {
2967
- if (tile->isChild()) {
2968
- const auto* child = this->getChild(tile);
2969
- return child->probeValue(ijk, v);
2970
- }
2971
- v = tile->value;
2972
- return tile->state;
2973
- }
2974
- v = DataType::mBackground;
2975
- return false;
2976
- }
2977
-
2978
- __hostdev__ const LeafNodeType* probeLeaf(const CoordType& ijk) const
2979
- {
2980
- const Tile* tile = DataType::probeTile(ijk);
2981
- if (tile && tile->isChild()) {
2982
- const auto* child = this->getChild(tile);
2983
- return child->probeLeaf(ijk);
2984
- }
2985
- return nullptr;
2986
- }
2987
-
2988
- #endif // NANOVDB_NEW_ACCESSOR_METHODS
2989
-
2990
- __hostdev__ const ChildNodeType* probeChild(const CoordType& ijk) const
2991
- {
2992
- const Tile* tile = DataType::probeTile(ijk);
2993
- return tile && tile->isChild() ? this->getChild(tile) : nullptr;
2994
- }
2995
-
2996
- __hostdev__ ChildNodeType* probeChild(const CoordType& ijk)
2997
- {
2998
- const Tile* tile = DataType::probeTile(ijk);
2999
- return tile && tile->isChild() ? this->getChild(tile) : nullptr;
3000
- }
3001
3040
 
3002
3041
  template<typename OpT, typename... ArgsT>
3003
- __hostdev__ auto get(const CoordType& ijk, ArgsT&&... args) const
3042
+ __hostdev__ typename OpT::Type get(const CoordType& ijk, ArgsT&&... args) const
3004
3043
  {
3005
3044
  if (const Tile* tile = this->probeTile(ijk)) {
3006
- if (tile->isChild())
3007
- return this->getChild(tile)->template get<OpT>(ijk, args...);
3045
+ if constexpr(OpT::LEVEL < LEVEL) if (tile->isChild()) return this->getChild(tile)->template get<OpT>(ijk, args...);
3008
3046
  return OpT::get(*tile, args...);
3009
3047
  }
3010
3048
  return OpT::get(*this, args...);
3011
3049
  }
3012
3050
 
3013
3051
  template<typename OpT, typename... ArgsT>
3014
- // __hostdev__ auto // occasionally fails with NVCC
3015
- __hostdev__ decltype(OpT::set(util::declval<Tile&>(), util::declval<ArgsT>()...))
3016
- set(const CoordType& ijk, ArgsT&&... args)
3052
+ __hostdev__ void set(const CoordType& ijk, ArgsT&&... args)
3017
3053
  {
3018
3054
  if (Tile* tile = DataType::probeTile(ijk)) {
3019
- if (tile->isChild())
3020
- return this->getChild(tile)->template set<OpT>(ijk, args...);
3055
+ if constexpr(OpT::LEVEL < LEVEL) if (tile->isChild()) return this->getChild(tile)->template set<OpT>(ijk, args...);
3021
3056
  return OpT::set(*tile, args...);
3022
3057
  }
3023
3058
  return OpT::set(*this, args...);
@@ -3032,78 +3067,6 @@ private:
3032
3067
 
3033
3068
  template<typename>
3034
3069
  friend class Tree;
3035
- #ifndef NANOVDB_NEW_ACCESSOR_METHODS
3036
- /// @brief Private method to return node information and update a ReadAccessor
3037
- template<typename AccT>
3038
- __hostdev__ typename AccT::NodeInfo getNodeInfoAndCache(const CoordType& ijk, const AccT& acc) const
3039
- {
3040
- using NodeInfoT = typename AccT::NodeInfo;
3041
- if (const Tile* tile = this->probeTile(ijk)) {
3042
- if (tile->isChild()) {
3043
- const auto* child = this->getChild(tile);
3044
- acc.insert(ijk, child);
3045
- return child->getNodeInfoAndCache(ijk, acc);
3046
- }
3047
- return NodeInfoT{LEVEL, ChildT::dim(), tile->value, tile->value, tile->value, 0, tile->origin(), tile->origin() + CoordType(ChildT::DIM)};
3048
- }
3049
- return NodeInfoT{LEVEL, ChildT::dim(), this->minimum(), this->maximum(), this->average(), this->stdDeviation(), this->bbox()[0], this->bbox()[1]};
3050
- }
3051
-
3052
- /// @brief Private method to return a voxel value and update a ReadAccessor
3053
- template<typename AccT>
3054
- __hostdev__ ValueType getValueAndCache(const CoordType& ijk, const AccT& acc) const
3055
- {
3056
- if (const Tile* tile = this->probeTile(ijk)) {
3057
- if (tile->isChild()) {
3058
- const auto* child = this->getChild(tile);
3059
- acc.insert(ijk, child);
3060
- return child->getValueAndCache(ijk, acc);
3061
- }
3062
- return tile->value;
3063
- }
3064
- return DataType::mBackground;
3065
- }
3066
-
3067
- template<typename AccT>
3068
- __hostdev__ bool isActiveAndCache(const CoordType& ijk, const AccT& acc) const
3069
- {
3070
- const Tile* tile = this->probeTile(ijk);
3071
- if (tile && tile->isChild()) {
3072
- const auto* child = this->getChild(tile);
3073
- acc.insert(ijk, child);
3074
- return child->isActiveAndCache(ijk, acc);
3075
- }
3076
- return false;
3077
- }
3078
-
3079
- template<typename AccT>
3080
- __hostdev__ bool probeValueAndCache(const CoordType& ijk, ValueType& v, const AccT& acc) const
3081
- {
3082
- if (const Tile* tile = this->probeTile(ijk)) {
3083
- if (tile->isChild()) {
3084
- const auto* child = this->getChild(tile);
3085
- acc.insert(ijk, child);
3086
- return child->probeValueAndCache(ijk, v, acc);
3087
- }
3088
- v = tile->value;
3089
- return tile->state;
3090
- }
3091
- v = DataType::mBackground;
3092
- return false;
3093
- }
3094
-
3095
- template<typename AccT>
3096
- __hostdev__ const LeafNodeType* probeLeafAndCache(const CoordType& ijk, const AccT& acc) const
3097
- {
3098
- const Tile* tile = this->probeTile(ijk);
3099
- if (tile && tile->isChild()) {
3100
- const auto* child = this->getChild(tile);
3101
- acc.insert(ijk, child);
3102
- return child->probeLeafAndCache(ijk, acc);
3103
- }
3104
- return nullptr;
3105
- }
3106
- #endif // NANOVDB_NEW_ACCESSOR_METHODS
3107
3070
 
3108
3071
  template<typename RayT, typename AccT>
3109
3072
  __hostdev__ uint32_t getDimAndCache(const CoordType& ijk, const RayT& ray, const AccT& acc) const
@@ -3120,15 +3083,15 @@ private:
3120
3083
  }
3121
3084
 
3122
3085
  template<typename OpT, typename AccT, typename... ArgsT>
3123
- //__hostdev__ decltype(OpT::get(util::declval<const Tile&>(), util::declval<ArgsT>()...))
3124
- __hostdev__ auto
3125
- getAndCache(const CoordType& ijk, const AccT& acc, ArgsT&&... args) const
3086
+ __hostdev__ typename OpT::Type getAndCache(const CoordType& ijk, const AccT& acc, ArgsT&&... args) const
3126
3087
  {
3127
3088
  if (const Tile* tile = this->probeTile(ijk)) {
3128
- if (tile->isChild()) {
3129
- const ChildT* child = this->getChild(tile);
3130
- acc.insert(ijk, child);
3131
- return child->template getAndCache<OpT>(ijk, acc, args...);
3089
+ if constexpr(OpT::LEVEL < LEVEL) {
3090
+ if (tile->isChild()) {
3091
+ const ChildT* child = this->getChild(tile);
3092
+ acc.insert(ijk, child);
3093
+ return child->template getAndCache<OpT>(ijk, acc, args...);
3094
+ }
3132
3095
  }
3133
3096
  return OpT::get(*tile, args...);
3134
3097
  }
@@ -3136,15 +3099,15 @@ private:
3136
3099
  }
3137
3100
 
3138
3101
  template<typename OpT, typename AccT, typename... ArgsT>
3139
- // __hostdev__ auto // occasionally fails with NVCC
3140
- __hostdev__ decltype(OpT::set(util::declval<Tile&>(), util::declval<ArgsT>()...))
3141
- setAndCache(const CoordType& ijk, const AccT& acc, ArgsT&&... args)
3102
+ __hostdev__ void setAndCache(const CoordType& ijk, const AccT& acc, ArgsT&&... args)
3142
3103
  {
3143
3104
  if (Tile* tile = DataType::probeTile(ijk)) {
3144
- if (tile->isChild()) {
3145
- ChildT* child = this->getChild(tile);
3146
- acc.insert(ijk, child);
3147
- return child->template setAndCache<OpT>(ijk, acc, args...);
3105
+ if constexpr(OpT::LEVEL < LEVEL) {
3106
+ if (tile->isChild()) {
3107
+ ChildT* child = this->getChild(tile);
3108
+ acc.insert(ijk, child);
3109
+ return child->template setAndCache<OpT>(ijk, acc, args...);
3110
+ }
3148
3111
  }
3149
3112
  return OpT::set(*tile, args...);
3150
3113
  }
@@ -3250,7 +3213,10 @@ struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) InternalData
3250
3213
  __hostdev__ const StatsT& average() const { return mAverage; }
3251
3214
  __hostdev__ const StatsT& stdDeviation() const { return mStdDevi; }
3252
3215
 
3253
- #if defined(__GNUC__) && !defined(__APPLE__) && !defined(__llvm__)
3216
+ // GCC 11 (and possibly prior versions) has a regression that results in invalid
3217
+ // warnings when -Wstringop-overflow is turned on. For details, refer to
3218
+ // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101854
3219
+ #if defined(__GNUC__) && (__GNUC__ < 12) && !defined(__APPLE__) && !defined(__llvm__)
3254
3220
  #pragma GCC diagnostic push
3255
3221
  #pragma GCC diagnostic ignored "-Wstringop-overflow"
3256
3222
  #endif
@@ -3258,7 +3224,7 @@ struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) InternalData
3258
3224
  __hostdev__ void setMax(const ValueT& v) { mMaximum = v; }
3259
3225
  __hostdev__ void setAvg(const StatsT& v) { mAverage = v; }
3260
3226
  __hostdev__ void setDev(const StatsT& v) { mStdDevi = v; }
3261
- #if defined(__GNUC__) && !defined(__APPLE__) && !defined(__llvm__)
3227
+ #if defined(__GNUC__) && (__GNUC__ < 12) && !defined(__APPLE__) && !defined(__llvm__)
3262
3228
  #pragma GCC diagnostic pop
3263
3229
  #endif
3264
3230
 
@@ -3517,41 +3483,12 @@ public:
3517
3483
  return DataType::mChildMask.isOn(SIZE - 1) ? this->getChild(SIZE - 1)->getLastValue() : DataType::getValue(SIZE - 1);
3518
3484
  }
3519
3485
 
3520
- #ifdef NANOVDB_NEW_ACCESSOR_METHODS
3521
3486
  /// @brief Return the value of the given voxel
3522
3487
  __hostdev__ ValueType getValue(const CoordType& ijk) const { return this->template get<GetValue<BuildType>>(ijk); }
3523
3488
  __hostdev__ bool isActive(const CoordType& ijk) const { return this->template get<GetState<BuildType>>(ijk); }
3524
3489
  /// @brief return the state and updates the value of the specified voxel
3525
3490
  __hostdev__ bool probeValue(const CoordType& ijk, ValueType& v) const { return this->template get<ProbeValue<BuildType>>(ijk, v); }
3526
3491
  __hostdev__ const LeafNodeType* probeLeaf(const CoordType& ijk) const { return this->template get<GetLeaf<BuildType>>(ijk); }
3527
- #else // NANOVDB_NEW_ACCESSOR_METHODS
3528
- __hostdev__ ValueType getValue(const CoordType& ijk) const
3529
- {
3530
- const uint32_t n = CoordToOffset(ijk);
3531
- return DataType::mChildMask.isOn(n) ? this->getChild(n)->getValue(ijk) : DataType::getValue(n);
3532
- }
3533
- __hostdev__ bool isActive(const CoordType& ijk) const
3534
- {
3535
- const uint32_t n = CoordToOffset(ijk);
3536
- return DataType::mChildMask.isOn(n) ? this->getChild(n)->isActive(ijk) : DataType::isActive(n);
3537
- }
3538
- __hostdev__ bool probeValue(const CoordType& ijk, ValueType& v) const
3539
- {
3540
- const uint32_t n = CoordToOffset(ijk);
3541
- if (DataType::mChildMask.isOn(n))
3542
- return this->getChild(n)->probeValue(ijk, v);
3543
- v = DataType::getValue(n);
3544
- return DataType::isActive(n);
3545
- }
3546
- __hostdev__ const LeafNodeType* probeLeaf(const CoordType& ijk) const
3547
- {
3548
- const uint32_t n = CoordToOffset(ijk);
3549
- if (DataType::mChildMask.isOn(n))
3550
- return this->getChild(n)->probeLeaf(ijk);
3551
- return nullptr;
3552
- }
3553
-
3554
- #endif // NANOVDB_NEW_ACCESSOR_METHODS
3555
3492
 
3556
3493
  __hostdev__ ChildNodeType* probeChild(const CoordType& ijk)
3557
3494
  {
@@ -3598,22 +3535,18 @@ public:
3598
3535
  __hostdev__ bool isActive() const { return DataType::mFlags & uint32_t(2); }
3599
3536
 
3600
3537
  template<typename OpT, typename... ArgsT>
3601
- __hostdev__ auto get(const CoordType& ijk, ArgsT&&... args) const
3538
+ __hostdev__ typename OpT::Type get(const CoordType& ijk, ArgsT&&... args) const
3602
3539
  {
3603
3540
  const uint32_t n = CoordToOffset(ijk);
3604
- if (this->isChild(n))
3605
- return this->getChild(n)->template get<OpT>(ijk, args...);
3541
+ if constexpr(OpT::LEVEL < LEVEL) if (this->isChild(n)) return this->getChild(n)->template get<OpT>(ijk, args...);
3606
3542
  return OpT::get(*this, n, args...);
3607
3543
  }
3608
3544
 
3609
3545
  template<typename OpT, typename... ArgsT>
3610
- //__hostdev__ auto // occasionally fails with NVCC
3611
- __hostdev__ decltype(OpT::set(util::declval<InternalNode&>(), util::declval<uint32_t>(), util::declval<ArgsT>()...))
3612
- set(const CoordType& ijk, ArgsT&&... args)
3546
+ __hostdev__ void set(const CoordType& ijk, ArgsT&&... args)
3613
3547
  {
3614
3548
  const uint32_t n = CoordToOffset(ijk);
3615
- if (this->isChild(n))
3616
- return this->getChild(n)->template set<OpT>(ijk, args...);
3549
+ if constexpr(OpT::LEVEL < LEVEL) if (this->isChild(n)) return this->getChild(n)->template set<OpT>(ijk, args...);
3617
3550
  return OpT::set(*this, n, args...);
3618
3551
  }
3619
3552
 
@@ -3628,64 +3561,6 @@ private:
3628
3561
  template<typename, uint32_t>
3629
3562
  friend class InternalNode;
3630
3563
 
3631
- #ifndef NANOVDB_NEW_ACCESSOR_METHODS
3632
- /// @brief Private read access method used by the ReadAccessor
3633
- template<typename AccT>
3634
- __hostdev__ ValueType getValueAndCache(const CoordType& ijk, const AccT& acc) const
3635
- {
3636
- const uint32_t n = CoordToOffset(ijk);
3637
- if (DataType::mChildMask.isOff(n))
3638
- return DataType::getValue(n);
3639
- const ChildT* child = this->getChild(n);
3640
- acc.insert(ijk, child);
3641
- return child->getValueAndCache(ijk, acc);
3642
- }
3643
- template<typename AccT>
3644
- __hostdev__ bool isActiveAndCache(const CoordType& ijk, const AccT& acc) const
3645
- {
3646
- const uint32_t n = CoordToOffset(ijk);
3647
- if (DataType::mChildMask.isOff(n))
3648
- return DataType::isActive(n);
3649
- const ChildT* child = this->getChild(n);
3650
- acc.insert(ijk, child);
3651
- return child->isActiveAndCache(ijk, acc);
3652
- }
3653
- template<typename AccT>
3654
- __hostdev__ bool probeValueAndCache(const CoordType& ijk, ValueType& v, const AccT& acc) const
3655
- {
3656
- const uint32_t n = CoordToOffset(ijk);
3657
- if (DataType::mChildMask.isOff(n)) {
3658
- v = DataType::getValue(n);
3659
- return DataType::isActive(n);
3660
- }
3661
- const ChildT* child = this->getChild(n);
3662
- acc.insert(ijk, child);
3663
- return child->probeValueAndCache(ijk, v, acc);
3664
- }
3665
- template<typename AccT>
3666
- __hostdev__ const LeafNodeType* probeLeafAndCache(const CoordType& ijk, const AccT& acc) const
3667
- {
3668
- const uint32_t n = CoordToOffset(ijk);
3669
- if (DataType::mChildMask.isOff(n))
3670
- return nullptr;
3671
- const ChildT* child = this->getChild(n);
3672
- acc.insert(ijk, child);
3673
- return child->probeLeafAndCache(ijk, acc);
3674
- }
3675
- template<typename AccT>
3676
- __hostdev__ typename AccT::NodeInfo getNodeInfoAndCache(const CoordType& ijk, const AccT& acc) const
3677
- {
3678
- using NodeInfoT = typename AccT::NodeInfo;
3679
- const uint32_t n = CoordToOffset(ijk);
3680
- if (DataType::mChildMask.isOff(n)) {
3681
- return NodeInfoT{LEVEL, this->dim(), this->minimum(), this->maximum(), this->average(), this->stdDeviation(), this->bbox()[0], this->bbox()[1]};
3682
- }
3683
- const ChildT* child = this->getChild(n);
3684
- acc.insert(ijk, child);
3685
- return child->getNodeInfoAndCache(ijk, acc);
3686
- }
3687
- #endif // NANOVDB_NEW_ACCESSOR_METHODS
3688
-
3689
3564
  template<typename RayT, typename AccT>
3690
3565
  __hostdev__ uint32_t getDimAndCache(const CoordType& ijk, const RayT& ray, const AccT& acc) const
3691
3566
  {
@@ -3703,29 +3578,31 @@ private:
3703
3578
  }
3704
3579
 
3705
3580
  template<typename OpT, typename AccT, typename... ArgsT>
3706
- __hostdev__ auto
3707
- //__hostdev__ decltype(OpT::get(util::declval<const InternalNode&>(), util::declval<uint32_t>(), util::declval<ArgsT>()...))
3708
- getAndCache(const CoordType& ijk, const AccT& acc, ArgsT&&... args) const
3581
+ __hostdev__ typename OpT::Type getAndCache(const CoordType& ijk, const AccT& acc, ArgsT&&... args) const
3709
3582
  {
3710
3583
  const uint32_t n = CoordToOffset(ijk);
3711
- if (DataType::mChildMask.isOff(n))
3712
- return OpT::get(*this, n, args...);
3713
- const ChildT* child = this->getChild(n);
3714
- acc.insert(ijk, child);
3715
- return child->template getAndCache<OpT>(ijk, acc, args...);
3584
+ if constexpr(OpT::LEVEL < LEVEL) {
3585
+ if (this->isChild(n)) {
3586
+ const ChildT* child = this->getChild(n);
3587
+ acc.insert(ijk, child);
3588
+ return child->template getAndCache<OpT>(ijk, acc, args...);
3589
+ }
3590
+ }
3591
+ return OpT::get(*this, n, args...);
3716
3592
  }
3717
3593
 
3718
3594
  template<typename OpT, typename AccT, typename... ArgsT>
3719
- //__hostdev__ auto // occasionally fails with NVCC
3720
- __hostdev__ decltype(OpT::set(util::declval<InternalNode&>(), util::declval<uint32_t>(), util::declval<ArgsT>()...))
3721
- setAndCache(const CoordType& ijk, const AccT& acc, ArgsT&&... args)
3595
+ __hostdev__ void setAndCache(const CoordType& ijk, const AccT& acc, ArgsT&&... args)
3722
3596
  {
3723
3597
  const uint32_t n = CoordToOffset(ijk);
3724
- if (DataType::mChildMask.isOff(n))
3725
- return OpT::set(*this, n, args...);
3726
- ChildT* child = this->getChild(n);
3727
- acc.insert(ijk, child);
3728
- return child->template setAndCache<OpT>(ijk, acc, args...);
3598
+ if constexpr(OpT::LEVEL < LEVEL) {
3599
+ if (this->isChild(n)) {
3600
+ ChildT* child = this->getChild(n);
3601
+ acc.insert(ijk, child);
3602
+ return child->template setAndCache<OpT>(ijk, acc, args...);
3603
+ }
3604
+ }
3605
+ return OpT::set(*this, n, args...);
3729
3606
  }
3730
3607
 
3731
3608
  }; // InternalNode class
@@ -3782,10 +3659,20 @@ struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) LeafData
3782
3659
  __hostdev__ FloatType getAvg() const { return mAverage; }
3783
3660
  __hostdev__ FloatType getDev() const { return mStdDevi; }
3784
3661
 
3662
+ // GCC 11 (and possibly prior versions) has a regression that results in invalid
3663
+ // warnings when -Wstringop-overflow is turned on. For details, refer to
3664
+ // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101854
3665
+ #if defined(__GNUC__) && (__GNUC__ < 12) && !defined(__APPLE__) && !defined(__llvm__)
3666
+ #pragma GCC diagnostic push
3667
+ #pragma GCC diagnostic ignored "-Wstringop-overflow"
3668
+ #endif
3785
3669
  __hostdev__ void setMin(const ValueType& v) { mMinimum = v; }
3786
3670
  __hostdev__ void setMax(const ValueType& v) { mMaximum = v; }
3787
3671
  __hostdev__ void setAvg(const FloatType& v) { mAverage = v; }
3788
3672
  __hostdev__ void setDev(const FloatType& v) { mStdDevi = v; }
3673
+ #if defined(__GNUC__) && (__GNUC__ < 12) && !defined(__APPLE__) && !defined(__llvm__)
3674
+ #pragma GCC diagnostic pop
3675
+ #endif
3789
3676
 
3790
3677
  template<typename T>
3791
3678
  __hostdev__ void setOrigin(const T& ijk) { mBBoxMin = ijk; }
@@ -4644,29 +4531,6 @@ private:
4644
4531
  template<typename, uint32_t>
4645
4532
  friend class InternalNode;
4646
4533
 
4647
- #ifndef NANOVDB_NEW_ACCESSOR_METHODS
4648
- /// @brief Private method to return a voxel value and update a (dummy) ReadAccessor
4649
- template<typename AccT>
4650
- __hostdev__ ValueType getValueAndCache(const CoordT& ijk, const AccT&) const { return this->getValue(ijk); }
4651
-
4652
- /// @brief Return the node information.
4653
- template<typename AccT>
4654
- __hostdev__ typename AccT::NodeInfo getNodeInfoAndCache(const CoordType& /*ijk*/, const AccT& /*acc*/) const
4655
- {
4656
- using NodeInfoT = typename AccT::NodeInfo;
4657
- return NodeInfoT{LEVEL, this->dim(), this->minimum(), this->maximum(), this->average(), this->stdDeviation(), this->bbox()[0], this->bbox()[1]};
4658
- }
4659
-
4660
- template<typename AccT>
4661
- __hostdev__ bool isActiveAndCache(const CoordT& ijk, const AccT&) const { return this->isActive(ijk); }
4662
-
4663
- template<typename AccT>
4664
- __hostdev__ bool probeValueAndCache(const CoordT& ijk, ValueType& v, const AccT&) const { return this->probeValue(ijk, v); }
4665
-
4666
- template<typename AccT>
4667
- __hostdev__ const LeafNode* probeLeafAndCache(const CoordT&, const AccT&) const { return this; }
4668
- #endif
4669
-
4670
4534
  template<typename RayT, typename AccT>
4671
4535
  __hostdev__ uint32_t getDimAndCache(const CoordT&, const RayT& /*ray*/, const AccT&) const
4672
4536
  {
@@ -4712,7 +4576,7 @@ __hostdev__ inline bool LeafNode<ValueT, CoordT, MaskT, LOG2DIM>::updateBBox()
4712
4576
  };
4713
4577
  uint64_t *w = DataType::mValueMask.words(), word64 = *w;
4714
4578
  uint32_t Xmin = word64 ? 0u : 8u, Xmax = Xmin;
4715
- for (int i = 1; i < 8; ++i) { // last loop over 8 64 bit words
4579
+ for (int i = 1; i < 8; ++i) { // last loop over 7 remaining 64 bit words
4716
4580
  if (w[i]) { // skip if word has no set bits
4717
4581
  word64 |= w[i]; // union 8 x 64 bits words into one 64 bit word
4718
4582
  if (Xmin == 8)
@@ -4943,19 +4807,7 @@ public:
4943
4807
  using CoordType = typename RootT::CoordType;
4944
4808
 
4945
4809
  static const int CacheLevels = 0;
4946
- #ifndef NANOVDB_NEW_ACCESSOR_METHODS
4947
- struct NodeInfo
4948
- {
4949
- uint32_t mLevel; // 4B
4950
- uint32_t mDim; // 4B
4951
- ValueType mMinimum; // typically 4B
4952
- ValueType mMaximum; // typically 4B
4953
- FloatType mAverage; // typically 4B
4954
- FloatType mStdDevi; // typically 4B
4955
- CoordType mBBoxMin; // 3*4B
4956
- CoordType mBBoxMax; // 3*4B
4957
- };
4958
- #endif
4810
+
4959
4811
  /// @brief Constructor from a root node
4960
4812
  __hostdev__ ReadAccessor(const RootT& root)
4961
4813
  : mRoot{&root}
@@ -4984,7 +4836,6 @@ public:
4984
4836
  ReadAccessor(const ReadAccessor&) = default;
4985
4837
  ~ReadAccessor() = default;
4986
4838
  ReadAccessor& operator=(const ReadAccessor&) = default;
4987
- #ifdef NANOVDB_NEW_ACCESSOR_METHODS
4988
4839
  __hostdev__ ValueType getValue(const CoordType& ijk) const
4989
4840
  {
4990
4841
  return this->template get<GetValue<BuildT>>(ijk);
@@ -4996,44 +4847,6 @@ public:
4996
4847
  __hostdev__ bool isActive(const CoordType& ijk) const { return this->template get<GetState<BuildT>>(ijk); }
4997
4848
  __hostdev__ bool probeValue(const CoordType& ijk, ValueType& v) const { return this->template get<ProbeValue<BuildT>>(ijk, v); }
4998
4849
  __hostdev__ const LeafT* probeLeaf(const CoordType& ijk) const { return this->template get<GetLeaf<BuildT>>(ijk); }
4999
- #else // NANOVDB_NEW_ACCESSOR_METHODS
5000
- __hostdev__ ValueType getValue(const CoordType& ijk) const
5001
- {
5002
- return mRoot->getValueAndCache(ijk, *this);
5003
- }
5004
- __hostdev__ ValueType getValue(int i, int j, int k) const
5005
- {
5006
- return this->getValue(CoordType(i, j, k));
5007
- }
5008
- __hostdev__ ValueType operator()(const CoordType& ijk) const
5009
- {
5010
- return this->getValue(ijk);
5011
- }
5012
- __hostdev__ ValueType operator()(int i, int j, int k) const
5013
- {
5014
- return this->getValue(CoordType(i, j, k));
5015
- }
5016
-
5017
- __hostdev__ NodeInfo getNodeInfo(const CoordType& ijk) const
5018
- {
5019
- return mRoot->getNodeInfoAndCache(ijk, *this);
5020
- }
5021
-
5022
- __hostdev__ bool isActive(const CoordType& ijk) const
5023
- {
5024
- return mRoot->isActiveAndCache(ijk, *this);
5025
- }
5026
-
5027
- __hostdev__ bool probeValue(const CoordType& ijk, ValueType& v) const
5028
- {
5029
- return mRoot->probeValueAndCache(ijk, v, *this);
5030
- }
5031
-
5032
- __hostdev__ const LeafT* probeLeaf(const CoordType& ijk) const
5033
- {
5034
- return mRoot->probeLeafAndCache(ijk, *this);
5035
- }
5036
- #endif // NANOVDB_NEW_ACCESSOR_METHODS
5037
4850
  template<typename RayT>
5038
4851
  __hostdev__ uint32_t getDim(const CoordType& ijk, const RayT& ray) const
5039
4852
  {
@@ -5093,9 +4906,7 @@ public:
5093
4906
  using CoordType = CoordT;
5094
4907
 
5095
4908
  static const int CacheLevels = 1;
5096
- #ifndef NANOVDB_NEW_ACCESSOR_METHODS
5097
- using NodeInfo = typename ReadAccessor<ValueT, -1, -1, -1>::NodeInfo;
5098
- #endif
4909
+
5099
4910
  /// @brief Constructor from a root node
5100
4911
  __hostdev__ ReadAccessor(const RootT& root)
5101
4912
  : mKey(CoordType::max())
@@ -5137,7 +4948,6 @@ public:
5137
4948
  (ijk[2] & int32_t(~NodeT::MASK)) == mKey[2];
5138
4949
  }
5139
4950
 
5140
- #ifdef NANOVDB_NEW_ACCESSOR_METHODS
5141
4951
  __hostdev__ ValueType getValue(const CoordType& ijk) const
5142
4952
  {
5143
4953
  return this->template get<GetValue<BuildT>>(ijk);
@@ -5149,75 +4959,25 @@ public:
5149
4959
  __hostdev__ bool isActive(const CoordType& ijk) const { return this->template get<GetState<BuildT>>(ijk); }
5150
4960
  __hostdev__ bool probeValue(const CoordType& ijk, ValueType& v) const { return this->template get<ProbeValue<BuildT>>(ijk, v); }
5151
4961
  __hostdev__ const LeafT* probeLeaf(const CoordType& ijk) const { return this->template get<GetLeaf<BuildT>>(ijk); }
5152
- #else // NANOVDB_NEW_ACCESSOR_METHODS
5153
- __hostdev__ ValueType getValue(const CoordType& ijk) const
5154
- {
5155
- if (this->isCached(ijk))
5156
- return mNode->getValueAndCache(ijk, *this);
5157
- return mRoot->getValueAndCache(ijk, *this);
5158
- }
5159
- __hostdev__ ValueType getValue(int i, int j, int k) const
5160
- {
5161
- return this->getValue(CoordType(i, j, k));
5162
- }
5163
- __hostdev__ ValueType operator()(const CoordType& ijk) const
5164
- {
5165
- return this->getValue(ijk);
5166
- }
5167
- __hostdev__ ValueType operator()(int i, int j, int k) const
5168
- {
5169
- return this->getValue(CoordType(i, j, k));
5170
- }
5171
-
5172
- __hostdev__ NodeInfo getNodeInfo(const CoordType& ijk) const
5173
- {
5174
- if (this->isCached(ijk))
5175
- return mNode->getNodeInfoAndCache(ijk, *this);
5176
- return mRoot->getNodeInfoAndCache(ijk, *this);
5177
- }
5178
-
5179
- __hostdev__ bool isActive(const CoordType& ijk) const
5180
- {
5181
- if (this->isCached(ijk))
5182
- return mNode->isActiveAndCache(ijk, *this);
5183
- return mRoot->isActiveAndCache(ijk, *this);
5184
- }
5185
-
5186
- __hostdev__ bool probeValue(const CoordType& ijk, ValueType& v) const
5187
- {
5188
- if (this->isCached(ijk))
5189
- return mNode->probeValueAndCache(ijk, v, *this);
5190
- return mRoot->probeValueAndCache(ijk, v, *this);
5191
- }
5192
4962
 
5193
- __hostdev__ const LeafT* probeLeaf(const CoordType& ijk) const
5194
- {
5195
- if (this->isCached(ijk))
5196
- return mNode->probeLeafAndCache(ijk, *this);
5197
- return mRoot->probeLeafAndCache(ijk, *this);
5198
- }
5199
- #endif // NANOVDB_NEW_ACCESSOR_METHODS
5200
4963
  template<typename RayT>
5201
4964
  __hostdev__ uint32_t getDim(const CoordType& ijk, const RayT& ray) const
5202
4965
  {
5203
- if (this->isCached(ijk))
5204
- return mNode->getDimAndCache(ijk, ray, *this);
4966
+ if (this->isCached(ijk)) return mNode->getDimAndCache(ijk, ray, *this);
5205
4967
  return mRoot->getDimAndCache(ijk, ray, *this);
5206
4968
  }
5207
4969
 
5208
4970
  template<typename OpT, typename... ArgsT>
5209
- __hostdev__ auto get(const CoordType& ijk, ArgsT&&... args) const
4971
+ __hostdev__ typename OpT::Type get(const CoordType& ijk, ArgsT&&... args) const
5210
4972
  {
5211
- if (this->isCached(ijk))
5212
- return mNode->template getAndCache<OpT>(ijk, *this, args...);
4973
+ if constexpr(OpT::LEVEL <= LEVEL0) if (this->isCached(ijk)) return mNode->template getAndCache<OpT>(ijk, *this, args...);
5213
4974
  return mRoot->template getAndCache<OpT>(ijk, *this, args...);
5214
4975
  }
5215
4976
 
5216
4977
  template<typename OpT, typename... ArgsT>
5217
- __hostdev__ auto set(const CoordType& ijk, ArgsT&&... args) const
4978
+ __hostdev__ void set(const CoordType& ijk, ArgsT&&... args) const
5218
4979
  {
5219
- if (this->isCached(ijk))
5220
- return const_cast<NodeT*>(mNode)->template setAndCache<OpT>(ijk, *this, args...);
4980
+ if constexpr(OpT::LEVEL <= LEVEL0) if (this->isCached(ijk)) return const_cast<NodeT*>(mNode)->template setAndCache<OpT>(ijk, *this, args...);
5221
4981
  return const_cast<RootT*>(mRoot)->template setAndCache<OpT>(ijk, *this, args...);
5222
4982
  }
5223
4983
 
@@ -5276,9 +5036,7 @@ public:
5276
5036
  using CoordType = CoordT;
5277
5037
 
5278
5038
  static const int CacheLevels = 2;
5279
- #ifndef NANOVDB_NEW_ACCESSOR_METHODS
5280
- using NodeInfo = typename ReadAccessor<ValueT, -1, -1, -1>::NodeInfo;
5281
- #endif
5039
+
5282
5040
  /// @brief Constructor from a root node
5283
5041
  __hostdev__ ReadAccessor(const RootT& root)
5284
5042
  #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
@@ -5363,7 +5121,6 @@ public:
5363
5121
  }
5364
5122
  #endif
5365
5123
 
5366
- #ifdef NANOVDB_NEW_ACCESSOR_METHODS
5367
5124
  __hostdev__ ValueType getValue(const CoordType& ijk) const
5368
5125
  {
5369
5126
  return this->template get<GetValue<BuildT>>(ijk);
@@ -5375,94 +5132,6 @@ public:
5375
5132
  __hostdev__ bool isActive(const CoordType& ijk) const { return this->template get<GetState<BuildT>>(ijk); }
5376
5133
  __hostdev__ bool probeValue(const CoordType& ijk, ValueType& v) const { return this->template get<ProbeValue<BuildT>>(ijk, v); }
5377
5134
  __hostdev__ const LeafT* probeLeaf(const CoordType& ijk) const { return this->template get<GetLeaf<BuildT>>(ijk); }
5378
- #else // NANOVDB_NEW_ACCESSOR_METHODS
5379
-
5380
- __hostdev__ ValueType getValue(const CoordType& ijk) const
5381
- {
5382
- #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
5383
- const CoordValueType dirty = this->computeDirty(ijk);
5384
- #else
5385
- auto&& dirty = ijk;
5386
- #endif
5387
- if (this->isCached1(dirty)) {
5388
- return mNode1->getValueAndCache(ijk, *this);
5389
- } else if (this->isCached2(dirty)) {
5390
- return mNode2->getValueAndCache(ijk, *this);
5391
- }
5392
- return mRoot->getValueAndCache(ijk, *this);
5393
- }
5394
- __hostdev__ ValueType operator()(const CoordType& ijk) const
5395
- {
5396
- return this->getValue(ijk);
5397
- }
5398
- __hostdev__ ValueType operator()(int i, int j, int k) const
5399
- {
5400
- return this->getValue(CoordType(i, j, k));
5401
- }
5402
- __hostdev__ ValueType getValue(int i, int j, int k) const
5403
- {
5404
- return this->getValue(CoordType(i, j, k));
5405
- }
5406
- __hostdev__ NodeInfo getNodeInfo(const CoordType& ijk) const
5407
- {
5408
- #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
5409
- const CoordValueType dirty = this->computeDirty(ijk);
5410
- #else
5411
- auto&& dirty = ijk;
5412
- #endif
5413
- if (this->isCached1(dirty)) {
5414
- return mNode1->getNodeInfoAndCache(ijk, *this);
5415
- } else if (this->isCached2(dirty)) {
5416
- return mNode2->getNodeInfoAndCache(ijk, *this);
5417
- }
5418
- return mRoot->getNodeInfoAndCache(ijk, *this);
5419
- }
5420
-
5421
- __hostdev__ bool isActive(const CoordType& ijk) const
5422
- {
5423
- #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
5424
- const CoordValueType dirty = this->computeDirty(ijk);
5425
- #else
5426
- auto&& dirty = ijk;
5427
- #endif
5428
- if (this->isCached1(dirty)) {
5429
- return mNode1->isActiveAndCache(ijk, *this);
5430
- } else if (this->isCached2(dirty)) {
5431
- return mNode2->isActiveAndCache(ijk, *this);
5432
- }
5433
- return mRoot->isActiveAndCache(ijk, *this);
5434
- }
5435
-
5436
- __hostdev__ bool probeValue(const CoordType& ijk, ValueType& v) const
5437
- {
5438
- #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
5439
- const CoordValueType dirty = this->computeDirty(ijk);
5440
- #else
5441
- auto&& dirty = ijk;
5442
- #endif
5443
- if (this->isCached1(dirty)) {
5444
- return mNode1->probeValueAndCache(ijk, v, *this);
5445
- } else if (this->isCached2(dirty)) {
5446
- return mNode2->probeValueAndCache(ijk, v, *this);
5447
- }
5448
- return mRoot->probeValueAndCache(ijk, v, *this);
5449
- }
5450
-
5451
- __hostdev__ const LeafT* probeLeaf(const CoordType& ijk) const
5452
- {
5453
- #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
5454
- const CoordValueType dirty = this->computeDirty(ijk);
5455
- #else
5456
- auto&& dirty = ijk;
5457
- #endif
5458
- if (this->isCached1(dirty)) {
5459
- return mNode1->probeLeafAndCache(ijk, *this);
5460
- } else if (this->isCached2(dirty)) {
5461
- return mNode2->probeLeafAndCache(ijk, *this);
5462
- }
5463
- return mRoot->probeLeafAndCache(ijk, *this);
5464
- }
5465
- #endif // NANOVDB_NEW_ACCESSOR_METHODS
5466
5135
 
5467
5136
  template<typename RayT>
5468
5137
  __hostdev__ uint32_t getDim(const CoordType& ijk, const RayT& ray) const
@@ -5481,33 +5150,33 @@ public:
5481
5150
  }
5482
5151
 
5483
5152
  template<typename OpT, typename... ArgsT>
5484
- __hostdev__ auto get(const CoordType& ijk, ArgsT&&... args) const
5153
+ __hostdev__ typename OpT::Type get(const CoordType& ijk, ArgsT&&... args) const
5485
5154
  {
5486
5155
  #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
5487
5156
  const CoordValueType dirty = this->computeDirty(ijk);
5488
5157
  #else
5489
5158
  auto&& dirty = ijk;
5490
5159
  #endif
5491
- if (this->isCached1(dirty)) {
5492
- return mNode1->template getAndCache<OpT>(ijk, *this, args...);
5493
- } else if (this->isCached2(dirty)) {
5494
- return mNode2->template getAndCache<OpT>(ijk, *this, args...);
5160
+ if constexpr(OpT::LEVEL <= LEVEL0) {
5161
+ if (this->isCached1(dirty)) return mNode1->template getAndCache<OpT>(ijk, *this, args...);
5162
+ } else if constexpr(OpT::LEVEL <= LEVEL1) {
5163
+ if (this->isCached2(dirty)) return mNode2->template getAndCache<OpT>(ijk, *this, args...);
5495
5164
  }
5496
5165
  return mRoot->template getAndCache<OpT>(ijk, *this, args...);
5497
5166
  }
5498
5167
 
5499
5168
  template<typename OpT, typename... ArgsT>
5500
- __hostdev__ auto set(const CoordType& ijk, ArgsT&&... args) const
5169
+ __hostdev__ void set(const CoordType& ijk, ArgsT&&... args) const
5501
5170
  {
5502
5171
  #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
5503
5172
  const CoordValueType dirty = this->computeDirty(ijk);
5504
5173
  #else
5505
5174
  auto&& dirty = ijk;
5506
5175
  #endif
5507
- if (this->isCached1(dirty)) {
5508
- return const_cast<Node1T*>(mNode1)->template setAndCache<OpT>(ijk, *this, args...);
5509
- } else if (this->isCached2(dirty)) {
5510
- return const_cast<Node2T*>(mNode2)->template setAndCache<OpT>(ijk, *this, args...);
5176
+ if constexpr(OpT::LEVEL <= LEVEL0) {
5177
+ if (this->isCached1(dirty)) return const_cast<Node1T*>(mNode1)->template setAndCache<OpT>(ijk, *this, args...);
5178
+ } else if constexpr(OpT::LEVEL <= LEVEL1) {
5179
+ if (this->isCached2(dirty)) return const_cast<Node2T*>(mNode2)->template setAndCache<OpT>(ijk, *this, args...);
5511
5180
  }
5512
5181
  return const_cast<RootT*>(mRoot)->template setAndCache<OpT>(ijk, *this, args...);
5513
5182
  }
@@ -5575,9 +5244,7 @@ public:
5575
5244
  using CoordType = CoordT;
5576
5245
 
5577
5246
  static const int CacheLevels = 3;
5578
- #ifndef NANOVDB_NEW_ACCESSOR_METHODS
5579
- using NodeInfo = typename ReadAccessor<ValueT, -1, -1, -1>::NodeInfo;
5580
- #endif
5247
+
5581
5248
  /// @brief Constructor from a root node
5582
5249
  __hostdev__ ReadAccessor(const RootT& root)
5583
5250
  #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
@@ -5666,11 +5333,7 @@ public:
5666
5333
  }
5667
5334
  #endif
5668
5335
 
5669
- #ifdef NANOVDB_NEW_ACCESSOR_METHODS
5670
- __hostdev__ ValueType getValue(const CoordType& ijk) const
5671
- {
5672
- return this->template get<GetValue<BuildT>>(ijk);
5673
- }
5336
+ __hostdev__ ValueType getValue(const CoordType& ijk) const {return this->template get<GetValue<BuildT>>(ijk);}
5674
5337
  __hostdev__ ValueType getValue(int i, int j, int k) const { return this->template get<GetValue<BuildT>>(CoordType(i, j, k)); }
5675
5338
  __hostdev__ ValueType operator()(const CoordType& ijk) const { return this->template get<GetValue<BuildT>>(ijk); }
5676
5339
  __hostdev__ ValueType operator()(int i, int j, int k) const { return this->template get<GetValue<BuildT>>(CoordType(i, j, k)); }
@@ -5678,137 +5341,39 @@ public:
5678
5341
  __hostdev__ bool isActive(const CoordType& ijk) const { return this->template get<GetState<BuildT>>(ijk); }
5679
5342
  __hostdev__ bool probeValue(const CoordType& ijk, ValueType& v) const { return this->template get<ProbeValue<BuildT>>(ijk, v); }
5680
5343
  __hostdev__ const LeafT* probeLeaf(const CoordType& ijk) const { return this->template get<GetLeaf<BuildT>>(ijk); }
5681
- #else // NANOVDB_NEW_ACCESSOR_METHODS
5682
-
5683
- __hostdev__ ValueType getValue(const CoordType& ijk) const
5684
- {
5685
- #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
5686
- const CoordValueType dirty = this->computeDirty(ijk);
5687
- #else
5688
- auto&& dirty = ijk;
5689
- #endif
5690
- if (this->isCached<LeafT>(dirty)) {
5691
- return ((LeafT*)mNode[0])->getValue(ijk);
5692
- } else if (this->isCached<NodeT1>(dirty)) {
5693
- return ((NodeT1*)mNode[1])->getValueAndCache(ijk, *this);
5694
- } else if (this->isCached<NodeT2>(dirty)) {
5695
- return ((NodeT2*)mNode[2])->getValueAndCache(ijk, *this);
5696
- }
5697
- return mRoot->getValueAndCache(ijk, *this);
5698
- }
5699
- __hostdev__ ValueType operator()(const CoordType& ijk) const
5700
- {
5701
- return this->getValue(ijk);
5702
- }
5703
- __hostdev__ ValueType operator()(int i, int j, int k) const
5704
- {
5705
- return this->getValue(CoordType(i, j, k));
5706
- }
5707
- __hostdev__ ValueType getValue(int i, int j, int k) const
5708
- {
5709
- return this->getValue(CoordType(i, j, k));
5710
- }
5711
-
5712
- __hostdev__ NodeInfo getNodeInfo(const CoordType& ijk) const
5713
- {
5714
- #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
5715
- const CoordValueType dirty = this->computeDirty(ijk);
5716
- #else
5717
- auto&& dirty = ijk;
5718
- #endif
5719
- if (this->isCached<LeafT>(dirty)) {
5720
- return ((LeafT*)mNode[0])->getNodeInfoAndCache(ijk, *this);
5721
- } else if (this->isCached<NodeT1>(dirty)) {
5722
- return ((NodeT1*)mNode[1])->getNodeInfoAndCache(ijk, *this);
5723
- } else if (this->isCached<NodeT2>(dirty)) {
5724
- return ((NodeT2*)mNode[2])->getNodeInfoAndCache(ijk, *this);
5725
- }
5726
- return mRoot->getNodeInfoAndCache(ijk, *this);
5727
- }
5728
-
5729
- __hostdev__ bool isActive(const CoordType& ijk) const
5730
- {
5731
- #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
5732
- const CoordValueType dirty = this->computeDirty(ijk);
5733
- #else
5734
- auto&& dirty = ijk;
5735
- #endif
5736
- if (this->isCached<LeafT>(dirty)) {
5737
- return ((LeafT*)mNode[0])->isActive(ijk);
5738
- } else if (this->isCached<NodeT1>(dirty)) {
5739
- return ((NodeT1*)mNode[1])->isActiveAndCache(ijk, *this);
5740
- } else if (this->isCached<NodeT2>(dirty)) {
5741
- return ((NodeT2*)mNode[2])->isActiveAndCache(ijk, *this);
5742
- }
5743
- return mRoot->isActiveAndCache(ijk, *this);
5744
- }
5745
-
5746
- __hostdev__ bool probeValue(const CoordType& ijk, ValueType& v) const
5747
- {
5748
- #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
5749
- const CoordValueType dirty = this->computeDirty(ijk);
5750
- #else
5751
- auto&& dirty = ijk;
5752
- #endif
5753
- if (this->isCached<LeafT>(dirty)) {
5754
- return ((LeafT*)mNode[0])->probeValue(ijk, v);
5755
- } else if (this->isCached<NodeT1>(dirty)) {
5756
- return ((NodeT1*)mNode[1])->probeValueAndCache(ijk, v, *this);
5757
- } else if (this->isCached<NodeT2>(dirty)) {
5758
- return ((NodeT2*)mNode[2])->probeValueAndCache(ijk, v, *this);
5759
- }
5760
- return mRoot->probeValueAndCache(ijk, v, *this);
5761
- }
5762
- __hostdev__ const LeafT* probeLeaf(const CoordType& ijk) const
5763
- {
5764
- #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
5765
- const CoordValueType dirty = this->computeDirty(ijk);
5766
- #else
5767
- auto&& dirty = ijk;
5768
- #endif
5769
- if (this->isCached<LeafT>(dirty)) {
5770
- return ((LeafT*)mNode[0]);
5771
- } else if (this->isCached<NodeT1>(dirty)) {
5772
- return ((NodeT1*)mNode[1])->probeLeafAndCache(ijk, *this);
5773
- } else if (this->isCached<NodeT2>(dirty)) {
5774
- return ((NodeT2*)mNode[2])->probeLeafAndCache(ijk, *this);
5775
- }
5776
- return mRoot->probeLeafAndCache(ijk, *this);
5777
- }
5778
- #endif // NANOVDB_NEW_ACCESSOR_METHODS
5779
5344
 
5780
5345
  template<typename OpT, typename... ArgsT>
5781
- __hostdev__ auto get(const CoordType& ijk, ArgsT&&... args) const
5346
+ __hostdev__ typename OpT::Type get(const CoordType& ijk, ArgsT&&... args) const
5782
5347
  {
5783
5348
  #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
5784
5349
  const CoordValueType dirty = this->computeDirty(ijk);
5785
5350
  #else
5786
5351
  auto&& dirty = ijk;
5787
5352
  #endif
5788
- if (this->isCached<LeafT>(dirty)) {
5789
- return ((const LeafT*)mNode[0])->template getAndCache<OpT>(ijk, *this, args...);
5790
- } else if (this->isCached<NodeT1>(dirty)) {
5791
- return ((const NodeT1*)mNode[1])->template getAndCache<OpT>(ijk, *this, args...);
5792
- } else if (this->isCached<NodeT2>(dirty)) {
5793
- return ((const NodeT2*)mNode[2])->template getAndCache<OpT>(ijk, *this, args...);
5353
+ if constexpr(OpT::LEVEL <=0) {
5354
+ if (this->isCached<LeafT>(dirty)) return ((const LeafT*)mNode[0])->template getAndCache<OpT>(ijk, *this, args...);
5355
+ } else if constexpr(OpT::LEVEL <= 1) {
5356
+ if (this->isCached<NodeT1>(dirty)) return ((const NodeT1*)mNode[1])->template getAndCache<OpT>(ijk, *this, args...);
5357
+ } else if constexpr(OpT::LEVEL <= 2) {
5358
+ if (this->isCached<NodeT2>(dirty)) return ((const NodeT2*)mNode[2])->template getAndCache<OpT>(ijk, *this, args...);
5794
5359
  }
5795
5360
  return mRoot->template getAndCache<OpT>(ijk, *this, args...);
5796
5361
  }
5797
5362
 
5798
5363
  template<typename OpT, typename... ArgsT>
5799
- __hostdev__ auto set(const CoordType& ijk, ArgsT&&... args) const
5364
+ __hostdev__ void set(const CoordType& ijk, ArgsT&&... args) const
5800
5365
  {
5801
5366
  #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
5802
5367
  const CoordValueType dirty = this->computeDirty(ijk);
5803
5368
  #else
5804
5369
  auto&& dirty = ijk;
5805
5370
  #endif
5806
- if (this->isCached<LeafT>(dirty)) {
5807
- return ((LeafT*)mNode[0])->template setAndCache<OpT>(ijk, *this, args...);
5808
- } else if (this->isCached<NodeT1>(dirty)) {
5809
- return ((NodeT1*)mNode[1])->template setAndCache<OpT>(ijk, *this, args...);
5810
- } else if (this->isCached<NodeT2>(dirty)) {
5811
- return ((NodeT2*)mNode[2])->template setAndCache<OpT>(ijk, *this, args...);
5371
+ if constexpr(OpT::LEVEL <= 0) {
5372
+ if (this->isCached<LeafT>(dirty)) return ((LeafT*)mNode[0])->template setAndCache<OpT>(ijk, *this, args...);
5373
+ } else if constexpr(OpT::LEVEL <= 1) {
5374
+ if (this->isCached<NodeT1>(dirty)) return ((NodeT1*)mNode[1])->template setAndCache<OpT>(ijk, *this, args...);
5375
+ } else if constexpr(OpT::LEVEL <= 2) {
5376
+ if (this->isCached<NodeT2>(dirty)) return ((NodeT2*)mNode[2])->template setAndCache<OpT>(ijk, *this, args...);
5812
5377
  }
5813
5378
  return ((RootT*)mRoot)->template setAndCache<OpT>(ijk, *this, args...);
5814
5379
  }
@@ -6232,7 +5797,7 @@ __hostdev__ inline const char* toStr(char *dst, Codec codec)
6232
5797
  switch (codec){
6233
5798
  case Codec::NONE: return util::strcpy(dst, "NONE");
6234
5799
  case Codec::ZIP: return util::strcpy(dst, "ZIP");
6235
- case Codec::BLOSC : return util::strcpy(dst, "BLOSC");
5800
+ case Codec::BLOSC : return util::strcpy(dst, "BLOSC");// StrLen = 5 + 1 + End
6236
5801
  default: return util::strcpy(dst, "END");
6237
5802
  }
6238
5803
  }
@@ -6453,41 +6018,76 @@ VecT<GridHandleT> readUncompressedGrids(const char* fileName, const typename Gri
6453
6018
 
6454
6019
  // ----------------------------> Implementations of random access methods <--------------------------------------
6455
6020
 
6021
+ /**
6022
+ * @brief Below is an example of a struct used for random get methods.
6023
+ * @note All member methods, data, and types are mandatory.
6024
+ * @code
6025
+ template<typename BuildT>
6026
+ struct GetOpT {
6027
+ using Type = typename BuildToValueMap<BuildT>::Type;// return type
6028
+ static constexpr int LEVEL = 0;// minimum level for the descent during top-down traversal
6029
+ __hostdev__ static Type get(const NanoRoot<BuildT>& root, args...) { }
6030
+ __hostdev__ static Type get(const typename NanoRoot<BuildT>::Tile& tile, args...) { }
6031
+ __hostdev__ static Type get(const NanoUpper<BuildT>& node, uint32_t n, args...) { }
6032
+ __hostdev__ static Type get(const NanoLower<BuildT>& node, uint32_t n, args...) { }
6033
+ __hostdev__ static Type get(const NanoLeaf<BuildT>& leaf, uint32_t n, args...) { }
6034
+ };
6035
+ @endcode
6036
+
6037
+ * @brief Below is an example of the struct used for random set methods
6038
+ * @note All member methods and data are mandatory.
6039
+ * @code
6040
+ template<typename BuildT>
6041
+ struct SetOpT {
6042
+ static constexpr int LEVEL = 0;// minimum level for the descent during top-down traversal
6043
+ __hostdev__ static void set(NanoRoot<BuildT>& root, args...) { }
6044
+ __hostdev__ static void set(typename NanoRoot<BuildT>::Tile& tile, args...) { }
6045
+ __hostdev__ static void set(NanoUpper<BuildT>& node, uint32_t n, args...) { }
6046
+ __hostdev__ static void set(NanoLower<BuildT>& node, uint32_t n, args...) { }
6047
+ __hostdev__ static void set(NanoLeaf<BuildT>& leaf, uint32_t n, args...) { }
6048
+ };
6049
+ @endcode
6050
+ **/
6051
+
6456
6052
  /// @brief Implements Tree::getValue(math::Coord), i.e. return the value associated with a specific coordinate @c ijk.
6457
6053
  /// @tparam BuildT Build type of the grid being called
6458
- /// @details The value at a coordinate maps to the background, a tile value or a leaf value.
6054
+ /// @details The value at a coordinate either maps to the background, a tile value or a leaf value.
6459
6055
  template<typename BuildT>
6460
6056
  struct GetValue
6461
6057
  {
6462
- __hostdev__ static auto get(const NanoRoot<BuildT>& root) { return root.mBackground; }
6463
- __hostdev__ static auto get(const typename NanoRoot<BuildT>::Tile& tile) { return tile.value; }
6464
- __hostdev__ static auto get(const NanoUpper<BuildT>& node, uint32_t n) { return node.mTable[n].value; }
6465
- __hostdev__ static auto get(const NanoLower<BuildT>& node, uint32_t n) { return node.mTable[n].value; }
6466
- __hostdev__ static auto get(const NanoLeaf<BuildT>& leaf, uint32_t n) { return leaf.getValue(n); } // works with all build types
6058
+ using Type = typename NanoLeaf<BuildT>::ValueType;
6059
+ static constexpr int LEVEL = 0;// minimum level for the descent during top-down traversal
6060
+ __hostdev__ static Type get(const NanoRoot<BuildT>& root) { return root.mBackground; }
6061
+ __hostdev__ static Type get(const typename NanoRoot<BuildT>::Tile& tile) { return tile.value; }
6062
+ __hostdev__ static Type get(const NanoUpper<BuildT>& node, uint32_t n) { return node.mTable[n].value; }
6063
+ __hostdev__ static Type get(const NanoLower<BuildT>& node, uint32_t n) { return node.mTable[n].value; }
6064
+ __hostdev__ static Type get(const NanoLeaf<BuildT>& leaf, uint32_t n) { return leaf.getValue(n); } // works with all build types
6467
6065
  }; // GetValue<BuildT>
6468
6066
 
6469
6067
  template<typename BuildT>
6470
6068
  struct SetValue
6471
6069
  {
6472
- static_assert(!BuildTraits<BuildT>::is_special, "SetValue does not support special value types");
6070
+ static_assert(!BuildTraits<BuildT>::is_special, "SetValue does not support special value types, e.g. Fp4, Fp8, Fp16, FpN");
6473
6071
  using ValueT = typename NanoLeaf<BuildT>::ValueType;
6474
- __hostdev__ static auto set(NanoRoot<BuildT>&, const ValueT&) {} // no-op
6475
- __hostdev__ static auto set(typename NanoRoot<BuildT>::Tile& tile, const ValueT& v) { tile.value = v; }
6476
- __hostdev__ static auto set(NanoUpper<BuildT>& node, uint32_t n, const ValueT& v) { node.mTable[n].value = v; }
6477
- __hostdev__ static auto set(NanoLower<BuildT>& node, uint32_t n, const ValueT& v) { node.mTable[n].value = v; }
6478
- __hostdev__ static auto set(NanoLeaf<BuildT>& leaf, uint32_t n, const ValueT& v) { leaf.mValues[n] = v; }
6072
+ static constexpr int LEVEL = 0;// minimum level for the descent during top-down traversal
6073
+ __hostdev__ static void set(NanoRoot<BuildT>&, const ValueT&) {} // no-op
6074
+ __hostdev__ static void set(typename NanoRoot<BuildT>::Tile& tile, const ValueT& v) { tile.value = v; }
6075
+ __hostdev__ static void set(NanoUpper<BuildT>& node, uint32_t n, const ValueT& v) { node.mTable[n].value = v; }
6076
+ __hostdev__ static void set(NanoLower<BuildT>& node, uint32_t n, const ValueT& v) { node.mTable[n].value = v; }
6077
+ __hostdev__ static void set(NanoLeaf<BuildT>& leaf, uint32_t n, const ValueT& v) { leaf.mValues[n] = v; }
6479
6078
  }; // SetValue<BuildT>
6480
6079
 
6481
6080
  template<typename BuildT>
6482
6081
  struct SetVoxel
6483
6082
  {
6484
- static_assert(!BuildTraits<BuildT>::is_special, "SetVoxel does not support special value types");
6083
+ static_assert(!BuildTraits<BuildT>::is_special, "SetVoxel does not support special value types. e.g. Fp4, Fp8, Fp16, FpN");
6485
6084
  using ValueT = typename NanoLeaf<BuildT>::ValueType;
6486
- __hostdev__ static auto set(NanoRoot<BuildT>&, const ValueT&) {} // no-op
6487
- __hostdev__ static auto set(typename NanoRoot<BuildT>::Tile&, const ValueT&) {} // no-op
6488
- __hostdev__ static auto set(NanoUpper<BuildT>&, uint32_t, const ValueT&) {} // no-op
6489
- __hostdev__ static auto set(NanoLower<BuildT>&, uint32_t, const ValueT&) {} // no-op
6490
- __hostdev__ static auto set(NanoLeaf<BuildT>& leaf, uint32_t n, const ValueT& v) { leaf.mValues[n] = v; }
6085
+ static constexpr int LEVEL = 0;// minimum level for the descent during top-down traversal
6086
+ __hostdev__ static void set(NanoRoot<BuildT>&, const ValueT&) {} // no-op
6087
+ __hostdev__ static void set(typename NanoRoot<BuildT>::Tile&, const ValueT&) {} // no-op
6088
+ __hostdev__ static void set(NanoUpper<BuildT>&, uint32_t, const ValueT&) {} // no-op
6089
+ __hostdev__ static void set(NanoLower<BuildT>&, uint32_t, const ValueT&) {} // no-op
6090
+ __hostdev__ static void set(NanoLeaf<BuildT>& leaf, uint32_t n, const ValueT& v) { leaf.mValues[n] = v; }
6491
6091
  }; // SetVoxel<BuildT>
6492
6092
 
6493
6093
  /// @brief Implements Tree::isActive(math::Coord)
@@ -6495,11 +6095,13 @@ struct SetVoxel
6495
6095
  template<typename BuildT>
6496
6096
  struct GetState
6497
6097
  {
6498
- __hostdev__ static auto get(const NanoRoot<BuildT>&) { return false; }
6499
- __hostdev__ static auto get(const typename NanoRoot<BuildT>::Tile& tile) { return tile.state > 0; }
6500
- __hostdev__ static auto get(const NanoUpper<BuildT>& node, uint32_t n) { return node.mValueMask.isOn(n); }
6501
- __hostdev__ static auto get(const NanoLower<BuildT>& node, uint32_t n) { return node.mValueMask.isOn(n); }
6502
- __hostdev__ static auto get(const NanoLeaf<BuildT>& leaf, uint32_t n) { return leaf.mValueMask.isOn(n); }
6098
+ using Type = bool;
6099
+ static constexpr int LEVEL = 0;// minimum level for the descent during top-down traversal
6100
+ __hostdev__ static Type get(const NanoRoot<BuildT>&) { return false; }
6101
+ __hostdev__ static Type get(const typename NanoRoot<BuildT>::Tile& tile) { return tile.state > 0; }
6102
+ __hostdev__ static Type get(const NanoUpper<BuildT>& node, uint32_t n) { return node.mValueMask.isOn(n); }
6103
+ __hostdev__ static Type get(const NanoLower<BuildT>& node, uint32_t n) { return node.mValueMask.isOn(n); }
6104
+ __hostdev__ static Type get(const NanoLeaf<BuildT>& leaf, uint32_t n) { return leaf.mValueMask.isOn(n); }
6503
6105
  }; // GetState<BuildT>
6504
6106
 
6505
6107
  /// @brief Implements Tree::getDim(math::Coord)
@@ -6507,11 +6109,13 @@ struct GetState
6507
6109
  template<typename BuildT>
6508
6110
  struct GetDim
6509
6111
  {
6510
- __hostdev__ static uint32_t get(const NanoRoot<BuildT>&) { return 0u; } // background
6511
- __hostdev__ static uint32_t get(const typename NanoRoot<BuildT>::Tile&) { return 4096u; }
6512
- __hostdev__ static uint32_t get(const NanoUpper<BuildT>&, uint32_t) { return 128u; }
6513
- __hostdev__ static uint32_t get(const NanoLower<BuildT>&, uint32_t) { return 8u; }
6514
- __hostdev__ static uint32_t get(const NanoLeaf<BuildT>&, uint32_t) { return 1u; }
6112
+ using Type = uint32_t;
6113
+ static constexpr int LEVEL = 0;// minimum level for the descent during top-down traversal
6114
+ __hostdev__ static Type get(const NanoRoot<BuildT>&) { return 0u; } // background
6115
+ __hostdev__ static Type get(const typename NanoRoot<BuildT>::Tile&) { return 4096u; }
6116
+ __hostdev__ static Type get(const NanoUpper<BuildT>&, uint32_t) { return 128u; }
6117
+ __hostdev__ static Type get(const NanoLower<BuildT>&, uint32_t) { return 8u; }
6118
+ __hostdev__ static Type get(const NanoLeaf<BuildT>&, uint32_t) { return 1u; }
6515
6119
  }; // GetDim<BuildT>
6516
6120
 
6517
6121
  /// @brief Return the pointer to the leaf node that contains math::Coord. Implements Tree::probeLeaf(math::Coord)
@@ -6519,11 +6123,13 @@ struct GetDim
6519
6123
  template<typename BuildT>
6520
6124
  struct GetLeaf
6521
6125
  {
6522
- __hostdev__ static const NanoLeaf<BuildT>* get(const NanoRoot<BuildT>&) { return nullptr; }
6523
- __hostdev__ static const NanoLeaf<BuildT>* get(const typename NanoRoot<BuildT>::Tile&) { return nullptr; }
6524
- __hostdev__ static const NanoLeaf<BuildT>* get(const NanoUpper<BuildT>&, uint32_t) { return nullptr; }
6525
- __hostdev__ static const NanoLeaf<BuildT>* get(const NanoLower<BuildT>&, uint32_t) { return nullptr; }
6526
- __hostdev__ static const NanoLeaf<BuildT>* get(const NanoLeaf<BuildT>& leaf, uint32_t) { return &leaf; }
6126
+ using Type = const NanoLeaf<BuildT>*;
6127
+ static constexpr int LEVEL = 0;// minimum level for the descent during top-down traversal
6128
+ __hostdev__ static Type get(const NanoRoot<BuildT>&) { return nullptr; }
6129
+ __hostdev__ static Type get(const typename NanoRoot<BuildT>::Tile&) { return nullptr; }
6130
+ __hostdev__ static Type get(const NanoUpper<BuildT>&, uint32_t) { return nullptr; }
6131
+ __hostdev__ static Type get(const NanoLower<BuildT>&, uint32_t) { return nullptr; }
6132
+ __hostdev__ static Type get(const NanoLeaf<BuildT>& leaf, uint32_t) { return &leaf; }
6527
6133
  }; // GetLeaf<BuildT>
6528
6134
 
6529
6135
  /// @brief Return point to the lower internal node where math::Coord maps to one of its values, i.e. terminates
@@ -6531,11 +6137,12 @@ struct GetLeaf
6531
6137
  template<typename BuildT>
6532
6138
  struct GetLower
6533
6139
  {
6534
- __hostdev__ static const NanoLower<BuildT>* get(const NanoRoot<BuildT>&) { return nullptr; }
6535
- __hostdev__ static const NanoLower<BuildT>* get(const typename NanoRoot<BuildT>::Tile&) { return nullptr; }
6536
- __hostdev__ static const NanoLower<BuildT>* get(const NanoUpper<BuildT>&, uint32_t) { return nullptr; }
6537
- __hostdev__ static const NanoLower<BuildT>* get(const NanoLower<BuildT>& node, uint32_t) { return &node; }
6538
- __hostdev__ static const NanoLower<BuildT>* get(const NanoLeaf<BuildT>&, uint32_t) { return nullptr; }
6140
+ using Type = const NanoLower<BuildT>*;
6141
+ static constexpr int LEVEL = 1;// minimum level for the descent during top-down traversal
6142
+ __hostdev__ static Type get(const NanoRoot<BuildT>&) { return nullptr; }
6143
+ __hostdev__ static Type get(const typename NanoRoot<BuildT>::Tile&) { return nullptr; }
6144
+ __hostdev__ static Type get(const NanoUpper<BuildT>&, uint32_t) { return nullptr; }
6145
+ __hostdev__ static Type get(const NanoLower<BuildT>& node, uint32_t) { return &node; }
6539
6146
  }; // GetLower<BuildT>
6540
6147
 
6541
6148
  /// @brief Return point to the upper internal node where math::Coord maps to one of its values, i.e. terminates
@@ -6543,40 +6150,53 @@ struct GetLower
6543
6150
  template<typename BuildT>
6544
6151
  struct GetUpper
6545
6152
  {
6546
- __hostdev__ static const NanoUpper<BuildT>* get(const NanoRoot<BuildT>&) { return nullptr; }
6547
- __hostdev__ static const NanoUpper<BuildT>* get(const typename NanoRoot<BuildT>::Tile&) { return nullptr; }
6548
- __hostdev__ static const NanoUpper<BuildT>* get(const NanoUpper<BuildT>& node, uint32_t) { return &node; }
6549
- __hostdev__ static const NanoUpper<BuildT>* get(const NanoLower<BuildT>& node, uint32_t) { return nullptr; }
6550
- __hostdev__ static const NanoUpper<BuildT>* get(const NanoLeaf<BuildT>&, uint32_t) { return nullptr; }
6153
+ using Type = const NanoUpper<BuildT>*;
6154
+ static constexpr int LEVEL = 2;// minimum level for the descent during top-down traversal
6155
+ __hostdev__ static Type get(const NanoRoot<BuildT>&) { return nullptr; }
6156
+ __hostdev__ static Type get(const typename NanoRoot<BuildT>::Tile&) { return nullptr; }
6157
+ __hostdev__ static Type get(const NanoUpper<BuildT>& node, uint32_t) { return &node; }
6551
6158
  }; // GetUpper<BuildT>
6552
6159
 
6160
+ /// @brief Return point to the root Tile where math::Coord maps to one of its values, i.e. terminates
6161
+ /// @tparam BuildT Build type of the grid being called
6162
+ template<typename BuildT>
6163
+ struct GetTile
6164
+ {
6165
+ using Type = const typename NanoRoot<BuildT>::Tile*;
6166
+ static constexpr int LEVEL = 3;// minimum level for the descent during top-down traversal
6167
+ __hostdev__ static Type get(const NanoRoot<BuildT>&) { return nullptr; }
6168
+ __hostdev__ static Type get(const typename NanoRoot<BuildT>::Tile &tile) { return &tile; }
6169
+ }; // GetTile<BuildT>
6170
+
6553
6171
  /// @brief Implements Tree::probeLeaf(math::Coord)
6554
6172
  /// @tparam BuildT Build type of the grid being called
6555
6173
  template<typename BuildT>
6556
6174
  struct ProbeValue
6557
6175
  {
6176
+ using Type = bool;
6177
+ static constexpr int LEVEL = 0;// minimum level for the descent during top-down traversal
6558
6178
  using ValueT = typename BuildToValueMap<BuildT>::Type;
6559
- __hostdev__ static bool get(const NanoRoot<BuildT>& root, ValueT& v)
6179
+ __hostdev__ static Type get(const NanoRoot<BuildT>& root, ValueT& v)
6560
6180
  {
6561
6181
  v = root.mBackground;
6562
6182
  return false;
6563
6183
  }
6564
- __hostdev__ static bool get(const typename NanoRoot<BuildT>::Tile& tile, ValueT& v)
6184
+ __hostdev__ static Type get(const typename NanoRoot<BuildT>::Tile& tile, ValueT& v)
6565
6185
  {
6566
6186
  v = tile.value;
6567
6187
  return tile.state > 0u;
6568
6188
  }
6569
- __hostdev__ static bool get(const NanoUpper<BuildT>& node, uint32_t n, ValueT& v)
6189
+ __hostdev__ static Type get(const NanoUpper<BuildT>& node, uint32_t n, ValueT& v)
6570
6190
  {
6571
6191
  v = node.mTable[n].value;
6572
6192
  return node.mValueMask.isOn(n);
6573
6193
  }
6574
- __hostdev__ static bool get(const NanoLower<BuildT>& node, uint32_t n, ValueT& v)
6194
+ __hostdev__ static Type get(const NanoLower<BuildT>& node, uint32_t n, ValueT& v)
6575
6195
  {
6576
6196
  v = node.mTable[n].value;
6577
6197
  return node.mValueMask.isOn(n);
6578
6198
  }
6579
- __hostdev__ static bool get(const NanoLeaf<BuildT>& leaf, uint32_t n, ValueT& v)
6199
+ __hostdev__ static Type get(const NanoLeaf<BuildT>& leaf, uint32_t n, ValueT& v)
6580
6200
  {
6581
6201
  v = leaf.getValue(n);
6582
6202
  return leaf.mValueMask.isOn(n);
@@ -6597,23 +6217,25 @@ struct GetNodeInfo
6597
6217
  FloatType average, stdDevi;
6598
6218
  CoordBBox bbox;
6599
6219
  };
6600
- __hostdev__ static NodeInfo get(const NanoRoot<BuildT>& root)
6220
+ static constexpr int LEVEL = 0;
6221
+ using Type = NodeInfo;
6222
+ __hostdev__ static Type get(const NanoRoot<BuildT>& root)
6601
6223
  {
6602
6224
  return NodeInfo{3u, NanoUpper<BuildT>::DIM, root.minimum(), root.maximum(), root.average(), root.stdDeviation(), root.bbox()};
6603
6225
  }
6604
- __hostdev__ static NodeInfo get(const typename NanoRoot<BuildT>::Tile& tile)
6226
+ __hostdev__ static Type get(const typename NanoRoot<BuildT>::Tile& tile)
6605
6227
  {
6606
6228
  return NodeInfo{3u, NanoUpper<BuildT>::DIM, tile.value, tile.value, static_cast<FloatType>(tile.value), 0, CoordBBox::createCube(tile.origin(), NanoUpper<BuildT>::DIM)};
6607
6229
  }
6608
- __hostdev__ static NodeInfo get(const NanoUpper<BuildT>& node, uint32_t n)
6230
+ __hostdev__ static Type get(const NanoUpper<BuildT>& node, uint32_t n)
6609
6231
  {
6610
6232
  return NodeInfo{2u, node.dim(), node.minimum(), node.maximum(), node.average(), node.stdDeviation(), node.bbox()};
6611
6233
  }
6612
- __hostdev__ static NodeInfo get(const NanoLower<BuildT>& node, uint32_t n)
6234
+ __hostdev__ static Type get(const NanoLower<BuildT>& node, uint32_t n)
6613
6235
  {
6614
6236
  return NodeInfo{1u, node.dim(), node.minimum(), node.maximum(), node.average(), node.stdDeviation(), node.bbox()};
6615
6237
  }
6616
- __hostdev__ static NodeInfo get(const NanoLeaf<BuildT>& leaf, uint32_t n)
6238
+ __hostdev__ static Type get(const NanoLeaf<BuildT>& leaf, uint32_t n)
6617
6239
  {
6618
6240
  return NodeInfo{0u, leaf.dim(), leaf.minimum(), leaf.maximum(), leaf.average(), leaf.stdDeviation(), leaf.bbox()};
6619
6241
  }