warp-lang 1.8.1__py3-none-win_amd64.whl → 1.9.0__py3-none-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of warp-lang might be problematic. Click here for more details.

Files changed (134) hide show
  1. warp/__init__.py +282 -103
  2. warp/__init__.pyi +482 -110
  3. warp/bin/warp-clang.dll +0 -0
  4. warp/bin/warp.dll +0 -0
  5. warp/build.py +93 -30
  6. warp/build_dll.py +47 -67
  7. warp/builtins.py +955 -137
  8. warp/codegen.py +312 -206
  9. warp/config.py +1 -1
  10. warp/context.py +1249 -784
  11. warp/examples/core/example_marching_cubes.py +1 -0
  12. warp/examples/core/example_render_opengl.py +100 -3
  13. warp/examples/fem/example_apic_fluid.py +98 -52
  14. warp/examples/fem/example_convection_diffusion_dg.py +25 -4
  15. warp/examples/fem/example_diffusion_mgpu.py +8 -3
  16. warp/examples/fem/utils.py +68 -22
  17. warp/fabric.py +1 -1
  18. warp/fem/cache.py +27 -19
  19. warp/fem/domain.py +2 -2
  20. warp/fem/field/nodal_field.py +2 -2
  21. warp/fem/field/virtual.py +264 -166
  22. warp/fem/geometry/geometry.py +5 -5
  23. warp/fem/integrate.py +129 -51
  24. warp/fem/space/restriction.py +4 -0
  25. warp/fem/space/shape/tet_shape_function.py +3 -10
  26. warp/jax_experimental/custom_call.py +1 -1
  27. warp/jax_experimental/ffi.py +2 -1
  28. warp/marching_cubes.py +708 -0
  29. warp/native/array.h +99 -4
  30. warp/native/builtin.h +82 -5
  31. warp/native/bvh.cpp +64 -28
  32. warp/native/bvh.cu +58 -58
  33. warp/native/bvh.h +2 -2
  34. warp/native/clang/clang.cpp +7 -7
  35. warp/native/coloring.cpp +8 -2
  36. warp/native/crt.cpp +2 -2
  37. warp/native/crt.h +3 -5
  38. warp/native/cuda_util.cpp +41 -10
  39. warp/native/cuda_util.h +10 -4
  40. warp/native/exports.h +1842 -1908
  41. warp/native/fabric.h +2 -1
  42. warp/native/hashgrid.cpp +37 -37
  43. warp/native/hashgrid.cu +2 -2
  44. warp/native/initializer_array.h +1 -1
  45. warp/native/intersect.h +2 -2
  46. warp/native/mat.h +1910 -116
  47. warp/native/mathdx.cpp +43 -43
  48. warp/native/mesh.cpp +24 -24
  49. warp/native/mesh.cu +26 -26
  50. warp/native/mesh.h +4 -2
  51. warp/native/nanovdb/GridHandle.h +179 -12
  52. warp/native/nanovdb/HostBuffer.h +8 -7
  53. warp/native/nanovdb/NanoVDB.h +517 -895
  54. warp/native/nanovdb/NodeManager.h +323 -0
  55. warp/native/nanovdb/PNanoVDB.h +2 -2
  56. warp/native/quat.h +331 -14
  57. warp/native/range.h +7 -1
  58. warp/native/reduce.cpp +10 -10
  59. warp/native/reduce.cu +13 -14
  60. warp/native/runlength_encode.cpp +2 -2
  61. warp/native/runlength_encode.cu +5 -5
  62. warp/native/scan.cpp +3 -3
  63. warp/native/scan.cu +4 -4
  64. warp/native/sort.cpp +10 -10
  65. warp/native/sort.cu +22 -22
  66. warp/native/sparse.cpp +8 -8
  67. warp/native/sparse.cu +13 -13
  68. warp/native/spatial.h +366 -17
  69. warp/native/temp_buffer.h +2 -2
  70. warp/native/tile.h +283 -69
  71. warp/native/vec.h +381 -14
  72. warp/native/volume.cpp +54 -54
  73. warp/native/volume.cu +1 -1
  74. warp/native/volume.h +2 -1
  75. warp/native/volume_builder.cu +30 -37
  76. warp/native/warp.cpp +150 -149
  77. warp/native/warp.cu +323 -192
  78. warp/native/warp.h +227 -226
  79. warp/optim/linear.py +736 -271
  80. warp/render/imgui_manager.py +289 -0
  81. warp/render/render_opengl.py +85 -6
  82. warp/sim/graph_coloring.py +2 -2
  83. warp/sparse.py +558 -175
  84. warp/tests/aux_test_module_aot.py +7 -0
  85. warp/tests/cuda/test_async.py +3 -3
  86. warp/tests/cuda/test_conditional_captures.py +101 -0
  87. warp/tests/geometry/test_marching_cubes.py +233 -12
  88. warp/tests/sim/test_coloring.py +6 -6
  89. warp/tests/test_array.py +56 -5
  90. warp/tests/test_codegen.py +3 -2
  91. warp/tests/test_context.py +8 -15
  92. warp/tests/test_enum.py +136 -0
  93. warp/tests/test_examples.py +2 -2
  94. warp/tests/test_fem.py +45 -2
  95. warp/tests/test_fixedarray.py +229 -0
  96. warp/tests/test_func.py +18 -15
  97. warp/tests/test_future_annotations.py +7 -5
  98. warp/tests/test_linear_solvers.py +30 -0
  99. warp/tests/test_map.py +1 -1
  100. warp/tests/test_mat.py +1518 -378
  101. warp/tests/test_mat_assign_copy.py +178 -0
  102. warp/tests/test_mat_constructors.py +574 -0
  103. warp/tests/test_module_aot.py +287 -0
  104. warp/tests/test_print.py +69 -0
  105. warp/tests/test_quat.py +140 -34
  106. warp/tests/test_quat_assign_copy.py +145 -0
  107. warp/tests/test_reload.py +2 -1
  108. warp/tests/test_sparse.py +71 -0
  109. warp/tests/test_spatial.py +140 -34
  110. warp/tests/test_spatial_assign_copy.py +160 -0
  111. warp/tests/test_struct.py +43 -3
  112. warp/tests/test_types.py +0 -20
  113. warp/tests/test_vec.py +179 -34
  114. warp/tests/test_vec_assign_copy.py +143 -0
  115. warp/tests/tile/test_tile.py +184 -18
  116. warp/tests/tile/test_tile_cholesky.py +605 -0
  117. warp/tests/tile/test_tile_load.py +169 -0
  118. warp/tests/tile/test_tile_mathdx.py +2 -558
  119. warp/tests/tile/test_tile_matmul.py +1 -1
  120. warp/tests/tile/test_tile_mlp.py +1 -1
  121. warp/tests/tile/test_tile_shared_memory.py +5 -5
  122. warp/tests/unittest_suites.py +6 -0
  123. warp/tests/walkthrough_debug.py +1 -1
  124. warp/thirdparty/unittest_parallel.py +108 -9
  125. warp/types.py +554 -264
  126. warp/utils.py +68 -86
  127. {warp_lang-1.8.1.dist-info → warp_lang-1.9.0.dist-info}/METADATA +28 -65
  128. {warp_lang-1.8.1.dist-info → warp_lang-1.9.0.dist-info}/RECORD +131 -121
  129. warp/native/marching.cpp +0 -19
  130. warp/native/marching.cu +0 -514
  131. warp/native/marching.h +0 -19
  132. {warp_lang-1.8.1.dist-info → warp_lang-1.9.0.dist-info}/WHEEL +0 -0
  133. {warp_lang-1.8.1.dist-info → warp_lang-1.9.0.dist-info}/licenses/LICENSE.md +0 -0
  134. {warp_lang-1.8.1.dist-info → warp_lang-1.9.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,323 @@
1
+ // Copyright Contributors to the OpenVDB Project
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ /*!
5
+ \file nanovdb/NodeManager.h
6
+
7
+ \author Ken Museth
8
+
9
+ \date February 12, 2021
10
+
11
+ \brief This class allows for sequential access to nodes
12
+ in a NanoVDB tree on both the host and device.
13
+
14
+ \details The ordering of the sequential access to nodes is always breadth-first!
15
+ */
16
+
17
+ #include <nanovdb/NanoVDB.h>// for NanoGrid etc
18
+ #include <nanovdb/HostBuffer.h>// for HostBuffer
19
+
20
+ #ifndef NANOVDB_NODEMANAGER_H_HAS_BEEN_INCLUDED
21
+ #define NANOVDB_NODEMANAGER_H_HAS_BEEN_INCLUDED
22
+
23
+ namespace nanovdb {
24
+
25
+ /// @brief NodeManager allows for sequential access to nodes
26
+ template <typename BuildT>
27
+ class NodeManager;
28
+
29
+ /// @brief NodeManagerHandle manages the memory of a NodeManager
30
+ template<typename BufferT = HostBuffer>
31
+ class NodeManagerHandle;
32
+
33
+ /// @brief Construct a NodeManager and return its handle
34
+ ///
35
+ /// @param grid grid whose nodes will be accessed sequentially
36
+ /// @param buffer buffer from which to allocate the output handle
37
+ ///
38
+ /// @note This is the only way to create a NodeManager since it's using
39
+ /// managed memory pointed to by a NodeManagerHandle.
40
+ template <typename BuildT, typename BufferT = HostBuffer>
41
+ NodeManagerHandle<BufferT> createNodeManager(const NanoGrid<BuildT> &grid,
42
+ const BufferT& buffer = BufferT());
43
+
44
+ struct NodeManagerData
45
+ {// 40B = 5*8B
46
+ __hostdev__ NodeManagerData(void *grid) : mPadding{0}, mGrid(grid), mPtr{0,0,0}{}
47
+ union {int64_t mPadding; uint8_t mLinear;};// 8B of which 1B is used for a binary flag
48
+ void *mGrid;// 8B pointer to either host or device grid
49
+ union {int64_t *mPtr[3], mOff[3];};// 24B, use mOff if mLinear!=0
50
+ };
51
+
52
+ /// @brief This class serves to manage a raw memory buffer of a NanoVDB NodeManager or LeafManager.
53
+ template<typename BufferT>
54
+ class NodeManagerHandle
55
+ {
56
+ GridType mGridType{GridType::Unknown};
57
+ BufferT mBuffer;
58
+
59
+ template<typename BuildT>
60
+ const NodeManager<BuildT>* getMgr() const {
61
+ return mGridType == toGridType<BuildT>() ? (const NodeManager<BuildT>*)mBuffer.data() : nullptr;
62
+ }
63
+
64
+ template<typename BuildT, typename U = BufferT>
65
+ typename util::enable_if<BufferTraits<U>::hasDeviceDual, const NodeManager<BuildT>*>::type
66
+ getDeviceMgr() const {
67
+ return mGridType == toGridType<BuildT>() ? (const NodeManager<BuildT>*)mBuffer.deviceData() : nullptr;
68
+ }
69
+
70
+ template <typename T>
71
+ static T* no_const(const T* ptr) { return const_cast<T*>(ptr); }
72
+
73
+ public:
74
+ /// @brief Move constructor from a buffer
75
+ NodeManagerHandle(GridType gridType, BufferT&& buffer) : mGridType(gridType) { mBuffer = std::move(buffer); }
76
+ /// @brief Empty ctor
77
+ NodeManagerHandle() = default;
78
+ /// @brief Disallow copy-construction
79
+ NodeManagerHandle(const NodeManagerHandle&) = delete;
80
+ /// @brief Disallow copy assignment operation
81
+ NodeManagerHandle& operator=(const NodeManagerHandle&) = delete;
82
+ /// @brief Move copy assignment operation
83
+ NodeManagerHandle& operator=(NodeManagerHandle&& other) noexcept {
84
+ mGridType = other.mGridType;
85
+ mBuffer = std::move(other.mBuffer);
86
+ other.mGridType = GridType::Unknown;
87
+ return *this;
88
+ }
89
+ /// @brief Move copy-constructor
90
+ NodeManagerHandle(NodeManagerHandle&& other) noexcept {
91
+ mGridType = other.mGridType;
92
+ mBuffer = std::move(other.mBuffer);
93
+ other.mGridType = GridType::Unknown;
94
+ }
95
+ /// @brief Default destructor
96
+ ~NodeManagerHandle() { this->reset(); }
97
+ /// @brief clear the buffer
98
+ void reset() { mBuffer.clear(); }
99
+
100
+ /// @brief Return a reference to the buffer
101
+ BufferT& buffer() { return mBuffer; }
102
+
103
+ /// @brief Return a const reference to the buffer
104
+ const BufferT& buffer() const { return mBuffer; }
105
+
106
+ /// @brief Returns a non-const pointer to the data.
107
+ ///
108
+ /// @warning Note that the return pointer can be NULL if the NodeManagerHandle was not initialized
109
+ void* data() { return mBuffer.data(); }
110
+
111
+ /// @brief Returns a const pointer to the data.
112
+ ///
113
+ /// @warning Note that the return pointer can be NULL if the NodeManagerHandle was not initialized
114
+ const void* data() const { return mBuffer.data(); }
115
+
116
+ /// @brief Returns the size in bytes of the raw memory buffer managed by this NodeManagerHandle's allocator.
117
+ uint64_t size() const { return mBuffer.size(); }
118
+
119
+ /// @brief Returns a const pointer to the NodeManager encoded in this NodeManagerHandle.
120
+ ///
121
+ /// @warning Note that the return pointer can be NULL if the template parameter does not match the specified grid!
122
+ template<typename BuildT>
123
+ const NodeManager<BuildT>* mgr() const { return this->template getMgr<BuildT>(); }
124
+
125
+ /// @brief Returns a pointer to the NodeManager encoded in this NodeManagerHandle.
126
+ ///
127
+ /// @warning Note that the return pointer can be NULL if the template parameter does not match the specified grid!
128
+ template<typename BuildT>
129
+ NodeManager<BuildT>* mgr() { return no_const(this->template getMgr<BuildT>()); }
130
+
131
+ /// @brief Return a const pointer to the NodeManager encoded in this NodeManagerHandle on the device, e.g. GPU
132
+ ///
133
+ /// @warning Note that the return pointer can be NULL if the template parameter does not match the specified grid!
134
+ template<typename BuildT, typename U = BufferT>
135
+ typename util::enable_if<BufferTraits<U>::hasDeviceDual, const NodeManager<BuildT>*>::type
136
+ deviceMgr() const { return this->template getDeviceMgr<BuildT>(); }
137
+
138
+ /// @brief Return a const pointer to the NodeManager encoded in this NodeManagerHandle on the device, e.g. GPU
139
+ ///
140
+ /// @warning Note that the return pointer can be NULL if the template parameter does not match the specified grid!
141
+ template<typename BuildT, typename U = BufferT>
142
+ typename util::enable_if<BufferTraits<U>::hasDeviceDual, NodeManager<BuildT>*>::type
143
+ deviceMgr() { return no_const(this->template getDeviceMgr<BuildT>()); }
144
+
145
+ /// @brief Upload the NodeManager to the device, e.g. from CPU to GPU
146
+ ///
147
+ /// @note This method is only available if the buffer supports devices
148
+ template<typename U = BufferT>
149
+ typename util::enable_if<BufferTraits<U>::hasDeviceDual, void>::type
150
+ deviceUpload(void* deviceGrid, void* stream = nullptr, bool sync = true)
151
+ {
152
+ assert(deviceGrid);
153
+ auto *data = reinterpret_cast<NodeManagerData*>(mBuffer.data());
154
+ void *tmp = data->mGrid;
155
+ data->mGrid = deviceGrid;
156
+ mBuffer.deviceUpload(stream, sync);
157
+ data->mGrid = tmp;
158
+ }
159
+
160
+ /// @brief Download the NodeManager from the device, e.g. from GPU to CPU
161
+ ///
162
+ /// @note This method is only available if the buffer supports devices
163
+ template<typename U = BufferT>
164
+ typename util::enable_if<BufferTraits<U>::hasDeviceDual, void>::type
165
+ deviceDownload(void* stream = nullptr, bool sync = true)
166
+ {
167
+ auto *data = reinterpret_cast<NodeManagerData*>(mBuffer.data());
168
+ void *tmp = data->mGrid;
169
+ mBuffer.deviceDownload(stream, sync);
170
+ data->mGrid = tmp;
171
+ }
172
+ };// NodeManagerHandle
173
+
174
+ /// @brief This class allows for sequential access to nodes in a NanoVDB tree
175
+ ///
176
+ /// @details Nodes are always arranged breadth first during sequential access of nodes
177
+ /// at a particular level.
178
+ template<typename BuildT>
179
+ class NodeManager : private NodeManagerData
180
+ {
181
+ using DataT = NodeManagerData;
182
+ using GridT = NanoGrid<BuildT>;
183
+ using TreeT = typename GridTree<GridT>::type;
184
+ template<int LEVEL>
185
+ using NodeT = typename NodeTrait<TreeT, LEVEL>::type;
186
+ using RootT = NodeT<3>;// root node
187
+ using Node2 = NodeT<2>;// upper internal node
188
+ using Node1 = NodeT<1>;// lower internal node
189
+ using Node0 = NodeT<0>;// leaf node
190
+
191
+ public:
192
+ static constexpr bool FIXED_SIZE = Node0::FIXED_SIZE && Node1::FIXED_SIZE && Node2::FIXED_SIZE;
193
+
194
+ NodeManager(const NodeManager&) = delete;
195
+ NodeManager(NodeManager&&) = delete;
196
+ NodeManager& operator=(const NodeManager&) = delete;
197
+ NodeManager& operator=(NodeManager&&) = delete;
198
+ ~NodeManager() = delete;
199
+
200
+ /// @brief return true if the nodes have both fixed size and are arranged breadth-first in memory.
201
+ /// This allows for direct and memory-efficient linear access to nodes.
202
+ __hostdev__ static bool isLinear(const GridT &grid) {return FIXED_SIZE && grid.isBreadthFirst();}
203
+
204
+ /// @brief return true if the nodes have both fixed size and are arranged breadth-first in memory.
205
+ /// This allows for direct and memory-efficient linear access to nodes.
206
+ __hostdev__ bool isLinear() const {return DataT::mLinear!=0u;}
207
+
208
+ /// @brief Return the memory footprint in bytes of the NodeManager derived from the specified grid
209
+ __hostdev__ static uint64_t memUsage(const GridT &grid) {
210
+ uint64_t size = sizeof(NodeManagerData);
211
+ if (!NodeManager::isLinear(grid)) {
212
+ const uint32_t *p = grid.tree().mNodeCount;
213
+ size += sizeof(int64_t)*(p[0]+p[1]+p[2]);
214
+ }
215
+ return size;
216
+ }
217
+
218
+ /// @brief Return the memory footprint in bytes of this instance
219
+ __hostdev__ uint64_t memUsage() const {return NodeManager::memUsage(this->grid());}
220
+
221
+ /// @brief Return a reference to the grid
222
+ __hostdev__ GridT& grid() { return *reinterpret_cast<GridT*>(DataT::mGrid); }
223
+ __hostdev__ const GridT& grid() const { return *reinterpret_cast<const GridT*>(DataT::mGrid); }
224
+
225
+ /// @brief Return a reference to the tree
226
+ __hostdev__ TreeT& tree() { return this->grid().tree(); }
227
+ __hostdev__ const TreeT& tree() const { return this->grid().tree(); }
228
+
229
+ /// @brief Return a reference to the root
230
+ __hostdev__ RootT& root() { return this->tree().root(); }
231
+ __hostdev__ const RootT& root() const { return this->tree().root(); }
232
+
233
+ /// @brief Return the number of tree nodes at the specified level
234
+ /// @details 0 is leaf, 1 is lower internal, and 2 is upper internal level
235
+ __hostdev__ uint64_t nodeCount(int level) const { return this->tree().nodeCount(level); }
236
+
237
+ __hostdev__ uint64_t leafCount() const { return this->tree().nodeCount(0); }
238
+ __hostdev__ uint64_t lowerCount() const { return this->tree().nodeCount(1); }
239
+ __hostdev__ uint64_t upperCount() const { return this->tree().nodeCount(2); }
240
+
241
+ /// @brief Return the i'th leaf node with respect to breadth-first ordering
242
+ template <int LEVEL>
243
+ __hostdev__ const NodeT<LEVEL>& node(uint32_t i) const {
244
+ NANOVDB_ASSERT(i < this->nodeCount(LEVEL));
245
+ const NodeT<LEVEL>* ptr = nullptr;
246
+ if (DataT::mLinear) {
247
+ ptr = util::PtrAdd<const NodeT<LEVEL>>(DataT::mGrid, DataT::mOff[LEVEL]) + i;
248
+ } else {
249
+ ptr = util::PtrAdd<const NodeT<LEVEL>>(DataT::mGrid, DataT::mPtr[LEVEL][i]);
250
+ }
251
+ NANOVDB_ASSERT(ptr && isAligned(ptr));
252
+ return *ptr;
253
+ }
254
+
255
+ /// @brief Return the i'th node with respect to breadth-first ordering
256
+ template <int LEVEL>
257
+ __hostdev__ NodeT<LEVEL>& node(uint32_t i) {
258
+ NANOVDB_ASSERT(i < this->nodeCount(LEVEL));
259
+ NodeT<LEVEL>* ptr = nullptr;
260
+ if (DataT::mLinear) {
261
+ ptr = util::PtrAdd<NodeT<LEVEL>>(DataT::mGrid, DataT::mOff[LEVEL]) + i;
262
+ } else {
263
+ ptr = util::PtrAdd<NodeT<LEVEL>>(DataT::mGrid, DataT::mPtr[LEVEL][i]);
264
+ }
265
+ NANOVDB_ASSERT(ptr && isAligned(ptr));
266
+ return *ptr;
267
+ }
268
+
269
+ /// @brief Return the i'th leaf node with respect to breadth-first ordering
270
+ __hostdev__ const Node0& leaf(uint32_t i) const { return this->node<0>(i); }
271
+ __hostdev__ Node0& leaf(uint32_t i) { return this->node<0>(i); }
272
+
273
+ /// @brief Return the i'th lower internal node with respect to breadth-first ordering
274
+ __hostdev__ const Node1& lower(uint32_t i) const { return this->node<1>(i); }
275
+ __hostdev__ Node1& lower(uint32_t i) { return this->node<1>(i); }
276
+
277
+ /// @brief Return the i'th upper internal node with respect to breadth-first ordering
278
+ __hostdev__ const Node2& upper(uint32_t i) const { return this->node<2>(i); }
279
+ __hostdev__ Node2& upper(uint32_t i) { return this->node<2>(i); }
280
+
281
+ }; // NodeManager<BuildT> class
282
+
283
+ template <typename BuildT, typename BufferT>
284
+ NodeManagerHandle<BufferT> createNodeManager(const NanoGrid<BuildT> &grid,
285
+ const BufferT& buffer)
286
+ {
287
+ NodeManagerHandle<BufferT> handle(toGridType<BuildT>(), BufferT::create(NodeManager<BuildT>::memUsage(grid), &buffer));
288
+ auto *data = reinterpret_cast<NodeManagerData*>(handle.data());
289
+ NANOVDB_ASSERT(data && isAligned(data));
290
+ NANOVDB_ASSERT(toGridType<BuildT>() == grid.gridType());
291
+ *data = NodeManagerData((void*)&grid);
292
+
293
+ if (NodeManager<BuildT>::isLinear(grid)) {
294
+ data->mLinear = uint8_t(1u);
295
+ data->mOff[0] = util::PtrDiff(grid.tree().template getFirstNode<0>(), &grid);
296
+ data->mOff[1] = util::PtrDiff(grid.tree().template getFirstNode<1>(), &grid);
297
+ data->mOff[2] = util::PtrDiff(grid.tree().template getFirstNode<2>(), &grid);
298
+ } else {
299
+ int64_t *ptr0 = data->mPtr[0] = reinterpret_cast<int64_t*>(data + 1);
300
+ int64_t *ptr1 = data->mPtr[1] = data->mPtr[0] + grid.tree().nodeCount(0);
301
+ int64_t *ptr2 = data->mPtr[2] = data->mPtr[1] + grid.tree().nodeCount(1);
302
+ // Performs depth first traversal but breadth first insertion
303
+ for (auto it2 = grid.tree().root().cbeginChild(); it2; ++it2) {
304
+ *ptr2++ = util::PtrDiff(&*it2, &grid);
305
+ for (auto it1 = it2->cbeginChild(); it1; ++it1) {
306
+ *ptr1++ = util::PtrDiff(&*it1, &grid);
307
+ for (auto it0 = it1->cbeginChild(); it0; ++it0) {
308
+ *ptr0++ = util::PtrDiff(&*it0, &grid);
309
+ }// loop over child nodes of the lower internal node
310
+ }// loop over child nodes of the upper internal node
311
+ }// loop over child nodes of the root node
312
+ }
313
+
314
+ return handle;// handle is converted to an r-value, so the return value is move constructed!
315
+ }// createNodeManager
316
+
317
+ } // namespace nanovdb
318
+
319
+ #if defined(__CUDACC__)
320
+ #include <nanovdb/cuda/NodeManager.cuh>
321
+ #endif// defined(__CUDACC__)
322
+
323
+ #endif // NANOVDB_NODEMANAGER_H_HAS_BEEN_INCLUDED
@@ -1,6 +1,6 @@
1
1
 
2
2
  // Copyright Contributors to the OpenVDB Project
3
- // SPDX-License-Identifier: MPL-2.0
3
+ // SPDX-License-Identifier: Apache-2.0
4
4
 
5
5
  /*!
6
6
  \file nanovdb/PNanoVDB.h
@@ -933,7 +933,7 @@ PNANOVDB_FORCE_INLINE void pnanovdb_write_vec3(pnanovdb_buf_t buf, pnanovdb_addr
933
933
  #define PNANOVDB_MAGIC_FILE 0x324244566f6e614eUL// "NanoVDB2" in hex - little endian (uint64_t)
934
934
 
935
935
  #define PNANOVDB_MAJOR_VERSION_NUMBER 32// reflects changes to the ABI
936
- #define PNANOVDB_MINOR_VERSION_NUMBER 7// reflects changes to the API but not ABI
936
+ #define PNANOVDB_MINOR_VERSION_NUMBER 8// reflects changes to the API but not ABI
937
937
  #define PNANOVDB_PATCH_VERSION_NUMBER 0// reflects bug-fixes with no ABI or API changes
938
938
 
939
939
  #define PNANOVDB_GRID_TYPE_UNKNOWN 0