warp-lang 1.8.1__py3-none-macosx_10_13_universal2.whl → 1.9.0__py3-none-macosx_10_13_universal2.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (134)
  1. warp/__init__.py +282 -103
  2. warp/__init__.pyi +482 -110
  3. warp/bin/libwarp-clang.dylib +0 -0
  4. warp/bin/libwarp.dylib +0 -0
  5. warp/build.py +93 -30
  6. warp/build_dll.py +47 -67
  7. warp/builtins.py +955 -137
  8. warp/codegen.py +312 -206
  9. warp/config.py +1 -1
  10. warp/context.py +1249 -784
  11. warp/examples/core/example_marching_cubes.py +1 -0
  12. warp/examples/core/example_render_opengl.py +100 -3
  13. warp/examples/fem/example_apic_fluid.py +98 -52
  14. warp/examples/fem/example_convection_diffusion_dg.py +25 -4
  15. warp/examples/fem/example_diffusion_mgpu.py +8 -3
  16. warp/examples/fem/utils.py +68 -22
  17. warp/fabric.py +1 -1
  18. warp/fem/cache.py +27 -19
  19. warp/fem/domain.py +2 -2
  20. warp/fem/field/nodal_field.py +2 -2
  21. warp/fem/field/virtual.py +264 -166
  22. warp/fem/geometry/geometry.py +5 -5
  23. warp/fem/integrate.py +129 -51
  24. warp/fem/space/restriction.py +4 -0
  25. warp/fem/space/shape/tet_shape_function.py +3 -10
  26. warp/jax_experimental/custom_call.py +1 -1
  27. warp/jax_experimental/ffi.py +2 -1
  28. warp/marching_cubes.py +708 -0
  29. warp/native/array.h +99 -4
  30. warp/native/builtin.h +82 -5
  31. warp/native/bvh.cpp +64 -28
  32. warp/native/bvh.cu +58 -58
  33. warp/native/bvh.h +2 -2
  34. warp/native/clang/clang.cpp +7 -7
  35. warp/native/coloring.cpp +8 -2
  36. warp/native/crt.cpp +2 -2
  37. warp/native/crt.h +3 -5
  38. warp/native/cuda_util.cpp +41 -10
  39. warp/native/cuda_util.h +10 -4
  40. warp/native/exports.h +1842 -1908
  41. warp/native/fabric.h +2 -1
  42. warp/native/hashgrid.cpp +37 -37
  43. warp/native/hashgrid.cu +2 -2
  44. warp/native/initializer_array.h +1 -1
  45. warp/native/intersect.h +2 -2
  46. warp/native/mat.h +1910 -116
  47. warp/native/mathdx.cpp +43 -43
  48. warp/native/mesh.cpp +24 -24
  49. warp/native/mesh.cu +26 -26
  50. warp/native/mesh.h +4 -2
  51. warp/native/nanovdb/GridHandle.h +179 -12
  52. warp/native/nanovdb/HostBuffer.h +8 -7
  53. warp/native/nanovdb/NanoVDB.h +517 -895
  54. warp/native/nanovdb/NodeManager.h +323 -0
  55. warp/native/nanovdb/PNanoVDB.h +2 -2
  56. warp/native/quat.h +331 -14
  57. warp/native/range.h +7 -1
  58. warp/native/reduce.cpp +10 -10
  59. warp/native/reduce.cu +13 -14
  60. warp/native/runlength_encode.cpp +2 -2
  61. warp/native/runlength_encode.cu +5 -5
  62. warp/native/scan.cpp +3 -3
  63. warp/native/scan.cu +4 -4
  64. warp/native/sort.cpp +10 -10
  65. warp/native/sort.cu +22 -22
  66. warp/native/sparse.cpp +8 -8
  67. warp/native/sparse.cu +13 -13
  68. warp/native/spatial.h +366 -17
  69. warp/native/temp_buffer.h +2 -2
  70. warp/native/tile.h +283 -69
  71. warp/native/vec.h +381 -14
  72. warp/native/volume.cpp +54 -54
  73. warp/native/volume.cu +1 -1
  74. warp/native/volume.h +2 -1
  75. warp/native/volume_builder.cu +30 -37
  76. warp/native/warp.cpp +150 -149
  77. warp/native/warp.cu +323 -192
  78. warp/native/warp.h +227 -226
  79. warp/optim/linear.py +736 -271
  80. warp/render/imgui_manager.py +289 -0
  81. warp/render/render_opengl.py +85 -6
  82. warp/sim/graph_coloring.py +2 -2
  83. warp/sparse.py +558 -175
  84. warp/tests/aux_test_module_aot.py +7 -0
  85. warp/tests/cuda/test_async.py +3 -3
  86. warp/tests/cuda/test_conditional_captures.py +101 -0
  87. warp/tests/geometry/test_marching_cubes.py +233 -12
  88. warp/tests/sim/test_coloring.py +6 -6
  89. warp/tests/test_array.py +56 -5
  90. warp/tests/test_codegen.py +3 -2
  91. warp/tests/test_context.py +8 -15
  92. warp/tests/test_enum.py +136 -0
  93. warp/tests/test_examples.py +2 -2
  94. warp/tests/test_fem.py +45 -2
  95. warp/tests/test_fixedarray.py +229 -0
  96. warp/tests/test_func.py +18 -15
  97. warp/tests/test_future_annotations.py +7 -5
  98. warp/tests/test_linear_solvers.py +30 -0
  99. warp/tests/test_map.py +1 -1
  100. warp/tests/test_mat.py +1518 -378
  101. warp/tests/test_mat_assign_copy.py +178 -0
  102. warp/tests/test_mat_constructors.py +574 -0
  103. warp/tests/test_module_aot.py +287 -0
  104. warp/tests/test_print.py +69 -0
  105. warp/tests/test_quat.py +140 -34
  106. warp/tests/test_quat_assign_copy.py +145 -0
  107. warp/tests/test_reload.py +2 -1
  108. warp/tests/test_sparse.py +71 -0
  109. warp/tests/test_spatial.py +140 -34
  110. warp/tests/test_spatial_assign_copy.py +160 -0
  111. warp/tests/test_struct.py +43 -3
  112. warp/tests/test_types.py +0 -20
  113. warp/tests/test_vec.py +179 -34
  114. warp/tests/test_vec_assign_copy.py +143 -0
  115. warp/tests/tile/test_tile.py +184 -18
  116. warp/tests/tile/test_tile_cholesky.py +605 -0
  117. warp/tests/tile/test_tile_load.py +169 -0
  118. warp/tests/tile/test_tile_mathdx.py +2 -558
  119. warp/tests/tile/test_tile_matmul.py +1 -1
  120. warp/tests/tile/test_tile_mlp.py +1 -1
  121. warp/tests/tile/test_tile_shared_memory.py +5 -5
  122. warp/tests/unittest_suites.py +6 -0
  123. warp/tests/walkthrough_debug.py +1 -1
  124. warp/thirdparty/unittest_parallel.py +108 -9
  125. warp/types.py +554 -264
  126. warp/utils.py +68 -86
  127. {warp_lang-1.8.1.dist-info → warp_lang-1.9.0.dist-info}/METADATA +28 -65
  128. {warp_lang-1.8.1.dist-info → warp_lang-1.9.0.dist-info}/RECORD +131 -121
  129. warp/native/marching.cpp +0 -19
  130. warp/native/marching.cu +0 -514
  131. warp/native/marching.h +0 -19
  132. {warp_lang-1.8.1.dist-info → warp_lang-1.9.0.dist-info}/WHEEL +0 -0
  133. {warp_lang-1.8.1.dist-info → warp_lang-1.9.0.dist-info}/licenses/LICENSE.md +0 -0
  134. {warp_lang-1.8.1.dist-info → warp_lang-1.9.0.dist-info}/top_level.txt +0 -0
warp/native/nanovdb/GridHandle.h
@@ -1,5 +1,5 @@
  // Copyright Contributors to the OpenVDB Project
- // SPDX-License-Identifier: MPL-2.0
+ // SPDX-License-Identifier: Apache-2.0

  /*!
  \file nanovdb/GridHandle.h
@@ -22,6 +22,7 @@

  #include <nanovdb/NanoVDB.h>// for toGridType
  #include <nanovdb/HostBuffer.h>
+ #include <nanovdb/tools/GridChecksum.h>// for updateGridCount

  namespace nanovdb {

@@ -44,13 +45,14 @@ class GridHandle
  public:
  using BufferType = BufferT;

- /// @brief Move constructor from a host buffer
+ /// @brief Move constructor from a dual host-device buffer
  /// @param buffer buffer containing one or more NanoGrids that will be moved into this GridHandle
  /// @throw Will throw and error with the buffer does not contain a valid NanoGrid!
+ /// @note The implementation of this template specialization is in nanovdb/cuda/GridHandle.cuh since it requires CUDA
  template<typename T = BufferT, typename util::enable_if<BufferTraits<T>::hasDeviceDual, int>::type = 0>
  GridHandle(T&& buffer);

- /// @brief Move constructor from a dual host-device buffer
+ /// @brief Move constructor from a host buffer
  /// @param buffer buffer containing one or more NanoGrids that will be moved into this GridHandle
  /// @throw Will throw and error with the buffer does not contain a valid NanoGrid!
  template<typename T = BufferT, typename util::disable_if<BufferTraits<T>::hasDeviceDual, int>::type = 0>
@@ -109,19 +111,28 @@
  typename util::enable_if<BufferTraits<U>::hasDeviceDual, const void*>::type
  deviceData() const { return mBuffer.deviceData(); }
  template<typename U = BufferT>
+ typename util::enable_if<BufferTraits<U>::hasDeviceDual, const void*>::type
+ deviceData(int device) const { return mBuffer.deviceData(device); }
+ template<typename U = BufferT>
  typename util::enable_if<BufferTraits<U>::hasDeviceDual, void*>::type
  deviceData() { return mBuffer.deviceData(); }
+ template<typename U = BufferT>
+ typename util::enable_if<BufferTraits<U>::hasDeviceDual, void*>::type
+ deviceData(int device) { return mBuffer.deviceData(device); }

+ //@{
  /// @brief Returns the size in bytes of the raw memory buffer managed by this GridHandle.
- uint64_t size() const { return mBuffer.size(); }
+ [[deprecated("Use GridHandle::bufferSize instead.")]] uint64_t size() const { return mBuffer.size(); }
+ uint64_t bufferSize() const { return mBuffer.size(); }
+ //@}

  //@{
  /// @brief Return true if this handle is empty, i.e. has no allocated memory
- bool empty() const { return this->size() == 0; }
- bool isEmpty() const { return this->size() == 0; }
+ bool empty() const { return mBuffer.size() == 0; }
+ bool isEmpty() const { return mBuffer.size() == 0; }
  //@}

- /// @brief Return true if this handle contains any grids
+ /// @brief Return true if this handle is not empty, i.e. contains at least one grid
  operator bool() const { return !this->empty(); }

  /// @brief Returns a const host pointer to the @a n'th NanoVDB grid encoded in this GridHandle.
@@ -151,7 +162,7 @@

  /// @brief Return a const pointer to the @a n'th grid encoded in this GridHandle on the device, e.g. GPU
  /// @tparam ValueT Value type of the grid point to be returned
- /// @param n Index if of the grid pointer to be returned
+ /// @param n Index of the grid pointer to be returned
  /// @param verbose if non-zero error messages will be printed in case something failed
  /// @warning Note that the return pointer can be NULL if the GridHandle was not initialized, @a n is invalid,
  /// or if the template parameter does not match the specified grid.
@@ -163,13 +174,25 @@
  /// @note This method is only available if the buffer supports devices
  template<typename U = BufferT>
  typename util::enable_if<BufferTraits<U>::hasDeviceDual, void>::type
- deviceUpload(void* stream = nullptr, bool sync = true) { mBuffer.deviceUpload(stream, sync); }
+ deviceUpload(void* stream, bool sync = true) { mBuffer.deviceUpload(stream, sync); }
+
+ /// @brief Upload the host buffer to a specific device buffer. It device buffer doesn't exist it's created first
+ /// @param device Device to upload host data to
+ /// @param stream cuda stream
+ /// @param sync if false the memory copy is asynchronous
+ template<typename U = BufferT>
+ typename util::enable_if<BufferTraits<U>::hasDeviceDual, void>::type
+ deviceUpload(int device = 0, void* stream = nullptr, bool sync = true) { mBuffer.deviceUpload(device, stream, sync); }

  /// @brief Download the grid to from the device, e.g. from GPU to CPU
  /// @note This method is only available if the buffer supports devices
  template<typename U = BufferT>
  typename util::enable_if<BufferTraits<U>::hasDeviceDual, void>::type
- deviceDownload(void* stream = nullptr, bool sync = true) { mBuffer.deviceDownload(stream, sync); }
+ deviceDownload(void* stream, bool sync = true) { mBuffer.deviceDownload(stream, sync); }
+
+ template<typename U = BufferT>
+ typename util::enable_if<BufferTraits<U>::hasDeviceDual, void>::type
+ deviceDownload(int device = 0, void* stream = nullptr, bool sync = true) { mBuffer.deviceDownload(device, stream, sync); }

  /// @brief Check if the buffer is this handle has any padding, i.e. if the buffer is larger than the combined size of all its grids
  /// @return true is the combined size of all grid is smaller than the buffer size
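
The deviceUpload/deviceDownload overloads added above take an explicit device index, so a single handle can now target more than one GPU. A minimal usage sketch (not part of the diff itself), assuming `handle` is a GridHandle backed by a dual host-device buffer and `stream` is a valid CUDA stream cast to void*:

    handle.deviceUpload(/*device=*/0, /*stream=*/stream, /*sync=*/false);  // asynchronous copy to device 0
    handle.deviceUpload(/*device=*/1, /*stream=*/stream, /*sync=*/true);   // blocking copy to device 1; the device buffer is created if missing
    const auto* d_grid = handle.deviceGrid<float>(0);                      // device pointer to grid #0
    handle.deviceDownload(/*device=*/1, nullptr, /*sync=*/true);           // copy device 1 back into the host buffer
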
@@ -183,6 +206,23 @@
  /// @return Return the byte size of the specified grid
  uint64_t gridSize(uint32_t n = 0) const {return mMetaData[n].size; }

+ /// @brief compute the total sum of memory footprints of all the grids in this buffer
+ /// @return the number of bytes occupied by all grids associated with this buffer
+ uint64_t totalGridSize() const {
+ uint64_t sum = 0;
+ for (auto &m : mMetaData) sum += m.size;
+ NANOVDB_ASSERT(sum <= mBuffer.size());
+ return sum;
+ }
+
+ /// @brief compute the size of unused storage in this buffer
+ /// @return the number of unused bytes in this buffer.
+ uint64_t freeSize() const {return mBuffer.size() - this->totalGridSize();}
+
+ /// @brief Test if this buffer has any unused storage left, i.e. memory not occupied by grids
+ /// @return true if there is no extra storage left in this buffer, i.e. empty or fully occupied with grids
+ bool isFull() const { return this->totalGridSize() == mBuffer.size(); }
+
  /// @brief Return the GridType of the @a n'th grid in this GridHandle
  /// @param n index of the grid (assumed to be less than gridCount())
  /// @return Return the GridType of the specified grid
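
The new occupancy helpers distinguish the raw buffer size from the bytes actually occupied by grids. A brief sketch of the intended queries, assuming `handle` is a populated GridHandle:

    uint64_t used   = handle.totalGridSize(); // bytes occupied by all grids in the buffer
    uint64_t unused = handle.freeSize();      // padding left in the buffer
    bool     full   = handle.isFull();        // true when used == handle.bufferSize()
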
@@ -249,12 +289,12 @@
  /// @param is input stream containing a raw grid buffer
  /// @param gridName string name of the grid to be read
  /// @param pool optional pool from which to allocate the new grid buffer
- /// @throw Will throw a std::logic_error if the stream does not contain a valid raw grid with the speficied name
+ /// @throw Will throw a std::logic_error if the stream does not contain a valid raw grid with the specified name
  void read(std::istream& is, const std::string &gridName, const BufferT& pool = BufferT());

  /// @brief Read a raw grid buffer from a file
  /// @param filename string name of the input file containing a raw grid buffer
- /// @param pool optional pool from which to allocate the new grid buffe
+ /// @param pool optional pool from which to allocate the new grid buffer
  void read(const std::string &fileName, const BufferT& pool = BufferT()) {
  std::ifstream is(fileName, std::ios::in | std::ios::binary);
  if (!is.is_open()) throw std::ios_base::failure("Unable to open file named \"" + fileName + "\" for input");
@@ -314,6 +354,7 @@ inline __hostdev__ void cpyGridHandleMeta(const GridData *data, GridHandleMetaDa
  }
  }// void cpyGridHandleMeta(const GridData *data, GridHandleMetaData *meta)

+ // template specialization of move constructor from a host buffer
  template<typename BufferT>
  template<typename T, typename util::disable_if<BufferTraits<T>::hasDeviceDual, int>::type>
  GridHandle<BufferT>::GridHandle(T&& buffer)
@@ -356,6 +397,132 @@ GridHandle<BufferT>::deviceGrid(uint32_t n) const
  return util::PtrAdd<NanoGrid<ValueT>>(data, mMetaData[n].offset);
  }// GridHandle<BufferT>::deviceGrid(uint32_t n) cons

+ template<typename BufferT>
+ void GridHandle<BufferT>::read(std::istream& is, const BufferT& pool)
+ {
+ GridData data;
+ is.read((char*)&data, sizeof(GridData));
+ if (data.isValid()) {
+ uint64_t size = data.mGridSize, sum = 0u;
+ while(data.mGridIndex + 1u < data.mGridCount) {// loop over remaining raw grids in stream
+ is.seekg(data.mGridSize - sizeof(GridData), std::ios::cur);// skip grid
+ is.read((char*)&data, sizeof(GridData));
+ sum += data.mGridSize;
+ }
+ auto buffer = BufferT::create(size + sum, &pool);
+ is.seekg(-int64_t(sum + sizeof(GridData)), std::ios::cur);// rewind to start
+ is.read((char*)(buffer.data()), buffer.size());
+ *this = GridHandle(std::move(buffer));
+ } else {
+ is.seekg(-sizeof(GridData), std::ios::cur);// rewind
+ throw std::logic_error("This stream does not contain a valid raw grid buffer");
+ }
+ }// void GridHandle<BufferT>::read(std::istream& is, const BufferT& pool)
+
+ template<typename BufferT>
+ void GridHandle<BufferT>::read(std::istream& is, uint32_t n, const BufferT& pool)
+ {
+ GridData data;
+ is.read((char*)&data, sizeof(GridData));
+ if (data.isValid()) {
+ if (n>=data.mGridCount) throw std::runtime_error("stream does not contain a #" + std::to_string(n) + " grid");
+ while(data.mGridIndex != n) {
+ is.seekg(data.mGridSize - sizeof(GridData), std::ios::cur);// skip grid
+ is.read((char*)&data, sizeof(GridData));
+ }
+ auto buffer = BufferT::create(data.mGridSize, &pool);
+ is.seekg(-sizeof(GridData), std::ios::cur);// rewind
+ is.read((char*)(buffer.data()), data.mGridSize);
+ tools::updateGridCount((GridData*)buffer.data(), 0u, 1u);
+ *this = GridHandle(std::move(buffer));
+ } else {
+ is.seekg(-sizeof(GridData), std::ios::cur);// rewind sizeof(GridData) bytes to undo initial read
+ throw std::logic_error("This file does not contain a valid raw buffer");
+ }
+ }// void GridHandle<BufferT>::read(std::istream& is, uint32_t n, const BufferT& pool)
+
+ template<typename BufferT>
+ void GridHandle<BufferT>::read(std::istream& is, const std::string &gridName, const BufferT& pool)
+ {
+ static const std::streamsize byteSize = sizeof(GridData);
+ GridData data;
+ is.read((char*)&data, byteSize);
+ is.seekg(-byteSize, std::ios::cur);// rewind
+ if (data.isValid()) {
+ uint32_t n = 0;
+ while(data.mGridName != gridName && n++ < data.mGridCount) {
+ is.seekg(data.mGridSize, std::ios::cur);// skip grid
+ is.read((char*)&data, byteSize);// read sizeof(GridData) bytes
+ is.seekg(-byteSize, std::ios::cur);// rewind
+ }
+ if (n>data.mGridCount) throw std::runtime_error("No raw grid named \""+gridName+"\"");
+ auto buffer = BufferT::create(data.mGridSize, &pool);
+ is.read((char*)(buffer.data()), data.mGridSize);
+ tools::updateGridCount((GridData*)buffer.data(), 0u, 1u);
+ *this = GridHandle(std::move(buffer));
+ } else {
+ throw std::logic_error("This file does not contain a valid raw buffer");
+ }
+ }// void GridHandle<BufferT>::read(std::istream& is, const std::string &gridName n, const BufferT& pool)
+
+ // --------------------------> free-standing functions <------------------------------------
+
+ /// @brief Split all grids in a single GridHandle into a vector of multiple GridHandles each with a single grid
+ /// @tparam BufferT Type of the input and output grid buffers
+ /// @param handle GridHandle with grids that will be slip into individual GridHandles
+ /// @param pool optional pool used for allocation of output GridHandle
+ /// @return Vector of GridHandles each containing a single grid
+ template<typename BufferT, template <class, class...> class VectorT = std::vector>
+ inline VectorT<GridHandle<BufferT>>
+ splitGrids(const GridHandle<BufferT> &handle, const BufferT* other = nullptr)
+ {
+ using HandleT = GridHandle<BufferT>;
+ const void *ptr = handle.data();
+ if (ptr == nullptr) return VectorT<HandleT>();
+ VectorT<HandleT> handles(handle.gridCount());
+ for (auto &h : handles) {
+ const GridData *src = reinterpret_cast<const GridData*>(ptr);
+ NANOVDB_ASSERT(src->isValid());
+ auto buffer = BufferT::create(src->mGridSize, other);
+ GridData *dst = reinterpret_cast<GridData*>(buffer.data());
+ std::memcpy(dst, src, src->mGridSize);
+ tools::updateGridCount(dst, 0u, 1u);
+ h = HandleT(std::move(buffer));
+ ptr = util::PtrAdd(ptr, src->mGridSize);
+ }
+ return std::move(handles);
+ }// splitGrids
+
+ /// @brief Combines (or merges) multiple GridHandles into a single GridHandle containing all grids
+ /// @tparam BufferT Type of the input and output grid buffers
+ /// @param handles Vector of GridHandles to be combined
+ /// @param pool optional pool used for allocation of output GridHandle
+ /// @return single GridHandle containing all input grids
+ template<typename BufferT, template <class, class...> class VectorT>
+ inline GridHandle<BufferT>
+ mergeGrids(const VectorT<GridHandle<BufferT>> &handles, const BufferT* pool = nullptr)
+ {
+ uint64_t size = 0u;
+ uint32_t counter = 0u, gridCount = 0u;
+ for (auto &h : handles) {
+ gridCount += h.gridCount();
+ for (uint32_t n=0; n<h.gridCount(); ++n) size += h.gridSize(n);
+ }
+ auto buffer = BufferT::create(size, pool);
+ void *dst = buffer.data();
+ for (auto &h : handles) {
+ const void *src = h.data();
+ for (uint32_t n=0; n<h.gridCount(); ++n) {
+ std::memcpy(dst, src, h.gridSize(n));
+ GridData *data = reinterpret_cast<GridData*>(dst);
+ NANOVDB_ASSERT(data->isValid());
+ tools::updateGridCount(data, counter++, gridCount);
+ dst = util::PtrAdd(dst, data->mGridSize);
+ src = util::PtrAdd(src, data->mGridSize);
+ }
+ }
+ return GridHandle<BufferT>(std::move(buffer));
+ }// mergeGrids

  } // namespace nanovdb

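The free-standing splitGrids/mergeGrids helpers introduced at the end of this file operate purely on GridHandles. A hedged round-trip sketch, assuming `handle` is a nanovdb::GridHandle<BufferT> that already holds several grids:

    // split a multi-grid handle into one handle per grid, then recombine them
    auto parts  = nanovdb::splitGrids(handle);  // std::vector<nanovdb::GridHandle<BufferT>>
    auto merged = nanovdb::mergeGrids(parts);   // single handle containing all grids again
    // merged.gridCount() should equal handle.gridCount()
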
warp/native/nanovdb/HostBuffer.h
@@ -1,5 +1,5 @@
  // Copyright Contributors to the OpenVDB Project
- // SPDX-License-Identifier: MPL-2.0
+ // SPDX-License-Identifier: Apache-2.0

  /*!
  @file nanovdb/HostBuffer.h
@@ -192,6 +192,13 @@
  void* data() { return mData; }
  //@}

+ /// @brief Returns an offset pointer of a specific type from the allocated host memory
+ /// @tparam T Type of the pointer returned
+ /// @param count Numbers of elements of @c parameter type T to skip
+ /// @warning might return NULL
+ template <typename T>
+ T* data(ptrdiff_t count = 0) const {return mData ? reinterpret_cast<T*>(mData) + count : nullptr;}
+
  //@{
  /// @brief Returns the size in bytes associated with this buffer.
  uint64_t bufferSize() const { return mSize; }
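
The templated data<T>() accessor added above returns a typed, element-offset pointer instead of a raw void*. A small illustrative sketch (assumes `buffer` is a nanovdb::HostBuffer with at least two floats of storage):

    float* first  = buffer.data<float>();   // typed pointer to the start of the host memory
    float* second = buffer.data<float>(1);  // same pointer advanced by one float; NULL if the buffer holds no data
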
@@ -316,7 +323,6 @@ struct HostBuffer::Pool
  << mSize << " bytes of which\n\t" << (util::PtrDiff(alignedFree, mData) - mPadding)
  << " bytes are used by " << mRegister.size() << " other buffer(s). "
  << "Pool is " << (mManaged ? "internally" : "externally") << " managed.\n";
- //std::cerr << ss.str();
  throw std::runtime_error(ss.str());
  }
  buffer->mSize = size;
@@ -389,7 +395,6 @@
  }

  for (HostBuffer* buffer : mRegister) { // update registered buffers
- //buffer->mData = paddedData + ptrdiff_t(buffer->mData - (mData + mPadding));
  buffer->mData = util::PtrAdd(paddedData, util::PtrDiff(buffer->mData, util::PtrAdd(mData, mPadding)));
  }
  mFree = util::PtrAdd(paddedData, memUsage); // update the free pointer
@@ -414,12 +419,8 @@ private:

  static void* alloc(uint64_t size)
  {
- //#if (__cplusplus >= 201703L)
- // return std::aligned_alloc(NANOVDB_DATA_ALIGNMENT, size);//C++17 or newer
- //#else
  // make sure we alloc enough space to align the result
  return std::malloc(size + NANOVDB_DATA_ALIGNMENT);
- //#endif
  }

  static void* realloc(void* const origData,