warp-lang 1.1.0-py3-none-manylinux2014_aarch64.whl → 1.2.1-py3-none-manylinux2014_aarch64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (218)
  1. warp/bin/warp-clang.so +0 -0
  2. warp/bin/warp.so +0 -0
  3. warp/build.py +10 -37
  4. warp/build_dll.py +2 -2
  5. warp/builtins.py +274 -6
  6. warp/codegen.py +51 -4
  7. warp/config.py +2 -2
  8. warp/constants.py +4 -0
  9. warp/context.py +422 -203
  10. warp/examples/benchmarks/benchmark_api.py +0 -2
  11. warp/examples/benchmarks/benchmark_cloth_warp.py +0 -1
  12. warp/examples/benchmarks/benchmark_launches.py +0 -2
  13. warp/examples/core/example_dem.py +0 -2
  14. warp/examples/core/example_fluid.py +0 -2
  15. warp/examples/core/example_graph_capture.py +0 -2
  16. warp/examples/core/example_marching_cubes.py +0 -2
  17. warp/examples/core/example_mesh.py +0 -2
  18. warp/examples/core/example_mesh_intersect.py +0 -2
  19. warp/examples/core/example_nvdb.py +0 -2
  20. warp/examples/core/example_raycast.py +0 -2
  21. warp/examples/core/example_raymarch.py +0 -2
  22. warp/examples/core/example_render_opengl.py +0 -2
  23. warp/examples/core/example_sph.py +0 -2
  24. warp/examples/core/example_torch.py +0 -3
  25. warp/examples/core/example_wave.py +0 -2
  26. warp/examples/fem/example_apic_fluid.py +140 -115
  27. warp/examples/fem/example_burgers.py +262 -0
  28. warp/examples/fem/example_convection_diffusion.py +0 -2
  29. warp/examples/fem/example_convection_diffusion_dg.py +0 -2
  30. warp/examples/fem/example_deformed_geometry.py +0 -2
  31. warp/examples/fem/example_diffusion.py +0 -2
  32. warp/examples/fem/example_diffusion_3d.py +5 -4
  33. warp/examples/fem/example_diffusion_mgpu.py +0 -2
  34. warp/examples/fem/example_mixed_elasticity.py +0 -2
  35. warp/examples/fem/example_navier_stokes.py +0 -2
  36. warp/examples/fem/example_stokes.py +0 -2
  37. warp/examples/fem/example_stokes_transfer.py +0 -2
  38. warp/examples/optim/example_bounce.py +0 -2
  39. warp/examples/optim/example_cloth_throw.py +0 -2
  40. warp/examples/optim/example_diffray.py +0 -2
  41. warp/examples/optim/example_drone.py +0 -2
  42. warp/examples/optim/example_inverse_kinematics.py +0 -2
  43. warp/examples/optim/example_inverse_kinematics_torch.py +0 -2
  44. warp/examples/optim/example_spring_cage.py +0 -2
  45. warp/examples/optim/example_trajectory.py +0 -2
  46. warp/examples/optim/example_walker.py +0 -2
  47. warp/examples/sim/example_cartpole.py +0 -2
  48. warp/examples/sim/example_cloth.py +0 -2
  49. warp/examples/sim/example_granular.py +0 -2
  50. warp/examples/sim/example_granular_collision_sdf.py +0 -2
  51. warp/examples/sim/example_jacobian_ik.py +0 -2
  52. warp/examples/sim/example_particle_chain.py +0 -2
  53. warp/examples/sim/example_quadruped.py +0 -2
  54. warp/examples/sim/example_rigid_chain.py +0 -2
  55. warp/examples/sim/example_rigid_contact.py +0 -2
  56. warp/examples/sim/example_rigid_force.py +0 -2
  57. warp/examples/sim/example_rigid_gyroscopic.py +0 -2
  58. warp/examples/sim/example_rigid_soft_contact.py +0 -2
  59. warp/examples/sim/example_soft_body.py +0 -2
  60. warp/fem/__init__.py +1 -0
  61. warp/fem/cache.py +3 -1
  62. warp/fem/geometry/__init__.py +1 -0
  63. warp/fem/geometry/element.py +4 -0
  64. warp/fem/geometry/grid_3d.py +0 -4
  65. warp/fem/geometry/nanogrid.py +455 -0
  66. warp/fem/integrate.py +63 -9
  67. warp/fem/space/__init__.py +43 -158
  68. warp/fem/space/basis_space.py +34 -0
  69. warp/fem/space/collocated_function_space.py +1 -1
  70. warp/fem/space/grid_2d_function_space.py +13 -132
  71. warp/fem/space/grid_3d_function_space.py +16 -154
  72. warp/fem/space/hexmesh_function_space.py +37 -134
  73. warp/fem/space/nanogrid_function_space.py +202 -0
  74. warp/fem/space/quadmesh_2d_function_space.py +12 -119
  75. warp/fem/space/restriction.py +4 -1
  76. warp/fem/space/shape/__init__.py +77 -0
  77. warp/fem/space/shape/cube_shape_function.py +5 -15
  78. warp/fem/space/tetmesh_function_space.py +6 -76
  79. warp/fem/space/trimesh_2d_function_space.py +6 -76
  80. warp/native/array.h +12 -3
  81. warp/native/builtin.h +48 -5
  82. warp/native/bvh.cpp +14 -10
  83. warp/native/bvh.cu +23 -15
  84. warp/native/bvh.h +1 -0
  85. warp/native/clang/clang.cpp +2 -1
  86. warp/native/crt.cpp +11 -1
  87. warp/native/crt.h +18 -1
  88. warp/native/exports.h +187 -0
  89. warp/native/mat.h +47 -0
  90. warp/native/mesh.cpp +1 -1
  91. warp/native/mesh.cu +1 -2
  92. warp/native/nanovdb/GridHandle.h +366 -0
  93. warp/native/nanovdb/HostBuffer.h +590 -0
  94. warp/native/nanovdb/NanoVDB.h +3999 -2157
  95. warp/native/nanovdb/PNanoVDB.h +936 -99
  96. warp/native/quat.h +28 -1
  97. warp/native/rand.h +5 -1
  98. warp/native/vec.h +45 -1
  99. warp/native/volume.cpp +335 -103
  100. warp/native/volume.cu +39 -13
  101. warp/native/volume.h +725 -303
  102. warp/native/volume_builder.cu +381 -360
  103. warp/native/volume_builder.h +16 -1
  104. warp/native/volume_impl.h +61 -0
  105. warp/native/warp.cu +8 -2
  106. warp/native/warp.h +15 -7
  107. warp/render/render_opengl.py +191 -52
  108. warp/sim/integrator_featherstone.py +10 -3
  109. warp/sim/integrator_xpbd.py +16 -22
  110. warp/sparse.py +89 -27
  111. warp/stubs.py +83 -0
  112. warp/tests/assets/test_index_grid.nvdb +0 -0
  113. warp/tests/aux_test_dependent.py +0 -2
  114. warp/tests/aux_test_grad_customs.py +0 -2
  115. warp/tests/aux_test_reference.py +0 -2
  116. warp/tests/aux_test_reference_reference.py +0 -2
  117. warp/tests/aux_test_square.py +0 -2
  118. warp/tests/disabled_kinematics.py +0 -2
  119. warp/tests/test_adam.py +0 -2
  120. warp/tests/test_arithmetic.py +0 -36
  121. warp/tests/test_array.py +9 -11
  122. warp/tests/test_array_reduce.py +0 -2
  123. warp/tests/test_async.py +0 -2
  124. warp/tests/test_atomic.py +0 -2
  125. warp/tests/test_bool.py +58 -50
  126. warp/tests/test_builtins_resolution.py +0 -2
  127. warp/tests/test_bvh.py +0 -2
  128. warp/tests/test_closest_point_edge_edge.py +0 -1
  129. warp/tests/test_codegen.py +0 -4
  130. warp/tests/test_compile_consts.py +130 -10
  131. warp/tests/test_conditional.py +0 -2
  132. warp/tests/test_copy.py +0 -2
  133. warp/tests/test_ctypes.py +6 -8
  134. warp/tests/test_dense.py +0 -2
  135. warp/tests/test_devices.py +0 -2
  136. warp/tests/test_dlpack.py +9 -11
  137. warp/tests/test_examples.py +42 -39
  138. warp/tests/test_fabricarray.py +0 -3
  139. warp/tests/test_fast_math.py +0 -2
  140. warp/tests/test_fem.py +75 -54
  141. warp/tests/test_fp16.py +0 -2
  142. warp/tests/test_func.py +0 -2
  143. warp/tests/test_generics.py +27 -2
  144. warp/tests/test_grad.py +147 -8
  145. warp/tests/test_grad_customs.py +0 -2
  146. warp/tests/test_hash_grid.py +1 -3
  147. warp/tests/test_import.py +0 -2
  148. warp/tests/test_indexedarray.py +0 -2
  149. warp/tests/test_intersect.py +0 -2
  150. warp/tests/test_jax.py +0 -2
  151. warp/tests/test_large.py +11 -9
  152. warp/tests/test_launch.py +0 -2
  153. warp/tests/test_lerp.py +10 -54
  154. warp/tests/test_linear_solvers.py +3 -5
  155. warp/tests/test_lvalue.py +0 -2
  156. warp/tests/test_marching_cubes.py +0 -2
  157. warp/tests/test_mat.py +0 -2
  158. warp/tests/test_mat_lite.py +0 -2
  159. warp/tests/test_mat_scalar_ops.py +0 -2
  160. warp/tests/test_math.py +0 -2
  161. warp/tests/test_matmul.py +35 -37
  162. warp/tests/test_matmul_lite.py +29 -31
  163. warp/tests/test_mempool.py +0 -2
  164. warp/tests/test_mesh.py +0 -3
  165. warp/tests/test_mesh_query_aabb.py +0 -2
  166. warp/tests/test_mesh_query_point.py +0 -2
  167. warp/tests/test_mesh_query_ray.py +0 -2
  168. warp/tests/test_mlp.py +0 -2
  169. warp/tests/test_model.py +0 -2
  170. warp/tests/test_module_hashing.py +111 -0
  171. warp/tests/test_modules_lite.py +0 -3
  172. warp/tests/test_multigpu.py +0 -2
  173. warp/tests/test_noise.py +0 -4
  174. warp/tests/test_operators.py +0 -2
  175. warp/tests/test_options.py +0 -2
  176. warp/tests/test_peer.py +0 -2
  177. warp/tests/test_pinned.py +0 -2
  178. warp/tests/test_print.py +0 -2
  179. warp/tests/test_quat.py +0 -2
  180. warp/tests/test_rand.py +41 -5
  181. warp/tests/test_reload.py +0 -10
  182. warp/tests/test_rounding.py +0 -2
  183. warp/tests/test_runlength_encode.py +0 -2
  184. warp/tests/test_sim_grad.py +0 -2
  185. warp/tests/test_sim_kinematics.py +0 -2
  186. warp/tests/test_smoothstep.py +0 -2
  187. warp/tests/test_snippet.py +0 -2
  188. warp/tests/test_sparse.py +0 -2
  189. warp/tests/test_spatial.py +0 -2
  190. warp/tests/test_special_values.py +362 -0
  191. warp/tests/test_streams.py +0 -2
  192. warp/tests/test_struct.py +0 -2
  193. warp/tests/test_tape.py +0 -2
  194. warp/tests/test_torch.py +0 -2
  195. warp/tests/test_transient_module.py +0 -2
  196. warp/tests/test_types.py +0 -2
  197. warp/tests/test_utils.py +0 -2
  198. warp/tests/test_vec.py +0 -2
  199. warp/tests/test_vec_lite.py +0 -2
  200. warp/tests/test_vec_scalar_ops.py +0 -2
  201. warp/tests/test_verify_fp.py +0 -2
  202. warp/tests/test_volume.py +237 -13
  203. warp/tests/test_volume_write.py +86 -3
  204. warp/tests/unittest_serial.py +10 -9
  205. warp/tests/unittest_suites.py +6 -2
  206. warp/tests/unittest_utils.py +2 -171
  207. warp/tests/unused_test_misc.py +0 -2
  208. warp/tests/walkthrough_debug.py +1 -1
  209. warp/thirdparty/unittest_parallel.py +37 -40
  210. warp/types.py +526 -85
  211. {warp_lang-1.1.0.dist-info → warp_lang-1.2.1.dist-info}/METADATA +61 -31
  212. warp_lang-1.2.1.dist-info/RECORD +359 -0
  213. warp/examples/fem/example_convection_diffusion_dg0.py +0 -204
  214. warp/native/nanovdb/PNanoVDBWrite.h +0 -295
  215. warp_lang-1.1.0.dist-info/RECORD +0 -352
  216. {warp_lang-1.1.0.dist-info → warp_lang-1.2.1.dist-info}/LICENSE.md +0 -0
  217. {warp_lang-1.1.0.dist-info → warp_lang-1.2.1.dist-info}/WHEEL +0 -0
  218. {warp_lang-1.1.0.dist-info → warp_lang-1.2.1.dist-info}/top_level.txt +0 -0
warp/native/volume.cpp CHANGED
@@ -6,10 +6,10 @@
  * license agreement from NVIDIA CORPORATION is strictly prohibited.
  */
 
-#include "volume.h"
+#include "cuda_util.h"
 #include "volume_builder.h"
+#include "volume_impl.h"
 #include "warp.h"
-#include "cuda_util.h"
 
 #include <map>
 
@@ -21,40 +21,50 @@ namespace
 struct VolumeDesc
 {
     // NanoVDB buffer either in device or host memory
-    void* buffer;
+    void *buffer;
     uint64_t size_in_bytes;
+    bool owner; // whether the buffer should be deallocated when the volume is destroyed
 
-    // offset to the voxel values of the first leaf node relative to buffer
-    uint64_t first_voxel_data_offs;
-
-    // copy of the grids's metadata to keep on the host for device volumes
     pnanovdb_grid_t grid_data;
-
-    // copy of the tree's metadata to keep on the host for device volumes
     pnanovdb_tree_t tree_data;
 
+    // Host-accessible version of the blind metadata (copy if GPU, alias if CPU)
+    pnanovdb_gridblindmetadata_t *blind_metadata;
+
     // CUDA context for this volume (NULL if CPU)
-    void* context;
+    void *context;
+
+    pnanovdb_buf_t as_pnano() const
+    {
+        return pnanovdb_make_buf(static_cast<uint32_t *>(buffer), size_in_bytes);
+    }
 };
 
 // Host-side volume descriptors. Maps each CPU/GPU volume buffer address (id) to a CPU desc
 std::map<uint64_t, VolumeDesc> g_volume_descriptors;
 
-bool volume_get_descriptor(uint64_t id, VolumeDesc& volumeDesc)
+bool volume_get_descriptor(uint64_t id, const VolumeDesc *&volumeDesc)
 {
-    if (id == 0) return false;
+    if (id == 0)
+        return false;
 
-    const auto& iter = g_volume_descriptors.find(id);
+    const auto &iter = g_volume_descriptors.find(id);
     if (iter == g_volume_descriptors.end())
         return false;
     else
-        volumeDesc = iter->second;
+        volumeDesc = &iter->second;
     return true;
 }
 
-void volume_add_descriptor(uint64_t id, const VolumeDesc& volumeDesc)
+bool volume_exists(const void *id)
 {
-    g_volume_descriptors[id] = volumeDesc;
+    const VolumeDesc *volume;
+    return volume_get_descriptor((uint64_t)id, volume);
+}
+
+void volume_add_descriptor(uint64_t id, VolumeDesc &&volumeDesc)
+{
+    g_volume_descriptors[id] = std::move(volumeDesc);
 }
 
 void volume_rem_descriptor(uint64_t id)
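
Note on the refactor above: volume_get_descriptor now hands back a pointer to the stored VolumeDesc instead of copying it, and the descriptor gains an as_pnano() helper that wraps its raw buffer for the PNanoVDB accessors. A minimal usage sketch, not part of the diff (the helper name is hypothetical, and the declarations from the anonymous namespace above are assumed to be in scope):

    // Hypothetical helper: look up a registered volume and read a cached field.
    uint32_t example_leaf_count(uint64_t id)
    {
        const VolumeDesc *volume; // out-parameter, filled by volume_get_descriptor
        if (!volume_get_descriptor(id, volume))
            return 0; // unknown id

        const pnanovdb_buf_t buf = volume->as_pnano(); // buffer view for pnanovdb_* / volume:: accessors
        (void)buf;                                     // would be passed to those accessors
        return volume->tree_data.node_count_leaf;      // leaf (tile) count cached in the descriptor
    }
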
@@ -64,234 +74,456 @@ void volume_rem_descriptor(uint64_t id)
 
 } // anonymous namespace
 
-
 // NB: buf must be a host pointer
-uint64_t volume_create_host(void* buf, uint64_t size)
+uint64_t volume_create_host(void *buf, uint64_t size, bool copy, bool owner)
 {
-    if (size < sizeof(pnanovdb_grid_t) + sizeof(pnanovdb_tree_t))
-        return 0; // This cannot be a valid NanoVDB grid with data
+    if (size > 0 && size < sizeof(pnanovdb_grid_t) + sizeof(pnanovdb_tree_t))
+        return 0; // This cannot be a valid NanoVDB grid with data
 
-    VolumeDesc volume;
+    if (!copy && volume_exists(buf))
+    {
+        // descriptor already created for this volume
+        return 0;
+    }
 
+    VolumeDesc volume;
     volume.context = NULL;
 
     memcpy_h2h(&volume.grid_data, buf, sizeof(pnanovdb_grid_t));
-    memcpy_h2h(&volume.tree_data, (pnanovdb_grid_t*)buf + 1, sizeof(pnanovdb_tree_t));
+    memcpy_h2h(&volume.tree_data, (pnanovdb_grid_t *)buf + 1, sizeof(pnanovdb_tree_t));
 
-    if (volume.grid_data.magic != PNANOVDB_MAGIC_NUMBER)
+    if (volume.grid_data.magic != PNANOVDB_MAGIC_NUMBER && volume.grid_data.magic != PNANOVDB_MAGIC_GRID)
         return 0;
 
+    if (size == 0)
+    {
+        size = volume.grid_data.grid_size;
+    }
+
+    // Copy or alias buffer
     volume.size_in_bytes = size;
-    volume.buffer = alloc_host(size);
-    memcpy_h2h(volume.buffer, buf, size);
+    if (copy)
+    {
+        volume.buffer = alloc_host(size);
+        memcpy_h2h(volume.buffer, buf, size);
+        volume.owner = true;
+    }
+    else
+    {
+        volume.buffer = buf;
+        volume.owner = owner;
+    }
 
-    volume.first_voxel_data_offs =
-        sizeof(pnanovdb_grid_t) + volume.tree_data.node_offset_leaf + PNANOVDB_GRID_TYPE_GET(PNANOVDB_GRID_TYPE_FLOAT, leaf_off_table);
+    // Alias blind metadata
+    volume.blind_metadata = reinterpret_cast<pnanovdb_gridblindmetadata_t *>(static_cast<uint8_t *>(volume.buffer) +
+                                                                             volume.grid_data.blind_metadata_offset);
 
     uint64_t id = (uint64_t)volume.buffer;
 
-    volume_add_descriptor(id, volume);
+    volume_add_descriptor(id, std::move(volume));
 
     return id;
 }
 
 // NB: buf must be a pointer on the same device
-uint64_t volume_create_device(void* context, void* buf, uint64_t size)
+uint64_t volume_create_device(void *context, void *buf, uint64_t size, bool copy, bool owner)
 {
-    if (size < sizeof(pnanovdb_grid_t) + sizeof(pnanovdb_tree_t))
-        return 0; // This cannot be a valid NanoVDB grid with data
+    if (size > 0 && size < sizeof(pnanovdb_grid_t) + sizeof(pnanovdb_tree_t))
+        return 0; // This cannot be a valid NanoVDB grid with data
+
+    if (!copy && volume_exists(buf))
+    {
+        // descriptor already created for this volume
+        return 0;
+    }
 
     ContextGuard guard(context);
 
     VolumeDesc volume;
-
     volume.context = context ? context : cuda_context_get_current();
 
     memcpy_d2h(WP_CURRENT_CONTEXT, &volume.grid_data, buf, sizeof(pnanovdb_grid_t));
-    memcpy_d2h(WP_CURRENT_CONTEXT, &volume.tree_data, (pnanovdb_grid_t*)buf + 1, sizeof(pnanovdb_tree_t));
+    memcpy_d2h(WP_CURRENT_CONTEXT, &volume.tree_data, (pnanovdb_grid_t *)buf + 1, sizeof(pnanovdb_tree_t));
+    // no sync needed since the above copies are to pageable memory
 
-    if (volume.grid_data.magic != PNANOVDB_MAGIC_NUMBER)
+    if (volume.grid_data.magic != PNANOVDB_MAGIC_NUMBER && volume.grid_data.magic != PNANOVDB_MAGIC_GRID)
         return 0;
 
+    if (size == 0)
+    {
+        size = volume.grid_data.grid_size;
+    }
+
+    // Copy or alias data buffer
     volume.size_in_bytes = size;
-    volume.buffer = alloc_device(WP_CURRENT_CONTEXT, size);
-    memcpy_d2d(WP_CURRENT_CONTEXT, volume.buffer, buf, size);
+    if (copy)
+    {
+        volume.buffer = alloc_device(WP_CURRENT_CONTEXT, size);
+        memcpy_d2d(WP_CURRENT_CONTEXT, volume.buffer, buf, size);
+        volume.owner = true;
+    }
+    else
+    {
+        volume.buffer = buf;
+        volume.owner = owner;
+    }
 
-    volume.first_voxel_data_offs =
-        sizeof(pnanovdb_grid_t) + volume.tree_data.node_offset_leaf + PNANOVDB_GRID_TYPE_GET(PNANOVDB_GRID_TYPE_FLOAT, leaf_off_table);
+    // Make blind metadata accessible on host
+    const uint64_t blindmetadata_size = volume.grid_data.blind_metadata_count * sizeof(pnanovdb_gridblindmetadata_t);
+    volume.blind_metadata = static_cast<pnanovdb_gridblindmetadata_t *>(alloc_pinned(blindmetadata_size));
+    memcpy_d2h(WP_CURRENT_CONTEXT, volume.blind_metadata,
+               static_cast<uint8_t *>(volume.buffer) + volume.grid_data.blind_metadata_offset, blindmetadata_size);
 
     uint64_t id = (uint64_t)volume.buffer;
-
-    volume_add_descriptor(id, volume);
+    volume_add_descriptor(id, std::move(volume));
 
     return id;
 }
 
-static void volume_get_buffer_info(uint64_t id, void** buf, uint64_t* size)
+void volume_get_buffer_info(uint64_t id, void **buf, uint64_t *size)
 {
     *buf = 0;
     *size = 0;
 
-    VolumeDesc volume;
+    const VolumeDesc *volume;
     if (volume_get_descriptor(id, volume))
     {
-        *buf = volume.buffer;
-        *size = volume.size_in_bytes;
+        *buf = volume->buffer;
+        *size = volume->size_in_bytes;
     }
 }
 
-void volume_get_buffer_info_host(uint64_t id, void** buf, uint64_t* size)
+void volume_get_voxel_size(uint64_t id, float *dx, float *dy, float *dz)
 {
-    volume_get_buffer_info(id, buf, size);
+    *dx = *dx = *dz = 0.0f;
+
+    const VolumeDesc *volume;
+    if (volume_get_descriptor(id, volume))
+    {
+        *dx = (float)volume->grid_data.voxel_size[0];
+        *dy = (float)volume->grid_data.voxel_size[1];
+        *dz = (float)volume->grid_data.voxel_size[2];
+    }
 }
 
-void volume_get_buffer_info_device(uint64_t id, void** buf, uint64_t* size)
+void volume_get_tile_and_voxel_count(uint64_t id, uint32_t &tile_count, uint64_t &voxel_count)
 {
-    volume_get_buffer_info(id, buf, size);
+    tile_count = 0;
+    voxel_count = 0;
+
+    const VolumeDesc *volume;
+    if (volume_get_descriptor(id, volume))
+    {
+        tile_count = volume->tree_data.node_count_leaf;
+
+        const uint32_t grid_type = volume->grid_data.grid_type;
+
+        switch (grid_type)
+        {
+        case PNANOVDB_GRID_TYPE_ONINDEX:
+        case PNANOVDB_GRID_TYPE_ONINDEXMASK:
+            // number of indexable voxels is number of active voxels
+            voxel_count = volume->tree_data.voxel_count;
+            break;
+        default:
+            // all leaf voxels are indexable
+            voxel_count = uint64_t(tile_count) * PNANOVDB_LEAF_TABLE_COUNT;
+        }
+    }
 }
 
-void volume_get_voxel_size(uint64_t id, float* dx, float* dy, float* dz)
+const char *volume_get_grid_info(uint64_t id, uint64_t *grid_size, uint32_t *grid_index, uint32_t *grid_count,
+                                 float translation[3], float transform[9], char type_str[16])
 {
-    *dx = *dx = *dz = 0.0f;
+    const VolumeDesc *volume;
+    if (volume_get_descriptor(id, volume))
+    {
+        const pnanovdb_grid_t &grid_data = volume->grid_data;
+        *grid_count = grid_data.grid_count;
+        *grid_index = grid_data.grid_index;
+        *grid_size = grid_data.grid_size;
 
-    VolumeDesc volume;
+        memcpy(translation, grid_data.map.vecf, sizeof(grid_data.map.vecf));
+        memcpy(transform, grid_data.map.matf, sizeof(grid_data.map.matf));
+
+        nanovdb::toStr(type_str, static_cast<nanovdb::GridType>(grid_data.grid_type));
+        return (const char *)grid_data.grid_name;
+    }
+
+    *grid_size = 0;
+    *grid_index = 0;
+    *grid_count = 0;
+    type_str[0] = 0;
+
+    return nullptr;
+}
+
+uint32_t volume_get_blind_data_count(uint64_t id)
+{
+    const VolumeDesc *volume;
     if (volume_get_descriptor(id, volume))
     {
-        *dx = (float)volume.grid_data.voxel_size[0];
-        *dy = (float)volume.grid_data.voxel_size[1];
-        *dz = (float)volume.grid_data.voxel_size[2];
+        return volume->grid_data.blind_metadata_count;
     }
+    return 0;
 }
 
-void volume_get_tiles_host(uint64_t id, void** buf, uint64_t* size)
+const char *volume_get_blind_data_info(uint64_t id, uint32_t data_index, void **buf, uint64_t *value_count,
+                                       uint32_t *value_size, char type_str[16])
 {
-    static constexpr uint32_t MASK = (1u << 3u) - 1u; // mask for bit operations
+    const VolumeDesc *volume;
+    if (volume_get_descriptor(id, volume) && data_index < volume->grid_data.blind_metadata_count)
+    {
+        const pnanovdb_gridblindmetadata_t &metadata = volume->blind_metadata[data_index];
+        *value_count = metadata.value_count;
+        *value_size = metadata.value_size;
+
+        nanovdb::toStr(type_str, static_cast<nanovdb::GridType>(metadata.data_type));
+        *buf = static_cast<uint8_t *>(volume->buffer) + volume->grid_data.blind_metadata_offset +
+               data_index * sizeof(pnanovdb_gridblindmetadata_t) + metadata.data_offset;
+        return (const char *)metadata.name;
+    }
+    *buf = nullptr;
+    *value_count = 0;
+    *value_size = 0;
+    type_str[0] = 0;
+    return nullptr;
+}
 
-    *buf = 0;
-    *size = 0;
+void volume_get_tiles_host(uint64_t id, void *buf)
+{
+    static constexpr uint32_t MASK = (1u << 3u) - 1u; // mask for bit operations
 
-    VolumeDesc volume;
+    const VolumeDesc *volume;
     if (volume_get_descriptor(id, volume))
     {
-        const uint32_t leaf_count = volume.tree_data.node_count_leaf;
-        *size = leaf_count * sizeof(pnanovdb_coord_t);
+        const uint32_t leaf_count = volume->tree_data.node_count_leaf;
+
+        pnanovdb_coord_t *leaf_coords = static_cast<pnanovdb_coord_t *>(buf);
+
+        const uint64_t first_leaf =
+            (uint64_t)volume->buffer + sizeof(pnanovdb_grid_t) + volume->tree_data.node_offset_leaf;
+        const uint32_t leaf_stride = PNANOVDB_GRID_TYPE_GET(volume->grid_data.grid_type, leaf_size);
+
+        const pnanovdb_buf_t pnano_buf = volume->as_pnano();
+
+        for (uint32_t i = 0; i < leaf_count; ++i)
+        {
+            pnanovdb_leaf_handle_t leaf = volume::get_leaf(pnano_buf, i);
+            leaf_coords[i] = volume::leaf_origin(pnano_buf, leaf);
+        }
+    }
+}
 
-        pnanovdb_coord_t *leaf_coords = (pnanovdb_coord_t*)alloc_host(*size);
-        *buf = leaf_coords;
+void volume_get_voxels_host(uint64_t id, void *buf)
+{
+    const VolumeDesc *volume;
+    if (volume_get_descriptor(id, volume))
+    {
+        uint32_t leaf_count;
+        uint64_t voxel_count;
+        volume_get_tile_and_voxel_count(id, leaf_count, voxel_count);
 
-        const uint64_t first_leaf = (uint64_t)volume.buffer + sizeof(pnanovdb_grid_t) + volume.tree_data.node_offset_leaf;
-        const uint32_t leaf_stride = PNANOVDB_GRID_TYPE_GET(volume.grid_data.grid_type, leaf_size);
+        pnanovdb_coord_t *voxel_coords = static_cast<pnanovdb_coord_t *>(buf);
 
+        const pnanovdb_buf_t pnano_buf = volume->as_pnano();
         for (uint32_t i = 0; i < leaf_count; ++i)
         {
-            leaf_coords[i] = ((pnanovdb_leaf_t*)(first_leaf + leaf_stride * i))->bbox_min;
-            leaf_coords[i].x &= ~MASK;
-            leaf_coords[i].y &= ~MASK;
-            leaf_coords[i].z &= ~MASK;
+            pnanovdb_leaf_handle_t leaf = volume::get_leaf(pnano_buf, i);
+            pnanovdb_coord_t leaf_coords = volume::leaf_origin(pnano_buf, leaf);
+
+            for (uint32_t n = 0; n < 512; ++n)
+            {
+                pnanovdb_coord_t loc_ijk = volume::leaf_offset_to_local_coord(n);
+                pnanovdb_coord_t ijk = {
+                    loc_ijk.x + leaf_coords.x,
+                    loc_ijk.y + leaf_coords.y,
+                    loc_ijk.z + leaf_coords.z,
+                };
+
+                const uint64_t index = volume::leaf_voxel_index(pnano_buf, i, ijk);
+                if (index < voxel_count)
+                {
+                    voxel_coords[index] = ijk;
+                }
+            }
         }
     }
 }
 
 void volume_destroy_host(uint64_t id)
 {
-    free_host((void*)id);
-    volume_rem_descriptor(id);
+    const VolumeDesc *volume;
+    if (volume_get_descriptor(id, volume))
+    {
+        if (volume->owner)
+        {
+            free_host(volume->buffer);
+        }
+        volume_rem_descriptor(id);
+    }
 }
 
 void volume_destroy_device(uint64_t id)
 {
-    VolumeDesc volume;
+    const VolumeDesc *volume;
     if (volume_get_descriptor(id, volume))
     {
-        ContextGuard guard(volume.context);
-        free_device(WP_CURRENT_CONTEXT, volume.buffer);
+        ContextGuard guard(volume->context);
+        if (volume->owner)
+        {
+            free_device(WP_CURRENT_CONTEXT, volume->buffer);
+        }
+        free_pinned(volume->blind_metadata);
         volume_rem_descriptor(id);
     }
 }
 
-
 #if WP_ENABLE_CUDA
-uint64_t volume_f_from_tiles_device(void* context, void* points, int num_points, float voxel_size, float bg_value, float tx, float ty, float tz, bool points_in_world_space)
+uint64_t volume_f_from_tiles_device(void *context, void *points, int num_points, float voxel_size, float bg_value,
+                                    float tx, float ty, float tz, bool points_in_world_space)
 {
-    nanovdb::FloatGrid* grid;
+    nanovdb::FloatGrid *grid;
     size_t gridSize;
     BuildGridParams<float> params;
     params.voxel_size = voxel_size;
     params.background_value = bg_value;
     params.translation = nanovdb::Vec3f{tx, ty, tz};
 
-    build_grid_from_tiles(grid, gridSize, points, num_points, points_in_world_space, params);
+    build_grid_from_points(grid, gridSize, points, num_points, points_in_world_space, params);
 
-    return volume_create_device(context, grid, gridSize);
+    return volume_create_device(context, grid, gridSize, false, true);
 }
 
-uint64_t volume_v_from_tiles_device(void* context, void* points, int num_points, float voxel_size, float bg_value_x, float bg_value_y, float bg_value_z, float tx, float ty, float tz, bool points_in_world_space)
+uint64_t volume_v_from_tiles_device(void *context, void *points, int num_points, float voxel_size, float bg_value_x,
+                                    float bg_value_y, float bg_value_z, float tx, float ty, float tz,
+                                    bool points_in_world_space)
 {
-    nanovdb::Vec3fGrid* grid;
+    nanovdb::Vec3fGrid *grid;
     size_t gridSize;
     BuildGridParams<nanovdb::Vec3f> params;
     params.voxel_size = voxel_size;
     params.background_value = nanovdb::Vec3f{bg_value_x, bg_value_y, bg_value_z};
     params.translation = nanovdb::Vec3f{tx, ty, tz};
 
-    build_grid_from_tiles(grid, gridSize, points, num_points, points_in_world_space, params);
+    build_grid_from_points(grid, gridSize, points, num_points, points_in_world_space, params);
 
-    return volume_create_device(context, grid, gridSize);
+    return volume_create_device(context, grid, gridSize, false, true);
 }
 
-uint64_t volume_i_from_tiles_device(void* context, void* points, int num_points, float voxel_size, int bg_value, float tx, float ty, float tz, bool points_in_world_space)
+uint64_t volume_i_from_tiles_device(void *context, void *points, int num_points, float voxel_size, int bg_value,
+                                    float tx, float ty, float tz, bool points_in_world_space)
 {
-    nanovdb::Int32Grid* grid;
+    nanovdb::Int32Grid *grid;
     size_t gridSize;
     BuildGridParams<int32_t> params;
     params.voxel_size = voxel_size;
     params.background_value = (int32_t)(bg_value);
     params.translation = nanovdb::Vec3f{tx, ty, tz};
 
-    build_grid_from_tiles(grid, gridSize, points, num_points, points_in_world_space, params);
+    build_grid_from_points(grid, gridSize, points, num_points, points_in_world_space, params);
 
-    return volume_create_device(context, grid, gridSize);
+    return volume_create_device(context, grid, gridSize, false, true);
 }
 
-void launch_get_leaf_coords(void* context, const uint32_t leaf_count, pnanovdb_coord_t *leaf_coords, const uint64_t first_leaf, const uint32_t leaf_stride);
+uint64_t volume_index_from_tiles_device(void *context, void *points, int num_points, float voxel_size, float tx,
+                                        float ty, float tz, bool points_in_world_space)
+{
+    nanovdb::IndexGrid *grid;
+    size_t gridSize;
+    BuildGridParams<nanovdb::ValueIndex> params;
+    params.voxel_size = voxel_size;
+    params.translation = nanovdb::Vec3f{tx, ty, tz};
 
-void volume_get_tiles_device(uint64_t id, void** buf, uint64_t* size)
+    build_grid_from_points(grid, gridSize, points, num_points, points_in_world_space, params);
+
+    return volume_create_device(context, grid, gridSize, false, true);
+}
+
+uint64_t volume_from_active_voxels_device(void *context, void *points, int num_points, float voxel_size, float tx,
+                                          float ty, float tz, bool points_in_world_space)
 {
-    *buf = 0;
-    *size = 0;
+    nanovdb::OnIndexGrid *grid;
+    size_t gridSize;
+    BuildGridParams<nanovdb::ValueOnIndex> params;
+    params.voxel_size = voxel_size;
+    params.translation = nanovdb::Vec3f{tx, ty, tz};
 
-    VolumeDesc volume;
+    build_grid_from_points(grid, gridSize, points, num_points, points_in_world_space, params);
+
+    return volume_create_device(context, grid, gridSize, false, true);
+}
+
+void launch_get_leaf_coords(void *context, const uint32_t leaf_count, pnanovdb_coord_t *leaf_coords,
+                            pnanovdb_buf_t buf);
+void launch_get_voxel_coords(void *context, const uint32_t leaf_count, const uint32_t voxel_count,
+                             pnanovdb_coord_t *voxel_coords, pnanovdb_buf_t buf);
+
+void volume_get_tiles_device(uint64_t id, void *buf)
+{
+    const VolumeDesc *volume;
     if (volume_get_descriptor(id, volume))
     {
-        const uint32_t leaf_count = volume.tree_data.node_count_leaf;
-        *size = leaf_count * sizeof(pnanovdb_coord_t);
+        const uint32_t leaf_count = volume->tree_data.node_count_leaf;
 
-        pnanovdb_coord_t *leaf_coords = (pnanovdb_coord_t*)alloc_device(volume.context, *size);
-        *buf = leaf_coords;
+        pnanovdb_coord_t *leaf_coords = static_cast<pnanovdb_coord_t *>(buf);
+        launch_get_leaf_coords(volume->context, leaf_count, leaf_coords, volume->as_pnano());
+    }
+}
 
-        const uint64_t first_leaf = (uint64_t)volume.buffer + sizeof(pnanovdb_grid_t) + volume.tree_data.node_offset_leaf;
-        const uint32_t leaf_stride = PNANOVDB_GRID_TYPE_GET(volume.grid_data.grid_type, leaf_size);
+void volume_get_voxels_device(uint64_t id, void *buf)
+{
+    const VolumeDesc *volume;
+    if (volume_get_descriptor(id, volume))
+    {
+        uint32_t leaf_count;
+        uint64_t voxel_count;
+        volume_get_tile_and_voxel_count(id, leaf_count, voxel_count);
 
-        launch_get_leaf_coords(volume.context, leaf_count, leaf_coords, first_leaf, leaf_stride);
+        pnanovdb_coord_t *voxel_coords = static_cast<pnanovdb_coord_t *>(buf);
+        launch_get_voxel_coords(volume->context, leaf_count, voxel_count, voxel_coords, volume->as_pnano());
     }
 }
+
 #else
 // stubs for non-CUDA platforms
-uint64_t volume_f_from_tiles_device(void* context, void* points, int num_points, float voxel_size, float bg_value, float tx, float ty, float tz, bool points_in_world_space)
+uint64_t volume_f_from_tiles_device(void *context, void *points, int num_points, float voxel_size, float bg_value,
+                                    float tx, float ty, float tz, bool points_in_world_space)
+{
+    return 0;
+}
+
+uint64_t volume_v_from_tiles_device(void *context, void *points, int num_points, float voxel_size, float bg_value_x,
+                                    float bg_value_y, float bg_value_z, float tx, float ty, float tz,
+                                    bool points_in_world_space)
+{
+    return 0;
+}
+
+uint64_t volume_i_from_tiles_device(void *context, void *points, int num_points, float voxel_size, int bg_value,
+                                    float tx, float ty, float tz, bool points_in_world_space)
 {
     return 0;
 }
 
-uint64_t volume_v_from_tiles_device(void* context, void* points, int num_points, float voxel_size, float bg_value_x, float bg_value_y, float bg_value_z, float tx, float ty, float tz, bool points_in_world_space)
+uint64_t volume_index_from_tiles_device(void *context, void *points, int num_points, float voxel_size,
+                                        float tx, float ty, float tz, bool points_in_world_space)
 {
     return 0;
 }
 
-uint64_t volume_i_from_tiles_device(void* context, void* points, int num_points, float voxel_size, int bg_value, float tx, float ty, float tz, bool points_in_world_space)
+uint64_t volume_from_active_voxels_device(void *context, void *points, int num_points, float voxel_size, float tx,
+                                          float ty, float tz, bool points_in_world_space)
 {
     return 0;
 }
 
-void volume_get_tiles_device(uint64_t id, void** buf, uint64_t* size) {}
+void volume_get_tiles_device(uint64_t id, void *buf)
+{
+}
+
+void volume_get_voxels_device(uint64_t id, void *buf)
+{
+}
 
 #endif
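
Note on the calling-convention change visible above: volume_get_tiles_host/device no longer allocate a result buffer and return (buf, size); together with the new volume_get_voxels_host/device they fill a caller-provided array, sized beforehand with volume_get_tile_and_voxel_count. A minimal host-side sketch of that sequence, not part of the diff (the helper name and the use of std::vector are illustrative, and the includes of volume.cpp are assumed):

    #include <vector>

    // Hypothetical caller: query the counts first, then let the API fill pre-allocated arrays in place.
    void example_query_host_volume(uint64_t volume_id)
    {
        uint32_t tile_count = 0;
        uint64_t voxel_count = 0;
        volume_get_tile_and_voxel_count(volume_id, tile_count, voxel_count);

        // One pnanovdb_coord_t per leaf tile and one per indexable voxel.
        std::vector<pnanovdb_coord_t> tiles(tile_count);
        std::vector<pnanovdb_coord_t> voxels(voxel_count);

        volume_get_tiles_host(volume_id, tiles.data());
        volume_get_voxels_host(volume_id, voxels.data());
    }

Relatedly, volume_create_host and volume_create_device now take copy and owner flags: with copy=false an existing NanoVDB buffer is aliased rather than duplicated (and registered at most once, guarded by volume_exists), and owner decides whether volume_destroy_host/device frees it. The *_from_tiles_device builders pass (false, true) so the freshly built grid is adopted without an extra copy.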