warp-lang 1.2.1-py3-none-win_amd64.whl → 1.3.0-py3-none-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of warp-lang might be problematic.
Files changed (194)
  1. warp/__init__.py +8 -6
  2. warp/autograd.py +823 -0
  3. warp/bin/warp-clang.dll +0 -0
  4. warp/bin/warp.dll +0 -0
  5. warp/build.py +6 -2
  6. warp/builtins.py +1410 -886
  7. warp/codegen.py +503 -166
  8. warp/config.py +48 -18
  9. warp/context.py +401 -199
  10. warp/dlpack.py +8 -0
  11. warp/examples/assets/bunny.usd +0 -0
  12. warp/examples/benchmarks/benchmark_cloth_warp.py +1 -1
  13. warp/examples/benchmarks/benchmark_interop_torch.py +158 -0
  14. warp/examples/benchmarks/benchmark_launches.py +1 -1
  15. warp/examples/core/example_cupy.py +78 -0
  16. warp/examples/fem/example_apic_fluid.py +17 -36
  17. warp/examples/fem/example_burgers.py +9 -18
  18. warp/examples/fem/example_convection_diffusion.py +7 -17
  19. warp/examples/fem/example_convection_diffusion_dg.py +27 -47
  20. warp/examples/fem/example_deformed_geometry.py +11 -22
  21. warp/examples/fem/example_diffusion.py +7 -18
  22. warp/examples/fem/example_diffusion_3d.py +24 -28
  23. warp/examples/fem/example_diffusion_mgpu.py +7 -14
  24. warp/examples/fem/example_magnetostatics.py +190 -0
  25. warp/examples/fem/example_mixed_elasticity.py +111 -80
  26. warp/examples/fem/example_navier_stokes.py +30 -34
  27. warp/examples/fem/example_nonconforming_contact.py +290 -0
  28. warp/examples/fem/example_stokes.py +17 -32
  29. warp/examples/fem/example_stokes_transfer.py +12 -21
  30. warp/examples/fem/example_streamlines.py +350 -0
  31. warp/examples/fem/utils.py +936 -0
  32. warp/fabric.py +5 -2
  33. warp/fem/__init__.py +13 -3
  34. warp/fem/cache.py +161 -11
  35. warp/fem/dirichlet.py +37 -28
  36. warp/fem/domain.py +105 -14
  37. warp/fem/field/__init__.py +14 -3
  38. warp/fem/field/field.py +454 -11
  39. warp/fem/field/nodal_field.py +33 -18
  40. warp/fem/geometry/deformed_geometry.py +50 -15
  41. warp/fem/geometry/hexmesh.py +12 -24
  42. warp/fem/geometry/nanogrid.py +106 -31
  43. warp/fem/geometry/quadmesh_2d.py +6 -11
  44. warp/fem/geometry/tetmesh.py +103 -61
  45. warp/fem/geometry/trimesh_2d.py +98 -47
  46. warp/fem/integrate.py +231 -186
  47. warp/fem/operator.py +14 -9
  48. warp/fem/quadrature/pic_quadrature.py +35 -9
  49. warp/fem/quadrature/quadrature.py +119 -32
  50. warp/fem/space/basis_space.py +98 -22
  51. warp/fem/space/collocated_function_space.py +3 -1
  52. warp/fem/space/function_space.py +7 -2
  53. warp/fem/space/grid_2d_function_space.py +3 -3
  54. warp/fem/space/grid_3d_function_space.py +4 -4
  55. warp/fem/space/hexmesh_function_space.py +3 -2
  56. warp/fem/space/nanogrid_function_space.py +12 -14
  57. warp/fem/space/partition.py +45 -47
  58. warp/fem/space/restriction.py +19 -16
  59. warp/fem/space/shape/cube_shape_function.py +91 -3
  60. warp/fem/space/shape/shape_function.py +7 -0
  61. warp/fem/space/shape/square_shape_function.py +32 -0
  62. warp/fem/space/shape/tet_shape_function.py +11 -7
  63. warp/fem/space/shape/triangle_shape_function.py +10 -1
  64. warp/fem/space/topology.py +116 -42
  65. warp/fem/types.py +8 -1
  66. warp/fem/utils.py +301 -83
  67. warp/native/array.h +16 -0
  68. warp/native/builtin.h +0 -15
  69. warp/native/cuda_util.cpp +14 -6
  70. warp/native/exports.h +1348 -1308
  71. warp/native/quat.h +79 -0
  72. warp/native/rand.h +27 -4
  73. warp/native/sparse.cpp +83 -81
  74. warp/native/sparse.cu +381 -453
  75. warp/native/vec.h +64 -0
  76. warp/native/volume.cpp +40 -49
  77. warp/native/volume_builder.cu +2 -3
  78. warp/native/volume_builder.h +12 -17
  79. warp/native/warp.cu +3 -3
  80. warp/native/warp.h +69 -59
  81. warp/render/render_opengl.py +17 -9
  82. warp/sim/articulation.py +117 -17
  83. warp/sim/collide.py +35 -29
  84. warp/sim/model.py +123 -18
  85. warp/sim/render.py +3 -1
  86. warp/sparse.py +867 -203
  87. warp/stubs.py +312 -541
  88. warp/tape.py +29 -1
  89. warp/tests/disabled_kinematics.py +1 -1
  90. warp/tests/test_adam.py +1 -1
  91. warp/tests/test_arithmetic.py +1 -1
  92. warp/tests/test_array.py +58 -1
  93. warp/tests/test_array_reduce.py +1 -1
  94. warp/tests/test_async.py +1 -1
  95. warp/tests/test_atomic.py +1 -1
  96. warp/tests/test_bool.py +1 -1
  97. warp/tests/test_builtins_resolution.py +1 -1
  98. warp/tests/test_bvh.py +6 -1
  99. warp/tests/test_closest_point_edge_edge.py +1 -1
  100. warp/tests/test_codegen.py +66 -1
  101. warp/tests/test_compile_consts.py +1 -1
  102. warp/tests/test_conditional.py +1 -1
  103. warp/tests/test_copy.py +1 -1
  104. warp/tests/test_ctypes.py +1 -1
  105. warp/tests/test_dense.py +1 -1
  106. warp/tests/test_devices.py +1 -1
  107. warp/tests/test_dlpack.py +1 -1
  108. warp/tests/test_examples.py +33 -4
  109. warp/tests/test_fabricarray.py +5 -2
  110. warp/tests/test_fast_math.py +1 -1
  111. warp/tests/test_fem.py +213 -6
  112. warp/tests/test_fp16.py +1 -1
  113. warp/tests/test_func.py +1 -1
  114. warp/tests/test_future_annotations.py +90 -0
  115. warp/tests/test_generics.py +1 -1
  116. warp/tests/test_grad.py +1 -1
  117. warp/tests/test_grad_customs.py +1 -1
  118. warp/tests/test_grad_debug.py +247 -0
  119. warp/tests/test_hash_grid.py +6 -1
  120. warp/tests/test_implicit_init.py +354 -0
  121. warp/tests/test_import.py +1 -1
  122. warp/tests/test_indexedarray.py +1 -1
  123. warp/tests/test_intersect.py +1 -1
  124. warp/tests/test_jax.py +1 -1
  125. warp/tests/test_large.py +1 -1
  126. warp/tests/test_launch.py +1 -1
  127. warp/tests/test_lerp.py +1 -1
  128. warp/tests/test_linear_solvers.py +1 -1
  129. warp/tests/test_lvalue.py +1 -1
  130. warp/tests/test_marching_cubes.py +5 -2
  131. warp/tests/test_mat.py +34 -35
  132. warp/tests/test_mat_lite.py +2 -1
  133. warp/tests/test_mat_scalar_ops.py +1 -1
  134. warp/tests/test_math.py +1 -1
  135. warp/tests/test_matmul.py +20 -16
  136. warp/tests/test_matmul_lite.py +1 -1
  137. warp/tests/test_mempool.py +1 -1
  138. warp/tests/test_mesh.py +5 -2
  139. warp/tests/test_mesh_query_aabb.py +1 -1
  140. warp/tests/test_mesh_query_point.py +1 -1
  141. warp/tests/test_mesh_query_ray.py +1 -1
  142. warp/tests/test_mlp.py +1 -1
  143. warp/tests/test_model.py +1 -1
  144. warp/tests/test_module_hashing.py +77 -1
  145. warp/tests/test_modules_lite.py +1 -1
  146. warp/tests/test_multigpu.py +1 -1
  147. warp/tests/test_noise.py +1 -1
  148. warp/tests/test_operators.py +1 -1
  149. warp/tests/test_options.py +1 -1
  150. warp/tests/test_overwrite.py +542 -0
  151. warp/tests/test_peer.py +1 -1
  152. warp/tests/test_pinned.py +1 -1
  153. warp/tests/test_print.py +1 -1
  154. warp/tests/test_quat.py +15 -1
  155. warp/tests/test_rand.py +1 -1
  156. warp/tests/test_reload.py +1 -1
  157. warp/tests/test_rounding.py +1 -1
  158. warp/tests/test_runlength_encode.py +1 -1
  159. warp/tests/test_scalar_ops.py +95 -0
  160. warp/tests/test_sim_grad.py +1 -1
  161. warp/tests/test_sim_kinematics.py +1 -1
  162. warp/tests/test_smoothstep.py +1 -1
  163. warp/tests/test_sparse.py +82 -15
  164. warp/tests/test_spatial.py +1 -1
  165. warp/tests/test_special_values.py +2 -11
  166. warp/tests/test_streams.py +11 -1
  167. warp/tests/test_struct.py +1 -1
  168. warp/tests/test_tape.py +1 -1
  169. warp/tests/test_torch.py +194 -1
  170. warp/tests/test_transient_module.py +1 -1
  171. warp/tests/test_types.py +1 -1
  172. warp/tests/test_utils.py +1 -1
  173. warp/tests/test_vec.py +15 -63
  174. warp/tests/test_vec_lite.py +2 -1
  175. warp/tests/test_vec_scalar_ops.py +122 -39
  176. warp/tests/test_verify_fp.py +1 -1
  177. warp/tests/test_volume.py +28 -2
  178. warp/tests/test_volume_write.py +1 -1
  179. warp/tests/unittest_serial.py +1 -1
  180. warp/tests/unittest_suites.py +9 -1
  181. warp/tests/walkthrough_debug.py +1 -1
  182. warp/thirdparty/unittest_parallel.py +2 -5
  183. warp/torch.py +103 -41
  184. warp/types.py +344 -227
  185. warp/utils.py +11 -2
  186. {warp_lang-1.2.1.dist-info → warp_lang-1.3.0.dist-info}/METADATA +99 -46
  187. warp_lang-1.3.0.dist-info/RECORD +368 -0
  188. warp/examples/fem/bsr_utils.py +0 -378
  189. warp/examples/fem/mesh_utils.py +0 -133
  190. warp/examples/fem/plot_utils.py +0 -292
  191. warp_lang-1.2.1.dist-info/RECORD +0 -359
  192. {warp_lang-1.2.1.dist-info → warp_lang-1.3.0.dist-info}/LICENSE.md +0 -0
  193. {warp_lang-1.2.1.dist-info → warp_lang-1.3.0.dist-info}/WHEEL +0 -0
  194. {warp_lang-1.2.1.dist-info → warp_lang-1.3.0.dist-info}/top_level.txt +0 -0
warp/torch.py CHANGED
@@ -10,12 +10,41 @@ import ctypes
 import numpy
 
 import warp
+import warp.context
 
 
 # return the warp device corresponding to a torch device
 def device_from_torch(torch_device) -> warp.context.Device:
-    """Return the Warp device corresponding to a Torch device."""
-    return warp.get_device(str(torch_device))
+    """Return the Warp device corresponding to a Torch device.
+
+    Args:
+        torch_device (`torch.device` or `str`): Torch device identifier
+
+    Raises:
+        RuntimeError: Torch device does not have a corresponding Warp device
+    """
+    if type(torch_device) is str:
+        warp_device = warp.context.runtime.device_map.get(torch_device)
+        if warp_device is not None:
+            return warp_device
+        elif torch_device == "cuda":
+            return warp.context.runtime.get_current_cuda_device()
+        else:
+            raise RuntimeError(f"Unsupported Torch device {torch_device}")
+    else:
+        try:
+            if torch_device.type == "cuda":
+                return warp.context.runtime.cuda_devices[torch_device.index]
+            elif torch_device.type == "cpu":
+                return warp.context.runtime.cpu_device
+            else:
+                raise RuntimeError(f"Unsupported Torch device type {torch_device.type}")
+        except Exception as e:
+            import torch
+
+            if not isinstance(torch_device, torch.device):
+                raise ValueError("Argument must be a torch.device object or a string") from e
+            raise
 
 
 def device_to_torch(warp_device: warp.context.Devicelike) -> str:
@@ -154,16 +183,17 @@ dtype_is_compatible.compatible_sets = None
 
 
 # wrap a torch tensor to a wp array, data is not copied
-def from_torch(t, dtype=None, requires_grad=None, grad=None):
+def from_torch(t, dtype=None, requires_grad=None, grad=None, return_ctype=False):
     """Convert a Torch tensor to a Warp array without copying the data.
 
     Args:
         t (torch.Tensor): The torch tensor to wrap.
         dtype (warp.dtype, optional): The target data type of the resulting Warp array. Defaults to the tensor value type mapped to a Warp array value type.
         requires_grad (bool, optional): Whether the resulting array should wrap the tensor's gradient, if it exists (the grad tensor will be allocated otherwise). Defaults to the tensor's `requires_grad` value.
+        return_ctype (bool, optional): Whether to return a low-level array descriptor instead of a ``wp.array`` object (faster). The descriptor can be passed to Warp kernels.
 
     Returns:
-        warp.array: The wrapped array.
+        warp.array: The wrapped array or array descriptor.
     """
     if dtype is None:
         dtype = dtype_from_torch(t.dtype)
@@ -175,7 +205,6 @@ def from_torch(t, dtype=None, requires_grad=None, grad=None):
 
     shape = tuple(t.shape)
     strides = tuple(s * ctype_size for s in t.stride())
-    device = device_from_torch(t.device)
 
     # if target is a vector or matrix type
     # then check if trailing dimensions match
@@ -183,57 +212,90 @@ def from_torch(t, dtype=None, requires_grad=None, grad=None):
     if hasattr(dtype, "_shape_"):
         dtype_shape = dtype._shape_
         dtype_dims = len(dtype._shape_)
+        # ensure inner shape matches
         if dtype_dims > len(shape) or dtype_shape != shape[-dtype_dims:]:
             raise RuntimeError(
                 f"Could not convert Torch tensor with shape {shape} to Warp array with dtype={dtype}, ensure that source inner shape is {dtype_shape}"
             )
-
-        # ensure the inner strides are contiguous
-        stride = ctype_size
-        for i in range(dtype_dims):
-            if strides[-i - 1] != stride:
-                raise RuntimeError(
-                    f"Could not convert Torch tensor with shape {shape} to Warp array with dtype={dtype}, because the source inner strides are not contiguous"
-                )
-            stride *= dtype_shape[-i - 1]
-
+        # ensure inner strides are contiguous
+        if strides[-1] != ctype_size or (dtype_dims > 1 and strides[-2] != ctype_size * dtype_shape[-1]):
+            raise RuntimeError(
+                f"Could not convert Torch tensor with shape {shape} to Warp array with dtype={dtype}, because the source inner strides are not contiguous"
+            )
+        # trim shape and strides
         shape = tuple(shape[:-dtype_dims]) or (1,)
         strides = tuple(strides[:-dtype_dims]) or (ctype_size,)
 
+    # gradient
+    # - if return_ctype is False, we set `grad` to a wp.array or None
+    # - if return_ctype is True, we set `grad_ptr` and set `grad` as the owner (wp.array or torch.Tensor)
     requires_grad = t.requires_grad if requires_grad is None else requires_grad
+    grad_ptr = 0
     if grad is not None:
-        if not isinstance(grad, warp.array):
-            import torch
-
-            if isinstance(grad, torch.Tensor):
-                grad = from_torch(grad, dtype=dtype)
+        if isinstance(grad, warp.array):
+            if return_ctype:
+                if grad.strides != strides:
+                    raise RuntimeError(
+                        f"Gradient strides must match array strides, expected {strides} but got {grad.strides}"
+                    )
+                grad_ptr = grad.ptr
+        else:
+            # assume grad is a torch.Tensor
+            if return_ctype:
+                if t.stride() != grad.stride():
+                    raise RuntimeError(
+                        f"Gradient strides must match array strides, expected {t.stride()} but got {grad.stride()}"
+                    )
+                grad_ptr = grad.data_ptr()
             else:
-                raise ValueError(f"Invalid gradient type: {type(grad)}")
+                grad = from_torch(grad, dtype=dtype, requires_grad=False)
     elif requires_grad:
         # wrap the tensor gradient, allocate if necessary
-        if t.grad is None:
+        if t.grad is not None:
+            if return_ctype:
+                grad = t.grad
+                if t.stride() != grad.stride():
+                    raise RuntimeError(
+                        f"Gradient strides must match array strides, expected {t.stride()} but got {grad.stride()}"
+                    )
+                grad_ptr = grad.data_ptr()
+            else:
+                grad = from_torch(t.grad, dtype=dtype, requires_grad=False)
+        else:
             # allocate a zero-filled gradient if it doesn't exist
             # Note: we use Warp to allocate the shared gradient with compatible strides
-            grad = warp.zeros(dtype=dtype, shape=shape, strides=strides, device=device)
+            grad = warp.zeros(dtype=dtype, shape=shape, strides=strides, device=device_from_torch(t.device))
             t.grad = to_torch(grad, requires_grad=False)
-        else:
-            # TODO: this will fail if the strides are incompatible
-            grad = from_torch(t.grad, dtype=dtype)
-
-    a = warp.array(
-        ptr=t.data_ptr(),
-        dtype=dtype,
-        shape=shape,
-        strides=strides,
-        device=device,
-        copy=False,
-        grad=grad,
-        requires_grad=requires_grad,
-    )
-
-    # save a reference to the source tensor, otherwise it will be deallocated
-    a._tensor = t
-    return a
+            grad_ptr = grad.ptr
+
+    if return_ctype:
+        ptr = t.data_ptr()
+
+        # create array descriptor
+        array_ctype = warp.types.array_t(ptr, grad_ptr, len(shape), shape, strides)
+
+        # keep data and gradient alive
+        array_ctype._ref = t
+        array_ctype._gradref = grad
+
+        return array_ctype
+
+    else:
+        a = warp.array(
+            ptr=t.data_ptr(),
+            dtype=dtype,
+            shape=shape,
+            strides=strides,
+            device=device_from_torch(t.device),
+            copy=False,
+            grad=grad,
+            requires_grad=requires_grad,
+        )
+
+        # save a reference to the source tensor, otherwise it may get deallocated
+        a._tensor = t
+
+        return a
 
 
 def to_torch(a, requires_grad=None):
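
The rewritten device_from_torch above resolves both strings and torch.device objects directly against Warp's runtime device tables instead of round-tripping through str(), and it maps a bare "cuda" to the current CUDA device. A minimal sketch of the resulting behavior, assuming a machine with a CUDA-enabled Torch build and that the function is exposed as wp.device_from_torch (as in Warp's public API):

import torch
import warp as wp

wp.init()

# strings and torch.device objects both resolve to Warp devices
assert wp.device_from_torch("cpu") == wp.get_device("cpu")
assert wp.device_from_torch(torch.device("cuda", 0)) == wp.get_device("cuda:0")

# a bare "cuda" maps to the current CUDA device rather than failing the lookup
with wp.ScopedDevice("cuda:0"):
    print(wp.device_from_torch("cuda"))  # cuda:0

# anything that is neither a known device string nor a torch.device raises
try:
    wp.device_from_torch(42)
except ValueError as e:
    print(e)  # Argument must be a torch.device object or a string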
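
The from_torch changes add a fast path: with return_ctype=True the function skips constructing a wp.array and instead returns a warp.types.array_t descriptor that keeps the source tensor (and its gradient) alive via _ref/_gradref and, per the docstring above, can be passed to Warp kernels directly. A short sketch of both paths; the kernel and tensor here are illustrative only:

import torch
import warp as wp

@wp.kernel
def scale(a: wp.array(dtype=float), s: float):
    i = wp.tid()
    a[i] = a[i] * s

t = torch.ones(1024, dtype=torch.float32, device="cuda:0")

# standard path: wrap the tensor as a wp.array (no copy) and launch
a = wp.from_torch(t)
wp.launch(scale, dim=a.shape[0], inputs=[a, 2.0], device=a.device)

# fast path: a low-level descriptor avoids the wp.array construction overhead,
# which matters when wrapping many tensors per step (see
# warp/examples/benchmarks/benchmark_interop_torch.py in the file list above)
a_ctype = wp.from_torch(t, return_ctype=True)
wp.launch(scale, dim=t.shape[0], inputs=[a_ctype, 2.0], device="cuda:0")

print(t[:4])  # tensor([4., 4., 4., 4.], ...) after two 2x scalings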