warp-lang 1.2.1-py3-none-win_amd64.whl → 1.3.0-py3-none-win_amd64.whl
This diff represents the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- warp/__init__.py +8 -6
- warp/autograd.py +823 -0
- warp/bin/warp-clang.dll +0 -0
- warp/bin/warp.dll +0 -0
- warp/build.py +6 -2
- warp/builtins.py +1410 -886
- warp/codegen.py +503 -166
- warp/config.py +48 -18
- warp/context.py +401 -199
- warp/dlpack.py +8 -0
- warp/examples/assets/bunny.usd +0 -0
- warp/examples/benchmarks/benchmark_cloth_warp.py +1 -1
- warp/examples/benchmarks/benchmark_interop_torch.py +158 -0
- warp/examples/benchmarks/benchmark_launches.py +1 -1
- warp/examples/core/example_cupy.py +78 -0
- warp/examples/fem/example_apic_fluid.py +17 -36
- warp/examples/fem/example_burgers.py +9 -18
- warp/examples/fem/example_convection_diffusion.py +7 -17
- warp/examples/fem/example_convection_diffusion_dg.py +27 -47
- warp/examples/fem/example_deformed_geometry.py +11 -22
- warp/examples/fem/example_diffusion.py +7 -18
- warp/examples/fem/example_diffusion_3d.py +24 -28
- warp/examples/fem/example_diffusion_mgpu.py +7 -14
- warp/examples/fem/example_magnetostatics.py +190 -0
- warp/examples/fem/example_mixed_elasticity.py +111 -80
- warp/examples/fem/example_navier_stokes.py +30 -34
- warp/examples/fem/example_nonconforming_contact.py +290 -0
- warp/examples/fem/example_stokes.py +17 -32
- warp/examples/fem/example_stokes_transfer.py +12 -21
- warp/examples/fem/example_streamlines.py +350 -0
- warp/examples/fem/utils.py +936 -0
- warp/fabric.py +5 -2
- warp/fem/__init__.py +13 -3
- warp/fem/cache.py +161 -11
- warp/fem/dirichlet.py +37 -28
- warp/fem/domain.py +105 -14
- warp/fem/field/__init__.py +14 -3
- warp/fem/field/field.py +454 -11
- warp/fem/field/nodal_field.py +33 -18
- warp/fem/geometry/deformed_geometry.py +50 -15
- warp/fem/geometry/hexmesh.py +12 -24
- warp/fem/geometry/nanogrid.py +106 -31
- warp/fem/geometry/quadmesh_2d.py +6 -11
- warp/fem/geometry/tetmesh.py +103 -61
- warp/fem/geometry/trimesh_2d.py +98 -47
- warp/fem/integrate.py +231 -186
- warp/fem/operator.py +14 -9
- warp/fem/quadrature/pic_quadrature.py +35 -9
- warp/fem/quadrature/quadrature.py +119 -32
- warp/fem/space/basis_space.py +98 -22
- warp/fem/space/collocated_function_space.py +3 -1
- warp/fem/space/function_space.py +7 -2
- warp/fem/space/grid_2d_function_space.py +3 -3
- warp/fem/space/grid_3d_function_space.py +4 -4
- warp/fem/space/hexmesh_function_space.py +3 -2
- warp/fem/space/nanogrid_function_space.py +12 -14
- warp/fem/space/partition.py +45 -47
- warp/fem/space/restriction.py +19 -16
- warp/fem/space/shape/cube_shape_function.py +91 -3
- warp/fem/space/shape/shape_function.py +7 -0
- warp/fem/space/shape/square_shape_function.py +32 -0
- warp/fem/space/shape/tet_shape_function.py +11 -7
- warp/fem/space/shape/triangle_shape_function.py +10 -1
- warp/fem/space/topology.py +116 -42
- warp/fem/types.py +8 -1
- warp/fem/utils.py +301 -83
- warp/native/array.h +16 -0
- warp/native/builtin.h +0 -15
- warp/native/cuda_util.cpp +14 -6
- warp/native/exports.h +1348 -1308
- warp/native/quat.h +79 -0
- warp/native/rand.h +27 -4
- warp/native/sparse.cpp +83 -81
- warp/native/sparse.cu +381 -453
- warp/native/vec.h +64 -0
- warp/native/volume.cpp +40 -49
- warp/native/volume_builder.cu +2 -3
- warp/native/volume_builder.h +12 -17
- warp/native/warp.cu +3 -3
- warp/native/warp.h +69 -59
- warp/render/render_opengl.py +17 -9
- warp/sim/articulation.py +117 -17
- warp/sim/collide.py +35 -29
- warp/sim/model.py +123 -18
- warp/sim/render.py +3 -1
- warp/sparse.py +867 -203
- warp/stubs.py +312 -541
- warp/tape.py +29 -1
- warp/tests/disabled_kinematics.py +1 -1
- warp/tests/test_adam.py +1 -1
- warp/tests/test_arithmetic.py +1 -1
- warp/tests/test_array.py +58 -1
- warp/tests/test_array_reduce.py +1 -1
- warp/tests/test_async.py +1 -1
- warp/tests/test_atomic.py +1 -1
- warp/tests/test_bool.py +1 -1
- warp/tests/test_builtins_resolution.py +1 -1
- warp/tests/test_bvh.py +6 -1
- warp/tests/test_closest_point_edge_edge.py +1 -1
- warp/tests/test_codegen.py +66 -1
- warp/tests/test_compile_consts.py +1 -1
- warp/tests/test_conditional.py +1 -1
- warp/tests/test_copy.py +1 -1
- warp/tests/test_ctypes.py +1 -1
- warp/tests/test_dense.py +1 -1
- warp/tests/test_devices.py +1 -1
- warp/tests/test_dlpack.py +1 -1
- warp/tests/test_examples.py +33 -4
- warp/tests/test_fabricarray.py +5 -2
- warp/tests/test_fast_math.py +1 -1
- warp/tests/test_fem.py +213 -6
- warp/tests/test_fp16.py +1 -1
- warp/tests/test_func.py +1 -1
- warp/tests/test_future_annotations.py +90 -0
- warp/tests/test_generics.py +1 -1
- warp/tests/test_grad.py +1 -1
- warp/tests/test_grad_customs.py +1 -1
- warp/tests/test_grad_debug.py +247 -0
- warp/tests/test_hash_grid.py +6 -1
- warp/tests/test_implicit_init.py +354 -0
- warp/tests/test_import.py +1 -1
- warp/tests/test_indexedarray.py +1 -1
- warp/tests/test_intersect.py +1 -1
- warp/tests/test_jax.py +1 -1
- warp/tests/test_large.py +1 -1
- warp/tests/test_launch.py +1 -1
- warp/tests/test_lerp.py +1 -1
- warp/tests/test_linear_solvers.py +1 -1
- warp/tests/test_lvalue.py +1 -1
- warp/tests/test_marching_cubes.py +5 -2
- warp/tests/test_mat.py +34 -35
- warp/tests/test_mat_lite.py +2 -1
- warp/tests/test_mat_scalar_ops.py +1 -1
- warp/tests/test_math.py +1 -1
- warp/tests/test_matmul.py +20 -16
- warp/tests/test_matmul_lite.py +1 -1
- warp/tests/test_mempool.py +1 -1
- warp/tests/test_mesh.py +5 -2
- warp/tests/test_mesh_query_aabb.py +1 -1
- warp/tests/test_mesh_query_point.py +1 -1
- warp/tests/test_mesh_query_ray.py +1 -1
- warp/tests/test_mlp.py +1 -1
- warp/tests/test_model.py +1 -1
- warp/tests/test_module_hashing.py +77 -1
- warp/tests/test_modules_lite.py +1 -1
- warp/tests/test_multigpu.py +1 -1
- warp/tests/test_noise.py +1 -1
- warp/tests/test_operators.py +1 -1
- warp/tests/test_options.py +1 -1
- warp/tests/test_overwrite.py +542 -0
- warp/tests/test_peer.py +1 -1
- warp/tests/test_pinned.py +1 -1
- warp/tests/test_print.py +1 -1
- warp/tests/test_quat.py +15 -1
- warp/tests/test_rand.py +1 -1
- warp/tests/test_reload.py +1 -1
- warp/tests/test_rounding.py +1 -1
- warp/tests/test_runlength_encode.py +1 -1
- warp/tests/test_scalar_ops.py +95 -0
- warp/tests/test_sim_grad.py +1 -1
- warp/tests/test_sim_kinematics.py +1 -1
- warp/tests/test_smoothstep.py +1 -1
- warp/tests/test_sparse.py +82 -15
- warp/tests/test_spatial.py +1 -1
- warp/tests/test_special_values.py +2 -11
- warp/tests/test_streams.py +11 -1
- warp/tests/test_struct.py +1 -1
- warp/tests/test_tape.py +1 -1
- warp/tests/test_torch.py +194 -1
- warp/tests/test_transient_module.py +1 -1
- warp/tests/test_types.py +1 -1
- warp/tests/test_utils.py +1 -1
- warp/tests/test_vec.py +15 -63
- warp/tests/test_vec_lite.py +2 -1
- warp/tests/test_vec_scalar_ops.py +122 -39
- warp/tests/test_verify_fp.py +1 -1
- warp/tests/test_volume.py +28 -2
- warp/tests/test_volume_write.py +1 -1
- warp/tests/unittest_serial.py +1 -1
- warp/tests/unittest_suites.py +9 -1
- warp/tests/walkthrough_debug.py +1 -1
- warp/thirdparty/unittest_parallel.py +2 -5
- warp/torch.py +103 -41
- warp/types.py +344 -227
- warp/utils.py +11 -2
- {warp_lang-1.2.1.dist-info → warp_lang-1.3.0.dist-info}/METADATA +99 -46
- warp_lang-1.3.0.dist-info/RECORD +368 -0
- warp/examples/fem/bsr_utils.py +0 -378
- warp/examples/fem/mesh_utils.py +0 -133
- warp/examples/fem/plot_utils.py +0 -292
- warp_lang-1.2.1.dist-info/RECORD +0 -359
- {warp_lang-1.2.1.dist-info → warp_lang-1.3.0.dist-info}/LICENSE.md +0 -0
- {warp_lang-1.2.1.dist-info → warp_lang-1.3.0.dist-info}/WHEEL +0 -0
- {warp_lang-1.2.1.dist-info → warp_lang-1.3.0.dist-info}/top_level.txt +0 -0
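The manifest above can be reproduced locally by comparing the two wheels directly, since wheels are ordinary zip archives. The following is a minimal sketch, not the registry's own tooling; it assumes both wheel files have already been fetched (e.g. with pip download warp-lang==1.2.1 and pip download warp-lang==1.3.0):

    import zipfile

    OLD = "warp_lang-1.2.1-py3-none-win_amd64.whl"
    NEW = "warp_lang-1.3.0-py3-none-win_amd64.whl"

    with zipfile.ZipFile(OLD) as zo, zipfile.ZipFile(NEW) as zn:
        old_names, new_names = set(zo.namelist()), set(zn.namelist())
        print("added:", sorted(new_names - old_names))
        print("removed:", sorted(old_names - new_names))
        # files present in both wheels may still differ; compare their bytes
        for name in sorted(old_names & new_names):
            if zo.read(name) != zn.read(name):
                print("changed:", name)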
warp/torch.py
CHANGED
@@ -10,12 +10,41 @@ import ctypes
 import numpy
 
 import warp
+import warp.context
 
 
 # return the warp device corresponding to a torch device
 def device_from_torch(torch_device) -> warp.context.Device:
-    """Return the Warp device corresponding to a Torch device."""
-    return warp.get_device(str(torch_device))
+    """Return the Warp device corresponding to a Torch device.
+
+    Args:
+        torch_device (`torch.device` or `str`): Torch device identifier
+
+    Raises:
+        RuntimeError: Torch device does not have a corresponding Warp device
+    """
+    if type(torch_device) is str:
+        warp_device = warp.context.runtime.device_map.get(torch_device)
+        if warp_device is not None:
+            return warp_device
+        elif torch_device == "cuda":
+            return warp.context.runtime.get_current_cuda_device()
+        else:
+            raise RuntimeError(f"Unsupported Torch device {torch_device}")
+    else:
+        try:
+            if torch_device.type == "cuda":
+                return warp.context.runtime.cuda_devices[torch_device.index]
+            elif torch_device.type == "cpu":
+                return warp.context.runtime.cpu_device
+            else:
+                raise RuntimeError(f"Unsupported Torch device type {torch_device.type}")
+        except Exception as e:
+            import torch
+
+            if not isinstance(torch_device, torch.device):
+                raise ValueError("Argument must be a torch.device object or a string") from e
+            raise
 
 
 def device_to_torch(warp_device: warp.context.Devicelike) -> str:
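The rewritten device_from_torch above no longer round-trips through str(torch_device): strings are looked up in Warp's device map (with a bare "cuda" resolving to the current CUDA device), and torch.device objects are mapped by type and index. A minimal usage sketch, assuming a CUDA-enabled build of both Warp and PyTorch (the variable names are illustrative):

    import torch
    import warp as wp
    from warp.torch import device_from_torch

    wp.init()

    d0 = device_from_torch("cuda")                   # current CUDA device
    d1 = device_from_torch(torch.device("cuda", 0))  # mapped by index -> "cuda:0"
    d2 = device_from_torch("cpu")                    # Warp's CPU device

Per the except branch above, passing anything other than a string or a torch.device now raises a ValueError.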
@@ -154,16 +183,17 @@ dtype_is_compatible.compatible_sets = None
 
 
 # wrap a torch tensor to a wp array, data is not copied
-def from_torch(t, dtype=None, requires_grad=None, grad=None):
+def from_torch(t, dtype=None, requires_grad=None, grad=None, return_ctype=False):
     """Convert a Torch tensor to a Warp array without copying the data.
 
     Args:
         t (torch.Tensor): The torch tensor to wrap.
         dtype (warp.dtype, optional): The target data type of the resulting Warp array. Defaults to the tensor value type mapped to a Warp array value type.
         requires_grad (bool, optional): Whether the resulting array should wrap the tensor's gradient, if it exists (the grad tensor will be allocated otherwise). Defaults to the tensor's `requires_grad` value.
+        return_ctype (bool, optional): Whether to return a low-level array descriptor instead of a ``wp.array`` object (faster). The descriptor can be passed to Warp kernels.
 
     Returns:
-        warp.array: The wrapped array.
+        warp.array: The wrapped array or array descriptor.
     """
     if dtype is None:
         dtype = dtype_from_torch(t.dtype)
@@ -175,7 +205,6 @@ def from_torch(t, dtype=None, requires_grad=None, grad=None):
 
     shape = tuple(t.shape)
     strides = tuple(s * ctype_size for s in t.stride())
-    device = device_from_torch(t.device)
 
     # if target is a vector or matrix type
     # then check if trailing dimensions match
@@ -183,57 +212,90 @@ def from_torch(t, dtype=None, requires_grad=None, grad=None):
     if hasattr(dtype, "_shape_"):
         dtype_shape = dtype._shape_
         dtype_dims = len(dtype._shape_)
+        # ensure inner shape matches
         if dtype_dims > len(shape) or dtype_shape != shape[-dtype_dims:]:
             raise RuntimeError(
                 f"Could not convert Torch tensor with shape {shape} to Warp array with dtype={dtype}, ensure that source inner shape is {dtype_shape}"
             )
-
-        # ensure the inner strides are contiguous
-        stride = ctype_size
-        for i in range(dtype_dims):
-            if strides[-i - 1] != stride:
-                raise RuntimeError(
-                    f"Could not convert Torch tensor with shape {shape} to Warp array with dtype={dtype}, because the source inner strides are not contiguous"
-                )
-            stride *= dtype_shape[-i - 1]
-
+        # ensure inner strides are contiguous
+        if strides[-1] != ctype_size or (dtype_dims > 1 and strides[-2] != ctype_size * dtype_shape[-1]):
+            raise RuntimeError(
+                f"Could not convert Torch tensor with shape {shape} to Warp array with dtype={dtype}, because the source inner strides are not contiguous"
+            )
+        # trim shape and strides
         shape = tuple(shape[:-dtype_dims]) or (1,)
         strides = tuple(strides[:-dtype_dims]) or (ctype_size,)
 
+    # gradient
+    # - if return_ctype is False, we set `grad` to a wp.array or None
+    # - if return_ctype is True, we set `grad_ptr` and set `grad` as the owner (wp.array or torch.Tensor)
     requires_grad = t.requires_grad if requires_grad is None else requires_grad
+    grad_ptr = 0
     if grad is not None:
-        if not isinstance(grad, warp.array):
-            import torch
-
-            if isinstance(grad, torch.Tensor):
-                grad = from_torch(grad, dtype=dtype)
+        if isinstance(grad, warp.array):
+            if return_ctype:
+                if grad.strides != strides:
+                    raise RuntimeError(
+                        f"Gradient strides must match array strides, expected {strides} but got {grad.strides}"
+                    )
+                grad_ptr = grad.ptr
+        else:
+            # assume grad is a torch.Tensor
+            if return_ctype:
+                if t.stride() != grad.stride():
+                    raise RuntimeError(
+                        f"Gradient strides must match array strides, expected {t.stride()} but got {grad.stride()}"
+                    )
+                grad_ptr = grad.data_ptr()
             else:
-                raise ValueError(f"Invalid gradient type: {type(grad)}")
+                grad = from_torch(grad, dtype=dtype, requires_grad=False)
     elif requires_grad:
         # wrap the tensor gradient, allocate if necessary
-        if t.grad is None:
+        if t.grad is not None:
+            if return_ctype:
+                grad = t.grad
+                if t.stride() != grad.stride():
+                    raise RuntimeError(
+                        f"Gradient strides must match array strides, expected {t.stride()} but got {grad.stride()}"
+                    )
+                grad_ptr = grad.data_ptr()
+            else:
+                grad = from_torch(t.grad, dtype=dtype, requires_grad=False)
+        else:
             # allocate a zero-filled gradient if it doesn't exist
             # Note: we use Warp to allocate the shared gradient with compatible strides
-            grad = warp.zeros(dtype=dtype, shape=shape, strides=strides, device=device)
+            grad = warp.zeros(dtype=dtype, shape=shape, strides=strides, device=device_from_torch(t.device))
             t.grad = to_torch(grad, requires_grad=False)
-        else:
-            # wrap the tensor gradient
-            grad = from_torch(t.grad, dtype=dtype)
-
-    a = warp.array(
-        ptr=t.data_ptr(),
-        dtype=dtype,
-        shape=shape,
-        strides=strides,
-        device=device,
-        copy=False,
-        grad=grad,
-        requires_grad=requires_grad,
-    )
-
-    # save a reference to the source tensor, otherwise it may get deallocated
-    a._tensor = t
-    return a
+            grad_ptr = grad.ptr
+
+    if return_ctype:
+        ptr = t.data_ptr()
+
+        # create array descriptor
+        array_ctype = warp.types.array_t(ptr, grad_ptr, len(shape), shape, strides)
+
+        # keep data and gradient alive
+        array_ctype._ref = t
+        array_ctype._gradref = grad
+
+        return array_ctype
+
+    else:
+        a = warp.array(
+            ptr=t.data_ptr(),
+            dtype=dtype,
+            shape=shape,
+            strides=strides,
+            device=device_from_torch(t.device),
+            copy=False,
+            grad=grad,
+            requires_grad=requires_grad,
+        )
+
+        # save a reference to the source tensor, otherwise it may get deallocated
+        a._tensor = t
+
+        return a
 
 
 def to_torch(a, requires_grad=None):
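Taken together, these hunks add a fast path to from_torch: with return_ctype=True the function skips wp.array construction and returns a warp.types.array_t descriptor, keeping the source tensor and its gradient alive through the _ref and _gradref attributes. A hedged sketch of how the two modes might be used, assuming CUDA is available (the kernel, sizes, and device string are hypothetical):

    import torch
    import warp as wp

    wp.init()


    @wp.kernel
    def scale(x: wp.array(dtype=float), s: float):
        i = wp.tid()
        x[i] = x[i] * s


    t = torch.ones(16, device="cuda:0")

    # regular mode: a wp.array that shares memory with `t`
    a = wp.from_torch(t)

    # fast path: a low-level array_t descriptor, no wp.array construction
    a_ctype = wp.from_torch(t, return_ctype=True)
    wp.launch(scale, dim=16, inputs=[a_ctype, 2.0], device="cuda:0")

Because the descriptor carries no device field, the launch device must be given explicitly and must match the tensor's device.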