warp-lang 1.7.0-py3-none-manylinux_2_28_x86_64.whl → 1.7.1-py3-none-manylinux_2_28_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of warp-lang has been flagged by the registry.
- warp/autograd.py +12 -2
- warp/bin/warp-clang.so +0 -0
- warp/bin/warp.so +0 -0
- warp/build.py +1 -1
- warp/builtins.py +11 -10
- warp/codegen.py +17 -5
- warp/config.py +1 -1
- warp/context.py +6 -0
- warp/examples/benchmarks/benchmark_cloth.py +1 -1
- warp/examples/distributed/example_jacobi_mpi.py +507 -0
- warp/fem/field/field.py +11 -1
- warp/fem/field/nodal_field.py +36 -22
- warp/fem/geometry/adaptive_nanogrid.py +7 -3
- warp/fem/geometry/trimesh.py +4 -12
- warp/jax_experimental/custom_call.py +14 -2
- warp/jax_experimental/ffi.py +5 -1
- warp/native/tile.h +11 -11
- warp/native/warp.cu +1 -1
- warp/render/render_opengl.py +19 -17
- warp/render/render_usd.py +93 -3
- warp/sim/collide.py +11 -9
- warp/sim/inertia.py +189 -156
- warp/sim/integrator_euler.py +3 -0
- warp/sim/integrator_xpbd.py +3 -0
- warp/sim/model.py +29 -12
- warp/sim/render.py +4 -0
- warp/stubs.py +1 -1
- warp/tests/assets/torus.usda +1 -1
- warp/tests/sim/test_collision.py +237 -206
- warp/tests/sim/test_inertia.py +161 -0
- warp/tests/sim/{flaky_test_sim_grad.py → test_sim_grad.py} +4 -0
- warp/tests/sim/test_xpbd.py +399 -0
- warp/tests/test_codegen.py +24 -3
- warp/tests/test_examples.py +16 -6
- warp/tests/test_fem.py +75 -10
- warp/tests/test_mat.py +370 -103
- warp/tests/test_quat.py +321 -137
- warp/tests/test_vec.py +320 -174
- warp/tests/tile/test_tile_load.py +97 -0
- warp/tests/unittest_suites.py +2 -5
- warp/types.py +65 -8
- {warp_lang-1.7.0.dist-info → warp_lang-1.7.1.dist-info}/METADATA +21 -9
- {warp_lang-1.7.0.dist-info → warp_lang-1.7.1.dist-info}/RECORD +46 -43
- {warp_lang-1.7.0.dist-info → warp_lang-1.7.1.dist-info}/WHEEL +1 -1
- {warp_lang-1.7.0.dist-info → warp_lang-1.7.1.dist-info}/licenses/LICENSE.md +0 -26
- {warp_lang-1.7.0.dist-info → warp_lang-1.7.1.dist-info}/top_level.txt +0 -0
warp/autograd.py
CHANGED
@@ -52,7 +52,12 @@ def gradcheck(
 ) -> bool:
     """
     Checks whether the autodiff gradient of a Warp kernel matches finite differences.
-
+
+    Given the autodiff (:math:`\\nabla_\\text{AD}`) and finite difference gradients (:math:`\\nabla_\\text{FD}`), the check succeeds if the autodiff gradients contain no NaN values and the following condition holds:
+
+    .. math::
+
+        |\\nabla_\\text{AD} - \\nabla_\\text{FD}| \\leq atol + rtol \\cdot |\\nabla_\\text{FD}|.
 
     The kernel function and its adjoint version are launched with the given inputs and outputs, as well as the provided
     ``dim``, ``max_blocks``, and ``block_dim`` arguments (see :func:`warp.launch` for more details).
@@ -250,7 +255,12 @@ def gradcheck_tape(
 ) -> bool:
     """
     Checks whether the autodiff gradients for kernels recorded on the Warp tape match finite differences.
-
+
+    Given the autodiff (:math:`\\nabla_\\text{AD}`) and finite difference gradients (:math:`\\nabla_\\text{FD}`), the check succeeds if the autodiff gradients contain no NaN values and the following condition holds:
+
+    .. math::
+
+        |\\nabla_\\text{AD} - \\nabla_\\text{FD}| \\leq atol + rtol \\cdot |\\nabla_\\text{FD}|.
 
     Note:
         Only Warp kernels recorded on the tape are checked but not arbitrary functions that have been recorded, e.g. via :meth:`Tape.record_func`.
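The condition added to both docstrings is an elementwise, allclose-style test on the two gradients. As a rough illustration, it could be evaluated with NumPy as follows (a minimal sketch of the documented formula only; grads_match and its default tolerances are hypothetical, not Warp's implementation):

import numpy as np

def grads_match(grad_ad, grad_fd, rtol=1e-2, atol=1e-4):
    # the check fails outright if the autodiff gradient contains NaNs
    if np.isnan(grad_ad).any():
        return False
    # elementwise |AD - FD| <= atol + rtol * |FD|
    return bool(np.all(np.abs(grad_ad - grad_fd) <= atol + rtol * np.abs(grad_fd)))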
warp/bin/warp-clang.so
CHANGED
Binary file

warp/bin/warp.so
CHANGED
Binary file
warp/build.py
CHANGED
@@ -360,7 +360,7 @@ def build_lto_solver(M, N, solver, solver_enum, fill_mode, arch, precision_enum,
     # TODO: MathDx doesn't yet have heuristics for Blackwell
     arch = min(arch, 90)
 
-    lto_symbol = f"{solver}_{M}_{N}_{arch}_{precision_enum}"
+    lto_symbol = f"{solver}_{M}_{N}_{arch}_{num_threads}_{precision_enum}_{fill_mode}"
     ltoir_decl = f"void {lto_symbol}{parameter_list};"
 
     # early out if LTO for this symbol is already cached in current module
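The LTO symbol doubles as the cache key (see the early-out comment above), so it must encode every parameter that affects the generated solver; the 1.7.0 symbol omitted num_threads and fill_mode, so two solver configurations differing only in those could collide on the same cached artifact. The general pattern, as a hypothetical sketch rather than Warp's actual cache code:

# cache build artifacts under a key covering every codegen-relevant parameter
_lto_cache = {}

def get_lto(solver, M, N, arch, num_threads, precision_enum, fill_mode, build_fn):
    key = (solver, M, N, arch, num_threads, precision_enum, fill_mode)
    if key not in _lto_cache:
        _lto_cache[key] = build_fn(*key)  # only built on a true cache miss
    return _lto_cache[key]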
warp/builtins.py
CHANGED
@@ -836,7 +836,7 @@ def vector_value_func(arg_types: Mapping[str, type], arg_values: Mapping[str, An
 
     if dtype is None:
         dtype = value_type
-    elif value_type
+    elif not warp.types.scalars_equal(value_type, dtype):
         raise RuntimeError(
             f"the value used to fill this vector is expected to be of the type `{dtype.__name__}`"
         )
@@ -857,9 +857,9 @@ def vector_value_func(arg_types: Mapping[str, type], arg_values: Mapping[str, An
 
     if dtype is None:
         dtype = value_type
-    elif value_type
+    elif not warp.types.scalars_equal(value_type, dtype):
         raise RuntimeError(
-            f"all values used to initialize this vector
+            f"all values used to initialize this vector are expected to be of the type `{dtype.__name__}`"
         )
 
     if length is None:
@@ -940,7 +940,7 @@ def matrix_value_func(arg_types: Mapping[str, type], arg_values: Mapping[str, An
 
     if dtype is None:
         dtype = value_type
-    elif value_type
+    elif not warp.types.scalars_equal(value_type, dtype):
         raise RuntimeError(
             f"the value used to fill this matrix is expected to be of the type `{dtype.__name__}`"
         )
@@ -979,7 +979,7 @@ def matrix_value_func(arg_types: Mapping[str, type], arg_values: Mapping[str, An
 
     if dtype is None:
         dtype = value_type
-    elif value_type
+    elif not warp.types.scalars_equal(value_type, dtype):
         raise RuntimeError(
             f"all values used to initialize this matrix are expected to be of the type `{dtype.__name__}`"
         )
@@ -1170,7 +1170,7 @@ def matrix_transform_value_func(arg_types: Mapping[str, type], arg_values: Mappi
 
     if dtype is None:
         dtype = value_type
-    elif value_type
+    elif not warp.types.scalars_equal(value_type, dtype):
         raise RuntimeError(
             f"all values used to initialize this transformation matrix are expected to be of the type `{dtype.__name__}`"
         )
@@ -1305,7 +1305,7 @@ def quaternion_value_func(arg_types: Mapping[str, type], arg_values: Mapping[str
 
     if dtype is None:
         dtype = value_type
-    elif value_type
+    elif not warp.types.scalars_equal(value_type, dtype):
         raise RuntimeError(
             f"all values used to initialize this quaternion are expected to be of the type `{dtype.__name__}`"
         )
@@ -1345,7 +1345,8 @@ add_builtin(
 )
 add_builtin(
     "quaternion",
-    input_types={"x": Float, "y": Float, "z": Float, "w": Float},
+    input_types={"x": Float, "y": Float, "z": Float, "w": Float, "dtype": Scalar},
+    defaults={"dtype": None},
     value_func=quaternion_value_func,
     export_func=lambda input_types: {k: v for k, v in input_types.items() if k != "dtype"},
     dispatch_func=quaternion_dispatch_func,
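With this change the quaternion builtin accepts an optional dtype argument (defaulting to None, i.e. inferred from the components), matching the other vector and matrix constructors. A sketch of kernel-side usage; the kernel and output array are invented for illustration:

import warp as wp

@wp.kernel
def make_unit_quat(out: wp.array(dtype=wp.quatf)):
    # dtype selects the quaternion's scalar type; per the value_func checks
    # above, the component values must be of a matching scalar type
    out[0] = wp.quaternion(0.0, 0.0, 0.0, 1.0, dtype=wp.float32)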
@@ -1515,7 +1516,7 @@ def transformation_value_func(arg_types: Mapping[str, type], arg_values: Mapping
     dtype = arg_values.get("dtype", None)
     if dtype is None:
         dtype = value_type
-    elif value_type
+    elif not warp.types.scalars_equal(value_type, dtype):
         raise RuntimeError(
             f"all values used to initialize this transformation matrix are expected to be of the type `{dtype.__name__}`"
         )
@@ -1682,7 +1683,7 @@ def spatial_vector_value_func(arg_types: Mapping[str, type], arg_values: Mapping
 
     if dtype is None:
         dtype = value_type
-    elif value_type
+    elif not warp.types.scalars_equal(value_type, dtype):
         raise RuntimeError(
             f"all values used to initialize this spatial vector are expected to be of the type `{dtype.__name__}`"
         )
warp/codegen.py
CHANGED
@@ -202,7 +202,7 @@ def get_full_arg_spec(func: Callable) -> inspect.FullArgSpec:
     return spec._replace(annotations=eval_annotations(spec.annotations, func))
 
 
-def struct_instance_repr_recursive(inst: StructInstance, depth: int) -> str:
+def struct_instance_repr_recursive(inst: StructInstance, depth: int, use_repr: bool) -> str:
     indent = "\t"
 
     # handle empty structs
@@ -216,9 +216,12 @@ def struct_instance_repr_recursive(inst: StructInstance, depth: int) -> str:
         field_value = getattr(inst, field_name, None)
 
         if isinstance(field_value, StructInstance):
-            field_value = struct_instance_repr_recursive(field_value, depth + 1)
+            field_value = struct_instance_repr_recursive(field_value, depth + 1, use_repr)
 
-        lines.append(f"{indent * (depth + 1)}{field_name}={field_value},")
+        if use_repr:
+            lines.append(f"{indent * (depth + 1)}{field_name}={field_value!r},")
+        else:
+            lines.append(f"{indent * (depth + 1)}{field_name}={field_value!s},")
 
     lines.append(f"{indent * depth})")
     return "\n".join(lines)
@@ -341,7 +344,10 @@ class StructInstance:
         return self._ctype
 
     def __repr__(self):
-        return struct_instance_repr_recursive(self, 0)
+        return struct_instance_repr_recursive(self, 0, use_repr=True)
+
+    def __str__(self):
+        return struct_instance_repr_recursive(self, 0, use_repr=False)
 
     def to(self, device):
         """Copies this struct with all array members moved onto the given device.
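StructInstance now distinguishes repr() from str(): both walk the fields recursively, but __repr__ formats each value with !r while the new __str__ uses !s. A small usage sketch (the Params struct is invented for illustration):

import warp as wp

@wp.struct
class Params:
    radius: float
    count: int

p = Params()
p.radius = 0.5
p.count = 3
print(repr(p))  # field values rendered via repr(), unambiguous
print(p)        # field values rendered via str(), more readable (new in 1.7.1)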
@@ -1492,6 +1498,8 @@ class Adjoint:
 
     def add_return(adj, var):
         if var is None or len(var) == 0:
+            # NOTE: If this kernel gets compiled for a CUDA device, then we need
+            # to convert the return; into a continue; in codegen_func_forward()
             adj.add_forward("return;", f"goto label{adj.label_count};")
         elif len(var) == 1:
             adj.add_forward(f"return {var[0].emit()};", f"goto label{adj.label_count};")
@@ -3549,7 +3557,11 @@ def codegen_func_forward(adj, func_type="kernel", device="cpu"):
     lines += ["// forward\n"]
 
     for f in adj.blocks[0].body_forward:
-        lines += [f + "\n"]
+        if func_type == "kernel" and device == "cuda" and f.lstrip().startswith("return;"):
+            # Use of grid-stride loops in CUDA kernels requires that we convert return; to continue;
+            lines += [f.replace("return;", "continue;") + "\n"]
+        else:
+            lines += [f + "\n"]
 
     return "".join(l.lstrip() if l.lstrip().startswith("#line") else indent_block + l for l in lines)
 
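Warp emits CUDA kernels as grid-stride loops, where each thread processes indices tid, tid + stride, tid + 2 * stride, and so on; an early return; in the emitted body would abandon the thread's remaining iterations instead of just the current one, hence the rewrite to continue;. A Python analogy of the control-flow distinction (illustrative only, not generated code):

def thread_body(tid, stride, n, values, out):
    # grid-stride loop: this thread owns items tid, tid + stride, ...
    for i in range(tid, n, stride):
        if values[i] < 0.0:
            # a kernel-level "return;" must become continue here, otherwise one
            # rejected item would skip all remaining items owned by this thread
            continue
        out[i] = 2.0 * values[i]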
warp/config.py
CHANGED
warp/context.py
CHANGED
@@ -769,6 +769,12 @@ class Kernel:
 
         return f"{self.key}_{hash_suffix}"
 
+    def __call__(self, *args, **kwargs):
+        # we implement this function only to ensure Kernel is a callable object
+        # so that we can document Warp kernels in the same way as Python functions
+        # annotated by @wp.kernel (see functools.update_wrapper())
+        raise NotImplementedError("Kernel.__call__() is not implemented, please use wp.launch() instead")
+
 
 # ----------------------
 
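Kernels therefore remain launch-only: __call__ exists so that a Kernel is a callable object and can be documented like the Python function it wraps, but invoking it directly fails loudly. Expected usage, sketched with an invented kernel:

import warp as wp

@wp.kernel
def scale(x: wp.array(dtype=float), s: float):
    i = wp.tid()
    x[i] = x[i] * s

x = wp.zeros(8, dtype=float)
wp.launch(scale, dim=8, inputs=[x, 2.0])  # the supported way to run a kernel
# scale(x, 2.0) would raise NotImplementedError as of 1.7.1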
warp/examples/benchmarks/benchmark_cloth.py
CHANGED
@@ -160,7 +160,7 @@ def run_benchmark(mode, dim, timers, render=False):
         stage = Usd.Stage.CreateNew("benchmark.usd")
         stage.SetStartTimeCode(0.0)
         stage.SetEndTimeCode(sim_duration * sim_fps)
-        stage.
+        stage.SetFramesPerSecond(sim_fps)
 
         grid = UsdGeom.Mesh.Define(stage, "/root")
         grid.GetPointsAttr().Set(cloth.positions, 0.0)