warp-lang 1.2.2-py3-none-manylinux2014_aarch64.whl → 1.3.1-py3-none-manylinux2014_aarch64.whl
This diff compares the contents of two publicly released package versions as they appear in their public registry. It is provided for informational purposes only.
- warp/__init__.py +8 -6
- warp/autograd.py +823 -0
- warp/bin/warp.so +0 -0
- warp/build.py +6 -2
- warp/builtins.py +1412 -888
- warp/codegen.py +503 -166
- warp/config.py +48 -18
- warp/context.py +400 -198
- warp/dlpack.py +8 -0
- warp/examples/assets/bunny.usd +0 -0
- warp/examples/benchmarks/benchmark_cloth_warp.py +1 -1
- warp/examples/benchmarks/benchmark_interop_torch.py +158 -0
- warp/examples/benchmarks/benchmark_launches.py +1 -1
- warp/examples/core/example_cupy.py +78 -0
- warp/examples/fem/example_apic_fluid.py +17 -36
- warp/examples/fem/example_burgers.py +9 -18
- warp/examples/fem/example_convection_diffusion.py +7 -17
- warp/examples/fem/example_convection_diffusion_dg.py +27 -47
- warp/examples/fem/example_deformed_geometry.py +11 -22
- warp/examples/fem/example_diffusion.py +7 -18
- warp/examples/fem/example_diffusion_3d.py +24 -28
- warp/examples/fem/example_diffusion_mgpu.py +7 -14
- warp/examples/fem/example_magnetostatics.py +190 -0
- warp/examples/fem/example_mixed_elasticity.py +111 -80
- warp/examples/fem/example_navier_stokes.py +30 -34
- warp/examples/fem/example_nonconforming_contact.py +290 -0
- warp/examples/fem/example_stokes.py +17 -32
- warp/examples/fem/example_stokes_transfer.py +12 -21
- warp/examples/fem/example_streamlines.py +350 -0
- warp/examples/fem/utils.py +936 -0
- warp/fabric.py +5 -2
- warp/fem/__init__.py +13 -3
- warp/fem/cache.py +161 -11
- warp/fem/dirichlet.py +37 -28
- warp/fem/domain.py +105 -14
- warp/fem/field/__init__.py +14 -3
- warp/fem/field/field.py +454 -11
- warp/fem/field/nodal_field.py +33 -18
- warp/fem/geometry/deformed_geometry.py +50 -15
- warp/fem/geometry/hexmesh.py +12 -24
- warp/fem/geometry/nanogrid.py +106 -31
- warp/fem/geometry/quadmesh_2d.py +6 -11
- warp/fem/geometry/tetmesh.py +103 -61
- warp/fem/geometry/trimesh_2d.py +98 -47
- warp/fem/integrate.py +231 -186
- warp/fem/operator.py +14 -9
- warp/fem/quadrature/pic_quadrature.py +35 -9
- warp/fem/quadrature/quadrature.py +119 -32
- warp/fem/space/basis_space.py +98 -22
- warp/fem/space/collocated_function_space.py +3 -1
- warp/fem/space/function_space.py +7 -2
- warp/fem/space/grid_2d_function_space.py +3 -3
- warp/fem/space/grid_3d_function_space.py +4 -4
- warp/fem/space/hexmesh_function_space.py +3 -2
- warp/fem/space/nanogrid_function_space.py +12 -14
- warp/fem/space/partition.py +45 -47
- warp/fem/space/restriction.py +19 -16
- warp/fem/space/shape/cube_shape_function.py +91 -3
- warp/fem/space/shape/shape_function.py +7 -0
- warp/fem/space/shape/square_shape_function.py +32 -0
- warp/fem/space/shape/tet_shape_function.py +11 -7
- warp/fem/space/shape/triangle_shape_function.py +10 -1
- warp/fem/space/topology.py +116 -42
- warp/fem/types.py +8 -1
- warp/fem/utils.py +301 -83
- warp/native/array.h +16 -0
- warp/native/builtin.h +0 -15
- warp/native/cuda_util.cpp +14 -6
- warp/native/exports.h +1348 -1308
- warp/native/quat.h +79 -0
- warp/native/rand.h +27 -4
- warp/native/sparse.cpp +83 -81
- warp/native/sparse.cu +381 -453
- warp/native/vec.h +64 -0
- warp/native/volume.cpp +40 -49
- warp/native/volume_builder.cu +2 -3
- warp/native/volume_builder.h +12 -17
- warp/native/warp.cu +3 -3
- warp/native/warp.h +69 -59
- warp/render/render_opengl.py +17 -9
- warp/sim/articulation.py +117 -17
- warp/sim/collide.py +35 -29
- warp/sim/model.py +123 -18
- warp/sim/render.py +3 -1
- warp/sparse.py +867 -203
- warp/stubs.py +312 -541
- warp/tape.py +29 -1
- warp/tests/disabled_kinematics.py +1 -1
- warp/tests/test_adam.py +1 -1
- warp/tests/test_arithmetic.py +1 -1
- warp/tests/test_array.py +58 -1
- warp/tests/test_array_reduce.py +1 -1
- warp/tests/test_async.py +1 -1
- warp/tests/test_atomic.py +1 -1
- warp/tests/test_bool.py +1 -1
- warp/tests/test_builtins_resolution.py +1 -1
- warp/tests/test_bvh.py +6 -1
- warp/tests/test_closest_point_edge_edge.py +1 -1
- warp/tests/test_codegen.py +91 -1
- warp/tests/test_compile_consts.py +1 -1
- warp/tests/test_conditional.py +1 -1
- warp/tests/test_copy.py +1 -1
- warp/tests/test_ctypes.py +1 -1
- warp/tests/test_dense.py +1 -1
- warp/tests/test_devices.py +1 -1
- warp/tests/test_dlpack.py +1 -1
- warp/tests/test_examples.py +33 -4
- warp/tests/test_fabricarray.py +5 -2
- warp/tests/test_fast_math.py +1 -1
- warp/tests/test_fem.py +213 -6
- warp/tests/test_fp16.py +1 -1
- warp/tests/test_func.py +1 -1
- warp/tests/test_future_annotations.py +90 -0
- warp/tests/test_generics.py +1 -1
- warp/tests/test_grad.py +1 -1
- warp/tests/test_grad_customs.py +1 -1
- warp/tests/test_grad_debug.py +247 -0
- warp/tests/test_hash_grid.py +6 -1
- warp/tests/test_implicit_init.py +354 -0
- warp/tests/test_import.py +1 -1
- warp/tests/test_indexedarray.py +1 -1
- warp/tests/test_intersect.py +1 -1
- warp/tests/test_jax.py +1 -1
- warp/tests/test_large.py +1 -1
- warp/tests/test_launch.py +1 -1
- warp/tests/test_lerp.py +1 -1
- warp/tests/test_linear_solvers.py +1 -1
- warp/tests/test_lvalue.py +1 -1
- warp/tests/test_marching_cubes.py +5 -2
- warp/tests/test_mat.py +34 -35
- warp/tests/test_mat_lite.py +2 -1
- warp/tests/test_mat_scalar_ops.py +1 -1
- warp/tests/test_math.py +1 -1
- warp/tests/test_matmul.py +20 -16
- warp/tests/test_matmul_lite.py +1 -1
- warp/tests/test_mempool.py +1 -1
- warp/tests/test_mesh.py +5 -2
- warp/tests/test_mesh_query_aabb.py +1 -1
- warp/tests/test_mesh_query_point.py +1 -1
- warp/tests/test_mesh_query_ray.py +1 -1
- warp/tests/test_mlp.py +1 -1
- warp/tests/test_model.py +1 -1
- warp/tests/test_module_hashing.py +77 -1
- warp/tests/test_modules_lite.py +1 -1
- warp/tests/test_multigpu.py +1 -1
- warp/tests/test_noise.py +1 -1
- warp/tests/test_operators.py +1 -1
- warp/tests/test_options.py +1 -1
- warp/tests/test_overwrite.py +542 -0
- warp/tests/test_peer.py +1 -1
- warp/tests/test_pinned.py +1 -1
- warp/tests/test_print.py +1 -1
- warp/tests/test_quat.py +15 -1
- warp/tests/test_rand.py +1 -1
- warp/tests/test_reload.py +1 -1
- warp/tests/test_rounding.py +1 -1
- warp/tests/test_runlength_encode.py +1 -1
- warp/tests/test_scalar_ops.py +95 -0
- warp/tests/test_sim_grad.py +1 -1
- warp/tests/test_sim_kinematics.py +1 -1
- warp/tests/test_smoothstep.py +1 -1
- warp/tests/test_sparse.py +82 -15
- warp/tests/test_spatial.py +1 -1
- warp/tests/test_special_values.py +2 -11
- warp/tests/test_streams.py +11 -1
- warp/tests/test_struct.py +1 -1
- warp/tests/test_tape.py +1 -1
- warp/tests/test_torch.py +194 -1
- warp/tests/test_transient_module.py +1 -1
- warp/tests/test_types.py +1 -1
- warp/tests/test_utils.py +1 -1
- warp/tests/test_vec.py +15 -63
- warp/tests/test_vec_lite.py +2 -1
- warp/tests/test_vec_scalar_ops.py +65 -1
- warp/tests/test_verify_fp.py +1 -1
- warp/tests/test_volume.py +28 -2
- warp/tests/test_volume_write.py +1 -1
- warp/tests/unittest_serial.py +1 -1
- warp/tests/unittest_suites.py +9 -1
- warp/tests/walkthrough_debug.py +1 -1
- warp/thirdparty/unittest_parallel.py +2 -5
- warp/torch.py +103 -41
- warp/types.py +341 -224
- warp/utils.py +11 -2
- {warp_lang-1.2.2.dist-info → warp_lang-1.3.1.dist-info}/METADATA +99 -46
- warp_lang-1.3.1.dist-info/RECORD +368 -0
- warp/examples/fem/bsr_utils.py +0 -378
- warp/examples/fem/mesh_utils.py +0 -133
- warp/examples/fem/plot_utils.py +0 -292
- warp_lang-1.2.2.dist-info/RECORD +0 -359
- {warp_lang-1.2.2.dist-info → warp_lang-1.3.1.dist-info}/LICENSE.md +0 -0
- {warp_lang-1.2.2.dist-info → warp_lang-1.3.1.dist-info}/WHEEL +0 -0
- {warp_lang-1.2.2.dist-info → warp_lang-1.3.1.dist-info}/top_level.txt +0 -0
warp/tests/test_scalar_ops.py
ADDED

@@ -0,0 +1,95 @@
+# Copyright (c) 2024 NVIDIA CORPORATION. All rights reserved.
+# NVIDIA CORPORATION and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION is strictly prohibited.
+
+import unittest
+
+import numpy as np
+
+import warp as wp
+from warp.tests.unittest_utils import *
+
+np_signed_int_types = [
+    np.int8,
+    np.int16,
+    np.int32,
+    np.int64,
+    np.byte,
+]
+
+np_unsigned_int_types = [
+    np.uint8,
+    np.uint16,
+    np.uint32,
+    np.uint64,
+    np.ubyte,
+]
+
+np_int_types = np_signed_int_types + np_unsigned_int_types
+
+np_float_types = [np.float16, np.float32, np.float64]
+
+np_scalar_types = np_int_types + np_float_types
+
+
+def test_py_arithmetic_ops(test, device, dtype):
+    wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
+
+    def make_scalar(value):
+        if wptype in wp.types.int_types:
+            # Cast to the correct integer type to simulate wrapping.
+            return wptype._type_(value).value
+
+        return value
+
+    a = wptype(1)
+    test.assertAlmostEqual(+a, make_scalar(1))
+    test.assertAlmostEqual(-a, make_scalar(-1))
+    test.assertAlmostEqual(a + wptype(5), make_scalar(6))
+    test.assertAlmostEqual(a - wptype(5), make_scalar(-4))
+
+    a = wptype(2)
+    test.assertAlmostEqual(a * wptype(2), make_scalar(4))
+    test.assertAlmostEqual(wptype(2) * a, make_scalar(4))
+    test.assertAlmostEqual(a / wptype(2), make_scalar(1))
+    test.assertAlmostEqual(wptype(24) / a, make_scalar(12))
+
+
+def test_py_math_ops(test, device, dtype):
+    wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
+
+    def make_scalar(value):
+        if wptype in wp.types.int_types:
+            # Cast to the correct integer type to simulate wrapping.
+            return wptype._type_(value).value
+
+        return value
+
+    a = wptype(1)
+    test.assertAlmostEqual(wp.abs(a), 1)
+
+    if dtype in np_float_types:
+        test.assertAlmostEqual(wp.sin(a), 0.84147098480789650488, places=3)
+        test.assertAlmostEqual(wp.radians(a), 0.01745329251994329577, places=5)
+
+
+devices = get_test_devices()
+
+
+class TestScalarOps(unittest.TestCase):
+    pass
+
+
+for dtype in np_scalar_types:
+    add_function_test(
+        TestScalarOps, f"test_py_arithmetic_ops_{dtype.__name__}", test_py_arithmetic_ops, devices=None, dtype=dtype
+    )
+    add_function_test(TestScalarOps, f"test_py_math_ops_{dtype.__name__}", test_py_math_ops, devices=None, dtype=dtype)
+
+
+if __name__ == "__main__":
+    wp.clear_kernel_cache()
+    unittest.main(verbosity=2, failfast=True)
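Note: this new suite exercises Warp 1.3's Python-scope scalar math, where wp.float32, wp.int32, and the other scalar types support arithmetic operators and built-ins such as wp.abs and wp.sin outside of kernels. A minimal standalone sketch of the behavior the tests assert (not part of the diff):

import warp as wp

# Python-scope arithmetic on Warp scalar types, as asserted by test_scalar_ops.py.
a = wp.float32(1.0)
print(a + wp.float32(5.0))  # 6.0
print(wp.sin(a))            # ~0.841471 (float types only)

# Unsigned integer arithmetic wraps, which is what the make_scalar() helper simulates.
b = wp.uint8(1)
print(-b)  # 255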
warp/tests/test_sim_grad.py
CHANGED
warp/tests/test_smoothstep.py
CHANGED
warp/tests/test_sparse.py
CHANGED
@@ -158,6 +158,9 @@ def test_bsr_get_set_diag(test, device):
     diag = bsr_get_diag(diag_bsr)
     assert_np_equal(diag_scalar_np, diag.numpy(), tol=0.000001)
 
+    diag = bsr_get_diag(2.0 * diag_bsr)
+    assert_np_equal(2.0 * diag_scalar_np, diag.numpy(), tol=0.000001)
+
     # Uniform block diagonal
 
     with test.assertRaisesRegex(ValueError, "BsrMatrix block type must be either warp matrix or scalar"):
@@ -181,6 +184,58 @@ def test_bsr_get_set_diag(test, device):
     assert np.all(diag_csr.values.numpy() == np.ones(nrow, dtype=float))
 
 
+def test_bsr_split_merge(test, device):
+    rng = np.random.default_rng(123)
+
+    block_shape = (4, 2)
+    nrow = 4
+    ncol = 8
+    shape = (block_shape[0] * nrow, block_shape[1] * ncol)
+    n = 20
+
+    rows = wp.array(rng.integers(0, high=nrow, size=n, dtype=int), dtype=int, device=device)
+    cols = wp.array(rng.integers(0, high=ncol, size=n, dtype=int), dtype=int, device=device)
+    vals = wp.array(rng.random(size=(n, block_shape[0], block_shape[1])), dtype=float, device=device)
+
+    bsr = bsr_zeros(nrow, ncol, wp.types.matrix(shape=block_shape, dtype=float), device=device)
+    bsr_set_from_triplets(bsr, rows, cols, vals)
+    ref = _bsr_to_dense(bsr)
+
+    bsr_split = bsr_copy(bsr, block_shape=(2, 2))
+    test.assertEqual(bsr_split.block_size, 4)
+    res = _bsr_to_dense(bsr_split)
+    assert_np_equal(res, ref, 0.0001)
+
+    bsr_split = bsr_copy(bsr, block_shape=(1, 1))
+    test.assertEqual(bsr_split.block_size, 1)
+    res = _bsr_to_dense(bsr_split)
+    assert_np_equal(res, ref, 0.0001)
+
+    bsr_merge = bsr_copy(bsr, block_shape=(4, 4))
+    test.assertEqual(bsr_merge.block_size, 16)
+    res = _bsr_to_dense(bsr_merge)
+    assert_np_equal(res, ref, 0.0001)
+
+    bsr_merge = bsr_copy(bsr, block_shape=(8, 8))
+    test.assertEqual(bsr_merge.block_size, 64)
+    res = _bsr_to_dense(bsr_merge)
+    assert_np_equal(res, ref, 0.0001)
+
+    with test.assertRaisesRegex(ValueError, "Incompatible dest and src block shapes"):
+        bsr_copy(bsr, block_shape=(3, 3))
+
+    with test.assertRaisesRegex(
+        ValueError, r"Dest block shape \(5, 5\) is not an exact multiple of src block shape \(4, 2\)"
+    ):
+        bsr_copy(bsr, block_shape=(5, 5))
+
+    with test.assertRaisesRegex(
+        ValueError,
+        "The total rows and columns of the src matrix cannot be evenly divided using the requested block shape",
+    ):
+        bsr_copy(bsr, block_shape=(32, 32))
+
+
 def make_test_bsr_transpose(block_shape, scalar_type):
     def test_bsr_transpose(test, device):
         rng = np.random.default_rng(123)
@@ -197,14 +252,11 @@ def make_test_bsr_transpose(block_shape, scalar_type):
 
         bsr = bsr_zeros(nrow, ncol, wp.types.matrix(shape=block_shape, dtype=scalar_type), device=device)
         bsr_set_from_triplets(bsr, rows, cols, vals)
-        ref = np.transpose(_bsr_to_dense(bsr))
+        ref = 2.0 * np.transpose(_bsr_to_dense(bsr))
 
-        bsr_transposed = bsr_zeros(
-            ncol, nrow, wp.types.matrix(shape=block_shape[::-1], dtype=scalar_type), device=device
-        )
-        bsr_set_transpose(dest=bsr_transposed, src=bsr)
+        bsr_transposed = (2.0 * bsr).transpose()
 
-        res = _bsr_to_dense(bsr_transposed)
+        res = _bsr_to_dense(bsr_transposed.eval())
         assert_np_equal(res, ref, 0.0001)
 
         if block_shape[0] != block_shape[-1]:
@@ -245,17 +297,14 @@ def make_test_bsr_axpy(block_shape, scalar_type):
         work_arrays = bsr_axpy_work_arrays()
         for alpha, beta in zip(alphas, betas):
             ref = alpha * _bsr_to_dense(x) + beta * _bsr_to_dense(y)
-            if beta == 0.0:
-                y = bsr_axpy(x, alpha=alpha, beta=beta, work_arrays=work_arrays)
-            else:
-                bsr_axpy(x, y, alpha, beta, work_arrays=work_arrays)
+            bsr_axpy(x, y, alpha, beta, work_arrays=work_arrays)
 
             res = _bsr_to_dense(y)
             assert_np_equal(res, ref, 0.0001)
 
         # test aliasing
         ref = 3.0 * _bsr_to_dense(y)
-
+        y += y * 2.0
         res = _bsr_to_dense(y)
         assert_np_equal(res, ref, 0.0001)
 
@@ -285,7 +334,7 @@ def make_test_bsr_mm(block_shape, scalar_type):
 
         nnz = 6
 
-        alphas = [-1.0, 0.0,
+        alphas = [-1.0, 0.0, 2.0]
         betas = [2.0, -1.0, 0.0]
 
         x_rows = wp.array(rng.integers(0, high=x_nrow, size=nnz, dtype=int), dtype=int, device=device)
@@ -321,6 +370,15 @@ def make_test_bsr_mm(block_shape, scalar_type):
             res = _bsr_to_dense(z)
             assert_np_equal(res, ref, 0.0001)
 
+        # test reusing topology from work arrays
+        # (assumes betas[-1] = 0)
+        bsr_mm(x, y, z, alpha, beta, work_arrays=work_arrays, reuse_topology=True)
+        assert_np_equal(res, ref, 0.0001)
+
+        # using overloaded operators
+        x = (alpha * x) @ y
+        assert_np_equal(res, ref, 0.0001)
+
         # test aliasing of matrix arguments
         # x = alpha * z * x + beta * x
         alpha, beta = alphas[0], betas[0]
@@ -389,16 +447,24 @@ def make_test_bsr_mv(block_shape, scalar_type):
         for alpha, beta in zip(alphas, betas):
             ref = alpha * _bsr_to_dense(A) @ x.numpy().flatten() + beta * y.numpy().flatten()
             if beta == 0.0:
-                y =
+                y = A @ x
             else:
                 bsr_mv(A, x, y, alpha, beta, work_buffer=work_buffer)
 
             res = y.numpy().flatten()
            assert_np_equal(res, ref, 0.0001)
 
+            # test transposed product
+            ref = alpha * y.numpy().flatten() @ _bsr_to_dense(A)
+            x = y @ (A * alpha)
+            res = x.numpy().flatten()
+            assert_np_equal(res, ref, 0.0001)
+
         # test aliasing
-        alpha, beta = alphas[0], betas[0]
         AAt = bsr_mm(A, bsr_transposed(A))
+        assert_np_equal(_bsr_to_dense(AAt), _bsr_to_dense(A) @ _bsr_to_dense(A).T, 0.0001)
+
+        alpha, beta = alphas[0], betas[0]
         ref = alpha * _bsr_to_dense(AAt) @ y.numpy().flatten() + beta * y.numpy().flatten()
         bsr_mv(AAt, y, y, alpha, beta)
         res = y.numpy().flatten()
@@ -443,6 +509,7 @@ class TestSparse(unittest.TestCase):
 add_function_test(TestSparse, "test_csr_from_triplets", test_csr_from_triplets, devices=devices)
 add_function_test(TestSparse, "test_bsr_from_triplets", test_bsr_from_triplets, devices=devices)
 add_function_test(TestSparse, "test_bsr_get_diag", test_bsr_get_set_diag, devices=devices)
+add_function_test(TestSparse, "test_bsr_split_merge", test_bsr_split_merge, devices=devices)
 
 add_function_test(TestSparse, "test_csr_transpose", make_test_bsr_transpose((1, 1), wp.float32), devices=devices)
 add_function_test(TestSparse, "test_bsr_transpose_1_3", make_test_bsr_transpose((1, 3), wp.float32), devices=devices)
@@ -462,5 +529,5 @@ add_function_test(TestSparse, "test_bsr_mv_3_3", make_test_bsr_mv((3, 3), wp.float32), devices=devices)
 
 
 if __name__ == "__main__":
-    wp.build.clear_kernel_cache()
+    wp.clear_kernel_cache()
     unittest.main(verbosity=2)
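The changes above track the warp.sparse additions in 1.3: BsrMatrix gains arithmetic operator overloads (scalar scaling, +=, @, and a lazily evaluated transpose()), and the new bsr_copy() can re-block an existing matrix. A rough usage sketch assembled only from the calls these tests exercise (names and sizes are illustrative):

import numpy as np
import warp as wp
from warp.sparse import bsr_copy, bsr_set_from_triplets, bsr_zeros

wp.init()

rng = np.random.default_rng(42)
nrow, ncol, n = 4, 8, 20  # a 4x8 grid of 4x2 blocks, filled from 20 random triplets
rows = wp.array(rng.integers(0, nrow, size=n, dtype=int), dtype=int)
cols = wp.array(rng.integers(0, ncol, size=n, dtype=int), dtype=int)
vals = wp.array(rng.random(size=(n, 4, 2)), dtype=float)

A = bsr_zeros(nrow, ncol, wp.types.matrix(shape=(4, 2), dtype=float))
bsr_set_from_triplets(A, rows, cols, vals)

# Operator overloads exercised by the tests above:
At = (2.0 * A).transpose().eval()  # lazy transpose of a scaled matrix, materialized by eval()
C = A @ At                         # sparse-sparse product
A += A * 2.0                       # in-place axpy-style update

# Re-blocking: split the 4x2 blocks into 2x2, or merge them into 8x8.
A_split = bsr_copy(A, block_shape=(2, 2))
A_merged = bsr_copy(A, block_shape=(8, 8))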
warp/tests/test_spatial.py
CHANGED
@@ -279,7 +279,6 @@ def test_is_special_quat(test, device, dtype, register_kernels=False):
 def test_is_special_int(test, device, dtype, register_kernels=False):
     vector_type = wp.types.vector(5, dtype)
     matrix_type = wp.types.matrix((5, 5), dtype)
-    quat_type = wp.types.quaternion(dtype)
 
     def check_is_special_int(bool_outputs: wp.array(dtype=wp.bool)):
         bool_outputs[0] = wp.isfinite(dtype(0))
@@ -294,16 +293,12 @@ def test_is_special_int(test, device, dtype, register_kernels=False):
         bool_outputs[7] = wp.isnan(matrix_type())
         bool_outputs[8] = wp.isinf(matrix_type())
 
-        bool_outputs[9] = wp.isfinite(quat_type())
-        bool_outputs[10] = wp.isnan(quat_type())
-        bool_outputs[11] = wp.isinf(quat_type())
-
     kernel = getkernel(check_is_special_int, suffix=dtype.__name__)
 
     if register_kernels:
         return
 
-    outputs_bool = wp.empty(12, dtype=wp.bool, device=device)
+    outputs_bool = wp.empty(9, dtype=wp.bool, device=device)
 
     wp.launch(kernel, dim=1, inputs=[outputs_bool], device=device)
 
@@ -321,10 +316,6 @@ def test_is_special_int(test, device, dtype, register_kernels=False):
     test.assertFalse(outputs_bool_cpu[7], "wp.isinf(matrix) is not False")
     test.assertFalse(outputs_bool_cpu[8], "wp.isnan(matrix) is not False")
 
-    test.assertTrue(outputs_bool_cpu[9], "wp.isfinite(quat) is not True")
-    test.assertFalse(outputs_bool_cpu[10], "wp.isinf(quat) is not False")
-    test.assertFalse(outputs_bool_cpu[11], "wp.isnan(quat) is not False")
-
 
 devices = get_test_devices()
 
@@ -358,5 +349,5 @@ for dtype in wp.types.int_types:
 
 
 if __name__ == "__main__":
-    wp.build.clear_kernel_cache()
+    wp.clear_kernel_cache()
     unittest.main(verbosity=2, failfast=False)
warp/tests/test_streams.py
CHANGED
@@ -464,6 +464,16 @@ class TestStreams(unittest.TestCase):
         # check results
         assert_np_equal(c0.numpy(), np.full(N, fill_value=2 * num_iters))
 
+    def test_stream_new_del(self):
+        # test the scenario in which a Stream is created but not initialized before gc
+        instance = wp.Stream.__new__(wp.Stream)
+        instance.__del__()
+
+    def test_event_new_del(self):
+        # test the scenario in which an Event is created but not initialized before gc
+        instance = wp.Event.__new__(wp.Event)
+        instance.__del__()
+
 
 add_function_test(TestStreams, "test_stream_set", test_stream_set, devices=devices)
 add_function_test(TestStreams, "test_stream_arg_explicit_sync", test_stream_arg_explicit_sync, devices=devices)
@@ -480,5 +490,5 @@ add_function_test(TestStreams, "test_event_synchronize", test_event_synchronize, devices=devices)
 add_function_test(TestStreams, "test_event_elapsed_time", test_event_elapsed_time, devices=devices)
 
 if __name__ == "__main__":
-    wp.build.clear_kernel_cache()
+    wp.clear_kernel_cache()
     unittest.main(verbosity=2)
warp/tests/test_struct.py
CHANGED
warp/tests/test_tape.py
CHANGED
warp/tests/test_torch.py
CHANGED
@@ -25,6 +25,18 @@ def inc(a: wp.array(dtype=float)):
     a[tid] = a[tid] + 1.0
 
 
+@wp.kernel
+def inc_vector(a: wp.array(dtype=wp.vec3f)):
+    tid = wp.tid()
+    a[tid] = a[tid] + wp.vec3f(1.0)
+
+
+@wp.kernel
+def inc_matrix(a: wp.array(dtype=wp.mat22f)):
+    tid = wp.tid()
+    a[tid] = a[tid] + wp.mat22f(1.0)
+
+
 @wp.kernel
 def arange(start: int, step: int, a: wp.array(dtype=int)):
     tid = wp.tid()
@@ -216,6 +228,160 @@ def test_from_torch(test, device):
     wrap_mat_tensor_with_grad(6, 6, wp.spatial_matrix)
 
 
+def test_array_ctype_from_torch(test, device):
+    import torch
+
+    torch_device = wp.device_to_torch(device)
+
+    # automatically determine warp dtype
+    def wrap_scalar_tensor_implicit(torch_dtype):
+        t = torch.zeros(10, dtype=torch_dtype, device=torch_device)
+        a = wp.from_torch(t, return_ctype=True)
+        warp_dtype = wp.dtype_from_torch(torch_dtype)
+        ctype_size = ctypes.sizeof(warp_dtype._type_)
+        assert a.data == t.data_ptr()
+        assert a.grad == 0
+        assert a.ndim == 1
+        assert a.shape[0] == t.shape[0]
+        assert a.strides[0] == t.stride()[0] * ctype_size
+
+    wrap_scalar_tensor_implicit(torch.float64)
+    wrap_scalar_tensor_implicit(torch.float32)
+    wrap_scalar_tensor_implicit(torch.float16)
+    wrap_scalar_tensor_implicit(torch.int64)
+    wrap_scalar_tensor_implicit(torch.int32)
+    wrap_scalar_tensor_implicit(torch.int16)
+    wrap_scalar_tensor_implicit(torch.int8)
+    wrap_scalar_tensor_implicit(torch.uint8)
+    wrap_scalar_tensor_implicit(torch.bool)
+
+    # explicitly specify warp dtype
+    def wrap_scalar_tensor_explicit(torch_dtype, warp_dtype):
+        t = torch.zeros(10, dtype=torch_dtype, device=torch_device)
+        a = wp.from_torch(t, dtype=warp_dtype, return_ctype=True)
+        ctype_size = ctypes.sizeof(warp_dtype._type_)
+        assert a.data == t.data_ptr()
+        assert a.grad == 0
+        assert a.ndim == 1
+        assert a.shape[0] == t.shape[0]
+        assert a.strides[0] == t.stride()[0] * ctype_size
+
+    wrap_scalar_tensor_explicit(torch.float64, wp.float64)
+    wrap_scalar_tensor_explicit(torch.float32, wp.float32)
+    wrap_scalar_tensor_explicit(torch.float16, wp.float16)
+    wrap_scalar_tensor_explicit(torch.int64, wp.int64)
+    wrap_scalar_tensor_explicit(torch.int64, wp.uint64)
+    wrap_scalar_tensor_explicit(torch.int32, wp.int32)
+    wrap_scalar_tensor_explicit(torch.int32, wp.uint32)
+    wrap_scalar_tensor_explicit(torch.int16, wp.int16)
+    wrap_scalar_tensor_explicit(torch.int16, wp.uint16)
+    wrap_scalar_tensor_explicit(torch.int8, wp.int8)
+    wrap_scalar_tensor_explicit(torch.int8, wp.uint8)
+    wrap_scalar_tensor_explicit(torch.uint8, wp.uint8)
+    wrap_scalar_tensor_explicit(torch.uint8, wp.int8)
+    wrap_scalar_tensor_explicit(torch.bool, wp.uint8)
+    wrap_scalar_tensor_explicit(torch.bool, wp.int8)
+    wrap_scalar_tensor_explicit(torch.bool, wp.bool)
+
+    def wrap_vec_tensor(vec_dtype):
+        t = torch.zeros((10, vec_dtype._length_), dtype=torch.float32, device=torch_device)
+        a = wp.from_torch(t, dtype=vec_dtype, return_ctype=True)
+        ctype_size = ctypes.sizeof(vec_dtype._type_)
+        assert a.data == t.data_ptr()
+        assert a.grad == 0
+        assert a.ndim == 1
+        assert a.shape[0] == t.shape[0]
+        assert a.strides[0] == t.stride()[0] * ctype_size
+
+    wrap_vec_tensor(wp.vec2)
+    wrap_vec_tensor(wp.vec3)
+    wrap_vec_tensor(wp.vec4)
+    wrap_vec_tensor(wp.spatial_vector)
+    wrap_vec_tensor(wp.transform)
+
+    def wrap_mat_tensor(mat_dtype):
+        t = torch.zeros((10, *mat_dtype._shape_), dtype=torch.float32, device=torch_device)
+        a = wp.from_torch(t, dtype=mat_dtype, return_ctype=True)
+        ctype_size = ctypes.sizeof(mat_dtype._type_)
+        assert a.data == t.data_ptr()
+        assert a.grad == 0
+        assert a.ndim == 1
+        assert a.shape[0] == t.shape[0]
+        assert a.strides[0] == t.stride()[0] * ctype_size
+
+    wrap_mat_tensor(wp.mat22)
+    wrap_mat_tensor(wp.mat33)
+    wrap_mat_tensor(wp.mat44)
+    wrap_mat_tensor(wp.spatial_matrix)
+
+    def wrap_vec_tensor_with_existing_grad(vec_dtype):
+        t = torch.zeros((10, vec_dtype._length_), dtype=torch.float32, device=torch_device, requires_grad=True)
+        t.grad = torch.zeros((10, vec_dtype._length_), dtype=torch.float32, device=torch_device)
+        a = wp.from_torch(t, dtype=vec_dtype, return_ctype=True)
+        ctype_size = ctypes.sizeof(vec_dtype._type_)
+        assert a.data == t.data_ptr()
+        assert a.grad == t.grad.data_ptr()
+        assert a.ndim == 1
+        assert a.shape[0] == t.shape[0]
+        assert a.strides[0] == t.stride()[0] * ctype_size
+
+    wrap_vec_tensor_with_existing_grad(wp.vec2)
+    wrap_vec_tensor_with_existing_grad(wp.vec3)
+    wrap_vec_tensor_with_existing_grad(wp.vec4)
+    wrap_vec_tensor_with_existing_grad(wp.spatial_vector)
+    wrap_vec_tensor_with_existing_grad(wp.transform)
+
+    def wrap_vec_tensor_with_new_grad(vec_dtype):
+        t = torch.zeros((10, vec_dtype._length_), dtype=torch.float32, device=torch_device)
+        a = wp.from_torch(t, dtype=vec_dtype, requires_grad=True, return_ctype=True)
+        ctype_size = ctypes.sizeof(vec_dtype._type_)
+        assert a.data == t.data_ptr()
+        assert a.grad == t.grad.data_ptr()
+        assert a.ndim == 1
+        assert a.shape[0] == t.shape[0]
+        assert a.strides[0] == t.stride()[0] * ctype_size
+
+    wrap_vec_tensor_with_new_grad(wp.vec2)
+    wrap_vec_tensor_with_new_grad(wp.vec3)
+    wrap_vec_tensor_with_new_grad(wp.vec4)
+    wrap_vec_tensor_with_new_grad(wp.spatial_vector)
+    wrap_vec_tensor_with_new_grad(wp.transform)
+
+    def wrap_vec_tensor_with_torch_grad(vec_dtype):
+        t = torch.zeros((10, vec_dtype._length_), dtype=torch.float32, device=torch_device)
+        grad = torch.zeros((10, vec_dtype._length_), dtype=torch.float32, device=torch_device)
+        a = wp.from_torch(t, dtype=vec_dtype, grad=grad, return_ctype=True)
+        ctype_size = ctypes.sizeof(vec_dtype._type_)
+        assert a.data == t.data_ptr()
+        assert a.grad == grad.data_ptr()
+        assert a.ndim == 1
+        assert a.shape[0] == t.shape[0]
+        assert a.strides[0] == t.stride()[0] * ctype_size
+
+    wrap_vec_tensor_with_torch_grad(wp.vec2)
+    wrap_vec_tensor_with_torch_grad(wp.vec3)
+    wrap_vec_tensor_with_torch_grad(wp.vec4)
+    wrap_vec_tensor_with_torch_grad(wp.spatial_vector)
+    wrap_vec_tensor_with_torch_grad(wp.transform)
+
+    def wrap_vec_tensor_with_warp_grad(vec_dtype):
+        t = torch.zeros((10, vec_dtype._length_), dtype=torch.float32, device=torch_device)
+        grad = wp.zeros(10, dtype=vec_dtype, device=device)
+        a = wp.from_torch(t, dtype=vec_dtype, grad=grad, return_ctype=True)
+        ctype_size = ctypes.sizeof(vec_dtype._type_)
+        assert a.data == t.data_ptr()
+        assert a.grad == grad.ptr
+        assert a.ndim == 1
+        assert a.shape[0] == t.shape[0]
+        assert a.strides[0] == t.stride()[0] * ctype_size
+
+    wrap_vec_tensor_with_warp_grad(wp.vec2)
+    wrap_vec_tensor_with_warp_grad(wp.vec3)
+    wrap_vec_tensor_with_warp_grad(wp.vec4)
+    wrap_vec_tensor_with_warp_grad(wp.spatial_vector)
+    wrap_vec_tensor_with_warp_grad(wp.transform)
+
+
 def test_to_torch(test, device):
     import torch
 
@@ -659,6 +825,29 @@ def test_warp_graph_torch_stream(test, device):
     assert passed.item()
 
 
+def test_direct(test, device):
+    """Pass Torch tensors to Warp kernels directly"""
+
+    import torch
+
+    torch_device = wp.device_to_torch(device)
+    n = 12
+
+    s = torch.arange(n, dtype=torch.float32, device=torch_device)
+    v = torch.arange(n, dtype=torch.float32, device=torch_device).reshape((n // 3, 3))
+    m = torch.arange(n, dtype=torch.float32, device=torch_device).reshape((n // 4, 2, 2))
+
+    wp.launch(inc, dim=n, inputs=[s], device=device)
+    wp.launch(inc_vector, dim=n // 3, inputs=[v], device=device)
+    wp.launch(inc_matrix, dim=n // 4, inputs=[m], device=device)
+
+    expected = torch.arange(1, n + 1, dtype=torch.float32, device=torch_device)
+
+    assert torch.equal(s, expected)
+    assert torch.equal(v.reshape(n), expected)
+    assert torch.equal(m.reshape(n), expected)
+
+
 class TestTorch(unittest.TestCase):
     pass
 
@@ -690,6 +879,9 @@ try:
     add_function_test(TestTorch, "test_device_conversion", test_device_conversion, devices=torch_compatible_devices)
     add_function_test(TestTorch, "test_from_torch", test_from_torch, devices=torch_compatible_devices)
     add_function_test(TestTorch, "test_from_torch_slices", test_from_torch_slices, devices=torch_compatible_devices)
+    add_function_test(
+        TestTorch, "test_array_ctype_from_torch", test_array_ctype_from_torch, devices=torch_compatible_devices
+    )
     add_function_test(
         TestTorch,
         "test_from_torch_zero_strides",
@@ -699,6 +891,7 @@ try:
     add_function_test(TestTorch, "test_to_torch", test_to_torch, devices=torch_compatible_devices)
     add_function_test(TestTorch, "test_torch_zerocopy", test_torch_zerocopy, devices=torch_compatible_devices)
    add_function_test(TestTorch, "test_torch_autograd", test_torch_autograd, devices=torch_compatible_devices)
+    add_function_test(TestTorch, "test_direct", test_direct, devices=torch_compatible_devices)
 
     if torch_compatible_cuda_devices:
         add_function_test(
@@ -737,5 +930,5 @@ except Exception as e:
 
 
 if __name__ == "__main__":
-    wp.build.clear_kernel_cache()
+    wp.clear_kernel_cache()
     unittest.main(verbosity=2)
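Taken together, these additions cover the two Torch-interop fast paths introduced in 1.3, which the new benchmark_interop_torch.py also measures: wp.from_torch(..., return_ctype=True), which returns a lightweight ctypes array descriptor instead of constructing a full wp.array, and passing Torch tensors directly to wp.launch(). A brief sketch of both paths; the kernel, sizes, and values are illustrative:

import torch

import warp as wp


@wp.kernel
def scale(a: wp.array(dtype=float), s: float):
    tid = wp.tid()
    a[tid] = a[tid] * s


device = wp.get_device()
t = torch.ones(16, dtype=torch.float32, device=wp.device_to_torch(device))

# Path 1: zero-copy wrap as a ctypes descriptor, cheaper than building a wp.array.
a = wp.from_torch(t, return_ctype=True)
wp.launch(scale, dim=16, inputs=[a, 2.0], device=device)

# Path 2 (new in 1.3): pass the tensor directly; conversion happens implicitly.
wp.launch(scale, dim=16, inputs=[t, 2.0], device=device)

print(t)  # every element scaled twice: 4.0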
warp/tests/test_transient_module.py
CHANGED

@@ -81,5 +81,5 @@ class TestTransientModule(unittest.TestCase):
 add_function_test(TestTransientModule, "test_transient_module", test_transient_module, devices=devices)
 
 if __name__ == "__main__":
-    wp.build.clear_kernel_cache()
+    wp.clear_kernel_cache()
     unittest.main(verbosity=2)
warp/tests/test_types.py
CHANGED
@@ -550,5 +550,5 @@ for dtype in tuple(wp.types.scalar_types) + (int, float):
     add_function_test(TestTypes, f"test_vector_{dtype.__name__}", test_vector, devices=devices, dtype=dtype)
 
 if __name__ == "__main__":
-    wp.build.clear_kernel_cache()
+    wp.clear_kernel_cache()
     unittest.main(verbosity=2)
warp/tests/test_utils.py
CHANGED